Merge branch 'master' into poisson

Conflicts:
	Makefile
pull/1948/head
vjpai 10 years ago
commit ccced5389d
  1. 3
      BUILD
  2. 308
      Makefile
  3. 2
      README.md
  4. 47
      build.json
  5. 65
      doc/connection-backoff.md
  6. 24
      doc/interop-test-descriptions.md
  7. 4
      gRPC.podspec
  8. 6
      include/grpc++/async_generic_service.h
  9. 7
      include/grpc++/completion_queue.h
  10. 37
      include/grpc++/impl/service_type.h
  11. 8
      include/grpc++/server.h
  12. 7
      include/grpc++/server_builder.h
  13. 60
      include/grpc/grpc.h
  14. 4
      include/grpc/support/port_platform.h
  15. 49
      include/grpc/support/subprocess.h
  16. 2
      include/grpc/support/tls.h
  17. 6
      include/grpc/support/tls_pthread.h
  18. 103
      src/compiler/cpp_generator.cc
  19. 2
      src/core/iomgr/endpoint_pair_windows.c
  20. 1
      src/core/iomgr/iocp_windows.h
  21. 17
      src/core/iomgr/iomgr.c
  22. 6
      src/core/iomgr/socket_windows.c
  23. 4
      src/core/iomgr/socket_windows.h
  24. 30
      src/core/iomgr/tcp_windows.c
  25. 34
      src/core/security/security_connector.c
  26. 108
      src/core/support/subprocess_posix.c
  27. 45
      src/core/support/tls_pthread.c
  28. 81
      src/core/surface/call.c
  29. 3
      src/core/surface/call.h
  30. 96
      src/core/surface/completion_queue.c
  31. 25
      src/core/surface/completion_queue.h
  32. 26
      src/core/surface/event_string.c
  33. 85
      src/core/surface/server.c
  34. 3
      src/core/surface/server.h
  35. 5
      src/core/surface/server_create.c
  36. 107
      src/core/tsi/ssl_transport_security.c
  37. 8
      src/core/tsi/ssl_transport_security.h
  38. 97
      src/core/tsi/transport_security.c
  39. 9
      src/core/tsi/transport_security.h
  40. 37
      src/core/tsi/transport_security_interface.h
  41. 12
      src/cpp/client/client_context.cc
  42. 52
      src/cpp/common/completion_queue.cc
  43. 10
      src/cpp/server/async_generic_service.cc
  44. 68
      src/cpp/server/server.cc
  45. 9
      src/cpp/server/server_builder.cc
  46. 1
      src/csharp/.gitignore
  47. 1
      src/csharp/EXPERIMENTAL-ONLY
  48. 26
      src/csharp/Grpc.Auth/Grpc.Auth.nuspec
  49. 4
      src/csharp/Grpc.Auth/Properties/AssemblyInfo.cs
  50. 2
      src/csharp/Grpc.Core.Tests/PInvokeTest.cs
  51. 2
      src/csharp/Grpc.Core.Tests/Properties/AssemblyInfo.cs
  52. 1
      src/csharp/Grpc.Core/.gitignore
  53. 2
      src/csharp/Grpc.Core/Call.cs
  54. 12
      src/csharp/Grpc.Core/Grpc.Core.nuspec
  55. 6
      src/csharp/Grpc.Core/Internal/AsyncCall.cs
  56. 15
      src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
  57. 2
      src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
  58. 2
      src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
  59. 43
      src/csharp/Grpc.Core/Internal/Enums.cs
  60. 11
      src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
  61. 9
      src/csharp/Grpc.Core/Method.cs
  62. 2
      src/csharp/Grpc.Core/Properties/AssemblyInfo.cs
  63. 12
      src/csharp/Grpc.Core/Server.cs
  64. 13
      src/csharp/Grpc.Core/ServerServiceDefinition.cs
  65. 2
      src/csharp/Grpc.Examples.MathClient/Properties/AssemblyInfo.cs
  66. 4
      src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
  67. 2
      src/csharp/Grpc.Examples.MathServer/Properties/AssemblyInfo.cs
  68. 2
      src/csharp/Grpc.Examples.Tests/Properties/AssemblyInfo.cs
  69. 2
      src/csharp/Grpc.Examples/Properties/AssemblyInfo.cs
  70. 2
      src/csharp/Grpc.IntegrationTesting.Client/Properties/AssemblyInfo.cs
  71. 2
      src/csharp/Grpc.IntegrationTesting.Server/Properties/AssemblyInfo.cs
  72. 2
      src/csharp/Grpc.IntegrationTesting/Properties/AssemblyInfo.cs
  73. 15
      src/csharp/Grpc.nuspec
  74. 111
      src/csharp/README.md
  75. 22
      src/csharp/build_packages.bat
  76. 18
      src/csharp/buildall.bat
  77. 37
      src/csharp/ext/grpc_csharp_ext.c
  78. 14
      src/node/examples/math.proto
  79. 24
      src/node/examples/route_guide.proto
  80. 8
      src/node/examples/stock.proto
  81. 16
      src/node/ext/completion_queue_async_worker.cc
  82. 2
      src/node/ext/completion_queue_async_worker.h
  83. 6
      src/node/ext/server.cc
  84. 2
      src/node/interop/empty.proto
  85. 58
      src/node/interop/interop_client.js
  86. 23
      src/node/interop/interop_server.js
  87. 36
      src/node/interop/messages.proto
  88. 3
      src/node/interop/test.proto
  89. 4
      src/node/package.json
  90. 13
      src/node/src/client.js
  91. 2
      src/node/src/common.js
  92. 39
      src/node/test/echo_service.proto
  93. 4
      src/node/test/interop_sanity_test.js
  94. 30
      src/node/test/surface_test.js
  95. 6
      src/node/test/test_service.proto
  96. 1
      src/objective-c/.gitignore
  97. 14
      src/objective-c/GRPCClient/private/GRPCChannel.m
  98. 2
      src/objective-c/GRPCClient/private/GRPCCompletionQueue.h
  99. 11
      src/objective-c/GRPCClient/private/GRPCCompletionQueue.m
  100. 4
      src/objective-c/GRPCClient/private/GRPCWrappedCall.m
  101. Some files were not shown because too many files have changed in this diff Show More

@ -74,6 +74,7 @@ cc_library(
"src/core/support/string.c",
"src/core/support/string_posix.c",
"src/core/support/string_win32.c",
"src/core/support/subprocess_posix.c",
"src/core/support/sync.c",
"src/core/support/sync_posix.c",
"src/core/support/sync_win32.c",
@ -83,6 +84,7 @@ cc_library(
"src/core/support/time.c",
"src/core/support/time_posix.c",
"src/core/support/time_win32.c",
"src/core/support/tls_pthread.c",
],
hdrs = [
"include/grpc/support/alloc.h",
@ -100,6 +102,7 @@ cc_library(
"include/grpc/support/port_platform.h",
"include/grpc/support/slice.h",
"include/grpc/support/slice_buffer.h",
"include/grpc/support/subprocess.h",
"include/grpc/support/sync.h",
"include/grpc/support/sync_generic.h",
"include/grpc/support/sync_posix.h",

File diff suppressed because one or more lines are too long

@ -37,7 +37,7 @@ Libraries in different languages are in different state of development. We are s
* C++ Library: [src/cpp] (src/cpp) : Early adopter ready - Alpha.
* Ruby Library: [src/ruby] (src/ruby) : Early adopter ready - Alpha.
* NodeJS Library: [src/node] (src/node) : Early adopter ready - Alpha.
* Python Library: [src/python] (src/python) : Usable with limitations - Pre-Alpha.
* Python Library: [src/python] (src/python) : Usable with limitations - Alpha.
* PHP Library: [src/php] (src/php) : Pre-Alpha.
* C# Library: [src/csharp] (src/csharp) : Pre-Alpha.
* Objective-C Library: [src/objective-c] (src/objective-c): Pre-Alpha.

@ -315,6 +315,7 @@
"include/grpc/support/port_platform.h",
"include/grpc/support/slice.h",
"include/grpc/support/slice_buffer.h",
"include/grpc/support/subprocess.h",
"include/grpc/support/sync.h",
"include/grpc/support/sync_generic.h",
"include/grpc/support/sync_posix.h",
@ -362,6 +363,7 @@
"src/core/support/string.c",
"src/core/support/string_posix.c",
"src/core/support/string_win32.c",
"src/core/support/subprocess_posix.c",
"src/core/support/sync.c",
"src/core/support/sync_posix.c",
"src/core/support/sync_win32.c",
@ -370,7 +372,8 @@
"src/core/support/thd_win32.c",
"src/core/support/time.c",
"src/core/support/time_posix.c",
"src/core/support/time_win32.c"
"src/core/support/time_win32.c",
"src/core/support/tls_pthread.c"
],
"secure": "no",
"vs_project_guid": "{B23D3D1A-9438-4EDA-BEB6-9A0A03D17792}"
@ -1306,20 +1309,6 @@
"gpr"
]
},
{
"name": "grpc_completion_queue_benchmark",
"build": "benchmark",
"language": "c",
"src": [
"test/core/surface/completion_queue_benchmark.c"
],
"deps": [
"grpc_test_util",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "grpc_completion_queue_test",
"build": "test",
@ -1822,6 +1811,24 @@
"gpr"
]
},
{
"name": "async_streaming_ping_pong_test",
"build": "test",
"run": false,
"language": "c++",
"src": [
"test/cpp/qps/async_streaming_ping_pong_test.cc"
],
"deps": [
"qps",
"grpc++_test_util",
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "async_unary_ping_pong_test",
"build": "test",
@ -2127,7 +2134,7 @@
},
{
"name": "qps_driver",
"build": "tool",
"build": "benchmark",
"language": "c++",
"src": [
"test/cpp/qps/qps_driver.cc"
@ -2163,8 +2170,7 @@
},
{
"name": "qps_test",
"build": "test",
"run": false,
"build": "benchmark",
"language": "c++",
"src": [
"test/cpp/qps/qps_test.cc"
@ -2176,12 +2182,13 @@
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
"gpr",
"grpc++_test_config"
]
},
{
"name": "qps_worker",
"build": "tool",
"build": "benchmark",
"language": "c++",
"headers": [
"test/cpp/qps/client.h",

@ -0,0 +1,65 @@
GRPC Connection Backoff Protocol
================================
When we do a connection to a backend which fails, it is typically desirable to
not retry immediately (to avoid flooding the network or the server with
requests) and instead do some form of exponential backoff.
We have several parameters:
1. INITIAL_BACKOFF (how long to wait after the first failure before retrying)
2. MULTIPLIER (factor with which to multiply backoff after a failed retry)
3. MAX_BACKOFF (Upper bound on backoff)
4. MIN_CONNECTION_TIMEOUT
## Proposed Backoff Algorithm
Exponentially back off the start time of connection attempts up to a limit of
MAX_BACKOFF.
```
ConnectWithBackoff()
current_backoff = INITIAL_BACKOFF
current_deadline = now() + INITIAL_BACKOFF
while (TryConnect(Max(current_deadline, MIN_CONNECT_TIMEOUT))
!= SUCCESS)
SleepUntil(current_deadline)
current_backoff = Min(current_backoff * MULTIPLIER, MAX_BACKOFF)
current_deadline = now() + current_backoff
```
## Historical Algorithm in Stubby
Exponentially increase up to a limit of MAX_BACKOFF the intervals between
connection attempts. This is what stubby 2 uses, and is equivalent if
TryConnect() fails instantly.
```
LegacyConnectWithBackoff()
current_backoff = INITIAL_BACKOFF
while (TryConnect(MIN_CONNECT_TIMEOUT) != SUCCESS)
SleepFor(current_backoff)
current_backoff = Min(current_backoff * MULTIPLIER, MAX_BACKOFF)
```
The grpc C implementation currently uses this approach with an initial backoff
of 1 second, multiplier of 2, and maximum backoff of 120 seconds. (This will
change)
Stubby, or at least rpc2, uses exactly this algorithm with an initial backoff
of 1 second, multiplier of 1.2, and a maximum backoff of 120 seconds.
## Use Cases to Consider
* Client tries to connect to a server which is down for multiple hours, eg for
maintenance
* Client tries to connect to a server which is overloaded
* User is bringing up both a client and a server at the same time
* In particular, we would like to avoid a large unnecessary delay if the
client connects to a server which is about to come up
* Client/server are misconfigured such that connection attempts always fail
* We want to make sure these don’t put too much load on the server by
default.
* Server is overloaded and wants to transiently make clients back off
* Application has out of band reason to believe a server is back
* We should consider an out of band mechanism for the client to hint that
we should short circuit the backoff.

@ -517,6 +517,28 @@ Procedure:
Asserts:
* Call completed with status CANCELLED
### timeout_on_sleeping_server
This test verifies that an RPC request whose lifetime exceeds its configured
timeout value will end with the DeadlineExceeded status.
Server features:
* [FullDuplexCall][]
Procedure:
1. Client calls FullDuplexCall with the following request and sets its timeout to 1ms.
```
{
payload:{
body: 27182 bytes of zeros
}
}
```
Asserts:
* Call completed with status DEADLINE_EXCEEDED.
### concurrent_large_unary
Status: TODO
@ -540,8 +562,6 @@ Cancel after sent headers (ctiller - done)
Cancel after received first message (ctiller - done)
Timeout after expire (zhaoq)
Zero-message streams (ejona)
Multiple thousand simultaneous calls on same Channel (ctiller - done)

@ -1,7 +1,7 @@
Pod::Spec.new do |s|
s.name = 'gRPC'
s.version = '0.0.1'
s.summary = 'Generic gRPC client library for iOS'
s.summary = 'Generic gRPC client library for iOS/OSX'
s.homepage = 'https://www.grpc.io'
s.license = 'New BSD'
s.authors = { 'Jorge Canizales' => 'jcanizales@google.com',
@ -9,8 +9,8 @@ Pod::Spec.new do |s|
# s.source = { :git => 'https://github.com/grpc/grpc.git', :tag => 'release-0_5_0' }
s.platform = :ios
s.ios.deployment_target = '6.0'
s.osx.deployment_target = '10.8'
s.requires_arc = true
s.subspec 'RxLibrary' do |rs|

@ -65,10 +65,8 @@ class AsyncGenericService GRPC_FINAL {
void RequestCall(GenericServerContext* ctx,
GenericServerAsyncReaderWriter* reader_writer,
CompletionQueue* cq, void* tag);
// The new rpc event should be obtained from this completion queue.
CompletionQueue* completion_queue();
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag);
private:
friend class Server;

@ -58,6 +58,7 @@ class ServerReaderWriter;
class CompletionQueue;
class Server;
class ServerBuilder;
class ServerContext;
class CompletionQueueTag {
@ -137,6 +138,12 @@ class CompletionQueue : public GrpcLibrary {
grpc_completion_queue* cq_; // owned
};
class ServerCompletionQueue : public CompletionQueue {
private:
friend class ServerBuilder;
ServerCompletionQueue() {}
};
} // namespace grpc
#endif // GRPCXX_COMPLETION_QUEUE_H

@ -39,8 +39,10 @@
namespace grpc {
class Call;
class CompletionQueue;
class RpcService;
class Server;
class ServerCompletionQueue;
class ServerContext;
class Status;
@ -70,52 +72,55 @@ class AsynchronousService {
ServerContext* context,
::grpc::protobuf::Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* cq, void* tag) = 0;
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) = 0;
};
AsynchronousService(CompletionQueue* cq, const char** method_names,
size_t method_count)
: cq_(cq),
dispatch_impl_(nullptr),
AsynchronousService(const char** method_names, size_t method_count)
: dispatch_impl_(nullptr),
method_names_(method_names),
method_count_(method_count),
request_args_(nullptr) {}
~AsynchronousService() { delete[] request_args_; }
CompletionQueue* completion_queue() const { return cq_; }
protected:
void RequestAsyncUnary(int index, ServerContext* context,
grpc::protobuf::Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* cq, void* tag) {
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
dispatch_impl_->RequestAsyncCall(request_args_[index], context, request,
stream, cq, tag);
stream, call_cq, notification_cq, tag);
}
void RequestClientStreaming(int index, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* cq, void* tag) {
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
dispatch_impl_->RequestAsyncCall(request_args_[index], context, nullptr,
stream, cq, tag);
stream, call_cq, notification_cq, tag);
}
void RequestServerStreaming(int index, ServerContext* context,
grpc::protobuf::Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* cq, void* tag) {
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
dispatch_impl_->RequestAsyncCall(request_args_[index], context, request,
stream, cq, tag);
stream, call_cq, notification_cq, tag);
}
void RequestBidiStreaming(int index, ServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* cq, void* tag) {
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag) {
dispatch_impl_->RequestAsyncCall(request_args_[index], context, nullptr,
stream, cq, tag);
stream, call_cq, notification_cq, tag);
}
private:
friend class Server;
CompletionQueue* const cq_;
DispatchImpl* dispatch_impl_;
const char** const method_names_;
size_t method_count_;

@ -101,11 +101,15 @@ class Server GRPC_FINAL : public GrpcLibrary,
void RequestAsyncCall(void* registered_method, ServerContext* context,
grpc::protobuf::Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* cq, void* tag) GRPC_OVERRIDE;
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) GRPC_OVERRIDE;
void RequestAsyncGenericCall(GenericServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* cq, void* tag);
CompletionQueue* cq,
ServerCompletionQueue* notification_cq,
void* tag);
const int max_message_size_;

@ -46,6 +46,7 @@ class AsynchronousService;
class CompletionQueue;
class RpcService;
class Server;
class ServerCompletionQueue;
class ServerCredentials;
class SynchronousService;
class ThreadPoolInterface;
@ -82,6 +83,11 @@ class ServerBuilder {
// Does not take ownership.
void SetThreadPool(ThreadPoolInterface* thread_pool);
// Add a completion queue for handling asynchronous services
// Caller is required to keep this completion queue live until calling
// BuildAndStart()
std::unique_ptr<ServerCompletionQueue> AddCompletionQueue();
// Return a running server which is ready for processing rpcs.
std::unique_ptr<Server> BuildAndStart();
@ -96,6 +102,7 @@ class ServerBuilder {
std::vector<RpcService*> services_;
std::vector<AsynchronousService*> async_services_;
std::vector<Port> ports_;
std::vector<ServerCompletionQueue*> cqs_;
std::shared_ptr<ServerCredentials> creds_;
AsyncGenericService* generic_service_;
ThreadPoolInterface* thread_pool_;

@ -145,14 +145,6 @@ typedef enum grpc_call_error {
GRPC_CALL_ERROR_INVALID_METADATA
} grpc_call_error;
/* Result of a grpc operation */
typedef enum grpc_op_error {
/* everything went ok */
GRPC_OP_OK = 0,
/* something failed, we don't know what */
GRPC_OP_ERROR
} grpc_op_error;
/* Write Flags: */
/* Hint that the write may be buffered and need not go out on the wire
immediately. GRPC is free to buffer the message until the next non-buffered
@ -201,22 +193,15 @@ typedef struct grpc_metadata {
} grpc_metadata;
typedef enum grpc_completion_type {
GRPC_QUEUE_SHUTDOWN, /* Shutting down */
GRPC_OP_COMPLETE, /* operation completion */
GRPC_SERVER_SHUTDOWN, /* The server has finished shutting down */
GRPC_COMPLETION_DO_NOT_USE /* must be last, forces users to include
a default: case */
GRPC_QUEUE_SHUTDOWN, /* Shutting down */
GRPC_QUEUE_TIMEOUT, /* No event before timeout */
GRPC_OP_COMPLETE /* operation completion */
} grpc_completion_type;
typedef struct grpc_event {
grpc_completion_type type;
int success;
void *tag;
grpc_call *call;
/* Data associated with the completion type. Field names match the type of
completion as listed in grpc_completion_type. */
union {
grpc_op_error op_complete;
} data;
} grpc_event;
typedef struct {
@ -352,26 +337,21 @@ grpc_completion_queue *grpc_completion_queue_create(void);
/* Blocks until an event is available, the completion queue is being shut down,
or deadline is reached. Returns NULL on timeout, otherwise the event that
occurred. Callers should call grpc_event_finish once they have processed
the event.
occurred.
Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. */
grpc_event *grpc_completion_queue_next(grpc_completion_queue *cq,
gpr_timespec deadline);
grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
gpr_timespec deadline);
/* Blocks until an event with tag 'tag' is available, the completion queue is
being shutdown or deadline is reached. Returns NULL on timeout, or a pointer
to the event that occurred. Callers should call grpc_event_finish once they
have processed the event.
to the event that occurred.
Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. */
grpc_event *grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
gpr_timespec deadline);
/* Clean up any data owned by the event */
void grpc_event_finish(grpc_event *event);
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
gpr_timespec deadline);
/* Begin destruction of a completion queue. Once all possible events are
drained then grpc_completion_queue_next will start to produce
@ -462,7 +442,8 @@ void grpc_call_destroy(grpc_call *call);
grpc_call_error grpc_server_request_call(
grpc_server *server, grpc_call **call, grpc_call_details *details,
grpc_metadata_array *request_metadata,
grpc_completion_queue *cq_bound_to_call, void *tag_new);
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag_new);
/* Registers a method in the server.
Methods to this (host, method) pair will not be reported by
@ -472,21 +453,26 @@ grpc_call_error grpc_server_request_call(
Must be called before grpc_server_start.
Returns NULL on failure. */
void *grpc_server_register_method(grpc_server *server, const char *method,
const char *host,
grpc_completion_queue *new_call_cq);
const char *host);
/* Request notification of a new pre-registered call */
grpc_call_error grpc_server_request_registered_call(
grpc_server *server, void *registered_method, grpc_call **call,
gpr_timespec *deadline, grpc_metadata_array *request_metadata,
grpc_byte_buffer **optional_payload,
grpc_completion_queue *cq_bound_to_call, void *tag_new);
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag_new);
/* Create a server. Additional configuration for each incoming channel can
be specified with args. If no additional configuration is needed, args can
be NULL. See grpc_channel_args for more. */
grpc_server *grpc_server_create(grpc_completion_queue *cq,
const grpc_channel_args *args);
grpc_server *grpc_server_create(const grpc_channel_args *args);
/* Register a completion queue with the server. Must be done for any completion
queue that is passed to grpc_server_request_* call. Must be performed prior
to grpc_server_start. */
void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq);
/* Add a HTTP2 over plaintext over tcp listener.
Returns bound port number on success, 0 on failure.
@ -502,7 +488,7 @@ void grpc_server_start(grpc_server *server);
Shutdown is idempotent. */
void grpc_server_shutdown(grpc_server *server);
/* As per grpc_server_shutdown, but send a GRPC_SERVER_SHUTDOWN event when
/* As per grpc_server_shutdown, but send a GRPC_OP_COMPLETE event when
there are no more calls being serviced.
Shutdown is idempotent, and all tags will be notified at once if multiple
grpc_server_shutdown_and_notify calls are made. */

@ -77,6 +77,7 @@
#define GPR_POSIX_ENV 1
#define GPR_POSIX_FILE 1
#define GPR_POSIX_STRING 1
#define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1
#define GPR_GETPID_IN_UNISTD_H 1
@ -122,6 +123,7 @@
#endif
#define GPR_POSIX_FILE 1
#define GPR_POSIX_STRING 1
#define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1
#define GPR_GETPID_IN_UNISTD_H 1
@ -154,6 +156,7 @@
#define GPR_POSIX_ENV 1
#define GPR_POSIX_FILE 1
#define GPR_POSIX_STRING 1
#define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1
#define GPR_GETPID_IN_UNISTD_H 1
@ -180,6 +183,7 @@
#define GPR_POSIX_ENV 1
#define GPR_POSIX_FILE 1
#define GPR_POSIX_STRING 1
#define GPR_POSIX_SUBPROCESS 1
#define GPR_POSIX_SYNC 1
#define GPR_POSIX_TIME 1
#define GPR_GETPID_IN_UNISTD_H 1

@ -0,0 +1,49 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_SUPPORT_SUBPROCESS_H
#define GRPC_SUPPORT_SUBPROCESS_H
typedef struct gpr_subprocess gpr_subprocess;
/* .exe on windows, empty on unices */
char *gpr_subprocess_binary_extension();
gpr_subprocess *gpr_subprocess_create(int argc, char **argv);
/* if subprocess has not been joined, kill it */
void gpr_subprocess_destroy(gpr_subprocess *p);
/* returns exit status; can be called at most once */
int gpr_subprocess_join(gpr_subprocess *p);
void gpr_subprocess_interrupt(gpr_subprocess *p);
#endif

@ -54,7 +54,7 @@
Destroying a thread local:
gpr_tls_destroy(&foo);
Setting a thread local:
Setting a thread local (returns new_value):
gpr_tls_set(&foo, new_value);
Accessing a thread local:

@ -34,6 +34,9 @@
#ifndef GRPC_SUPPORT_TLS_PTHREAD_H
#define GRPC_SUPPORT_TLS_PTHREAD_H
#include <grpc/support/log.h> /* for GPR_ASSERT */
#include <pthread.h>
/* Thread local storage based on pthread library calls.
#include tls.h to use this - and see that file for documentation */
@ -46,8 +49,7 @@ struct gpr_pthread_thread_local {
#define gpr_tls_init(tls) GPR_ASSERT(0 == pthread_key_create(&(tls)->key, NULL))
#define gpr_tls_destroy(tls) pthread_key_delete((tls)->key)
#define gpr_tls_set(tls, new_value) \
GPR_ASSERT(pthread_setspecific((tls)->key, (void*)(new_value)) == 0)
gpr_intptr gpr_tls_set(struct gpr_pthread_thread_local *tls, gpr_intptr value);
#define gpr_tls_get(tls) ((gpr_intptr)pthread_getspecific((tls)->key))
#endif

@ -120,6 +120,7 @@ grpc::string GetHeaderIncludes(const grpc::protobuf::FileDescriptor *file,
"class CompletionQueue;\n"
"class ChannelInterface;\n"
"class RpcService;\n"
"class ServerCompletionQueue;\n"
"class ServerContext;\n"
"} // namespace grpc\n\n";
@ -499,30 +500,37 @@ void PrintHeaderServerMethodAsync(
(*vars)["Response"] =
grpc_cpp_generator::ClassName(method->output_type(), true);
if (NoStreaming(method)) {
printer->Print(*vars,
"void Request$Method$("
"::grpc::ServerContext* context, $Request$* request, "
"::grpc::ServerAsyncResponseWriter< $Response$>* response, "
"::grpc::CompletionQueue* cq, void *tag);\n");
printer->Print(
*vars,
"void Request$Method$("
"::grpc::ServerContext* context, $Request$* request, "
"::grpc::ServerAsyncResponseWriter< $Response$>* response, "
"::grpc::CompletionQueue* new_call_cq, "
"::grpc::ServerCompletionQueue* notification_cq, void *tag);\n");
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
"void Request$Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerAsyncReader< $Response$, $Request$>* reader, "
"::grpc::CompletionQueue* cq, void *tag);\n");
printer->Print(
*vars,
"void Request$Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerAsyncReader< $Response$, $Request$>* reader, "
"::grpc::CompletionQueue* new_call_cq, "
"::grpc::ServerCompletionQueue* notification_cq, void *tag);\n");
} else if (ServerOnlyStreaming(method)) {
printer->Print(*vars,
"void Request$Method$("
"::grpc::ServerContext* context, $Request$* request, "
"::grpc::ServerAsyncWriter< $Response$>* writer, "
"::grpc::CompletionQueue* cq, void *tag);\n");
printer->Print(
*vars,
"void Request$Method$("
"::grpc::ServerContext* context, $Request$* request, "
"::grpc::ServerAsyncWriter< $Response$>* writer, "
"::grpc::CompletionQueue* new_call_cq, "
"::grpc::ServerCompletionQueue* notification_cq, void *tag);\n");
} else if (BidiStreaming(method)) {
printer->Print(
*vars,
"void Request$Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerAsyncReaderWriter< $Response$, $Request$>* stream, "
"::grpc::CompletionQueue* cq, void *tag);\n");
"::grpc::CompletionQueue* new_call_cq, "
"::grpc::ServerCompletionQueue* notification_cq, void *tag);\n");
}
}
@ -603,7 +611,7 @@ void PrintHeaderService(grpc::protobuf::io::Printer *printer,
" public:\n");
printer->Indent();
(*vars)["MethodCount"] = as_string(service->method_count());
printer->Print("explicit AsyncService(::grpc::CompletionQueue* cq);\n");
printer->Print("explicit AsyncService();\n");
printer->Print("~AsyncService() {};\n");
for (int i = 0; i < service->method_count(); ++i) {
PrintHeaderServerMethodAsync(printer, service->method(i), vars);
@ -878,36 +886,43 @@ void PrintSourceServerAsyncMethod(
(*vars)["Response"] =
grpc_cpp_generator::ClassName(method->output_type(), true);
if (NoStreaming(method)) {
printer->Print(*vars,
"void $ns$$Service$::AsyncService::Request$Method$("
"::grpc::ServerContext* context, "
"$Request$* request, "
"::grpc::ServerAsyncResponseWriter< $Response$>* response, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(
*vars,
"void $ns$$Service$::AsyncService::Request$Method$("
"::grpc::ServerContext* context, "
"$Request$* request, "
"::grpc::ServerAsyncResponseWriter< $Response$>* response, "
"::grpc::CompletionQueue* new_call_cq, "
"::grpc::ServerCompletionQueue* notification_cq, void *tag) {\n");
printer->Print(*vars,
" AsynchronousService::RequestAsyncUnary($Idx$, context, "
"request, response, cq, tag);\n");
"request, response, new_call_cq, notification_cq, tag);\n");
printer->Print("}\n\n");
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
"void $ns$$Service$::AsyncService::Request$Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerAsyncReader< $Response$, $Request$>* reader, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(
*vars,
"void $ns$$Service$::AsyncService::Request$Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerAsyncReader< $Response$, $Request$>* reader, "
"::grpc::CompletionQueue* new_call_cq, "
"::grpc::ServerCompletionQueue* notification_cq, void *tag) {\n");
printer->Print(*vars,
" AsynchronousService::RequestClientStreaming($Idx$, "
"context, reader, cq, tag);\n");
"context, reader, new_call_cq, notification_cq, tag);\n");
printer->Print("}\n\n");
} else if (ServerOnlyStreaming(method)) {
printer->Print(*vars,
"void $ns$$Service$::AsyncService::Request$Method$("
"::grpc::ServerContext* context, "
"$Request$* request, "
"::grpc::ServerAsyncWriter< $Response$>* writer, "
"::grpc::CompletionQueue* cq, void* tag) {\n");
printer->Print(*vars,
" AsynchronousService::RequestServerStreaming($Idx$, "
"context, request, writer, cq, tag);\n");
printer->Print(
*vars,
"void $ns$$Service$::AsyncService::Request$Method$("
"::grpc::ServerContext* context, "
"$Request$* request, "
"::grpc::ServerAsyncWriter< $Response$>* writer, "
"::grpc::CompletionQueue* new_call_cq, "
"::grpc::ServerCompletionQueue* notification_cq, void *tag) {\n");
printer->Print(
*vars,
" AsynchronousService::RequestServerStreaming($Idx$, "
"context, request, writer, new_call_cq, notification_cq, tag);\n");
printer->Print("}\n\n");
} else if (BidiStreaming(method)) {
printer->Print(
@ -915,10 +930,11 @@ void PrintSourceServerAsyncMethod(
"void $ns$$Service$::AsyncService::Request$Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerAsyncReaderWriter< $Response$, $Request$>* stream, "
"::grpc::CompletionQueue* cq, void *tag) {\n");
"::grpc::CompletionQueue* new_call_cq, "
"::grpc::ServerCompletionQueue* notification_cq, void *tag) {\n");
printer->Print(*vars,
" AsynchronousService::RequestBidiStreaming($Idx$, "
"context, stream, cq, tag);\n");
"context, stream, new_call_cq, notification_cq, tag);\n");
printer->Print("}\n\n");
}
}
@ -980,9 +996,8 @@ void PrintSourceService(grpc::protobuf::io::Printer *printer,
(*vars)["MethodCount"] = as_string(service->method_count());
printer->Print(*vars,
"$ns$$Service$::AsyncService::AsyncService(::grpc::"
"CompletionQueue* cq) : "
"::grpc::AsynchronousService(cq, "
"$ns$$Service$::AsyncService::AsyncService() : "
"::grpc::AsynchronousService("
"$prefix$$Service$_method_names, $MethodCount$) "
"{}\n\n");

@ -56,6 +56,8 @@ static void create_sockets(SOCKET sv[2]) {
GPR_ASSERT(lst_sock != INVALID_SOCKET);
memset(&addr, 0, sizeof(addr));
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_family = AF_INET;
GPR_ASSERT(bind(lst_sock, (struct sockaddr*)&addr, sizeof(addr)) != SOCKET_ERROR);
GPR_ASSERT(listen(lst_sock, SOMAXCONN) != SOCKET_ERROR);
GPR_ASSERT(getsockname(lst_sock, (struct sockaddr*)&addr, &addr_len) != SOCKET_ERROR);

@ -40,6 +40,7 @@
#include "src/core/iomgr/socket_windows.h"
void grpc_iocp_init(void);
void grpc_iocp_kick(void);
void grpc_iocp_shutdown(void);
void grpc_iocp_add_socket(grpc_winsocket *);
void grpc_iocp_socket_orphan(grpc_winsocket *);

@ -50,7 +50,6 @@ typedef struct delayed_callback {
} delayed_callback;
static gpr_mu g_mu;
static gpr_cv g_cv;
static gpr_cv g_rcv;
static delayed_callback *g_cbs_head = NULL;
static delayed_callback *g_cbs_tail = NULL;
@ -64,6 +63,8 @@ static void background_callback_executor(void *ignored) {
gpr_mu_lock(&g_mu);
while (!g_shutdown) {
gpr_timespec deadline = gpr_inf_future;
gpr_timespec short_deadline =
gpr_time_add(gpr_now(), gpr_time_from_millis(100));
if (g_cbs_head) {
delayed_callback *cb = g_cbs_head;
g_cbs_head = cb->next;
@ -74,19 +75,25 @@ static void background_callback_executor(void *ignored) {
gpr_mu_lock(&g_mu);
} else if (grpc_alarm_check(&g_mu, gpr_now(), &deadline)) {
} else {
gpr_cv_wait(&g_cv, &g_mu, deadline);
gpr_mu_unlock(&g_mu);
gpr_sleep_until(gpr_time_min(short_deadline, deadline));
gpr_mu_lock(&g_mu);
}
}
gpr_mu_unlock(&g_mu);
gpr_event_set(&g_background_callback_executor_done, (void *)1);
}
void grpc_kick_poller(void) { gpr_cv_broadcast(&g_cv); }
/* Wake-up hook for pollers. Intentionally a no-op in this implementation:
   the background callback executor wakes itself on a short periodic deadline,
   so any work the kicker wants noticed is picked up by the next periodic
   wakeup or by one of the polling application threads. */
void grpc_kick_poller(void) {}
void grpc_iomgr_init(void) {
gpr_thd_id id;
gpr_mu_init(&g_mu);
gpr_cv_init(&g_cv);
gpr_cv_init(&g_rcv);
grpc_alarm_list_init(gpr_now());
g_refs = 0;
@ -143,7 +150,6 @@ void grpc_iomgr_shutdown(void) {
grpc_iomgr_platform_shutdown();
grpc_alarm_list_shutdown();
gpr_mu_destroy(&g_mu);
gpr_cv_destroy(&g_cv);
gpr_cv_destroy(&g_rcv);
}
@ -175,7 +181,6 @@ void grpc_iomgr_add_delayed_callback(grpc_iomgr_cb_func cb, void *cb_arg,
g_cbs_tail->next = dcb;
g_cbs_tail = dcb;
}
gpr_cv_signal(&g_cv);
gpr_mu_unlock(&g_mu);
}

@ -59,17 +59,21 @@ grpc_winsocket *grpc_winsocket_create(SOCKET socket) {
operations to abort them. We need to do that this way because of the
various callsites of that function, which happens to be in various
mutex hold states, and that'd be unsafe to call them directly. */
/* Initiates an asynchronous shutdown of the socket. Any pending read/write
   callback is flushed through the delayed-callback queue rather than invoked
   directly, because callers may hold various mutexes (see comment above).
   Returns the number of callbacks that were scheduled.
   (Merge fix: the leftover pre-merge `void` signature line duplicated the
   declarator and broke compilation; only the `int`-returning form is kept.) */
int grpc_winsocket_shutdown(grpc_winsocket *socket) {
  int callbacks_set = 0;
  gpr_mu_lock(&socket->state_mu);
  if (socket->read_info.cb) {
    callbacks_set++;
    grpc_iomgr_add_delayed_callback(socket->read_info.cb,
                                    socket->read_info.opaque, 0);
  }
  if (socket->write_info.cb) {
    callbacks_set++;
    grpc_iomgr_add_delayed_callback(socket->write_info.cb,
                                    socket->write_info.opaque, 0);
  }
  gpr_mu_unlock(&socket->state_mu);
  return callbacks_set;
}
/* Abandons a socket. Either we're going to queue it up for garbage collecting

@ -100,8 +100,8 @@ typedef struct grpc_winsocket {
grpc_winsocket *grpc_winsocket_create(SOCKET socket);
/* Initiate an asynchronous shutdown of the socket. Will call off any pending
operation to cancel them. */
void grpc_winsocket_shutdown(grpc_winsocket *socket);
operation to cancel them. Returns the number of callbacks that got setup. */
int grpc_winsocket_shutdown(grpc_winsocket *socket);
/* Abandon a socket. */
void grpc_winsocket_orphan(grpc_winsocket *socket);

@ -118,12 +118,14 @@ static void on_read(void *tcpp, int from_iocp) {
gpr_slice *slice = NULL;
size_t nslices = 0;
grpc_endpoint_cb_status status;
grpc_endpoint_read_cb cb = tcp->read_cb;
grpc_endpoint_read_cb cb;
grpc_winsocket_callback_info *info = &socket->read_info;
void *opaque = tcp->read_user_data;
int do_abort = 0;
gpr_mu_lock(&tcp->mu);
cb = tcp->read_cb;
tcp->read_cb = NULL;
if (!from_iocp || tcp->shutting_down) {
/* If we are here with from_iocp set to true, it means we got raced to
shutting down the endpoint. No actual abort callback will happen
@ -133,9 +135,12 @@ static void on_read(void *tcpp, int from_iocp) {
gpr_mu_unlock(&tcp->mu);
if (do_abort) {
if (from_iocp) gpr_slice_unref(tcp->read_slice);
if (from_iocp) {
tcp->socket->read_info.outstanding = 0;
gpr_slice_unref(tcp->read_slice);
}
tcp_unref(tcp);
cb(opaque, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
if (cb) cb(opaque, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
return;
}
@ -225,11 +230,13 @@ static void on_write(void *tcpp, int from_iocp) {
grpc_winsocket *handle = tcp->socket;
grpc_winsocket_callback_info *info = &handle->write_info;
grpc_endpoint_cb_status status = GRPC_ENDPOINT_CB_OK;
grpc_endpoint_write_cb cb = tcp->write_cb;
grpc_endpoint_write_cb cb;
void *opaque = tcp->write_user_data;
int do_abort = 0;
gpr_mu_lock(&tcp->mu);
cb = tcp->write_cb;
tcp->write_cb = NULL;
if (!from_iocp || tcp->shutting_down) {
/* If we are here with from_iocp set to true, it means we got raced to
shutting down the endpoint. No actual abort callback will happen
@ -238,15 +245,18 @@ static void on_write(void *tcpp, int from_iocp) {
}
gpr_mu_unlock(&tcp->mu);
GPR_ASSERT(tcp->socket->write_info.outstanding);
if (do_abort) {
if (from_iocp) gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
if (from_iocp) {
tcp->socket->write_info.outstanding = 0;
gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
}
tcp_unref(tcp);
cb(opaque, GRPC_ENDPOINT_CB_SHUTDOWN);
if (cb) cb(opaque, GRPC_ENDPOINT_CB_SHUTDOWN);
return;
}
GPR_ASSERT(tcp->socket->write_info.outstanding);
if (info->wsa_error != 0) {
char *utf8_message = gpr_format_message(info->wsa_error);
gpr_log(GPR_ERROR, "WSASend overlapped error: %s", utf8_message);
@ -361,11 +371,13 @@ static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
concurrent access of the data structure in that regard. */
/* Endpoint shutdown for the Windows TCP transport.
   (Merge fix: the residue contained BOTH the old bare grpc_winsocket_shutdown
   call and the new assigning call, shutting the socket down twice; only the
   new form is kept.) */
static void win_shutdown(grpc_endpoint *ep) {
  grpc_tcp *tcp = (grpc_tcp *) ep;
  int extra_refs = 0;
  gpr_mu_lock(&tcp->mu);
  /* At that point, what may happen is that we're already inside the IOCP
     callback. See the comments in on_read and on_write. */
  tcp->shutting_down = 1;
  /* Take one extra tcp ref per callback the shutdown scheduled, matching the
     unrefs performed when those callbacks eventually fire. */
  extra_refs = grpc_winsocket_shutdown(tcp->socket);
  while (extra_refs--) tcp_ref(tcp);
  gpr_mu_unlock(&tcp->mu);
}

@ -81,6 +81,24 @@ static const char *ssl_cipher_suites(void) {
/* -- Common methods. -- */
/* Returns the first property with that name. */
/* Scans |peer|'s property list and returns the first property whose name
   matches |name| (a NULL |name| matches only properties with a NULL name).
   Returns NULL when |peer| is NULL or no property matches. */
static const tsi_peer_property *tsi_peer_get_property_by_name(
    const tsi_peer *peer, const char *name) {
  size_t idx;
  if (peer == NULL) return NULL;
  for (idx = 0; idx < peer->property_count; idx++) {
    const tsi_peer_property *prop = &peer->properties[idx];
    if (name == NULL) {
      if (prop->name == NULL) return prop;
    } else if (prop->name != NULL && strcmp(prop->name, name) == 0) {
      return prop;
    }
  }
  return NULL;
}
grpc_security_status grpc_security_connector_create_handshaker(
grpc_security_connector *sc, tsi_handshaker **handshaker) {
if (sc == NULL || handshaker == NULL) return GRPC_SECURITY_ERROR;
@ -212,13 +230,8 @@ static grpc_security_status fake_check_peer(grpc_security_connector *sc,
status = GRPC_SECURITY_ERROR;
goto end;
}
if (peer.properties[0].type != TSI_PEER_PROPERTY_TYPE_STRING) {
gpr_log(GPR_ERROR, "Invalid type of cert type property.");
status = GRPC_SECURITY_ERROR;
goto end;
}
if (strncmp(peer.properties[0].value.string.data, TSI_FAKE_CERTIFICATE_TYPE,
peer.properties[0].value.string.length)) {
if (strncmp(peer.properties[0].value.data, TSI_FAKE_CERTIFICATE_TYPE,
peer.properties[0].value.length)) {
gpr_log(GPR_ERROR, "Invalid value for cert type property.");
status = GRPC_SECURITY_ERROR;
goto end;
@ -365,12 +378,7 @@ static grpc_security_status ssl_check_peer(const char *peer_name,
gpr_log(GPR_ERROR, "Missing selected ALPN property.");
return GRPC_SECURITY_ERROR;
}
if (p->type != TSI_PEER_PROPERTY_TYPE_STRING) {
gpr_log(GPR_ERROR, "Invalid selected ALPN property.");
return GRPC_SECURITY_ERROR;
}
if (!grpc_chttp2_is_alpn_version_supported(p->value.string.data,
p->value.string.length)) {
if (!grpc_chttp2_is_alpn_version_supported(p->value.data, p->value.length)) {
gpr_log(GPR_ERROR, "Invalid ALPN value.");
return GRPC_SECURITY_ERROR;
}

@ -0,0 +1,108 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_SUBPROCESS
#include <grpc/support/subprocess.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
struct gpr_subprocess {
int pid;
int joined;
};
char *gpr_subprocess_binary_extension() { return ""; }
/* Forks and execs argv[0] with the given argument vector.
   Returns a heap-allocated handle on success, or NULL if fork() fails.
   The child _exits(1) if the exec itself fails. */
gpr_subprocess *gpr_subprocess_create(int argc, char **argv) {
  gpr_subprocess *result;
  char **child_argv;
  int child_pid;

  child_pid = fork();
  if (child_pid == -1) {
    /* fork failed: no child was created */
    return NULL;
  }
  if (child_pid == 0) {
    /* child: build a NULL-terminated copy of argv, as execv requires */
    child_argv = gpr_malloc((argc + 1) * sizeof(char *));
    memcpy(child_argv, argv, argc * sizeof(char *));
    child_argv[argc] = NULL;
    execv(child_argv[0], child_argv);
    /* if we reach here, an error has occurred */
    gpr_log(GPR_ERROR, "execv '%s' failed: %s", child_argv[0],
            strerror(errno));
    _exit(1);
    return NULL;
  }
  /* parent: record the child's pid in a fresh handle */
  result = gpr_malloc(sizeof(gpr_subprocess));
  memset(result, 0, sizeof(*result));
  result->pid = child_pid;
  return result;
}
/* Releases the subprocess handle. If the child has not been joined yet it is
   force-killed with SIGKILL and reaped first, so no zombie is left behind. */
void gpr_subprocess_destroy(gpr_subprocess *p) {
  if (p->joined) {
    gpr_free(p);
    return;
  }
  kill(p->pid, SIGKILL);
  gpr_subprocess_join(p);
  gpr_free(p);
}
/* Blocks until the child exits. Returns the raw waitpid status on success,
   or -1 if waitpid fails.
   Fixes:
   - retries waitpid on EINTR instead of treating a signal as failure;
   - records p->joined on success, so a subsequent gpr_subprocess_destroy does
     not send SIGKILL to an already-reaped (and possibly recycled) pid. */
int gpr_subprocess_join(gpr_subprocess *p) {
  int status;
  while (waitpid(p->pid, &status, 0) == -1) {
    if (errno == EINTR) continue; /* interrupted by a signal: wait again */
    gpr_log(GPR_ERROR, "waitpid failed: %s", strerror(errno));
    return -1;
  }
  p->joined = 1;
  return status;
}
/* Politely asks the child to stop by sending SIGINT; does nothing once the
   child has already been joined. */
void gpr_subprocess_interrupt(gpr_subprocess *p) {
  if (p->joined) return;
  kill(p->pid, SIGINT);
}
#endif /* GPR_POSIX_SUBPROCESS */

@ -0,0 +1,45 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_PTHREAD_TLS
#include <grpc/support/tls.h>
/* Stores |value| in the pthread-keyed thread-local slot and returns it,
   mirroring assignment semantics. Aborts (GPR_ASSERT) if
   pthread_setspecific reports an error. */
gpr_intptr gpr_tls_set(struct gpr_pthread_thread_local *tls, gpr_intptr value) {
  int rc = pthread_setspecific(tls->key, (void *)value);
  GPR_ASSERT(rc == 0);
  return value;
}
#endif /* GPR_PTHREAD_TLS */

@ -62,7 +62,7 @@ typedef enum {
typedef struct {
grpc_ioreq_completion_func on_complete;
void *user_data;
grpc_op_error status;
int success;
} completed_request;
/* See request_set in grpc_call below for a description */
@ -74,7 +74,7 @@ typedef struct {
typedef struct {
/* Overall status of the operation: starts OK, may degrade to
non-OK */
grpc_op_error status;
int success;
/* Completion function to call at the end of the operation */
grpc_ioreq_completion_func on_complete;
void *user_data;
@ -235,7 +235,6 @@ struct grpc_call {
#define CALL_FROM_TOP_ELEM(top_elem) \
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
static void do_nothing(void *ignored, grpc_op_error also_ignored) {}
static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline);
static void call_on_done_recv(void *call, int success);
static void call_on_done_send(void *call, int success);
@ -457,7 +456,7 @@ static void unlock(grpc_call *call) {
if (completing_requests > 0) {
for (i = 0; i < completing_requests; i++) {
completed_requests[i].on_complete(call, completed_requests[i].status,
completed_requests[i].on_complete(call, completed_requests[i].success,
completed_requests[i].user_data);
}
lock(call);
@ -517,7 +516,7 @@ no_details:
}
static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
grpc_op_error status) {
int success) {
completed_request *cr;
gpr_uint8 master_set = call->request_set[op];
reqinfo_master *master;
@ -525,8 +524,8 @@ static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
/* ioreq is live: we need to do something */
master = &call->masters[master_set];
master->complete_mask |= 1u << op;
if (status != GRPC_OP_OK) {
master->status = status;
if (!success) {
master->success = 0;
}
if (master->complete_mask == master->need_mask) {
for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
@ -537,7 +536,7 @@ static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
switch ((grpc_ioreq_op)i) {
case GRPC_IOREQ_RECV_MESSAGE:
case GRPC_IOREQ_SEND_MESSAGE:
if (master->status == GRPC_OP_OK) {
if (master->success) {
call->request_set[i] = REQSET_EMPTY;
} else {
call->write_state = WRITE_STATE_WRITE_CLOSED;
@ -572,33 +571,31 @@ static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
}
}
cr = &call->completed_requests[call->num_completed_requests++];
cr->status = master->status;
cr->success = master->success;
cr->on_complete = master->on_complete;
cr->user_data = master->user_data;
}
}
static void finish_ioreq_op(grpc_call *call, grpc_ioreq_op op,
grpc_op_error status) {
static void finish_ioreq_op(grpc_call *call, grpc_ioreq_op op, int success) {
if (is_op_live(call, op)) {
finish_live_ioreq_op(call, op, status);
finish_live_ioreq_op(call, op, success);
}
}
static void call_on_done_send(void *pc, int success) {
grpc_call *call = pc;
grpc_op_error error = success ? GRPC_OP_OK : GRPC_OP_ERROR;
lock(call);
if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) {
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, error);
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, success);
}
if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_MESSAGE)) {
finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, error);
finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, success);
}
if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_CLOSE)) {
finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, error);
finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, error);
finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, GRPC_OP_OK);
finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, success);
finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, success);
finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1);
}
call->last_send_contains = 0;
call->sending = 0;
@ -721,12 +718,12 @@ static void call_on_done_recv(void *pc, int success) {
}
finish_read_ops(call);
} else {
finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 0);
finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, 0);
finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, 0);
finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, 0);
finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, 0);
finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, 0);
}
call->recv_ops.nops = 0;
unlock(call);
@ -891,7 +888,7 @@ static void finish_read_ops(grpc_call *call) {
(NULL == (*call->request_data[GRPC_IOREQ_RECV_MESSAGE].recv_message =
grpc_bbq_pop(&call->incoming_queue)));
if (!empty) {
finish_live_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, GRPC_OP_OK);
finish_live_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 1);
empty = grpc_bbq_empty(&call->incoming_queue);
}
} else {
@ -901,19 +898,19 @@ static void finish_read_ops(grpc_call *call) {
switch (call->read_state) {
case READ_STATE_STREAM_CLOSED:
if (empty) {
finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, GRPC_OP_OK);
finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, 1);
}
/* fallthrough */
case READ_STATE_READ_CLOSED:
if (empty) {
finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, GRPC_OP_OK);
finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 1);
}
finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, GRPC_OP_OK);
finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, GRPC_OP_OK);
finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, GRPC_OP_OK);
finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, 1);
finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, 1);
finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, 1);
/* fallthrough */
case READ_STATE_GOT_INITIAL_METADATA:
finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, GRPC_OP_OK);
finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, 1);
/* fallthrough */
case READ_STATE_INITIAL:
/* do nothing */
@ -924,13 +921,13 @@ static void finish_read_ops(grpc_call *call) {
static void early_out_write_ops(grpc_call *call) {
switch (call->write_state) {
case WRITE_STATE_WRITE_CLOSED:
finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, GRPC_OP_OK);
finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, 0);
finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, 0);
finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, 0);
finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1);
/* fallthrough */
case WRITE_STATE_STARTED:
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, GRPC_OP_ERROR);
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, 0);
/* fallthrough */
case WRITE_STATE_INITIAL:
/* do nothing */
@ -979,7 +976,7 @@ static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
}
master = &call->masters[set];
master->status = GRPC_OP_OK;
master->success = 1;
master->need_mask = have_ops;
master->complete_mask = 0;
master->on_complete = completion;
@ -1177,8 +1174,8 @@ static void set_cancelled_value(grpc_status_code status, void *dest) {
*(grpc_status_code *)dest = (status != GRPC_STATUS_OK);
}
/* ioreq completion hook for grpc_call_start_batch: queues the batch's
   GRPC_OP_COMPLETE event on the call's completion queue.
   NOTE(review): |success| is ignored and 1 is always reported, matching the
   pre-merge GRPC_OP_OK behavior -- confirm this is intended.
   (Merge fix: the leftover pre-merge definition lines were fused with the new
   ones, producing a nested broken definition; only the new form is kept.) */
static void finish_batch(grpc_call *call, int success, void *tag) {
  grpc_cq_end_op(call->cq, tag, call, 1);
}
grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
@ -1192,8 +1189,8 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, tag);
if (nops == 0) {
grpc_cq_begin_op(call->cq, call, GRPC_OP_COMPLETE);
grpc_cq_end_op(call->cq, tag, call, do_nothing, NULL, GRPC_OP_OK);
grpc_cq_begin_op(call->cq, call);
grpc_cq_end_op(call->cq, tag, call, 1);
return GRPC_CALL_OK;
}
@ -1284,7 +1281,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
}
}
grpc_cq_begin_op(call->cq, call, GRPC_OP_COMPLETE);
grpc_cq_begin_op(call->cq, call);
return grpc_call_start_ioreq_and_call_back(call, reqs, out, finish_batch,
tag);

@ -81,8 +81,7 @@ typedef struct {
grpc_ioreq_data data;
} grpc_ioreq;
typedef void (*grpc_ioreq_completion_func)(grpc_call *call,
grpc_op_error status,
typedef void (*grpc_ioreq_completion_func)(grpc_call *call, int success,
void *user_data);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,

@ -51,8 +51,6 @@
function (on_finish) that is hidden from outside this module */
typedef struct event {
grpc_event base;
grpc_event_finish_func on_finish;
void *on_finish_user_data;
struct event *queue_next;
struct event *queue_prev;
struct event *bucket_next;
@ -78,16 +76,8 @@ struct grpc_completion_queue {
event *queue;
/* Fixed size chained hash table of events for pluck() */
event *buckets[NUM_TAG_BUCKETS];
#ifndef NDEBUG
/* Debug support: track which operations are in flight at any given time */
gpr_atm pending_op_count[GRPC_COMPLETION_DO_NOT_USE];
#endif
};
/* Default do-nothing on_finish function */
static void null_on_finish(void *user_data, grpc_op_error error) {}
grpc_completion_queue *grpc_completion_queue_create(void) {
grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
memset(cc, 0, sizeof(*cc));
@ -124,15 +114,11 @@ void grpc_completion_queue_dont_poll_test_only(grpc_completion_queue *cc) {
members can be filled in.
Requires GRPC_POLLSET_MU(&cc->pollset) locked. */
static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type,
void *tag, grpc_call *call,
grpc_event_finish_func on_finish, void *user_data) {
void *tag, grpc_call *call) {
event *ev = gpr_malloc(sizeof(event));
gpr_uintptr bucket = ((gpr_uintptr)tag) % NUM_TAG_BUCKETS;
ev->base.type = type;
ev->base.tag = tag;
ev->base.call = call;
ev->on_finish = on_finish ? on_finish : null_on_finish;
ev->on_finish_user_data = user_data;
if (cc->queue == NULL) {
cc->queue = ev->queue_next = ev->queue_prev = ev;
} else {
@ -152,22 +138,15 @@ static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type,
return ev;
}
/* Flag that an operation is beginning: takes a completion-queue ref (and a
   call ref when |call| is non-NULL). The matching grpc_cq_end_op releases
   them, which keeps the queue from entering shutdown while ops are in flight.
   (Merge fix: the old signature tail and the NDEBUG pending_op_count[type]
   bookkeeping were residue -- `type` is no longer a parameter and the counter
   member was removed from grpc_completion_queue.) */
void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call) {
  gpr_ref(&cc->refs);
  if (call) GRPC_CALL_INTERNAL_REF(call, "cq");
}
/* Signal the end of an operation - if this is the last waiting-to-be-queued
event, then enter shutdown mode */
static void end_op_locked(grpc_completion_queue *cc,
grpc_completion_type type) {
#ifndef NDEBUG
GPR_ASSERT(gpr_atm_full_fetch_add(&cc->pending_op_count[type], -1) > 0);
#endif
if (gpr_unref(&cc->refs)) {
GPR_ASSERT(!cc->shutdown);
GPR_ASSERT(cc->shutdown_called);
@ -176,37 +155,29 @@ static void end_op_locked(grpc_completion_queue *cc,
}
}
/* NOTE(review): this helper appears to be pre-merge residue -- it still calls
   add_locked() with the old six-argument signature (the on_finish/user_data
   parameters were removed in this merge) and GRPC_SERVER_SHUTDOWN events were
   dropped from the new completion-queue event model. Confirm whether this
   function (and its header declaration) should be deleted. */
void grpc_cq_end_server_shutdown(grpc_completion_queue *cc, void *tag) {
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  add_locked(cc, GRPC_SERVER_SHUTDOWN, tag, NULL, NULL, NULL);
  end_op_locked(cc, GRPC_SERVER_SHUTDOWN);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
/* Queue a GRPC_OP_COMPLETE event carrying |success|, releasing the refs taken
   by the matching grpc_cq_begin_op (queue ref via end_op_locked, call ref via
   GRPC_CALL_INTERNAL_UNREF when |call| is non-NULL).
   (Merge fix: the leftover pre-merge parameter lines
   `grpc_event_finish_func on_finish, void *user_data, grpc_op_error error)`
   duplicated the signature tail and broke compilation; only the new
   |success| form is kept.) */
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
                    int success) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_OP_COMPLETE, tag, call);
  ev->base.success = success;
  end_op_locked(cc, GRPC_OP_COMPLETE);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  if (call) GRPC_CALL_INTERNAL_UNREF(call, "cq", 0);
}
/* Create a GRPC_QUEUE_SHUTDOWN event without queuing it anywhere */
/* Builds a heap-allocated GRPC_QUEUE_SHUTDOWN event without queuing it. */
static event *create_shutdown_event(void) {
  event *ev = gpr_malloc(sizeof(event));
  ev->base.type = GRPC_QUEUE_SHUTDOWN;
  ev->base.call = NULL;
  ev->base.tag = NULL;
  /* NOTE(review): on_finish/null_on_finish appear to have been removed from
     struct event in this merge; this assignment looks like pre-merge residue
     -- confirm and drop it. */
  ev->on_finish = null_on_finish;
  return ev;
}
grpc_event *grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec deadline) {
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec deadline) {
event *ev = NULL;
grpc_event ret;
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
for (;;) {
@ -240,12 +211,17 @@ grpc_event *grpc_completion_queue_next(grpc_completion_queue *cc,
if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
GRPC_POLLSET_MU(&cc->pollset), deadline)) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
return NULL;
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
return ret;
}
}
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
return &ev->base;
ret = ev->base;
gpr_free(ev);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
return ret;
}
static event *pluck_event(grpc_completion_queue *cc, void *tag) {
@ -277,9 +253,10 @@ static event *pluck_event(grpc_completion_queue *cc, void *tag) {
return NULL;
}
grpc_event *grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_timespec deadline) {
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_timespec deadline) {
event *ev = NULL;
grpc_event ret;
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
for (;;) {
@ -296,12 +273,17 @@ grpc_event *grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
GRPC_POLLSET_MU(&cc->pollset), deadline)) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
return NULL;
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
return ret;
}
}
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
ret = ev->base;
gpr_free(ev);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
return &ev->base;
return ret;
}
/* Shutdown simply drops a ref that we reserved at creation time; if we drop
@ -324,30 +306,6 @@ void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
grpc_cq_internal_unref(cc);
}
/* NOTE(review): likely pre-merge residue. grpc_completion_queue_next/pluck
   now return events by value and gpr_free the internal event themselves, and
   the on_finish callback machinery was removed from struct event in this
   merge -- confirm no callers remain and delete this function. */
void grpc_event_finish(grpc_event *base) {
  event *ev = (event *)base;
  ev->on_finish(ev->on_finish_user_data, GRPC_OP_OK);
  if (ev->base.call) {
    GRPC_CALL_INTERNAL_UNREF(ev->base.call, "cq", 1);
  }
  gpr_free(ev);
}
/* Debug-only helper that logs the per-completion-type pending-op counters.
   NOTE(review): the pending_op_count member was removed from
   grpc_completion_queue in this merge, so this dump appears to be stale
   residue -- confirm and remove it along with its header declaration. */
void grpc_cq_dump_pending_ops(grpc_completion_queue *cc) {
#ifndef NDEBUG
  char tmp[GRPC_COMPLETION_DO_NOT_USE * (1 + GPR_LTOA_MIN_BUFSIZE)];
  char *p = tmp;
  int i;
  for (i = 0; i < GRPC_COMPLETION_DO_NOT_USE; i++) {
    *p++ = ' ';
    p += gpr_ltoa(cc->pending_op_count[i], p);
  }
  gpr_log(GPR_INFO, "pending ops:%s", tmp);
#endif
}
/* Exposes the completion queue's embedded pollset to iomgr code. */
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) { return &cc->pollset; }

@ -39,41 +39,20 @@
#include "src/core/iomgr/pollset.h"
#include <grpc/grpc.h>
/* A finish func is executed whenever the event consumer calls
grpc_event_finish */
typedef void (*grpc_event_finish_func)(void *user_data, grpc_op_error error);
void grpc_cq_internal_ref(grpc_completion_queue *cc);
void grpc_cq_internal_unref(grpc_completion_queue *cc);
/* Flag that an operation is beginning: the completion channel will not finish
shutdown until a corrensponding grpc_cq_end_* call is made */
void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call,
grpc_completion_type type);
/* grpc_cq_end_* functions pair with a grpc_cq_begin_op
grpc_cq_end_* common arguments:
cc - the completion channel to queue on
tag - the user supplied operation tag
on_finish - grpc_event_finish_func that is called during grpc_event_finish
can be NULL to not get a callback
user_data - user_data parameter to be passed to on_finish
Other parameters match the data member of grpc_event */
void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call);
/* Queue a GRPC_OP_COMPLETED operation */
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
grpc_event_finish_func on_finish, void *user_data,
grpc_op_error error);
void grpc_cq_end_server_shutdown(grpc_completion_queue *cc, void *tag);
int success);
/* disable polling for some tests */
void grpc_completion_queue_dont_poll_test_only(grpc_completion_queue *cc);
void grpc_cq_dump_pending_ops(grpc_completion_queue *cc);
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);
void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc);

@ -40,23 +40,15 @@
/* Appends the "tag:<ptr>" header for |ev| to the string vector.
   (Merge fix: both the old "tag:%p call:%p" format call and the new "tag:%p"
   one were present, leaking the first allocation; only the new form is kept,
   matching the removal of the call member from grpc_event.) */
static void addhdr(gpr_strvec *buf, grpc_event *ev) {
  char *tmp;
  gpr_asprintf(&tmp, "tag:%p", ev->tag);
  gpr_strvec_add(buf, tmp);
}
/* Maps a success flag to the label used when rendering events as strings.
   (Merge fix: the old switch-based grpc_op_error overload and the new
   int-flag one-liner were both present, duplicating the definition; only the
   new form is kept.) */
static const char *errstr(int success) { return success ? "OK" : "ERROR"; }
static void adderr(gpr_strvec *buf, grpc_op_error err) {
static void adderr(gpr_strvec *buf, int success) {
char *tmp;
gpr_asprintf(&tmp, " err=%s", errstr(err));
gpr_asprintf(&tmp, " %s", errstr(success));
gpr_strvec_add(buf, tmp);
}
@ -69,8 +61,8 @@ char *grpc_event_string(grpc_event *ev) {
gpr_strvec_init(&buf);
switch (ev->type) {
case GRPC_SERVER_SHUTDOWN:
gpr_strvec_add(&buf, gpr_strdup("SERVER_SHUTDOWN"));
case GRPC_QUEUE_TIMEOUT:
gpr_strvec_add(&buf, gpr_strdup("QUEUE_TIMEOUT"));
break;
case GRPC_QUEUE_SHUTDOWN:
gpr_strvec_add(&buf, gpr_strdup("QUEUE_SHUTDOWN"));
@ -78,11 +70,7 @@ char *grpc_event_string(grpc_event *ev) {
case GRPC_OP_COMPLETE:
gpr_strvec_add(&buf, gpr_strdup("OP_COMPLETE: "));
addhdr(&buf, ev);
adderr(&buf, ev->data.op_complete);
break;
case GRPC_COMPLETION_DO_NOT_USE:
gpr_strvec_add(&buf, gpr_strdup("DO_NOT_USE (this is a bug)"));
addhdr(&buf, ev);
adderr(&buf, ev->success);
break;
}

@ -74,16 +74,15 @@ typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
typedef struct {
requested_call_type type;
void *tag;
grpc_completion_queue *cq_bound_to_call;
grpc_completion_queue *cq_for_notification;
grpc_call **call;
union {
struct {
grpc_completion_queue *cq_bind;
grpc_call **call;
grpc_call_details *details;
grpc_metadata_array *initial_metadata;
} batch;
struct {
grpc_completion_queue *cq_bind;
grpc_call **call;
registered_method *registered_method;
gpr_timespec *deadline;
grpc_metadata_array *initial_metadata;
@ -103,7 +102,6 @@ struct registered_method {
char *host;
call_data *pending;
requested_call_array requested;
grpc_completion_queue *cq;
registered_method *next;
};
@ -130,7 +128,6 @@ struct grpc_server {
size_t channel_filter_count;
const grpc_channel_filter **channel_filters;
grpc_channel_args *channel_args;
grpc_completion_queue *unregistered_cq;
grpc_completion_queue **cqs;
grpc_pollset **pollsets;
@ -188,8 +185,6 @@ struct call_data {
#define SERVER_FROM_CALL_ELEM(elem) \
(((channel_data *)(elem)->channel_data)->server)
static void do_nothing(void *unused, grpc_op_error ignored) {}
static void begin_call(grpc_server *server, call_data *calld,
requested_call *rc);
static void fail_call(grpc_server *server, requested_call *rc);
@ -538,8 +533,8 @@ static void destroy_call_elem(grpc_call_element *elem) {
if (chand->server->shutdown && chand->server->lists[ALL_CALLS] == NULL) {
for (i = 0; i < chand->server->num_shutdown_tags; i++) {
for (j = 0; j < chand->server->cq_count; j++) {
grpc_cq_end_server_shutdown(chand->server->cqs[j],
chand->server->shutdown_tags[i]);
grpc_cq_end_op(chand->server->cqs[j], chand->server->shutdown_tags[i],
NULL, 1);
}
}
}
@ -602,7 +597,8 @@ static const grpc_channel_filter server_surface_filter = {
destroy_channel_elem, "server",
};
static void addcq(grpc_server *server, grpc_completion_queue *cq) {
void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq) {
size_t i, n;
for (i = 0; i < server->cq_count; i++) {
if (server->cqs[i] == cq) return;
@ -614,8 +610,7 @@ static void addcq(grpc_server *server, grpc_completion_queue *cq) {
server->cqs[n] = cq;
}
grpc_server *grpc_server_create_from_filters(grpc_completion_queue *cq,
grpc_channel_filter **filters,
grpc_server *grpc_server_create_from_filters(grpc_channel_filter **filters,
size_t filter_count,
const grpc_channel_args *args) {
size_t i;
@ -626,12 +621,10 @@ grpc_server *grpc_server_create_from_filters(grpc_completion_queue *cq,
GPR_ASSERT(grpc_is_initialized() && "call grpc_init()");
memset(server, 0, sizeof(grpc_server));
if (cq) addcq(server, cq);
gpr_mu_init(&server->mu);
gpr_cv_init(&server->cv);
server->unregistered_cq = cq;
/* decremented by grpc_server_destroy */
gpr_ref_init(&server->internal_refcount, 1);
server->root_channel_data.next = server->root_channel_data.prev =
@ -667,8 +660,7 @@ static int streq(const char *a, const char *b) {
}
void *grpc_server_register_method(grpc_server *server, const char *method,
const char *host,
grpc_completion_queue *cq_new_rpc) {
const char *host) {
registered_method *m;
if (!method) {
gpr_log(GPR_ERROR, "%s method string cannot be NULL", __FUNCTION__);
@ -681,13 +673,11 @@ void *grpc_server_register_method(grpc_server *server, const char *method,
return NULL;
}
}
addcq(server, cq_new_rpc);
m = gpr_malloc(sizeof(registered_method));
memset(m, 0, sizeof(*m));
m->method = gpr_strdup(method);
m->host = gpr_strdup(host);
m->next = server->registered_methods;
m->cq = cq_new_rpc;
server->registered_methods = m;
return m;
}
@ -817,7 +807,7 @@ static void shutdown_internal(grpc_server *server, gpr_uint8 have_shutdown_tag,
gpr_mu_lock(&server->mu);
if (have_shutdown_tag) {
for (i = 0; i < server->cq_count; i++) {
grpc_cq_begin_op(server->cqs[i], NULL, GRPC_SERVER_SHUTDOWN);
grpc_cq_begin_op(server->cqs[i], NULL);
}
server->shutdown_tags =
gpr_realloc(server->shutdown_tags,
@ -867,7 +857,7 @@ static void shutdown_internal(grpc_server *server, gpr_uint8 have_shutdown_tag,
if (server->lists[ALL_CALLS] == NULL) {
for (i = 0; i < server->num_shutdown_tags; i++) {
for (j = 0; j < server->cq_count; j++) {
grpc_cq_end_server_shutdown(server->cqs[j], server->shutdown_tags[i]);
grpc_cq_end_op(server->cqs[j], server->shutdown_tags[i], NULL, 1);
}
}
}
@ -1012,17 +1002,18 @@ static grpc_call_error queue_call_request(grpc_server *server,
}
}
grpc_call_error grpc_server_request_call(grpc_server *server, grpc_call **call,
grpc_call_details *details,
grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bind,
void *tag) {
grpc_call_error grpc_server_request_call(
grpc_server *server, grpc_call **call, grpc_call_details *details,
grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag) {
requested_call rc;
grpc_cq_begin_op(server->unregistered_cq, NULL, GRPC_OP_COMPLETE);
grpc_cq_begin_op(cq_for_notification, NULL);
rc.type = BATCH_CALL;
rc.tag = tag;
rc.data.batch.cq_bind = cq_bind;
rc.data.batch.call = call;
rc.cq_bound_to_call = cq_bound_to_call;
rc.cq_for_notification = cq_for_notification;
rc.call = call;
rc.data.batch.details = details;
rc.data.batch.initial_metadata = initial_metadata;
return queue_call_request(server, &rc);
@ -1031,14 +1022,16 @@ grpc_call_error grpc_server_request_call(grpc_server *server, grpc_call **call,
grpc_call_error grpc_server_request_registered_call(
grpc_server *server, void *rm, grpc_call **call, gpr_timespec *deadline,
grpc_metadata_array *initial_metadata, grpc_byte_buffer **optional_payload,
grpc_completion_queue *cq_bind, void *tag) {
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag) {
requested_call rc;
registered_method *registered_method = rm;
grpc_cq_begin_op(registered_method->cq, NULL, GRPC_OP_COMPLETE);
grpc_cq_begin_op(cq_for_notification, NULL);
rc.type = REGISTERED_CALL;
rc.tag = tag;
rc.data.registered.cq_bind = cq_bind;
rc.data.registered.call = call;
rc.cq_bound_to_call = cq_bound_to_call;
rc.cq_for_notification = cq_for_notification;
rc.call = call;
rc.data.registered.registered_method = registered_method;
rc.data.registered.deadline = deadline;
rc.data.registered.initial_metadata = initial_metadata;
@ -1046,10 +1039,9 @@ grpc_call_error grpc_server_request_registered_call(
return queue_call_request(server, &rc);
}
static void publish_registered_or_batch(grpc_call *call, grpc_op_error status,
static void publish_registered_or_batch(grpc_call *call, int success,
void *tag);
static void publish_was_not_set(grpc_call *call, grpc_op_error status,
void *tag) {
static void publish_was_not_set(grpc_call *call, int success, void *tag) {
abort();
}
@ -1076,6 +1068,9 @@ static void begin_call(grpc_server *server, call_data *calld,
fill in the metadata array passed by the client, we need to perform
an ioreq op, that should complete immediately. */
grpc_call_set_completion_queue(calld->call, rc->cq_bound_to_call);
*rc->call = calld->call;
calld->cq_new = rc->cq_for_notification;
switch (rc->type) {
case BATCH_CALL:
cpstr(&rc->data.batch.details->host,
@ -1083,18 +1078,13 @@ static void begin_call(grpc_server *server, call_data *calld,
cpstr(&rc->data.batch.details->method,
&rc->data.batch.details->method_capacity, calld->path);
rc->data.batch.details->deadline = calld->deadline;
grpc_call_set_completion_queue(calld->call, rc->data.batch.cq_bind);
*rc->data.batch.call = calld->call;
r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
r->data.recv_metadata = rc->data.batch.initial_metadata;
r++;
calld->cq_new = server->unregistered_cq;
publish = publish_registered_or_batch;
break;
case REGISTERED_CALL:
*rc->data.registered.deadline = calld->deadline;
grpc_call_set_completion_queue(calld->call, rc->data.registered.cq_bind);
*rc->data.registered.call = calld->call;
r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
r->data.recv_metadata = rc->data.registered.initial_metadata;
r++;
@ -1103,7 +1093,6 @@ static void begin_call(grpc_server *server, call_data *calld,
r->data.recv_message = rc->data.registered.optional_payload;
r++;
}
calld->cq_new = rc->data.registered.registered_method->cq;
publish = publish_registered_or_batch;
break;
}
@ -1114,28 +1103,24 @@ static void begin_call(grpc_server *server, call_data *calld,
}
static void fail_call(grpc_server *server, requested_call *rc) {
*rc->call = NULL;
switch (rc->type) {
case BATCH_CALL:
*rc->data.batch.call = NULL;
rc->data.batch.initial_metadata->count = 0;
grpc_cq_end_op(server->unregistered_cq, rc->tag, NULL, do_nothing, NULL,
GRPC_OP_ERROR);
break;
case REGISTERED_CALL:
*rc->data.registered.call = NULL;
rc->data.registered.initial_metadata->count = 0;
grpc_cq_end_op(rc->data.registered.registered_method->cq, rc->tag, NULL,
do_nothing, NULL, GRPC_OP_ERROR);
break;
}
grpc_cq_end_op(rc->cq_for_notification, rc->tag, NULL, 0);
}
static void publish_registered_or_batch(grpc_call *call, grpc_op_error status,
static void publish_registered_or_batch(grpc_call *call, int success,
void *tag) {
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
call_data *calld = elem->call_data;
grpc_cq_end_op(calld->cq_new, tag, call, do_nothing, NULL, status);
grpc_cq_end_op(calld->cq_new, tag, call, success);
}
const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) {

@ -39,8 +39,7 @@
#include "src/core/transport/transport.h"
/* Create a server */
grpc_server *grpc_server_create_from_filters(grpc_completion_queue *cq,
grpc_channel_filter **filters,
grpc_server *grpc_server_create_from_filters(grpc_channel_filter **filters,
size_t filter_count,
const grpc_channel_args *args);

@ -35,7 +35,6 @@
#include "src/core/surface/completion_queue.h"
#include "src/core/surface/server.h"
grpc_server *grpc_server_create(grpc_completion_queue *cq,
const grpc_channel_args *args) {
return grpc_server_create_from_filters(cq, NULL, 0, args);
grpc_server *grpc_server_create(const grpc_channel_args *args) {
return grpc_server_create_from_filters(NULL, 0, args);
}

@ -43,6 +43,7 @@
#include "src/core/tsi/transport_security.h"
#include <openssl/bio.h>
#include <openssl/crypto.h> /* For OPENSSL_free */
#include <openssl/err.h>
#include <openssl/ssl.h>
#include <openssl/x509.h>
@ -268,31 +269,16 @@ static tsi_result peer_property_from_x509_common_name(
}
/* Gets the subject SANs from an X509 cert as a tsi_peer_property. */
static tsi_result peer_property_from_x509_subject_alt_names(
X509* cert, tsi_peer_property* property) {
int i = 0;
int subject_alt_name_count = 0;
static tsi_result add_subject_alt_names_properties_to_peer(
tsi_peer* peer, GENERAL_NAMES* subject_alt_names,
int subject_alt_name_count) {
int i;
tsi_result result = TSI_OK;
GENERAL_NAMES* subject_alt_names =
X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
if (subject_alt_names == NULL) {
/* Empty list. */
return tsi_construct_list_peer_property(
TSI_X509_SUBJECT_ALTERNATIVE_NAMES_PEER_PROPERTY, 0, property);
}
subject_alt_name_count = sk_GENERAL_NAME_num(subject_alt_names);
result = tsi_construct_list_peer_property(
TSI_X509_SUBJECT_ALTERNATIVE_NAMES_PEER_PROPERTY, subject_alt_name_count,
property);
if (result != TSI_OK) return result;
/* Reset for DNS entries filtering. */
subject_alt_name_count = property->value.list.child_count;
property->value.list.child_count = 0;
peer->property_count -= subject_alt_name_count;
for (i = 0; i < subject_alt_name_count; i++) {
tsi_peer_property* child_property = NULL;
GENERAL_NAME* subject_alt_name =
sk_GENERAL_NAME_value(subject_alt_names, i);
/* Filter out the non-dns entries names. */
@ -305,40 +291,50 @@ static tsi_result peer_property_from_x509_subject_alt_names(
result = TSI_INTERNAL_ERROR;
break;
}
child_property =
&property->value.list.children[property->value.list.child_count++];
result = tsi_construct_string_peer_property(
NULL, (const char*)dns_name, dns_name_size, child_property);
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY,
(const char*)dns_name, dns_name_size,
&peer->properties[peer->property_count++]);
OPENSSL_free(dns_name);
if (result != TSI_OK) break;
}
}
if (result != TSI_OK) tsi_peer_property_destruct(property);
sk_GENERAL_NAME_pop_free(subject_alt_names, GENERAL_NAME_free);
return TSI_OK;
return result;
}
/* Gets information about the peer's X509 cert as a tsi_peer object. */
static tsi_result peer_from_x509(X509* cert, int include_certificate_type,
tsi_peer* peer) {
/* TODO(jboeuf): Maybe add more properties. */
size_t property_count = include_certificate_type ? 3 : 2;
GENERAL_NAMES* subject_alt_names =
X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
int subject_alt_name_count =
(subject_alt_names != NULL) ? sk_GENERAL_NAME_num(subject_alt_names) : 0;
size_t property_count = (include_certificate_type ? 1 : 0) +
1 /* common name */ + subject_alt_name_count;
tsi_result result = tsi_construct_peer(property_count, peer);
if (result != TSI_OK) return result;
do {
result = peer_property_from_x509_common_name(cert, &peer->properties[0]);
if (result != TSI_OK) break;
result =
peer_property_from_x509_subject_alt_names(cert, &peer->properties[1]);
if (result != TSI_OK) break;
if (include_certificate_type) {
result = tsi_construct_string_peer_property_from_cstring(
TSI_CERTIFICATE_TYPE_PEER_PROPERTY, TSI_X509_CERTIFICATE_TYPE,
&peer->properties[2]);
&peer->properties[0]);
if (result != TSI_OK) break;
}
result = peer_property_from_x509_common_name(
cert, &peer->properties[include_certificate_type ? 1 : 0]);
if (result != TSI_OK) break;
if (subject_alt_name_count != 0) {
result = add_subject_alt_names_properties_to_peer(peer, subject_alt_names,
subject_alt_name_count);
if (result != TSI_OK) break;
}
} while (0);
if (subject_alt_names != NULL) {
sk_GENERAL_NAME_pop_free(subject_alt_names, GENERAL_NAME_free);
}
if (result != TSI_OK) tsi_peer_destruct(peer);
return result;
}
@ -1344,43 +1340,32 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name) {
size_t i = 0;
size_t san_count = 0;
const tsi_peer_property* property = NULL;
const tsi_peer_property* cn_property = NULL;
/* For now reject what looks like an IP address. */
if (looks_like_ip_address(name)) return 0;
/* Check the SAN first. */
property = tsi_peer_get_property_by_name(
peer, TSI_X509_SUBJECT_ALTERNATIVE_NAMES_PEER_PROPERTY);
if (property == NULL || property->type != TSI_PEER_PROPERTY_TYPE_LIST) {
gpr_log(GPR_ERROR, "Invalid x509 subject alternative names property.");
return 0;
}
san_count = property->value.list.child_count;
for (i = 0; i < san_count; i++) {
const tsi_peer_property* alt_name_property =
&property->value.list.children[i];
if (alt_name_property->type != TSI_PEER_PROPERTY_TYPE_STRING) {
gpr_log(GPR_ERROR, "Invalid x509 subject alternative name property.");
return 0;
}
if (does_entry_match_name(alt_name_property->value.string.data,
alt_name_property->value.string.length, name)) {
return 1;
for (i = 0; i < peer->property_count; i++) {
const tsi_peer_property* property = &peer->properties[i];
if (property->name == NULL) continue;
if (strcmp(property->name,
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
san_count++;
if (does_entry_match_name(property->value.data, property->value.length,
name)) {
return 1;
}
} else if (strcmp(property->name,
TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) {
cn_property = property;
}
}
/* If there's no SAN, try the CN. */
if (san_count == 0) {
property = tsi_peer_get_property_by_name(
peer, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY);
if (property == NULL || property->type != TSI_PEER_PROPERTY_TYPE_STRING) {
gpr_log(GPR_ERROR, "Invalid x509 subject common name property.");
return 0;
}
if (does_entry_match_name(property->value.string.data,
property->value.string.length, name)) {
if (san_count == 0 && cn_property != NULL) {
if (does_entry_match_name(cn_property->value.data,
cn_property->value.length, name)) {
return 1;
}
}

@ -45,13 +45,9 @@ extern "C" {
/* This property is of type TSI_PEER_PROPERTY_STRING. */
#define TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY "x509_subject_common_name"
#define TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY \
"x509_subject_alternative_name"
/* This property is of type TSI_PEER_PROPERTY_LIST and the children contain
unnamed (name == NULL) properties of type TSI_PEER_PROPERTY_STRING. */
#define TSI_X509_SUBJECT_ALTERNATIVE_NAMES_PEER_PROPERTY \
"x509_subject_alternative_names"
/* This property is of type TSI_PEER_PROPERTY_STRING. */
#define TSI_SSL_ALPN_SELECTED_PROTOCOL "ssl_alpn_selected_protocol"
/* --- tsi_ssl_handshaker_factory object ---

@ -198,23 +198,6 @@ void tsi_handshaker_destroy(tsi_handshaker* self) {
/* --- tsi_peer implementation. --- */
const tsi_peer_property* tsi_peer_get_property_by_name(const tsi_peer* self,
const char* name) {
size_t i;
if (self == NULL) return NULL;
for (i = 0; i < self->property_count; i++) {
const tsi_peer_property* property = &self->properties[i];
if (name == NULL && property->name == NULL) {
return property;
}
if (name != NULL && property->name != NULL &&
strcmp(property->name, name) == 0) {
return property;
}
}
return NULL;
}
tsi_peer_property tsi_init_peer_property(void) {
tsi_peer_property property;
memset(&property, 0, sizeof(tsi_peer_property));
@ -234,18 +217,8 @@ void tsi_peer_property_destruct(tsi_peer_property* property) {
if (property->name != NULL) {
free(property->name);
}
switch (property->type) {
case TSI_PEER_PROPERTY_TYPE_STRING:
if (property->value.string.data != NULL) {
free(property->value.string.data);
}
break;
case TSI_PEER_PROPERTY_TYPE_LIST:
tsi_peer_destroy_list_property(property->value.list.children,
property->value.list.child_count);
default:
/* Nothing to free. */
break;
if (property->value.data != NULL) {
free(property->value.data);
}
*property = tsi_init_peer_property(); /* Reset everything to 0. */
}
@ -259,57 +232,20 @@ void tsi_peer_destruct(tsi_peer* self) {
self->property_count = 0;
}
tsi_result tsi_construct_signed_integer_peer_property(
const char* name, int64_t value, tsi_peer_property* property) {
*property = tsi_init_peer_property();
property->type = TSI_PEER_PROPERTY_TYPE_SIGNED_INTEGER;
if (name != NULL) {
property->name = tsi_strdup(name);
if (property->name == NULL) return TSI_OUT_OF_RESOURCES;
}
property->value.signed_int = value;
return TSI_OK;
}
tsi_result tsi_construct_unsigned_integer_peer_property(
const char* name, uint64_t value, tsi_peer_property* property) {
*property = tsi_init_peer_property();
property->type = TSI_PEER_PROPERTY_TYPE_UNSIGNED_INTEGER;
if (name != NULL) {
property->name = tsi_strdup(name);
if (property->name == NULL) return TSI_OUT_OF_RESOURCES;
}
property->value.unsigned_int = value;
return TSI_OK;
}
tsi_result tsi_construct_real_peer_property(const char* name, double value,
tsi_peer_property* property) {
*property = tsi_init_peer_property();
property->type = TSI_PEER_PROPERTY_TYPE_REAL;
if (name != NULL) {
property->name = tsi_strdup(name);
if (property->name == NULL) return TSI_OUT_OF_RESOURCES;
}
property->value.real = value;
return TSI_OK;
}
tsi_result tsi_construct_allocated_string_peer_property(
const char* name, size_t value_length, tsi_peer_property* property) {
*property = tsi_init_peer_property();
property->type = TSI_PEER_PROPERTY_TYPE_STRING;
if (name != NULL) {
property->name = tsi_strdup(name);
if (property->name == NULL) return TSI_OUT_OF_RESOURCES;
}
if (value_length > 0) {
property->value.string.data = calloc(1, value_length);
if (property->value.string.data == NULL) {
property->value.data = calloc(1, value_length);
if (property->value.data == NULL) {
tsi_peer_property_destruct(property);
return TSI_OUT_OF_RESOURCES;
}
property->value.string.length = value_length;
property->value.length = value_length;
}
return TSI_OK;
}
@ -328,28 +264,7 @@ tsi_result tsi_construct_string_peer_property(const char* name,
name, value_length, property);
if (result != TSI_OK) return result;
if (value_length > 0) {
memcpy(property->value.string.data, value, value_length);
}
return TSI_OK;
}
tsi_result tsi_construct_list_peer_property(const char* name,
size_t child_count,
tsi_peer_property* property) {
*property = tsi_init_peer_property();
property->type = TSI_PEER_PROPERTY_TYPE_LIST;
if (name != NULL) {
property->name = tsi_strdup(name);
if (property->name == NULL) return TSI_OUT_OF_RESOURCES;
}
if (child_count > 0) {
property->value.list.children =
calloc(child_count, sizeof(tsi_peer_property));
if (property->value.list.children == NULL) {
tsi_peer_property_destruct(property);
return TSI_OUT_OF_RESOURCES;
}
property->value.list.child_count = child_count;
memcpy(property->value.data, value, value_length);
}
return TSI_OK;
}

@ -92,12 +92,6 @@ struct tsi_handshaker {
tsi_result tsi_construct_peer(size_t property_count, tsi_peer* peer);
tsi_peer_property tsi_init_peer_property(void);
void tsi_peer_property_destruct(tsi_peer_property* property);
tsi_result tsi_construct_signed_integer_peer_property(
const char* name, int64_t value, tsi_peer_property* property);
tsi_result tsi_construct_unsigned_integer_peer_property(
const char* name, uint64_t value, tsi_peer_property* property);
tsi_result tsi_construct_real_peer_property(const char* name, double value,
tsi_peer_property* property);
tsi_result tsi_construct_string_peer_property(const char* name,
const char* value,
size_t value_length,
@ -106,9 +100,6 @@ tsi_result tsi_construct_allocated_string_peer_property(
const char* name, size_t value_length, tsi_peer_property* property);
tsi_result tsi_construct_string_peer_property_from_cstring(
const char* name, const char* value, tsi_peer_property* property);
tsi_result tsi_construct_list_peer_property(const char* name,
size_t child_count,
tsi_peer_property* property);
/* Utils. */
char* tsi_strdup(const char* src); /* Sadly, no strdup in C89. */

@ -179,33 +179,13 @@ void tsi_frame_protector_destroy(tsi_frame_protector* self);
/* This property is of type TSI_PEER_PROPERTY_STRING. */
#define TSI_CERTIFICATE_TYPE_PEER_PROPERTY "certificate_type"
/* Properties of type TSI_PEER_PROPERTY_TYPE_STRING may contain NULL characters
just like C++ strings. The length field gives the length of the string. */
typedef enum {
TSI_PEER_PROPERTY_TYPE_SIGNED_INTEGER,
TSI_PEER_PROPERTY_TYPE_UNSIGNED_INTEGER,
TSI_PEER_PROPERTY_TYPE_REAL,
TSI_PEER_PROPERTY_TYPE_STRING,
TSI_PEER_PROPERTY_TYPE_LIST
} tsi_peer_property_type;
/* The relevant field in the union value is dictated by the type field.
name may be NULL in case of an unnamed property. */
/* Property values may contain NULL characters just like C++ strings.
The length field gives the length of the string. */
typedef struct tsi_peer_property {
char* name;
tsi_peer_property_type type;
union {
int64_t signed_int;
uint64_t unsigned_int;
double real;
struct {
char* data;
size_t length;
} string;
struct {
struct tsi_peer_property* children;
size_t child_count;
} list;
struct {
char* data;
size_t length;
} value;
} tsi_peer_property;
@ -214,13 +194,6 @@ typedef struct {
size_t property_count;
} tsi_peer;
/* Gets the first property with the specified name. Iteration over the
properties of the peer should be used if the client of the API is expecting
several properties with the same name.
Returns NULL if there is no corresponding property. */
const tsi_peer_property* tsi_peer_get_property_by_name(const tsi_peer* self,
const char* name);
/* Destructs the tsi_peer object. */
void tsi_peer_destruct(tsi_peer* self);

@ -49,15 +49,11 @@ ClientContext::~ClientContext() {
grpc_call_destroy(call_);
}
if (cq_) {
grpc_completion_queue_shutdown(cq_);
// Drain cq_.
grpc_event* ev;
grpc_completion_type t;
do {
ev = grpc_completion_queue_next(cq_, gpr_inf_future);
t = ev->type;
grpc_event_finish(ev);
} while (t != GRPC_QUEUE_SHUTDOWN);
grpc_completion_queue_shutdown(cq_);
while (grpc_completion_queue_next(cq_, gpr_inf_future).type !=
GRPC_QUEUE_SHUTDOWN)
;
grpc_completion_queue_destroy(cq_);
}
}

@ -48,53 +48,41 @@ CompletionQueue::~CompletionQueue() { grpc_completion_queue_destroy(cq_); }
void CompletionQueue::Shutdown() { grpc_completion_queue_shutdown(cq_); }
// Helper class so we can declare a unique_ptr with grpc_event
class EventDeleter {
public:
void operator()(grpc_event* ev) {
if (ev) grpc_event_finish(ev);
}
};
CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
void** tag, bool* ok, gpr_timespec deadline) {
std::unique_ptr<grpc_event, EventDeleter> ev;
for (;;) {
ev.reset(grpc_completion_queue_next(cq_, deadline));
if (!ev) { /* got a NULL back because deadline passed */
return TIMEOUT;
}
if (ev->type == GRPC_QUEUE_SHUTDOWN) {
return SHUTDOWN;
}
auto cq_tag = static_cast<CompletionQueueTag*>(ev->tag);
*ok = ev->data.op_complete == GRPC_OP_OK;
*tag = cq_tag;
if (cq_tag->FinalizeResult(tag, ok)) {
return GOT_EVENT;
auto ev = grpc_completion_queue_next(cq_, deadline);
switch (ev.type) {
case GRPC_QUEUE_TIMEOUT:
return TIMEOUT;
case GRPC_QUEUE_SHUTDOWN:
return SHUTDOWN;
case GRPC_OP_COMPLETE:
auto cq_tag = static_cast<CompletionQueueTag*>(ev.tag);
*ok = ev.success != 0;
*tag = cq_tag;
if (cq_tag->FinalizeResult(tag, ok)) {
return GOT_EVENT;
}
break;
}
}
}
bool CompletionQueue::Pluck(CompletionQueueTag* tag) {
std::unique_ptr<grpc_event, EventDeleter> ev;
ev.reset(grpc_completion_queue_pluck(cq_, tag, gpr_inf_future));
bool ok = ev->data.op_complete == GRPC_OP_OK;
auto ev = grpc_completion_queue_pluck(cq_, tag, gpr_inf_future);
bool ok = ev.success != 0;
void* ignored = tag;
GPR_ASSERT(tag->FinalizeResult(&ignored, &ok));
GPR_ASSERT(ignored == tag);
// Ignore mutations by FinalizeResult: Pluck returns the C API status
return ev->data.op_complete == GRPC_OP_OK;
return ev.success != 0;
}
void CompletionQueue::TryPluck(CompletionQueueTag* tag) {
std::unique_ptr<grpc_event, EventDeleter> ev;
ev.reset(grpc_completion_queue_pluck(cq_, tag, gpr_time_0));
if (!ev) return;
bool ok = ev->data.op_complete == GRPC_OP_OK;
auto ev = grpc_completion_queue_pluck(cq_, tag, gpr_time_0);
if (ev.type == GRPC_QUEUE_TIMEOUT) return;
bool ok = ev.success != 0;
void* ignored = tag;
// the tag must be swallowed if using TryPluck
GPR_ASSERT(!tag->FinalizeResult(&ignored, &ok));

@ -39,12 +39,10 @@ namespace grpc {
void AsyncGenericService::RequestCall(
GenericServerContext* ctx, GenericServerAsyncReaderWriter* reader_writer,
CompletionQueue* cq, void* tag) {
server_->RequestAsyncGenericCall(ctx, reader_writer, cq, tag);
}
CompletionQueue* AsyncGenericService::completion_queue() {
return &server_->cq_;
CompletionQueue* call_cq, ServerCompletionQueue* notification_cq,
void* tag) {
server_->RequestAsyncGenericCall(ctx, reader_writer, call_cq, notification_cq,
tag);
}
} // namespace grpc

@ -78,7 +78,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
return mrd;
}
void Request(grpc_server* server) {
void Request(grpc_server* server, grpc_completion_queue* notify_cq) {
GPR_ASSERT(!in_flight_);
in_flight_ = true;
cq_ = grpc_completion_queue_create();
@ -86,7 +86,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
grpc_server_request_registered_call(
server, tag_, &call_, &deadline_, &request_metadata_,
has_request_payload_ ? &request_payload_ : nullptr, cq_,
this));
notify_cq, this));
}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
@ -179,16 +179,16 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
grpc_completion_queue* cq_;
};
grpc_server* CreateServer(grpc_completion_queue* cq, int max_message_size) {
static grpc_server* CreateServer(int max_message_size) {
if (max_message_size > 0) {
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
arg.key = const_cast<char*>(GRPC_ARG_MAX_MESSAGE_LENGTH);
arg.value.integer = max_message_size;
grpc_channel_args args = {1, &arg};
return grpc_server_create(cq, &args);
return grpc_server_create(&args);
} else {
return grpc_server_create(cq, nullptr);
return grpc_server_create(nullptr);
}
}
@ -199,9 +199,11 @@ Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned,
shutdown_(false),
num_running_cb_(0),
sync_methods_(new std::list<SyncRequest>),
server_(CreateServer(cq_.cq(), max_message_size)),
server_(CreateServer(max_message_size)),
thread_pool_(thread_pool),
thread_pool_owned_(thread_pool_owned) {}
thread_pool_owned_(thread_pool_owned) {
grpc_server_register_completion_queue(server_, cq_.cq());
}
Server::~Server() {
{
@ -221,8 +223,7 @@ Server::~Server() {
bool Server::RegisterService(RpcService* service) {
for (int i = 0; i < service->GetMethodCount(); ++i) {
RpcServiceMethod* method = service->GetMethod(i);
void* tag =
grpc_server_register_method(server_, method->name(), nullptr, cq_.cq());
void* tag = grpc_server_register_method(server_, method->name(), nullptr);
if (!tag) {
gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
method->name());
@ -240,9 +241,8 @@ bool Server::RegisterAsyncService(AsynchronousService* service) {
service->dispatch_impl_ = this;
service->request_args_ = new void*[service->method_count_];
for (size_t i = 0; i < service->method_count_; ++i) {
void* tag =
grpc_server_register_method(server_, service->method_names_[i], nullptr,
service->completion_queue()->cq());
void* tag = grpc_server_register_method(server_, service->method_names_[i],
nullptr);
if (!tag) {
gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
service->method_names_[i]);
@ -273,7 +273,7 @@ bool Server::Start() {
// Start processing rpcs.
if (!sync_methods_->empty()) {
for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) {
m->Request(server_);
m->Request(server_, cq_.cq());
}
ScheduleCallback();
@ -316,12 +316,12 @@ class Server::AsyncRequest GRPC_FINAL : public CompletionQueueTag {
public:
AsyncRequest(Server* server, void* registered_method, ServerContext* ctx,
grpc::protobuf::Message* request,
ServerAsyncStreamingInterface* stream, CompletionQueue* cq,
void* tag)
ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag)
: tag_(tag),
request_(request),
stream_(stream),
cq_(cq),
call_cq_(call_cq),
ctx_(ctx),
generic_ctx_(nullptr),
server_(server),
@ -329,18 +329,21 @@ class Server::AsyncRequest GRPC_FINAL : public CompletionQueueTag {
payload_(nullptr) {
memset(&array_, 0, sizeof(array_));
grpc_call_details_init(&call_details_);
GPR_ASSERT(notification_cq);
GPR_ASSERT(call_cq);
grpc_server_request_registered_call(
server->server_, registered_method, &call_, &call_details_.deadline,
&array_, request ? &payload_ : nullptr, cq->cq(), this);
&array_, request ? &payload_ : nullptr, call_cq->cq(),
notification_cq->cq(), this);
}
AsyncRequest(Server* server, GenericServerContext* ctx,
ServerAsyncStreamingInterface* stream, CompletionQueue* cq,
void* tag)
ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag)
: tag_(tag),
request_(nullptr),
stream_(stream),
cq_(cq),
call_cq_(call_cq),
ctx_(nullptr),
generic_ctx_(ctx),
server_(server),
@ -348,8 +351,10 @@ class Server::AsyncRequest GRPC_FINAL : public CompletionQueueTag {
payload_(nullptr) {
memset(&array_, 0, sizeof(array_));
grpc_call_details_init(&call_details_);
GPR_ASSERT(notification_cq);
GPR_ASSERT(call_cq);
grpc_server_request_call(server->server_, &call_, &call_details_, &array_,
cq->cq(), this);
call_cq->cq(), notification_cq->cq(), this);
}
~AsyncRequest() {
@ -392,8 +397,8 @@ class Server::AsyncRequest GRPC_FINAL : public CompletionQueueTag {
}
}
ctx->call_ = call_;
ctx->cq_ = cq_;
Call call(call_, server_, cq_, server_->max_message_size_);
ctx->cq_ = call_cq_;
Call call(call_, server_, call_cq_, server_->max_message_size_);
if (orig_status && call_) {
ctx->BeginCompletionOp(&call);
}
@ -407,7 +412,7 @@ class Server::AsyncRequest GRPC_FINAL : public CompletionQueueTag {
void* const tag_;
grpc::protobuf::Message* const request_;
ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const cq_;
CompletionQueue* const call_cq_;
ServerContext* const ctx_;
GenericServerContext* const generic_ctx_;
Server* const server_;
@ -420,14 +425,19 @@ class Server::AsyncRequest GRPC_FINAL : public CompletionQueueTag {
void Server::RequestAsyncCall(void* registered_method, ServerContext* context,
grpc::protobuf::Message* request,
ServerAsyncStreamingInterface* stream,
CompletionQueue* cq, void* tag) {
new AsyncRequest(this, registered_method, context, request, stream, cq, tag);
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
new AsyncRequest(this, registered_method, context, request, stream, call_cq,
notification_cq, tag);
}
void Server::RequestAsyncGenericCall(GenericServerContext* context,
ServerAsyncStreamingInterface* stream,
CompletionQueue* cq, void* tag) {
new AsyncRequest(this, context, stream, cq, tag);
CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq,
void* tag) {
new AsyncRequest(this, context, stream, call_cq, notification_cq, tag);
}
void Server::ScheduleCallback() {
@ -449,7 +459,7 @@ void Server::RunRpc() {
{
grpc::unique_lock<grpc::mutex> lock(mu_);
if (!shutdown_) {
mrd->Request(server_);
mrd->Request(server_, cq_.cq());
}
}
cd.Run();

@ -44,6 +44,12 @@ namespace grpc {
ServerBuilder::ServerBuilder()
: max_message_size_(-1), generic_service_(nullptr), thread_pool_(nullptr) {}
std::unique_ptr<ServerCompletionQueue> ServerBuilder::AddCompletionQueue() {
ServerCompletionQueue* cq = new ServerCompletionQueue();
cqs_.push_back(cq);
return std::unique_ptr<ServerCompletionQueue>(cq);
}
void ServerBuilder::RegisterService(SynchronousService* service) {
services_.push_back(service->service());
}
@ -88,6 +94,9 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
}
std::unique_ptr<Server> server(
new Server(thread_pool_, thread_pool_owned, max_message_size_));
for (auto cq = cqs_.begin(); cq != cqs_.end(); ++cq) {
grpc_server_register_completion_queue(server->server_, (*cq)->cq());
}
for (auto service = services_.begin(); service != services_.end();
service++) {
if (!server->RegisterService(*service)) {

@ -5,3 +5,4 @@ test-results
packages
Grpc.v12.suo
TestResult.xml
*.nupkg

@ -1 +0,0 @@
gRPC C# is work-in-progress and is not intended to be used. See README.

@ -0,0 +1,26 @@
<?xml version="1.0" encoding="utf-8"?>
<package>
<metadata>
<id>Grpc.Auth</id>
<title>gRPC C# Auth</title>
<summary>Auth library for C# implementation of gRPC - an RPC library and framework</summary>
<description>Auth library for C# implementation of gRPC - an RPC library and framework. See project site for more info.</description>
<version>0.5.0</version>
<authors>Google Inc.</authors>
<owners>jtattermusch</owners>
<licenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</licenseUrl>
<projectUrl>https://github.com/grpc/grpc</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<releaseNotes>Release 0.5.0 of gRPC C#</releaseNotes>
<copyright>Copyright 2015, Google Inc.</copyright>
<tags>gRPC RPC Protocol HTTP/2 Auth OAuth2</tags>
<dependencies>
<dependency id="BouncyCastle" version="1.7.0" />
<dependency id="Google.Apis.Auth" version="1.9.1" />
<dependency id="Grpc.Core" version="0.5.0" />
</dependencies>
</metadata>
<files>
<file src="bin/Release/Grpc.Auth.dll" target="lib/net45" />
</files>
</package>

@ -9,6 +9,6 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: AssemblyVersion("0.5.*")]
[assembly: InternalsVisibleTo("Grpc.Auth.Tests")]
[assembly: InternalsVisibleTo("Grpc.Auth.Tests")]

@ -134,7 +134,7 @@ namespace Grpc.Core.Tests
});
}
private void Handler(GRPCOpError op, IntPtr ptr)
private void Handler(bool success, IntPtr ptr)
{
counter++;
}

@ -9,4 +9,4 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: AssemblyVersion("0.5.*")]

@ -1,3 +1,2 @@
bin
obj
*.nupkg

@ -52,7 +52,7 @@ namespace Grpc.Core
public Call(string serviceName, Method<TRequest, TResponse> method, Channel channel, Metadata headers)
{
this.name = Preconditions.CheckNotNull(serviceName) + "/" + method.Name;
this.name = method.GetFullName(serviceName);
this.requestMarshaller = method.RequestMarshaller;
this.responseMarshaller = method.ResponseMarshaller;
this.channel = Preconditions.CheckNotNull(channel);

@ -2,23 +2,21 @@
<package>
<metadata>
<id>Grpc.Core</id>
<title>gRPC Core</title>
<title>gRPC C# Core</title>
<summary>Core C# implementation of gRPC - an RPC library and framework</summary>
<description>Core C# implementation of gRPC - an RPC library and framework. See project site for more info.
This is an experimental release, not ready to use.
</description>
<version>0.2.1</version>
<description>Core C# implementation of gRPC - an RPC library and framework. See project site for more info.</description>
<version>0.5.0</version>
<authors>Google Inc.</authors>
<owners>jtattermusch</owners>
<licenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</licenseUrl>
<projectUrl>https://github.com/grpc/grpc</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<releaseNotes>The first experimental release. Not ready to use.</releaseNotes>
<releaseNotes>Release 0.5.0 of gRPC C#</releaseNotes>
<copyright>Copyright 2015, Google Inc.</copyright>
<tags>gRPC RPC Protocol HTTP/2</tags>
<dependencies>
<dependency id="Microsoft.Bcl.Immutable" version="1.0.34" />
<dependency id="grpc.native.csharp_ext" version="0.6.0.0" />
<dependency id="grpc.native.csharp_ext" version="0.8.0.0" />
</dependencies>
</metadata>
<files>

@ -274,7 +274,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handler for unary response completion.
/// </summary>
private void HandleUnaryResponse(bool wasError, BatchContextSafeHandleNotOwned ctx)
private void HandleUnaryResponse(bool success, BatchContextSafeHandleNotOwned ctx)
{
lock (myLock)
{
@ -284,7 +284,7 @@ namespace Grpc.Core.Internal
ReleaseResourcesIfPossible();
}
if (wasError)
if (!success)
{
unaryResponseTcs.SetException(new RpcException(new Status(StatusCode.Internal, "Internal error occured.")));
return;
@ -307,7 +307,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handles receive status completion for calls with streaming response.
/// </summary>
private void HandleFinished(bool wasError, BatchContextSafeHandleNotOwned ctx)
private void HandleFinished(bool success, BatchContextSafeHandleNotOwned ctx)
{
var status = ctx.GetReceivedStatus();

@ -287,13 +287,12 @@ namespace Grpc.Core.Internal
/// </summary>
protected CompletionCallbackDelegate CreateBatchCompletionCallback(Action<bool, BatchContextSafeHandleNotOwned> handler)
{
return new CompletionCallbackDelegate((error, batchContextPtr) =>
return new CompletionCallbackDelegate((success, batchContextPtr) =>
{
try
{
var ctx = new BatchContextSafeHandleNotOwned(batchContextPtr);
bool wasError = (error != GRPCOpError.GRPC_OP_OK);
handler(wasError, ctx);
handler(success, ctx);
}
catch (Exception e)
{
@ -305,7 +304,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handles send completion.
/// </summary>
private void HandleSendFinished(bool wasError, BatchContextSafeHandleNotOwned ctx)
private void HandleSendFinished(bool success, BatchContextSafeHandleNotOwned ctx)
{
AsyncCompletionDelegate<object> origCompletionDelegate = null;
lock (myLock)
@ -316,7 +315,7 @@ namespace Grpc.Core.Internal
ReleaseResourcesIfPossible();
}
if (wasError)
if (!success)
{
FireCompletion(origCompletionDelegate, null, new OperationFailedException("Send failed"));
}
@ -329,7 +328,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handles halfclose completion.
/// </summary>
private void HandleHalfclosed(bool wasError, BatchContextSafeHandleNotOwned ctx)
private void HandleHalfclosed(bool success, BatchContextSafeHandleNotOwned ctx)
{
AsyncCompletionDelegate<object> origCompletionDelegate = null;
lock (myLock)
@ -341,7 +340,7 @@ namespace Grpc.Core.Internal
ReleaseResourcesIfPossible();
}
if (wasError)
if (!success)
{
FireCompletion(origCompletionDelegate, null, new OperationFailedException("Halfclose failed"));
}
@ -354,7 +353,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handles streaming read completion.
/// </summary>
private void HandleReadFinished(bool wasError, BatchContextSafeHandleNotOwned ctx)
private void HandleReadFinished(bool success, BatchContextSafeHandleNotOwned ctx)
{
var payload = ctx.GetReceivedMessage();

@ -121,7 +121,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handles the server side close completion.
/// </summary>
private void HandleFinishedServerside(bool wasError, BatchContextSafeHandleNotOwned ctx)
private void HandleFinishedServerside(bool success, BatchContextSafeHandleNotOwned ctx)
{
bool cancelled = ctx.GetReceivedCloseOnServerCancelled();

@ -37,7 +37,7 @@ using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
internal delegate void CompletionCallbackDelegate(GRPCOpError error, IntPtr batchContextPtr);
internal delegate void CompletionCallbackDelegate(bool success, IntPtr batchContextPtr);
/// <summary>
/// grpc_call from <grpc/grpc.h>

@ -70,45 +70,12 @@ namespace Grpc.Core.Internal
internal enum GRPCCompletionType
{
/* Shutting down */
GRPC_QUEUE_SHUTDOWN,
GRPC_QUEUE_SHUTDOWN,
/* operation completion */
GRPC_OP_COMPLETE,
/* A read has completed */
GRPC_READ,
/* A write has been accepted by flow control */
GRPC_WRITE_ACCEPTED,
/* writes_done or write_status has been accepted */
GRPC_FINISH_ACCEPTED,
/* The metadata array sent by server received at client */
GRPC_CLIENT_METADATA_READ,
/* An RPC has finished. The event contains status.
* On the server this will be OK or Cancelled. */
GRPC_FINISHED,
/* A new RPC has arrived at the server */
GRPC_SERVER_RPC_NEW,
/* The server has finished shutting down */
GRPC_SERVER_SHUTDOWN,
/* No event before timeout */
GRPC_QUEUE_TIMEOUT,
/* must be last, forces users to include a default: case */
GRPC_COMPLETION_DO_NOT_USE
}
/// <summary>
/// grpc_op_error from grpc/grpc.h
/// </summary>
internal enum GRPCOpError
{
/* everything went ok */
GRPC_OP_OK = 0,
/* something failed, we don't know what */
GRPC_OP_ERROR
/* operation completion */
GRPC_OP_COMPLETE
}
}

@ -40,7 +40,7 @@ using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
// TODO: we need to make sure that the delegates are not collected before invoked.
internal delegate void ServerShutdownCallbackDelegate(IntPtr eventPtr);
//internal delegate void ServerShutdownCallbackDelegate(bool success);
/// <summary>
/// grpc_server from grpc/grpc.h
@ -65,9 +65,8 @@ namespace Grpc.Core.Internal
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_shutdown(ServerSafeHandle server);
// TODO: get rid of the old callback style
[DllImport("grpc_csharp_ext.dll", EntryPoint = "grpcsharp_server_shutdown_and_notify")]
static extern void grpcsharp_server_shutdown_and_notify_CALLBACK(ServerSafeHandle server, [MarshalAs(UnmanagedType.FunctionPtr)] ServerShutdownCallbackDelegate callback);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_shutdown_and_notify_callback(ServerSafeHandle server, [MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_destroy(IntPtr server);
@ -101,9 +100,9 @@ namespace Grpc.Core.Internal
grpcsharp_server_shutdown(this);
}
public void ShutdownAndNotify(ServerShutdownCallbackDelegate callback)
public void ShutdownAndNotify(CompletionCallbackDelegate callback)
{
grpcsharp_server_shutdown_and_notify_CALLBACK(this, callback);
grpcsharp_server_shutdown_and_notify_callback(this, callback);
}
public void RequestCall(CompletionQueueSafeHandle cq, CompletionCallbackDelegate callback)

@ -32,6 +32,7 @@
#endregion
using System;
using Grpc.Core.Utils;
namespace Grpc.Core
{
@ -95,5 +96,13 @@ namespace Grpc.Core
return this.responseMarshaller;
}
}
/// <summary>
/// Gets full name of the method including the service name.
/// </summary>
internal string GetFullName(string serviceName)
{
return "/" + Preconditions.CheckNotNull(serviceName) + "/" + this.Name;
}
}
}

@ -9,6 +9,6 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: AssemblyVersion("0.5.*")]
[assembly: InternalsVisibleTo("Grpc.Core.Tests")]

@ -54,7 +54,7 @@ namespace Grpc.Core
// TODO(jtattermusch) : make sure the delegate doesn't get garbage collected while
// native callbacks are in the completion queue.
readonly ServerShutdownCallbackDelegate serverShutdownHandler;
readonly CompletionCallbackDelegate serverShutdownHandler;
readonly CompletionCallbackDelegate newServerRpcHandler;
readonly ServerSafeHandle handle;
@ -222,16 +222,13 @@ namespace Grpc.Core
/// <summary>
/// Handles the native callback.
/// </summary>
private void HandleNewServerRpc(GRPCOpError error, IntPtr batchContextPtr)
private void HandleNewServerRpc(bool success, IntPtr batchContextPtr)
{
try
{
var ctx = new BatchContextSafeHandleNotOwned(batchContextPtr);
if (error != GRPCOpError.GRPC_OP_OK)
{
// TODO: handle error
}
// TODO: handle error
CallSafeHandle call = ctx.GetServerRpcNewCall();
string method = ctx.GetServerRpcNewMethod();
@ -253,8 +250,7 @@ namespace Grpc.Core
/// <summary>
/// Handles native callback.
/// </summary>
/// <param name="eventPtr"></param>
private void HandleServerShutdown(IntPtr eventPtr)
private void HandleServerShutdown(bool success, IntPtr batchContextPtr)
{
try
{

@ -79,7 +79,7 @@ namespace Grpc.Core
where TRequest : class
where TResponse : class
{
callHandlers.Add(GetFullMethodName(serviceName, method.Name), ServerCalls.UnaryCall(method, handler));
callHandlers.Add(method.GetFullName(serviceName), ServerCalls.UnaryCall(method, handler));
return this;
}
@ -89,7 +89,7 @@ namespace Grpc.Core
where TRequest : class
where TResponse : class
{
callHandlers.Add(GetFullMethodName(serviceName, method.Name), ServerCalls.ClientStreamingCall(method, handler));
callHandlers.Add(method.GetFullName(serviceName), ServerCalls.ClientStreamingCall(method, handler));
return this;
}
@ -99,7 +99,7 @@ namespace Grpc.Core
where TRequest : class
where TResponse : class
{
callHandlers.Add(GetFullMethodName(serviceName, method.Name), ServerCalls.ServerStreamingCall(method, handler));
callHandlers.Add(method.GetFullName(serviceName), ServerCalls.ServerStreamingCall(method, handler));
return this;
}
@ -109,7 +109,7 @@ namespace Grpc.Core
where TRequest : class
where TResponse : class
{
callHandlers.Add(GetFullMethodName(serviceName, method.Name), ServerCalls.DuplexStreamingCall(method, handler));
callHandlers.Add(method.GetFullName(serviceName), ServerCalls.DuplexStreamingCall(method, handler));
return this;
}
@ -117,11 +117,6 @@ namespace Grpc.Core
{
return new ServerServiceDefinition(callHandlers.ToImmutableDictionary());
}
private string GetFullMethodName(string serviceName, string methodName)
{
return serviceName + "/" + methodName;
}
}
}
}

@ -9,4 +9,4 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: AssemblyVersion("0.5.*")]

@ -1,4 +1,4 @@
<?xml version="1.0" encoding="utf-8"?>
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
@ -8,7 +8,7 @@
<ProjectGuid>{BF62FE08-373A-43D6-9D73-41CAA38B7011}</ProjectGuid>
<OutputType>Exe</OutputType>
<RootNamespace>Grpc.Examples.MathServer</RootNamespace>
<AssemblyName>Grpc.Examples.MathServer</AssemblyName>
<AssemblyName>MathServer</AssemblyName>
<TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x86' ">

@ -9,4 +9,4 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.1.*")]
[assembly: AssemblyVersion("0.5.*")]

@ -9,4 +9,4 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: AssemblyVersion("0.5.*")]

@ -9,4 +9,4 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: AssemblyVersion("0.5.*")]

@ -9,4 +9,4 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: AssemblyVersion("0.5.*")]

@ -9,4 +9,4 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: AssemblyVersion("0.5.*")]

@ -9,4 +9,4 @@ using System.Runtime.CompilerServices;
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: AssemblyVersion("0.5.*")]

@ -1,23 +1,22 @@
<?xml version="1.0" encoding="utf-8"?>
<package >
<package>
<metadata>
<id>Grpc</id>
<title>gRPC</title>
<title>gRPC C#</title>
<summary>C# implementation of gRPC - an RPC library and framework</summary>
<description>C# implementation of gRPC - an RPC library and framework. See project site for more info.
This is an experimental release, not ready to use.
</description>
<version>0.2.0</version>
<description>C# implementation of gRPC - an RPC library and framework. See project site for more info.</description>
<version>0.5.0</version>
<authors>Google Inc.</authors>
<owners>jtattermusch</owners>
<licenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</licenseUrl>
<projectUrl>https://github.com/grpc/grpc</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<releaseNotes>The first experimental release. Not ready to use.</releaseNotes>
<releaseNotes>Release 0.5.0 of gRPC C#</releaseNotes>
<copyright>Copyright 2015, Google Inc.</copyright>
<tags>gRPC RPC Protocol HTTP/2</tags>
<dependencies>
<dependency id="Grpc.Core" version="0.2.0" />
<dependency id="Grpc.Core" version="0.5.0" />
</dependencies>
</metadata>
<files/>
</package>

@ -4,74 +4,117 @@ gRPC C#
A C# implementation of gRPC.
Status
-----------------
------
**This gRPC C# implementation is work-in-progress and is not expected to work yet.**
Ready for early adopters.
- The implementation is a wrapper around gRPC C core library
- Code only runs under mono currently, building gRPC C core library under Windows
is in progress.
- It is very possible that some parts of the code will be heavily refactored or
completely rewritten.
Usage: Windows
--------------
- Prerequisites: .NET Framework 4.5+, Visual Studio 2013 with NuGet extension installed (VS2015 should work).
INSTALLATION AND USAGE: WINDOWS
-------------------------------
- Open Visual Studio and start a new project/solution.
- Open Grpc.sln using Visual Studio 2013. NuGet dependencies will be restored
upon build.
- Add NuGet package `Grpc` as a dependency (Project options -> Manage NuGet Packages).
That will also pull all the transitive dependencies (including the native libraries that
gRPC C# is internally using).
- TODO: link to Helloworld example
INSTALLATION AND USAGE: LINUX & MONO
------------------------------------
Usage: Linux (Mono)
--------------
- Compile and install the gRPC C# extension library (that will be used via
P/Invoke from C#).
- Prerequisites: Mono framework, MonoDevelop 5.9 with NuGet add-in installed.
- Install gRPC C Core using instructions in https://github.com/grpc/homebrew-grpc
- TODO: explain using LD_LIBRARY_PATH or installation to /usr/local
- Open MonoDevelop and start a new project/solution.
- Add NuGet package `Grpc` as a dependency (Project -> Add NuGet packages).
- TODO: link to Helloworld example
Building: Windows
-----------------
You only need to go through these steps if you are planning to develop gRPC C#.
If you are a user of gRPC C#, go to Usage section above.
- Prerequisites for development: NET Framework 4.5+, Visual Studio 2013 (with NuGet and NUnit extensions installed).
- The grpc_csharp_ext native library needs to be built so you can build the Grpc C# solution. You can
either build the native solution in `vsprojects/grpc.sln` from Visual Studio manually, or you can use
a convenience batch script that builds everything for you.
```
make grpc_csharp_ext
sudo make install_grpc_csharp_ext
buildall.bat
```
- Prerequisites for development: Mono framework, MonoDevelop (IDE)
- Open Grpc.sln using Visual Studio 2013. NuGet dependencies will be restored
upon build (you need to have NuGet add-in installed).
Building: Linux & Mono
----------------------
You only need to go through these steps if you are planning to develop gRPC C#.
If you are a user of gRPC C#, go to Usage section above.
- Prerequisites for development: Mono framework, MonoDevelop 5.9 with NuGet and Nunit add-ins installed.
```
sudo apt-get install mono-devel
sudo apt-get install monodevelop monodevelop-nunit
sudo apt-get install nunit nunit-console
```
- NuGet is used to manage project's dependencies. Prior opening Grpc.sln,
download dependencies using NuGet restore command:
You can use older versions of MonoDevelop, but then you might need to restore
NuGet dependencies manually (by `nuget restore`), because older versions of MonoDevelop
don't support NuGet add-in.
- Compile and install the gRPC C# extension library (that will be used via
P/Invoke from C#).
```
# Import needed certicates into Mono certificate store:
mozroots --import --sync
# Download NuGet.exe http://nuget.codeplex.com/releases/
# Restore the nuget packages with Grpc C# dependencies
mono ~/Downloads/NuGet.exe restore Grpc.sln
make grpc_csharp_ext
sudo make install_grpc_csharp_ext
```
- Use MonoDevelop to open the solution Grpc.sln (you can also run unit tests
from there).
- Use MonoDevelop to open the solution Grpc.sln
- Build the solution & run all the tests from test view.
Tests
-----
- After building the solution with MonoDevelop, you can use
nunit-console to run the unit tests (currently only running one by
one will make them pass.
gRPC C# is using NUnit as the testing framework.
Under Visual Studio, make sure NUnit test adapter is installed (under "Extensions and Updates").
Then you should be able to run all the tests using Test Explorer.
Under Monodevelop, make sure you installed "NUnit support" in Add-in manager.
Then you should be able to run all the test from the Test View.
After building the solution, you can also run the tests from command line
using nunit-console tool.
```
# from Grpc.Core.Test/bin/Debug directory
nunit-console Grpc.Core.Tests.dll
```
CONTENTS
Contents
--------
- ext:
The extension library that wraps C API to be more digestible by C#.
- Grpc.Auth:
gRPC OAuth2 support.
- Grpc.Core:
The main gRPC C# library.
- Grpc.Examples:
API examples for math.proto
- Grpc.Examples.MathClient:
An example client that sends some requests to math server.
- Grpc.Examples.MathServer:
An example client that sends some requests to math server.
- Grpc.IntegrationTesting:
Client for cross-language gRPC implementation testing (interop testing).
Cross-language gRPC implementation testing (interop testing).

@ -0,0 +1,22 @@
@rem Builds gRPC NuGet packages
@rem Adjust the location of nuget.exe
set NUGET=C:\nuget\nuget.exe
setlocal
cd ..\..\vsprojects\nuget_package
@call buildall.bat || goto :error
endlocal
@call buildall.bat || goto :error
%NUGET% pack ..\..\vsprojects\nuget_package\grpc.native.csharp_ext.nuspec || goto :error
%NUGET% pack Grpc.Core\Grpc.Core.nuspec || goto :error
%NUGET% pack Grpc.Auth\Grpc.Auth.nuspec || goto :error
%NUGET% pack Grpc.nuspec || goto :error
goto :EOF
:error
echo Failed!
exit /b %errorlevel%

@ -0,0 +1,18 @@
@rem Convenience script to build gRPC C# from command line
setlocal
@rem Set VS variables (uses Visual Studio 2013)
@call "%VS120COMNTOOLS%\..\..\vc\vcvarsall.bat" x86
@rem Build the C# native extension
msbuild ..\..\vsprojects\grpc.sln /t:grpc_csharp_ext || goto :error
msbuild Grpc.sln /p:Configuration=Debug || goto :error
msbuild Grpc.sln /p:Configuration=Release || goto :error
endlocal
goto :EOF
:error
echo Failed!
exit /b %errorlevel%

@ -63,8 +63,7 @@ grpc_byte_buffer *string_to_byte_buffer(const char *buffer, size_t len) {
return bb;
}
typedef void(GPR_CALLTYPE *callback_funcptr)(grpc_op_error op_error,
void *batch_context);
typedef void(GPR_CALLTYPE *callback_funcptr)(gpr_int32 success, void *batch_context);
/*
* Helper to maintain lifetime of batch op inputs and store batch op outputs.
@ -308,27 +307,18 @@ grpcsharp_completion_queue_destroy(grpc_completion_queue *cq) {
GPR_EXPORT grpc_completion_type GPR_CALLTYPE
grpcsharp_completion_queue_next_with_callback(grpc_completion_queue *cq) {
grpc_event *ev;
grpc_event ev;
grpcsharp_batch_context *batch_context;
grpc_completion_type t;
void(GPR_CALLTYPE * callback)(grpc_event *);
ev = grpc_completion_queue_next(cq, gpr_inf_future);
t = ev->type;
if (t == GRPC_OP_COMPLETE && ev->tag) {
t = ev.type;
if (t == GRPC_OP_COMPLETE && ev.tag) {
/* NEW API handler */
batch_context = (grpcsharp_batch_context *)ev->tag;
batch_context->callback(ev->data.op_complete, batch_context);
batch_context = (grpcsharp_batch_context *)ev.tag;
batch_context->callback((gpr_int32) ev.success, batch_context);
grpcsharp_batch_context_destroy(batch_context);
} else if (ev->tag) {
/* call the callback in ev->tag */
/* C forbids to cast object pointers to function pointers, so
* we cast to intptr first.
*/
callback = (void(GPR_CALLTYPE *)(grpc_event *))(gpr_intptr)ev->tag;
(*callback)(ev);
}
grpc_event_finish(ev);
/* return completion type to allow some handling for events that have no
* tag - such as GRPC_QUEUE_SHUTDOWN
@ -673,7 +663,9 @@ grpcsharp_call_start_serverside(grpc_call *call, callback_funcptr callback) {
GPR_EXPORT grpc_server *GPR_CALLTYPE
grpcsharp_server_create(grpc_completion_queue *cq,
const grpc_channel_args *args) {
return grpc_server_create(cq, args);
grpc_server *server = grpc_server_create(args);
grpc_server_register_completion_queue(server, cq);
return server;
}
GPR_EXPORT gpr_int32 GPR_CALLTYPE
@ -690,8 +682,11 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_shutdown(grpc_server *server) {
}
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_server_shutdown_and_notify(grpc_server *server, void *tag) {
grpc_server_shutdown_and_notify(server, tag);
grpcsharp_server_shutdown_and_notify_callback(grpc_server *server,
callback_funcptr callback) {
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
grpc_server_shutdown_and_notify(server, ctx);
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_destroy(grpc_server *server) {
@ -706,7 +701,7 @@ grpcsharp_server_request_call(grpc_server *server, grpc_completion_queue *cq,
return grpc_server_request_call(
server, &(ctx->server_rpc_new.call), &(ctx->server_rpc_new.call_details),
&(ctx->server_rpc_new.request_metadata), cq, ctx);
&(ctx->server_rpc_new.request_metadata), cq, cq, ctx);
}
/* Security */
@ -795,7 +790,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_redirect_log(grpcsharp_log_func func) {
/* For testing */
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_test_callback(callback_funcptr callback) {
callback(GRPC_OP_OK, NULL);
callback(1, NULL);
}
/* For testing */

@ -33,25 +33,25 @@ syntax = "proto3";
package math;
message DivArgs {
optional int64 dividend = 1;
optional int64 divisor = 2;
int64 dividend = 1;
int64 divisor = 2;
}
message DivReply {
optional int64 quotient = 1;
optional int64 remainder = 2;
int64 quotient = 1;
int64 remainder = 2;
}
message FibArgs {
optional int64 limit = 1;
int64 limit = 1;
}
message Num {
optional int64 num = 1;
int64 num = 1;
}
message FibReply {
optional int64 count = 1;
int64 count = 1;
}
service Math {

@ -66,18 +66,18 @@ service RouteGuide {
// Latitudes should be in the range +/- 90 degrees and longitude should be in
// the range +/- 180 degrees (inclusive).
message Point {
optional int32 latitude = 1;
optional int32 longitude = 2;
int32 latitude = 1;
int32 longitude = 2;
}
// A latitude-longitude rectangle, represented as two diagonally opposite
// points "lo" and "hi".
message Rectangle {
// One corner of the rectangle.
optional Point lo = 1;
Point lo = 1;
// The other corner of the rectangle.
optional Point hi = 2;
Point hi = 2;
}
// A feature names something at a given point.
@ -85,19 +85,19 @@ message Rectangle {
// If a feature could not be named, the name is empty.
message Feature {
// The name of the feature.
optional string name = 1;
string name = 1;
// The point where the feature is detected.
optional Point location = 2;
Point location = 2;
}
// A RouteNote is a message sent while at a given point.
message RouteNote {
// The location from which the message is sent.
optional Point location = 1;
Point location = 1;
// The message to be sent.
optional string message = 2;
string message = 2;
}
// A RouteSummary is received in response to a RecordRoute rpc.
@ -107,14 +107,14 @@ message RouteNote {
// the distance between each point.
message RouteSummary {
// The number of points received.
optional int32 point_count = 1;
int32 point_count = 1;
// The number of known features passed while traversing the route.
optional int32 feature_count = 2;
int32 feature_count = 2;
// The distance covered in metres.
optional int32 distance = 3;
int32 distance = 3;
// The duration of the traversal in seconds.
optional int32 elapsed_time = 4;
int32 elapsed_time = 4;
}

@ -33,13 +33,13 @@ package examples;
// Protocol type definitions
message StockRequest {
optional string symbol = 1;
optional int32 num_trades_to_watch = 2 [default=0];
string symbol = 1;
int32 num_trades_to_watch = 2;
}
message StockReply {
optional float price = 1;
optional string symbol = 2;
float price = 1;
string symbol = 2;
}

@ -63,7 +63,7 @@ CompletionQueueAsyncWorker::~CompletionQueueAsyncWorker() {}
void CompletionQueueAsyncWorker::Execute() {
result = grpc_completion_queue_next(queue, gpr_inf_future);
if (result->data.op_complete != GRPC_OP_OK) {
if (!result.success) {
SetErrorMessage("The batch encountered an error");
}
}
@ -96,25 +96,21 @@ void CompletionQueueAsyncWorker::HandleOKCallback() {
} else {
current_threads -= 1;
}
NanCallback *callback = GetTagCallback(result->tag);
Handle<Value> argv[] = {NanNull(), GetTagNodeValue(result->tag)};
NanCallback *callback = GetTagCallback(result.tag);
Handle<Value> argv[] = {NanNull(), GetTagNodeValue(result.tag)};
callback->Call(2, argv);
DestroyTag(result->tag);
grpc_event_finish(result);
result = NULL;
DestroyTag(result.tag);
}
void CompletionQueueAsyncWorker::HandleErrorCallback() {
NanScope();
NanCallback *callback = GetTagCallback(result->tag);
NanCallback *callback = GetTagCallback(result.tag);
Handle<Value> argv[] = {NanError(ErrorMessage())};
callback->Call(1, argv);
DestroyTag(result->tag);
grpc_event_finish(result);
result = NULL;
DestroyTag(result.tag);
}
} // namespace node

@ -70,7 +70,7 @@ class CompletionQueueAsyncWorker : public NanAsyncWorker {
void HandleErrorCallback();
private:
grpc_event *result;
grpc_event result;
static grpc_completion_queue *queue;

@ -161,7 +161,7 @@ NAN_METHOD(Server::New) {
grpc_server *wrapped_server;
grpc_completion_queue *queue = CompletionQueueAsyncWorker::GetQueue();
if (args[0]->IsUndefined()) {
wrapped_server = grpc_server_create(queue, NULL);
wrapped_server = grpc_server_create(NULL);
} else if (args[0]->IsObject()) {
Handle<Object> args_hash(args[0]->ToObject());
Handle<Array> keys(args_hash->GetOwnPropertyNames());
@ -190,11 +190,12 @@ NAN_METHOD(Server::New) {
return NanThrowTypeError("Arg values must be strings");
}
}
wrapped_server = grpc_server_create(queue, &channel_args);
wrapped_server = grpc_server_create(&channel_args);
free(channel_args.args);
} else {
return NanThrowTypeError("Server expects an object");
}
grpc_server_register_completion_queue(wrapped_server, queue);
Server *server = new Server(wrapped_server);
server->Wrap(args.This());
NanReturnValue(args.This());
@ -212,6 +213,7 @@ NAN_METHOD(Server::RequestCall) {
grpc_call_error error = grpc_server_request_call(
server->wrapped_server, &op->call, &op->details, &op->request_metadata,
CompletionQueueAsyncWorker::GetQueue(),
CompletionQueueAsyncWorker::GetQueue(),
new struct tag(new NanCallback(args[0].As<Function>()), ops.release(),
shared_ptr<Resources>(nullptr)));
if (error != GRPC_CALL_OK) {

@ -28,7 +28,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto2";
syntax = "proto3";
package grpc.testing;

@ -86,7 +86,7 @@ function emptyUnary(client, done) {
*/
function largeUnary(client, done) {
var arg = {
response_type: testProto.PayloadType.COMPRESSABLE,
response_type: 'COMPRESSABLE',
response_size: 314159,
payload: {
body: zeroBuffer(271828)
@ -94,9 +94,8 @@ function largeUnary(client, done) {
};
var call = client.unaryCall(arg, function(err, resp) {
assert.ifError(err);
assert.strictEqual(resp.payload.type, testProto.PayloadType.COMPRESSABLE);
assert.strictEqual(resp.payload.body.limit - resp.payload.body.offset,
314159);
assert.strictEqual(resp.payload.type, 'COMPRESSABLE');
assert.strictEqual(resp.payload.body.length, 314159);
});
call.on('status', function(status) {
assert.strictEqual(status.code, grpc.status.OK);
@ -138,7 +137,7 @@ function clientStreaming(client, done) {
*/
function serverStreaming(client, done) {
var arg = {
response_type: testProto.PayloadType.COMPRESSABLE,
response_type: 'COMPRESSABLE',
response_parameters: [
{size: 31415},
{size: 9},
@ -150,8 +149,8 @@ function serverStreaming(client, done) {
var resp_index = 0;
call.on('data', function(value) {
assert(resp_index < 4);
assert.strictEqual(value.payload.type, testProto.PayloadType.COMPRESSABLE);
assert.strictEqual(value.payload.body.limit - value.payload.body.offset,
assert.strictEqual(value.payload.type, 'COMPRESSABLE');
assert.strictEqual(value.payload.body.length,
arg.response_parameters[resp_index].size);
resp_index += 1;
});
@ -182,23 +181,21 @@ function pingPong(client, done) {
});
var index = 0;
call.write({
response_type: testProto.PayloadType.COMPRESSABLE,
response_type: 'COMPRESSABLE',
response_parameters: [
{size: response_sizes[index]}
],
payload: {body: zeroBuffer(payload_sizes[index])}
});
call.on('data', function(response) {
assert.strictEqual(response.payload.type,
testProto.PayloadType.COMPRESSABLE);
assert.equal(response.payload.body.limit - response.payload.body.offset,
response_sizes[index]);
assert.strictEqual(response.payload.type, 'COMPRESSABLE');
assert.equal(response.payload.body.length, response_sizes[index]);
index += 1;
if (index === 4) {
call.end();
} else {
call.write({
response_type: testProto.PayloadType.COMPRESSABLE,
response_type: 'COMPRESSABLE',
response_parameters: [
{size: response_sizes[index]}
],
@ -251,7 +248,7 @@ function cancelAfterBegin(client, done) {
function cancelAfterFirstResponse(client, done) {
var call = client.fullDuplexCall();
call.write({
response_type: testProto.PayloadType.COMPRESSABLE,
response_type: 'COMPRESSABLE',
response_parameters: [
{size: 31415}
],
@ -266,22 +263,36 @@ function cancelAfterFirstResponse(client, done) {
});
}
function timeoutOnSleepingServer(client, done) {
var deadline = new Date();
deadline.setMilliseconds(deadline.getMilliseconds() + 1);
var call = client.fullDuplexCall(null, deadline);
call.write({
payload: {body: zeroBuffer(27182)}
});
call.on('error', function(error) {
assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED);
done();
});
}
/**
* Run one of the authentication tests.
* @param {string} expected_user The expected username in the response
* @param {Client} client The client to test against
* @param {?string} scope The scope to apply to the credentials
* @param {function} done Callback to call when the test is completed. Included
* primarily for use with mocha
*/
function authTest(expected_user, client, done) {
function authTest(expected_user, scope, client, done) {
(new GoogleAuth()).getApplicationDefault(function(err, credential) {
assert.ifError(err);
if (credential.createScopedRequired()) {
credential = credential.createScoped(AUTH_SCOPE);
if (credential.createScopedRequired() && scope) {
credential = credential.createScoped(scope);
}
client.updateMetadata = grpc.getGoogleAuthDelegate(credential);
var arg = {
response_type: testProto.PayloadType.COMPRESSABLE,
response_type: 'COMPRESSABLE',
response_size: 314159,
payload: {
body: zeroBuffer(271828)
@ -291,9 +302,8 @@ function authTest(expected_user, client, done) {
};
var call = client.unaryCall(arg, function(err, resp) {
assert.ifError(err);
assert.strictEqual(resp.payload.type, testProto.PayloadType.COMPRESSABLE);
assert.strictEqual(resp.payload.body.limit - resp.payload.body.offset,
314159);
assert.strictEqual(resp.payload.type, 'COMPRESSABLE');
assert.strictEqual(resp.payload.body.length, 314159);
assert.strictEqual(resp.username, expected_user);
assert.strictEqual(resp.oauth_scope, AUTH_SCOPE_RESPONSE);
});
@ -318,8 +328,10 @@ var test_cases = {
empty_stream: emptyStream,
cancel_after_begin: cancelAfterBegin,
cancel_after_first_response: cancelAfterFirstResponse,
compute_engine_creds: _.partial(authTest, COMPUTE_ENGINE_USER),
service_account_creds: _.partial(authTest, AUTH_USER)
timeout_on_sleeping_server: timeoutOnSleepingServer,
compute_engine_creds: _.partial(authTest, COMPUTE_ENGINE_USER, null),
service_account_creds: _.partial(authTest, AUTH_USER, AUTH_SCOPE),
jwt_token_creds: _.partial(authTest, AUTH_USER, null)
};
/**

@ -72,10 +72,9 @@ function handleUnary(call, callback) {
var req = call.request;
var zeros = zeroBuffer(req.response_size);
var payload_type = req.response_type;
if (payload_type === testProto.PayloadType.RANDOM) {
payload_type = [
testProto.PayloadType.COMPRESSABLE,
testProto.PayloadType.UNCOMPRESSABLE][Math.random() < 0.5 ? 0 : 1];
if (payload_type === 'RANDOM') {
payload_type = ['COMPRESSABLE',
'UNCOMPRESSABLE'][Math.random() < 0.5 ? 0 : 1];
}
callback(null, {payload: {type: payload_type, body: zeros}});
}
@ -89,7 +88,7 @@ function handleUnary(call, callback) {
function handleStreamingInput(call, callback) {
var aggregate_size = 0;
call.on('data', function(value) {
aggregate_size += value.payload.body.limit - value.payload.body.offset;
aggregate_size += value.payload.body.length;
});
call.on('end', function() {
callback(null, {aggregated_payload_size: aggregate_size});
@ -103,10 +102,9 @@ function handleStreamingInput(call, callback) {
function handleStreamingOutput(call) {
var req = call.request;
var payload_type = req.response_type;
if (payload_type === testProto.PayloadType.RANDOM) {
payload_type = [
testProto.PayloadType.COMPRESSABLE,
testProto.PayloadType.UNCOMPRESSABLE][Math.random() < 0.5 ? 0 : 1];
if (payload_type === 'RANDOM') {
payload_type = ['COMPRESSABLE',
'UNCOMPRESSABLE'][Math.random() < 0.5 ? 0 : 1];
}
_.each(req.response_parameters, function(resp_param) {
call.write({
@ -127,10 +125,9 @@ function handleStreamingOutput(call) {
function handleFullDuplex(call) {
call.on('data', function(value) {
var payload_type = value.response_type;
if (payload_type === testProto.PayloadType.RANDOM) {
payload_type = [
testProto.PayloadType.COMPRESSABLE,
testProto.PayloadType.UNCOMPRESSABLE][Math.random() < 0.5 ? 0 : 1];
if (payload_type === 'RANDOM') {
payload_type = ['COMPRESSABLE',
'UNCOMPRESSABLE'][Math.random() < 0.5 ? 0 : 1];
}
_.each(value.response_parameters, function(resp_param) {
call.write({

@ -30,7 +30,7 @@
// Message definitions to be used by integration test service definitions.
syntax = "proto2";
syntax = "proto3";
package grpc.testing;
@ -49,46 +49,46 @@ enum PayloadType {
// A block of data, to simply increase gRPC message size.
message Payload {
// The type of data in body.
optional PayloadType type = 1 [default = COMPRESSABLE];
PayloadType type = 1;
// Primary contents of payload.
optional bytes body = 2;
bytes body = 2;
}
// Unary request.
message SimpleRequest {
// Desired payload type in the response from the server.
// If response_type is RANDOM, server randomly chooses one from other formats.
optional PayloadType response_type = 1 [default = COMPRESSABLE];
PayloadType response_type = 1;
// Desired payload size in the response from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
optional int32 response_size = 2;
int32 response_size = 2;
// Optional input payload sent along with the request.
optional Payload payload = 3;
Payload payload = 3;
// Whether SimpleResponse should include username.
optional bool fill_username = 4;
bool fill_username = 4;
// Whether SimpleResponse should include OAuth scope.
optional bool fill_oauth_scope = 5;
bool fill_oauth_scope = 5;
}
// Unary response, as configured by the request.
message SimpleResponse {
// Payload to increase message size.
optional Payload payload = 1;
Payload payload = 1;
// The user the request came from, for verifying authentication was
// successful when the client expected it.
optional string username = 2;
string username = 2;
// OAuth scope.
optional string oauth_scope = 3;
string oauth_scope = 3;
}
// Client-streaming request.
message StreamingInputCallRequest {
// Optional input payload sent along with the request.
optional Payload payload = 1;
Payload payload = 1;
// Not expecting any payload from the response.
}
@ -96,18 +96,18 @@ message StreamingInputCallRequest {
// Client-streaming response.
message StreamingInputCallResponse {
// Aggregated size of payloads received from the client.
optional int32 aggregated_payload_size = 1;
int32 aggregated_payload_size = 1;
}
// Configuration for a particular response.
message ResponseParameters {
// Desired payload sizes in responses from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
optional int32 size = 1;
int32 size = 1;
// Desired interval between consecutive responses in the response stream in
// microseconds.
optional int32 interval_us = 2;
int32 interval_us = 2;
}
// Server-streaming request.
@ -116,17 +116,17 @@ message StreamingOutputCallRequest {
// If response_type is RANDOM, the payload from each response in the stream
// might be of different types. This is to simulate a mixed type of payload
// stream.
optional PayloadType response_type = 1 [default = COMPRESSABLE];
PayloadType response_type = 1;
// Configuration for each expected response message.
repeated ResponseParameters response_parameters = 2;
// Optional input payload sent along with the request.
optional Payload payload = 3;
Payload payload = 3;
}
// Server-streaming response, as configured by the request and parameters.
message StreamingOutputCallResponse {
// Payload to increase response size.
optional Payload payload = 1;
Payload payload = 1;
}

@ -30,7 +30,8 @@
// An integration test service that covers all the method signature permutations
// of unary/streaming requests/responses.
syntax = "proto2";
syntax = "proto3";
import "empty.proto";
import "messages.proto";

@ -1,6 +1,6 @@
{
"name": "grpc",
"version": "0.7.0",
"version": "0.8.0",
"author": "Google Inc.",
"description": "gRPC Library for Node",
"homepage": "http://www.grpc.io/",
@ -26,7 +26,7 @@
"dependencies": {
"bindings": "^1.2.0",
"nan": "^1.5.0",
"protobufjs": "^4.0.0-b2",
"protobufjs": "dcodeIO/ProtoBuf.js",
"underscore": "^1.6.0",
"underscore.string": "^3.0.0"
},

@ -223,7 +223,7 @@ function makeUnaryRequestFunction(method, serialize, deserialize) {
emitter.cancel = function cancel() {
call.cancel();
};
this.updateMetadata(metadata, function(error, metadata) {
this.updateMetadata(this.auth_uri, metadata, function(error, metadata) {
if (error) {
call.cancel();
callback(error);
@ -289,7 +289,7 @@ function makeClientStreamRequestFunction(method, serialize, deserialize) {
metadata = {};
}
var stream = new ClientWritableStream(call, serialize);
this.updateMetadata(metadata, function(error, metadata) {
this.updateMetadata(this.auth_uri, metadata, function(error, metadata) {
if (error) {
call.cancel();
callback(error);
@ -360,7 +360,7 @@ function makeServerStreamRequestFunction(method, serialize, deserialize) {
metadata = {};
}
var stream = new ClientReadableStream(call, deserialize);
this.updateMetadata(metadata, function(error, metadata) {
this.updateMetadata(this.auth_uri, metadata, function(error, metadata) {
if (error) {
call.cancel();
stream.emit('error', error);
@ -427,7 +427,7 @@ function makeBidiStreamRequestFunction(method, serialize, deserialize) {
metadata = {};
}
var stream = new ClientDuplexStream(call, serialize, deserialize);
this.updateMetadata(metadata, function(error, metadata) {
this.updateMetadata(this.auth_uri, metadata, function(error, metadata) {
if (error) {
call.cancel();
stream.emit('error', error);
@ -503,10 +503,11 @@ function makeClientConstructor(methods, serviceName) {
callback(null, metadata);
};
}
this.server_address = address.replace(/\/$/, '');
this.channel = new grpc.Channel(address, options);
this.updateMetadata = _.partial(updateMetadata,
this.server_address + '/' + serviceName);
this.auth_uri = this.server_address + '/' + serviceName;
this.updateMetadata = updateMetadata;
}
_.each(methods, function(attrs, name) {

@ -50,7 +50,7 @@ function deserializeCls(cls) {
* @return {cls} The resulting object
*/
return function deserialize(arg_buf) {
return cls.decode(arg_buf);
return cls.decode(arg_buf).toRaw();
};
}

@ -0,0 +1,39 @@
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
message EchoMessage {
string value = 1;
int32 value2 = 2;
}
service EchoService {
rpc Echo (EchoMessage) returns (EchoMessage);
}

@ -86,4 +86,8 @@ describe('Interop tests', function() {
interop_client.runTest(port, name_override, 'cancel_after_first_response',
true, true, done);
});
it('should pass timeout_on_sleeping_server', function(done) {
interop_client.runTest(port, name_override, 'timeout_on_sleeping_server',
true, true, done);
});
});

@ -99,6 +99,36 @@ describe('Surface server constructor', function() {
}, /math.Math/);
});
});
describe('Echo service', function() {
var server;
var client;
before(function() {
var test_proto = ProtoBuf.loadProtoFile(__dirname + '/echo_service.proto');
var echo_service = test_proto.lookup('EchoService');
var Server = grpc.buildServer([echo_service]);
server = new Server({
'EchoService': {
echo: function(call, callback) {
callback(null, call.request);
}
}
});
var port = server.bind('localhost:0');
var Client = surface_client.makeProtobufClientConstructor(echo_service);
client = new Client('localhost:' + port);
server.listen();
});
after(function() {
server.shutdown();
});
it('should echo the recieved message directly', function(done) {
client.echo({value: 'test value', value2: 3}, function(error, response) {
assert.ifError(error);
assert.deepEqual(response, {value: 'test value', value2: 3});
done();
});
});
});
describe('Generic client and server', function() {
function toString(val) {
return val.toString();

@ -27,14 +27,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto2";
syntax = "proto3";
message Request {
optional bool error = 1;
bool error = 1;
}
message Response {
optional int32 count = 1;
int32 count = 1;
}
service TestService {

@ -16,3 +16,4 @@ DerivedData
*.hmap
*.ipa
*.xcuserstate
*.DS_Store

@ -41,8 +41,18 @@
@implementation GRPCChannel
+ (instancetype)channelToHost:(NSString *)host {
// TODO(jcanizales): Reuse channels.
return [[self alloc] initWithHost:host];
// TODO(mlumish): Investigate whether a cache with strong links is a good idea
static NSMutableDictionary *channelCache;
static dispatch_once_t cacheInitialization;
dispatch_once(&cacheInitialization, ^{
channelCache = [NSMutableDictionary dictionary];
});
GRPCChannel *channel = channelCache[host];
if (!channel) {
channel = [[self alloc] initWithHost:host];
channelCache[host] = channel;
}
return channel;
}
- (instancetype)init {

@ -34,7 +34,7 @@
#import <Foundation/Foundation.h>
#include <grpc/grpc.h>
typedef void(^GRPCQueueCompletionHandler)(grpc_op_error error);
typedef void(^GRPCQueueCompletionHandler)(bool success);
// This class lets one more easily use grpc_completion_queue. To use it, pass
// the value of the unmanagedQueue property of an instance of this class to

@ -65,20 +65,17 @@
dispatch_async(gDefaultConcurrentQueue, ^{
while (YES) {
// The following call blocks until an event is available.
grpc_event *event = grpc_completion_queue_next(unmanagedQueue, gpr_inf_future);
grpc_event event = grpc_completion_queue_next(unmanagedQueue, gpr_inf_future);
GRPCQueueCompletionHandler handler;
switch (event->type) {
switch (event.type) {
case GRPC_OP_COMPLETE:
handler = (__bridge_transfer GRPCQueueCompletionHandler)event->tag;
handler(event->data.op_complete);
grpc_event_finish(event);
handler = (__bridge_transfer GRPCQueueCompletionHandler)event.tag;
handler(event.success);
break;
case GRPC_QUEUE_SHUTDOWN:
grpc_event_finish(event);
grpc_completion_queue_destroy(unmanagedQueue);
return;
default:
grpc_event_finish(event);
[NSException raise:@"Unrecognized completion type" format:@""];
}
};

@ -296,8 +296,8 @@
[op getOp:&ops_array[i++]];
}
grpc_call_error error = grpc_call_start_batch(_call, ops_array, nops,
(__bridge_retained void *)(^(grpc_op_error error){
if (error != GRPC_OP_OK) {
(__bridge_retained void *)(^(bool success){
if (!success) {
if (errorHandler) {
errorHandler();
} else {

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save