Merge branch 'master' of github.com:grpc/grpc into compression-metadata

Branch: pull/2073/head
Author: David Garcia Quintas (10 years ago)
Commit: 9e9aaa88e1
Files changed (lines changed in parentheses):

  1. BUILD (4)
  2. Makefile (307)
  3. build.json (9)
  4. gRPC.podspec (7)
  5. include/grpc++/async_unary_call.h (4)
  6. include/grpc++/channel_arguments.h (4)
  7. include/grpc++/client_context.h (2)
  8. include/grpc++/impl/client_unary_call.h (2)
  9. include/grpc++/impl/service_type.h (2)
  10. include/grpc++/server.h (1)
  11. include/grpc++/status.h (9)
  12. include/grpc++/stream.h (4)
  13. include/grpc/grpc.h (49)
  14. src/compiler/cpp_generator.cc (8)
  15. src/core/channel/channel_args.c (24)
  16. src/core/channel/channel_args.h (18)
  17. src/core/compression/algorithm.c (2)
  18. src/core/iomgr/pollset_posix.c (31)
  19. src/core/iomgr/tcp_windows.c (2)
  20. src/core/support/sync.c (4)
  21. src/core/surface/call.c (41)
  22. src/core/surface/call.h (1)
  23. src/core/surface/completion_queue.c (38)
  24. src/core/surface/server.c (201)
  25. src/core/transport/stream_op.h (9)
  26. src/cpp/client/channel_arguments.cc (5)
  27. src/cpp/client/client_unary_call.cc (2)
  28. src/cpp/common/call.cc (12)
  29. src/cpp/server/server.cc (13)
  30. src/cpp/util/status.cc (2)
  31. src/csharp/Grpc.Core.Tests/ClientServerTest.cs (30)
  32. src/csharp/Grpc.Core/Internal/CallSafeHandle.cs (2)
  33. src/csharp/Grpc.Core/Internal/CompletionRegistry.cs (3)
  34. src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs (21)
  35. src/csharp/Grpc.Core/Server.cs (19)
  36. src/csharp/ext/grpc_csharp_ext.c (35)
  37. src/node/ext/call.cc (1)
  38. src/node/ext/server.cc (41)
  39. src/node/ext/server.h (3)
  40. src/objective-c/GRPCClient/GRPCCall.m (13)
  41. src/objective-c/README.md (2)
  42. src/php/README.md (20)
  43. src/php/bin/run_tests.sh (30)
  44. src/php/ext/grpc/CREDITS (3)
  45. src/php/ext/grpc/LICENSE (32)
  46. src/php/ext/grpc/README.md (72)
  47. src/php/ext/grpc/call.c (1)
  48. src/php/ext/grpc/package.xml (82)
  49. src/php/ext/grpc/server.c (3)
  50. src/php/lib/Grpc/AbstractCall.php (14)
  51. src/php/lib/Grpc/BaseStub.php (31)
  52. src/php/tests/interop/interop_client.php (21)
  53. src/php/tests/unit_tests/TimevalTest.php (22)
  54. src/python/src/grpc/_adapter/_c/types/server.c (12)
  55. src/python/src/grpc/_adapter/_c/utility.c (13)
  56. src/python/src/grpc/_adapter/_intermediary_low.py (8)
  57. src/python/src/grpc/_adapter/_intermediary_low_test.py (11)
  58. src/python/src/grpc/_adapter/_low.py (7)
  59. src/python/src/grpc/_adapter/_low_test.py (2)
  60. src/ruby/.rspec (2)
  61. src/ruby/ext/grpc/rb_call.c (1)
  62. src/ruby/ext/grpc/rb_completion_queue.c (12)
  63. src/ruby/ext/grpc/rb_server.c (64)
  64. src/ruby/lib/grpc/generic/rpc_server.rb (13)
  65. src/ruby/spec/client_server_spec.rb (9)
  66. src/ruby/spec/generic/active_call_spec.rb (2)
  67. src/ruby/spec/generic/client_stub_spec.rb (3)
  68. src/ruby/spec/generic/rpc_server_spec.rb (12)
  69. src/ruby/spec/server_spec.rb (22)
  70. src/ruby/spec/spec_helper.rb (2)
  71. templates/Makefile.template (2)
  72. templates/gRPC.podspec.template (126)
  73. test/core/bad_client/bad_client.c (4)
  74. test/core/end2end/dualstack_socket_test.c (10)
  75. test/core/end2end/gen_build_json.py (3)
  76. test/core/end2end/no_server_test.c (2)
  77. test/core/end2end/tests/bad_hostname.c (7)
  78. test/core/end2end/tests/cancel_after_accept.c (12)
  79. test/core/end2end/tests/cancel_after_accept_and_writes_closed.c (13)
  80. test/core/end2end/tests/cancel_after_invoke.c (9)
  81. test/core/end2end/tests/cancel_before_invoke.c (9)
  82. test/core/end2end/tests/cancel_in_a_vacuum.c (5)
  83. test/core/end2end/tests/census_simple_request.c (14)
  84. test/core/end2end/tests/disappearing_server.c (11)
  85. test/core/end2end/tests/early_server_shutdown_finishes_inflight_calls.c (19)
  86. test/core/end2end/tests/early_server_shutdown_finishes_tags.c (13)
  87. test/core/end2end/tests/empty_batch.c (3)
  88. test/core/end2end/tests/graceful_server_shutdown.c (12)
  89. test/core/end2end/tests/invoke_large_request.c (3)
  90. test/core/end2end/tests/max_concurrent_streams.c (24)
  91. test/core/end2end/tests/max_message_length.c (9)
  92. test/core/end2end/tests/no_op.c (5)
  93. test/core/end2end/tests/ping_pong_streaming.c (14)
  94. test/core/end2end/tests/registered_call.c (10)
  95. test/core/end2end/tests/request_response_with_binary_metadata_and_payload.c (14)
  96. test/core/end2end/tests/request_response_with_metadata_and_payload.c (14)
  97. test/core/end2end/tests/request_response_with_payload.c (14)
  98. test/core/end2end/tests/request_response_with_payload_and_call_creds.c (14)
  99. test/core/end2end/tests/request_response_with_trailing_metadata_and_payload.c (14)
  100. test/core/end2end/tests/request_with_flags.c (207)

Some files were not shown because too many files have changed in this diff.

@ -145,12 +145,14 @@ cc_library(
"src/core/tsi/transport_security.h",
"src/core/tsi/transport_security_interface.h",
"src/core/census/grpc_context.h",
"src/core/channel/census_filter.h",
"src/core/channel/channel_args.h",
"src/core/channel/channel_stack.h",
"src/core/channel/child_channel.h",
"src/core/channel/client_channel.h",
"src/core/channel/client_setup.h",
"src/core/channel/connected_channel.h",
"src/core/channel/context.h",
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
@ -366,12 +368,14 @@ cc_library(
name = "grpc_unsecure",
srcs = [
"src/core/census/grpc_context.h",
"src/core/channel/census_filter.h",
"src/core/channel/channel_args.h",
"src/core/channel/channel_stack.h",
"src/core/channel/child_channel.h",
"src/core/channel/client_channel.h",
"src/core/channel/client_setup.h",
"src/core/channel/connected_channel.h",
"src/core/channel/context.h",
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",

File diff suppressed because one or more lines are too long

@ -6,8 +6,8 @@
"#": "The public version number of the library.",
"version": {
"major": 0,
"minor": 9,
"micro": 1,
"minor": 10,
"micro": 0,
"build": 0
}
},
@ -107,12 +107,14 @@
],
"headers": [
"src/core/census/grpc_context.h",
"src/core/channel/census_filter.h",
"src/core/channel/channel_args.h",
"src/core/channel/channel_stack.h",
"src/core/channel/child_channel.h",
"src/core/channel/client_channel.h",
"src/core/channel/client_setup.h",
"src/core/channel/connected_channel.h",
"src/core/channel/context.h",
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
@ -1361,6 +1363,9 @@
"grpc",
"gpr_test_util",
"gpr"
],
"platforms": [
"posix"
]
},
{

File diff suppressed because one or more lines are too long

@ -117,7 +117,7 @@ class ServerAsyncResponseWriter GRPC_FINAL
ctx_->sent_initial_metadata_ = true;
}
// The response is dropped if the status is not OK.
if (status.IsOk()) {
if (status.ok()) {
finish_buf_.AddSendMessage(msg);
}
finish_buf_.AddServerSendStatus(&ctx_->trailing_metadata_, status);
@ -125,7 +125,7 @@ class ServerAsyncResponseWriter GRPC_FINAL
}
void FinishWithError(const Status& status, void* tag) {
GPR_ASSERT(!status.IsOk());
GPR_ASSERT(!status.ok());
finish_buf_.Reset(tag);
if (!ctx_->sent_initial_metadata_) {
finish_buf_.AddSendInitialMetadata(&ctx_->initial_metadata_);

@ -38,6 +38,7 @@
#include <list>
#include <grpc++/config.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
namespace grpc {
@ -58,6 +59,9 @@ class ChannelArguments {
void SetSslTargetNameOverride(const grpc::string& name);
// TODO(yangg) add flow control options
// Set the compression level for the channel.
void SetCompressionLevel(grpc_compression_level level);
// Generic channel argument setters. Only for advanced use cases.
void SetInt(const grpc::string& key, int value);
void SetString(const grpc::string& key, const grpc::string& value);

@ -41,6 +41,7 @@
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc++/config.h>
#include <grpc++/status.h>
#include <grpc++/time.h>
struct grpc_call;
@ -53,7 +54,6 @@ class ChannelInterface;
class CompletionQueue;
class Credentials;
class RpcMethod;
class Status;
template <class R>
class ClientReader;
template <class W>

@ -35,6 +35,7 @@
#define GRPCXX_IMPL_CLIENT_UNARY_CALL_H
#include <grpc++/config.h>
#include <grpc++/status.h>
namespace grpc {
@ -42,7 +43,6 @@ class ChannelInterface;
class ClientContext;
class CompletionQueue;
class RpcMethod;
class Status;
// Wrapper that performs a blocking unary call
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,

@ -35,6 +35,7 @@
#define GRPCXX_IMPL_SERVICE_TYPE_H
#include <grpc++/config.h>
#include <grpc++/status.h>
namespace grpc {
@ -44,7 +45,6 @@ class RpcService;
class Server;
class ServerCompletionQueue;
class ServerContext;
class Status;
class SynchronousService {
public:

@ -77,6 +77,7 @@ class Server GRPC_FINAL : public GrpcLibrary,
class SyncRequest;
class AsyncRequest;
class ShutdownRequest;
// ServerBuilder use only
Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned,

@ -42,18 +42,17 @@ namespace grpc {
class Status {
public:
Status() : code_(StatusCode::OK) {}
explicit Status(StatusCode code) : code_(code) {}
Status(StatusCode code, const grpc::string& details)
: code_(code), details_(details) {}
// Pre-defined special status objects.
static const Status& OK;
static const Status& Cancelled;
static const Status& CANCELLED;
StatusCode code() const { return code_; }
grpc::string details() const { return details_; }
StatusCode error_code() const { return code_; }
grpc::string error_message() const { return details_; }
bool IsOk() const { return code_ == StatusCode::OK; }
bool ok() const { return code_ == StatusCode::OK; }
private:
StatusCode code_;

@ -615,7 +615,7 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
ctx_->sent_initial_metadata_ = true;
}
// The response is dropped if the status is not OK.
if (status.IsOk()) {
if (status.ok()) {
finish_buf_.AddSendMessage(msg);
}
finish_buf_.AddServerSendStatus(&ctx_->trailing_metadata_, status);
@ -623,7 +623,7 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
}
void FinishWithError(const Status& status, void* tag) {
GPR_ASSERT(!status.IsOk());
GPR_ASSERT(!status.ok());
finish_buf_.Reset(tag);
if (!ctx_->sent_initial_metadata_) {
finish_buf_.AddSendInitialMetadata(&ctx_->initial_metadata_);

@ -99,7 +99,8 @@ typedef struct {
These configuration options are modelled as key-value pairs as defined
by grpc_arg; keys are strings to allow easy backwards-compatible extension
by arbitrary parties.
All evaluation is performed at channel creation time. */
All evaluation is performed at channel creation time (i.e. the values in
this structure need only live through the creation invocation). */
typedef struct {
size_t num_args;
grpc_arg *args;
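The lifetime note above has a practical upshot: because the arguments are evaluated inside the creation call, a grpc_channel_args can live entirely on the stack. A minimal sketch against the C API in this diff; assuming GRPC_COMPRESSION_LEVEL_ARG (the key this commit introduces) and GRPC_COMPRESS_LEVEL_LOW are visible via the included headers:

#include <grpc/compression.h>
#include <grpc/grpc.h>

/* Sketch: stack-allocated channel args; grpc_channel_create evaluates
   them during the call, so nothing here must outlive it. */
grpc_channel *create_compressed_channel(const char *target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = (char *)GRPC_COMPRESSION_LEVEL_ARG;
  arg.value.integer = GRPC_COMPRESS_LEVEL_LOW;

  grpc_channel_args args;
  args.num_args = 1;
  args.args = &arg;

  return grpc_channel_create(target, &args); /* args may go out of scope now */
}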
@ -155,6 +156,8 @@ typedef enum grpc_call_error {
/* Force compression to be disabled for a particular write
(start_write/add_metadata). Illegal on invoke/accept. */
#define GRPC_WRITE_NO_COMPRESS (0x00000002u)
/* Mask of all valid flags. */
#define GRPC_WRITE_USED_MASK (GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS)
/* A single metadata element */
typedef struct grpc_metadata {
@ -173,11 +176,11 @@ typedef struct grpc_metadata {
/** The type of completion (for grpc_event) */
typedef enum grpc_completion_type {
/** Shutting down */
GRPC_QUEUE_SHUTDOWN,
GRPC_QUEUE_SHUTDOWN,
/** No event before timeout */
GRPC_QUEUE_TIMEOUT,
GRPC_QUEUE_TIMEOUT,
/** Operation completion */
GRPC_OP_COMPLETE
GRPC_OP_COMPLETE
} grpc_completion_type;
/** The result of an operation.
@ -186,7 +189,7 @@ typedef enum grpc_completion_type {
typedef struct grpc_event {
/** The type of the completion. */
grpc_completion_type type;
/** non-zero if the operation was successful, 0 upon failure.
/** non-zero if the operation was successful, 0 upon failure.
Only GRPC_OP_COMPLETE can succeed or fail. */
int success;
/** The tag passed to grpc_call_start_batch etc to start this operation.
@ -250,6 +253,7 @@ typedef enum {
no arguments) */
typedef struct grpc_op {
grpc_op_type op;
gpr_uint32 flags; /**< Write flags bitset for grpc_begin_messages */
union {
struct {
size_t count;
@ -268,6 +272,8 @@ typedef struct grpc_op {
After the operation completes, call grpc_metadata_array_destroy on this
value, or reuse it in a future op. */
grpc_metadata_array *recv_initial_metadata;
/* ownership of the byte buffer is moved to the caller; the caller must call
grpc_byte_buffer_destroy on this value, or reuse it in a future op. */
grpc_byte_buffer **recv_message;
struct {
/* ownership of the array is with the caller, but ownership of the
@ -313,7 +319,7 @@ typedef struct grpc_op {
} grpc_op;
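Because every op in a batch now carries a flags word, callers must set it even when no flags apply. A sketch of a two-op client batch using the write flags defined earlier; call, message (a caller-owned grpc_byte_buffer*), and tag are assumed to be in scope:

#include <string.h> /* memset */

grpc_op ops[2];
memset(ops, 0, sizeof(ops)); /* zeroes every flags field up front */

ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
ops[0].data.send_initial_metadata.count = 0;
ops[0].flags = 0;

ops[1].op = GRPC_OP_SEND_MESSAGE;
ops[1].data.send_message = message;
ops[1].flags = GRPC_WRITE_NO_COMPRESS; /* must stay within GRPC_WRITE_USED_MASK */

GPR_ASSERT(grpc_call_start_batch(call, ops, 2, tag) == GRPC_CALL_OK);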
/** Initialize the grpc library.
It is not safe to call any other grpc functions before calling this.
(To avoid overhead, little checking is done, and some things may work. We
do not warrant that they will continue to do so in future revisions of this
@ -321,7 +327,7 @@ typedef struct grpc_op {
void grpc_init(void);
/** Shut down the grpc library.
No memory is used by grpc after this call returns, nor are any instructions
executing within the grpc library.
Prior to calling, all application owned grpc objects must have been
@ -368,7 +374,8 @@ void grpc_completion_queue_destroy(grpc_completion_queue *cq);
/* Create a call given a grpc_channel, in order to call 'method'. The request
is not sent until grpc_call_invoke is called. All completions are sent to
'completion_queue'. */
'completion_queue'. 'method' and 'host' need only live through the invocation
of this function. */
grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_completion_queue *completion_queue,
const char *method, const char *host,
@ -393,8 +400,9 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
/* Create a client channel to 'target'. Additional channel level configuration
MAY be provided by grpc_channel_args, though the expectation is that most
clients will want to simply pass NULL. See grpc_channel_args definition
for more on this. */
clients will want to simply pass NULL. See grpc_channel_args definition for
more on this. The data in 'args' need only live through the invocation of
this function. */
grpc_channel *grpc_channel_create(const char *target,
const grpc_channel_args *args);
@ -465,7 +473,8 @@ grpc_call_error grpc_server_request_registered_call(
/* Create a server. Additional configuration for each incoming channel can
be specified with args. If no additional configuration is needed, args can
be NULL. See grpc_channel_args for more. */
be NULL. See grpc_channel_args for more. The data in 'args' need only live
through the invocation of this function. */
grpc_server *grpc_server_create(const grpc_channel_args *args);
/* Register a completion queue with the server. Must be done for any completion
@ -485,18 +494,20 @@ void grpc_server_start(grpc_server *server);
/* Begin shutting down a server.
After completion, no new calls or connections will be admitted.
Existing calls will be allowed to complete.
Shutdown is idempotent. */
void grpc_server_shutdown(grpc_server *server);
/* As per grpc_server_shutdown, but send a GRPC_OP_COMPLETE event when
there are no more calls being serviced.
Send a GRPC_OP_COMPLETE event when there are no more calls being serviced.
Shutdown is idempotent, and all tags will be notified at once if multiple
grpc_server_shutdown_and_notify calls are made. */
void grpc_server_shutdown_and_notify(grpc_server *server, void *tag);
void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag);
/* Cancel all in-progress calls.
Only usable after shutdown. */
void grpc_server_cancel_all_calls(grpc_server *server);
/* Destroy a server.
Forcefully cancels all existing calls.
Implies grpc_server_shutdown() if one was not previously performed. */
Shutdown must have completed beforehand (i.e. all tags generated by
grpc_server_shutdown_and_notify must have been received, and at least
one call to grpc_server_shutdown_and_notify must have been made). */
void grpc_server_destroy(grpc_server *server);
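Read together, the revised comments prescribe a strict teardown order: begin shutdown against a registered completion queue, optionally cancel stragglers, wait for the shutdown tag, and only then destroy. A sketch, assuming cq was previously registered with the server:

/* Sketch: the teardown order the comments above require. */
void teardown(grpc_server *server, grpc_completion_queue *cq) {
  void *tag = (void *)1; /* any recognizable tag value */
  grpc_server_shutdown_and_notify(server, cq, tag);

  /* Legal only once shutdown has begun; hurries in-flight calls along. */
  grpc_server_cancel_all_calls(server);

  /* grpc_server_destroy requires the shutdown tag to have been received. */
  grpc_event ev = grpc_completion_queue_pluck(cq, tag, gpr_inf_future);
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);

  grpc_server_destroy(server);
}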
/** Enable or disable a tracer.

@ -854,7 +854,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer,
printer->Print(" (void) response;\n");
printer->Print(
" return ::grpc::Status("
"::grpc::StatusCode::UNIMPLEMENTED);\n");
"::grpc::StatusCode::UNIMPLEMENTED, \"\");\n");
printer->Print("}\n\n");
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
@ -867,7 +867,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer,
printer->Print(" (void) response;\n");
printer->Print(
" return ::grpc::Status("
"::grpc::StatusCode::UNIMPLEMENTED);\n");
"::grpc::StatusCode::UNIMPLEMENTED, \"\");\n");
printer->Print("}\n\n");
} else if (ServerOnlyStreaming(method)) {
printer->Print(*vars,
@ -880,7 +880,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer,
printer->Print(" (void) writer;\n");
printer->Print(
" return ::grpc::Status("
"::grpc::StatusCode::UNIMPLEMENTED);\n");
"::grpc::StatusCode::UNIMPLEMENTED, \"\");\n");
printer->Print("}\n\n");
} else if (BidiStreaming(method)) {
printer->Print(*vars,
@ -892,7 +892,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer,
printer->Print(" (void) stream;\n");
printer->Print(
" return ::grpc::Status("
"::grpc::StatusCode::UNIMPLEMENTED);\n");
"::grpc::StatusCode::UNIMPLEMENTED, \"\");\n");
printer->Print("}\n\n");
}
}

@ -115,3 +115,27 @@ int grpc_channel_args_is_census_enabled(const grpc_channel_args *a) {
}
return 0;
}
grpc_compression_level grpc_channel_args_get_compression_level(
const grpc_channel_args *a) {
size_t i;
if (a) {
for (i = 0; i < a->num_args; ++i) {
if (a->args[i].type == GRPC_ARG_INTEGER &&
!strcmp(GRPC_COMPRESSION_LEVEL_ARG, a->args[i].key)) {
return a->args[i].value.integer;
}
}
}
return GRPC_COMPRESS_LEVEL_NONE;
}
void grpc_channel_args_set_compression_level(
grpc_channel_args **a, grpc_compression_level level) {
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
tmp.key = GRPC_COMPRESSION_LEVEL_ARG;
tmp.value.integer = level;
*a = grpc_channel_args_copy_and_add(*a, &tmp);
}

@ -34,21 +34,31 @@
#ifndef GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H
#define GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H
#include <grpc/compression.h>
#include <grpc/grpc.h>
/* Copy some arguments */
grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src);
/* Copy some arguments and add the to_add parameter in the end.
/** Copy some arguments and add the to_add parameter in the end.
If to_add is NULL, it is equivalent to call grpc_channel_args_copy. */
grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
const grpc_arg *to_add);
/* Destroy arguments created by grpc_channel_args_copy */
/** Destroy arguments created by grpc_channel_args_copy */
void grpc_channel_args_destroy(grpc_channel_args *a);
/* Reads census_enabled settings from channel args. Returns 1 if census_enabled
is specified in channel args, otherwise returns 0. */
/** Reads census_enabled settings from channel args. Returns 1 if census_enabled
* is specified in channel args, otherwise returns 0. */
int grpc_channel_args_is_census_enabled(const grpc_channel_args *a);
/** Returns the compression level set in \a a. */
grpc_compression_level grpc_channel_args_get_compression_level(
const grpc_channel_args *a);
/** Sets the compression level in \a a to \a level. Setting it to
* GRPC_COMPRESS_LEVEL_NONE disables compression for the channel. */
void grpc_channel_args_set_compression_level(
grpc_channel_args **a, grpc_compression_level level);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H */
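A sketch of how the new getter/setter compose. The setter appends through grpc_channel_args_copy_and_add, so the caller owns the resulting array; starting from NULL is assumed to behave as an empty source, matching copy_and_add's copy semantics:

grpc_channel_args *args = NULL;

/* Appends a GRPC_COMPRESSION_LEVEL_ARG integer arg; caller owns *args. */
grpc_channel_args_set_compression_level(&args, GRPC_COMPRESS_LEVEL_LOW);

GPR_ASSERT(grpc_channel_args_get_compression_level(args) ==
           GRPC_COMPRESS_LEVEL_LOW);

grpc_channel_args_destroy(args);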

@ -54,7 +54,7 @@ const char *grpc_compression_algorithm_name(
grpc_compression_algorithm grpc_compression_algorithm_for_level(
grpc_compression_level level) {
switch (level) {
case GRPC_COMPRESS_NONE:
case GRPC_COMPRESS_LEVEL_NONE:
return GRPC_COMPRESS_NONE;
case GRPC_COMPRESS_LEVEL_LOW:
case GRPC_COMPRESS_LEVEL_MED:

@ -174,8 +174,6 @@ void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
/* pollset->mu already held */
gpr_timespec now = gpr_now();
/* FIXME(ctiller): see below */
gpr_timespec maximum_deadline = gpr_time_add(now, gpr_time_from_seconds(1));
int r;
if (gpr_time_cmp(now, deadline) > 0) {
return 0;
@ -186,14 +184,25 @@ int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
return 1;
}
/* FIXME(ctiller): we should not clamp deadline, however we have some
stuck at shutdown bugs that this resolves */
if (gpr_time_cmp(deadline, maximum_deadline) > 0) {
deadline = maximum_deadline;
if (pollset->shutting_down) {
return 1;
}
gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
r = pollset->vtable->maybe_work(pollset, deadline, now, 1);
gpr_tls_set(&g_current_thread_poller, 0);
if (pollset->shutting_down) {
if (pollset->counter > 0) {
grpc_pollset_kick(pollset);
} else if (pollset->in_flight_cbs == 0) {
gpr_mu_unlock(&pollset->mu);
pollset->shutdown_done_cb(pollset->shutdown_done_arg);
/* Continuing to access pollset here is safe -- it is the caller's
* responsibility to not destroy when it has outstanding calls to
* grpc_pollset_work.
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
}
}
return r;
}
@ -201,13 +210,19 @@ void grpc_pollset_shutdown(grpc_pollset *pollset,
void (*shutdown_done)(void *arg),
void *shutdown_done_arg) {
int in_flight_cbs;
int counter;
gpr_mu_lock(&pollset->mu);
pollset->shutting_down = 1;
in_flight_cbs = pollset->in_flight_cbs;
counter = pollset->counter;
pollset->shutdown_done_cb = shutdown_done;
pollset->shutdown_done_arg = shutdown_done_arg;
if (counter > 0) {
grpc_pollset_kick(pollset);
}
gpr_mu_unlock(&pollset->mu);
if (in_flight_cbs == 0) {
if (in_flight_cbs == 0 && counter == 0) {
shutdown_done(shutdown_done_arg);
}
}
@ -294,7 +309,7 @@ static void unary_poll_do_promote(void *args, int success) {
pollset->in_flight_cbs--;
if (pollset->shutting_down) {
/* We don't care about this pollset anymore. */
if (pollset->in_flight_cbs == 0) {
if (pollset->in_flight_cbs == 0 && pollset->counter == 0) {
do_shutdown_cb = 1;
}
} else if (grpc_fd_is_orphaned(fd)) {

@ -154,7 +154,7 @@ static void on_read(void *tcpp, int from_iocp) {
status = GRPC_ENDPOINT_CB_ERROR;
} else {
if (info->bytes_transfered != 0) {
sub = gpr_slice_sub(tcp->read_slice, 0, info->bytes_transfered);
sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
status = GRPC_ENDPOINT_CB_OK;
slice = &sub;
nslices = 1;

@ -118,7 +118,9 @@ void gpr_refn(gpr_refcount *r, int n) {
}
int gpr_unref(gpr_refcount *r) {
return gpr_atm_full_fetch_add(&r->count, -1) == 1;
gpr_atm prior = gpr_atm_full_fetch_add(&r->count, -1);
GPR_ASSERT(prior > 0);
return prior == 1;
}
void gpr_stats_init(gpr_stats_counter *c, gpr_intptr n) {
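The strengthened gpr_unref turns an unref of an already-released counter into a hard failure rather than silent underflow. The intended usage pattern, sketched over the public gpr API (widget is a hypothetical type for illustration):

#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>

typedef struct {
  gpr_refcount refs;
  /* ... payload ... */
} widget; /* hypothetical */

widget *widget_create(void) {
  widget *w = gpr_malloc(sizeof(widget));
  gpr_ref_init(&w->refs, 1);
  return w;
}

void widget_ref(widget *w) { gpr_ref(&w->refs); }

void widget_unref(widget *w) {
  /* gpr_unref returns nonzero when this call dropped the last reference;
     the new GPR_ASSERT(prior > 0) catches a double-unref here. */
  if (gpr_unref(&w->refs)) {
    gpr_free(w);
  }
}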

@ -185,6 +185,7 @@ struct grpc_call {
and a strong upper bound of a count of masters to be calculated. */
gpr_uint8 request_set[GRPC_IOREQ_OP_COUNT];
grpc_ioreq_data request_data[GRPC_IOREQ_OP_COUNT];
gpr_uint32 request_flags[GRPC_IOREQ_OP_COUNT];
reqinfo_master masters[GRPC_IOREQ_OP_COUNT];
/* Dynamic array of ioreq's that have completed: the count of
@ -231,6 +232,7 @@ struct grpc_call {
gpr_slice_buffer incoming_message;
gpr_uint32 incoming_message_length;
gpr_uint32 incoming_message_flags;
grpc_iomgr_closure destroy_closure;
};
@ -678,6 +680,7 @@ static int begin_message(grpc_call *call, grpc_begin_message msg) {
} else if (msg.length > 0) {
call->reading_message = 1;
call->incoming_message_length = msg.length;
call->incoming_message_flags = msg.flags;
return 1;
} else {
finish_message(call);
@ -826,6 +829,7 @@ static void copy_byte_buffer_to_stream_ops(grpc_byte_buffer *byte_buffer,
static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
grpc_ioreq_data data;
gpr_uint32 flags;
grpc_metadata_batch mdb;
size_t i;
GPR_ASSERT(op->send_ops == NULL);
@ -852,8 +856,9 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
case WRITE_STATE_STARTED:
if (is_op_live(call, GRPC_IOREQ_SEND_MESSAGE)) {
data = call->request_data[GRPC_IOREQ_SEND_MESSAGE];
flags = call->request_flags[GRPC_IOREQ_SEND_MESSAGE];
grpc_sopb_add_begin_message(
&call->send_ops, grpc_byte_buffer_length(data.send_message), 0);
&call->send_ops, grpc_byte_buffer_length(data.send_message), flags);
copy_byte_buffer_to_stream_ops(data.send_message, &call->send_ops);
op->send_ops = &call->send_ops;
call->last_send_contains |= 1 << GRPC_IOREQ_SEND_MESSAGE;
@ -987,6 +992,7 @@ static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
have_ops |= 1u << op;
call->request_data[op] = data;
call->request_flags[op] = reqs[i].flags;
call->request_set[op] = set;
}
@ -1221,6 +1227,14 @@ static void finish_batch_with_close(grpc_call *call, int success, void *tag) {
grpc_cq_end_op(call->cq, tag, call, 1);
}
static int are_write_flags_valid(gpr_uint32 flags) {
/* check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set */
const gpr_uint32 allowed_write_positions =
(GRPC_WRITE_USED_MASK | GRPC_WRITE_INTERNAL_USED_MASK);
const gpr_uint32 invalid_positions = ~allowed_write_positions;
return !(flags & invalid_positions);
}
grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
size_t nops, void *tag) {
grpc_ioreq reqs[GRPC_IOREQ_OP_COUNT];
@ -1243,30 +1257,43 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
op = &ops[in];
switch (op->op) {
case GRPC_OP_SEND_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
req = &reqs[out++];
req->op = GRPC_IOREQ_SEND_INITIAL_METADATA;
req->data.send_metadata.count = op->data.send_initial_metadata.count;
req->data.send_metadata.metadata =
op->data.send_initial_metadata.metadata;
req->flags = op->flags;
break;
case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_SEND_MESSAGE;
req->data.send_message = op->data.send_message;
req->flags = op->flags;
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (!call->is_client) {
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_SEND_CLOSE;
req->flags = op->flags;
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (call->is_client) {
return GRPC_CALL_ERROR_NOT_ON_CLIENT;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_SEND_TRAILING_METADATA;
req->flags = op->flags;
req->data.send_metadata.count =
op->data.send_status_from_server.trailing_metadata_count;
req->data.send_metadata.metadata =
@ -1280,24 +1307,33 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
req->op = GRPC_IOREQ_SEND_CLOSE;
break;
case GRPC_OP_RECV_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (!call->is_client) {
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
req->data.recv_metadata = op->data.recv_initial_metadata;
req->flags = op->flags;
break;
case GRPC_OP_RECV_MESSAGE:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_MESSAGE;
req->data.recv_message = op->data.recv_message;
req->flags = op->flags;
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (!call->is_client) {
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_STATUS;
req->flags = op->flags;
req->data.recv_status.set_value = set_status_value_directly;
req->data.recv_status.user_data = op->data.recv_status_on_client.status;
req = &reqs[out++];
@ -1315,8 +1351,11 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
finish_func = finish_batch_with_close;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_STATUS;
req->flags = op->flags;
req->data.recv_status.set_value = set_cancelled_value;
req->data.recv_status.user_data =
op->data.recv_close_on_server.cancelled;

@ -79,6 +79,7 @@ typedef union {
typedef struct {
grpc_ioreq_op op;
grpc_ioreq_data data;
gpr_uint32 flags; /**< A copy of the write flags from grpc_op */
} grpc_ioreq;
typedef void (*grpc_ioreq_completion_func)(grpc_call *call, int success,

@ -83,7 +83,8 @@ grpc_completion_queue *grpc_completion_queue_create(void) {
memset(cc, 0, sizeof(*cc));
/* Initial ref is dropped by grpc_completion_queue_shutdown */
gpr_ref_init(&cc->refs, 1);
gpr_ref_init(&cc->owning_refs, 1);
/* One for destroy(), one for pollset_shutdown */
gpr_ref_init(&cc->owning_refs, 2);
grpc_pollset_init(&cc->pollset);
cc->allow_polling = 1;
return cc;
@ -95,14 +96,14 @@ void grpc_cq_internal_ref(grpc_completion_queue *cc) {
static void on_pollset_destroy_done(void *arg) {
grpc_completion_queue *cc = arg;
grpc_pollset_destroy(&cc->pollset);
gpr_free(cc);
grpc_cq_internal_unref(cc);
}
void grpc_cq_internal_unref(grpc_completion_queue *cc) {
if (gpr_unref(&cc->owning_refs)) {
GPR_ASSERT(cc->queue == NULL);
grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
grpc_pollset_destroy(&cc->pollset);
gpr_free(cc);
}
}
@ -145,25 +146,25 @@ void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call) {
/* Signal the end of an operation - if this is the last waiting-to-be-queued
event, then enter shutdown mode */
static void end_op_locked(grpc_completion_queue *cc,
grpc_completion_type type) {
if (gpr_unref(&cc->refs)) {
GPR_ASSERT(!cc->shutdown);
GPR_ASSERT(cc->shutdown_called);
cc->shutdown = 1;
gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
}
}
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
int success) {
event *ev;
int shutdown = 0;
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
ev = add_locked(cc, GRPC_OP_COMPLETE, tag, call);
ev->base.success = success;
end_op_locked(cc, GRPC_OP_COMPLETE);
if (gpr_unref(&cc->refs)) {
GPR_ASSERT(!cc->shutdown);
GPR_ASSERT(cc->shutdown_called);
cc->shutdown = 1;
gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
shutdown = 1;
}
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
if (call) GRPC_CALL_INTERNAL_UNREF(call, "cq", 0);
if (shutdown) {
grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
}
}
/* Create a GRPC_QUEUE_SHUTDOWN event without queuing it anywhere */
@ -179,6 +180,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
event *ev = NULL;
grpc_event ret;
grpc_cq_internal_ref(cc);
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
for (;;) {
if (cc->queue != NULL) {
@ -214,6 +216,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
grpc_cq_internal_unref(cc);
return ret;
}
}
@ -221,6 +224,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
ret = ev->base;
gpr_free(ev);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
grpc_cq_internal_unref(cc);
return ret;
}
@ -258,6 +262,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
event *ev = NULL;
grpc_event ret;
grpc_cq_internal_ref(cc);
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
for (;;) {
if ((ev = pluck_event(cc, tag))) {
@ -276,6 +281,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
grpc_cq_internal_unref(cc);
return ret;
}
}
@ -283,6 +289,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
ret = ev->base;
gpr_free(ev);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
grpc_cq_internal_unref(cc);
return ret;
}
@ -299,6 +306,7 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
cc->shutdown = 1;
gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
}
}

@ -127,6 +127,11 @@ struct channel_data {
grpc_iomgr_closure finish_destroy_channel_closure;
};
typedef struct shutdown_tag {
void *tag;
grpc_completion_queue *cq;
} shutdown_tag;
struct grpc_server {
size_t channel_filter_count;
const grpc_channel_filter **channel_filters;
@ -137,14 +142,14 @@ struct grpc_server {
size_t cq_count;
gpr_mu mu;
gpr_cv cv;
registered_method *registered_methods;
requested_call_array requested_calls;
gpr_uint8 shutdown;
gpr_uint8 shutdown_published;
size_t num_shutdown_tags;
void **shutdown_tags;
shutdown_tag *shutdown_tags;
call_data *lists[CALL_LIST_COUNT];
channel_data root_channel_data;
@ -261,29 +266,32 @@ static void server_ref(grpc_server *server) {
gpr_ref(&server->internal_refcount);
}
static void server_unref(grpc_server *server) {
static void server_delete(grpc_server *server) {
registered_method *rm;
size_t i;
grpc_channel_args_destroy(server->channel_args);
gpr_mu_destroy(&server->mu);
gpr_free(server->channel_filters);
requested_call_array_destroy(&server->requested_calls);
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
gpr_free(rm->method);
gpr_free(rm->host);
requested_call_array_destroy(&rm->requested);
gpr_free(rm);
}
for (i = 0; i < server->cq_count; i++) {
grpc_cq_internal_unref(server->cqs[i]);
}
gpr_free(server->cqs);
gpr_free(server->pollsets);
gpr_free(server->shutdown_tags);
gpr_free(server);
}
static void server_unref(grpc_server *server) {
if (gpr_unref(&server->internal_refcount)) {
grpc_channel_args_destroy(server->channel_args);
gpr_mu_destroy(&server->mu);
gpr_cv_destroy(&server->cv);
gpr_free(server->channel_filters);
requested_call_array_destroy(&server->requested_calls);
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
gpr_free(rm->method);
gpr_free(rm->host);
requested_call_array_destroy(&rm->requested);
gpr_free(rm);
}
for (i = 0; i < server->cq_count; i++) {
grpc_cq_internal_unref(server->cqs[i]);
}
gpr_free(server->cqs);
gpr_free(server->pollsets);
gpr_free(server->shutdown_tags);
gpr_free(server);
server_delete(server);
}
}
@ -378,6 +386,26 @@ static void kill_zombie(void *elem, int success) {
grpc_call_destroy(grpc_call_from_top_element(elem));
}
static int num_listeners(grpc_server *server) {
listener *l;
int n = 0;
for (l = server->listeners; l; l = l->next) {
n++;
}
return n;
}
static void maybe_finish_shutdown(grpc_server *server) {
size_t i;
if (server->shutdown && !server->shutdown_published &&
    server->lists[ALL_CALLS] == NULL &&
    server->listeners_destroyed == num_listeners(server)) {
server->shutdown_published = 1;
for (i = 0; i < server->num_shutdown_tags; i++) {
grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag,
NULL, 1);
}
}
}
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
channel_data *chand = elem->channel_data;
@ -441,6 +469,9 @@ static void server_on_recv(void *ptr, int success) {
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
}
if (call_list_remove(calld, ALL_CALLS)) {
maybe_finish_shutdown(chand->server);
}
gpr_mu_unlock(&chand->server->mu);
break;
}
@ -539,19 +570,15 @@ static void init_call_elem(grpc_call_element *elem,
static void destroy_call_elem(grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
size_t i, j;
int removed[CALL_LIST_COUNT];
size_t i;
gpr_mu_lock(&chand->server->mu);
for (i = 0; i < CALL_LIST_COUNT; i++) {
call_list_remove(elem->call_data, i);
removed[i] = call_list_remove(elem->call_data, i);
}
if (chand->server->shutdown && chand->server->lists[ALL_CALLS] == NULL) {
for (i = 0; i < chand->server->num_shutdown_tags; i++) {
for (j = 0; j < chand->server->cq_count; j++) {
grpc_cq_end_op(chand->server->cqs[j], chand->server->shutdown_tags[i],
NULL, 1);
}
}
if (removed[ALL_CALLS]) {
maybe_finish_shutdown(chand->server);
}
gpr_mu_unlock(&chand->server->mu);
@ -646,7 +673,6 @@ grpc_server *grpc_server_create_from_filters(grpc_channel_filter **filters,
memset(server, 0, sizeof(grpc_server));
gpr_mu_init(&server->mu);
gpr_cv_init(&server->cv);
/* decremented by grpc_server_destroy */
gpr_ref_init(&server->internal_refcount, 1);
@ -806,38 +832,28 @@ grpc_transport_setup_result grpc_server_setup_transport(
return result;
}
static int num_listeners(grpc_server *server) {
listener *l;
int n = 0;
for (l = server->listeners; l; l = l->next) {
n++;
}
return n;
}
static void shutdown_internal(grpc_server *server, gpr_uint8 have_shutdown_tag,
void *shutdown_tag) {
void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag) {
listener *l;
requested_call_array requested_calls;
channel_data **channels;
channel_data *c;
size_t nchannels;
size_t i, j;
size_t i;
grpc_channel_op op;
grpc_channel_element *elem;
registered_method *rm;
shutdown_tag *sdt;
/* lock, and gather up some stuff to do */
gpr_mu_lock(&server->mu);
if (have_shutdown_tag) {
for (i = 0; i < server->cq_count; i++) {
grpc_cq_begin_op(server->cqs[i], NULL);
}
server->shutdown_tags =
gpr_realloc(server->shutdown_tags,
sizeof(void *) * (server->num_shutdown_tags + 1));
server->shutdown_tags[server->num_shutdown_tags++] = shutdown_tag;
}
grpc_cq_begin_op(cq, NULL);
server->shutdown_tags =
gpr_realloc(server->shutdown_tags,
sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
sdt = &server->shutdown_tags[server->num_shutdown_tags++];
sdt->tag = tag;
sdt->cq = cq;
if (server->shutdown) {
gpr_mu_unlock(&server->mu);
return;
@ -878,13 +894,7 @@ static void shutdown_internal(grpc_server *server, gpr_uint8 have_shutdown_tag,
}
server->shutdown = 1;
if (server->lists[ALL_CALLS] == NULL) {
for (i = 0; i < server->num_shutdown_tags; i++) {
for (j = 0; j < server->cq_count; j++) {
grpc_cq_end_op(server->cqs[j], server->shutdown_tags[i], NULL, 1);
}
}
}
maybe_finish_shutdown(server);
gpr_mu_unlock(&server->mu);
for (i = 0; i < nchannels; i++) {
@ -914,46 +924,64 @@ static void shutdown_internal(grpc_server *server, gpr_uint8 have_shutdown_tag,
}
}
void grpc_server_shutdown(grpc_server *server) {
shutdown_internal(server, 0, NULL);
}
void grpc_server_shutdown_and_notify(grpc_server *server, void *tag) {
shutdown_internal(server, 1, tag);
}
void grpc_server_listener_destroy_done(void *s) {
grpc_server *server = s;
gpr_mu_lock(&server->mu);
server->listeners_destroyed++;
gpr_cv_signal(&server->cv);
maybe_finish_shutdown(server);
gpr_mu_unlock(&server->mu);
}
void grpc_server_destroy(grpc_server *server) {
channel_data *c;
listener *l;
size_t i;
void grpc_server_cancel_all_calls(grpc_server *server) {
call_data *calld;
grpc_call **calls;
size_t call_count;
size_t call_capacity;
int is_first = 1;
size_t i;
gpr_mu_lock(&server->mu);
if (!server->shutdown) {
GPR_ASSERT(server->shutdown);
if (!server->lists[ALL_CALLS]) {
gpr_mu_unlock(&server->mu);
grpc_server_shutdown(server);
gpr_mu_lock(&server->mu);
return;
}
while (server->listeners_destroyed != num_listeners(server)) {
for (i = 0; i < server->cq_count; i++) {
gpr_mu_unlock(&server->mu);
grpc_cq_hack_spin_pollset(server->cqs[i]);
gpr_mu_lock(&server->mu);
call_capacity = 8;
call_count = 0;
calls = gpr_malloc(sizeof(grpc_call *) * call_capacity);
for (calld = server->lists[ALL_CALLS];
     calld != server->lists[ALL_CALLS] || is_first;
     calld = calld->links[ALL_CALLS].next) {
if (call_count == call_capacity) {
call_capacity *= 2;
calls = gpr_realloc(calls, sizeof(grpc_call *) * call_capacity);
}
calls[call_count++] = calld->call;
GRPC_CALL_INTERNAL_REF(calld->call, "cancel_all");
is_first = 0;
}
gpr_mu_unlock(&server->mu);
gpr_cv_wait(&server->cv, &server->mu,
gpr_time_add(gpr_now(), gpr_time_from_millis(100)));
for (i = 0; i < call_count; i++) {
grpc_call_cancel_with_status(calls[i], GRPC_STATUS_UNAVAILABLE, "Unavailable");
GRPC_CALL_INTERNAL_UNREF(calls[i], "cancel_all", 1);
}
gpr_free(calls);
}
void grpc_server_destroy(grpc_server *server) {
channel_data *c;
listener *l;
call_data *calld;
gpr_mu_lock(&server->mu);
GPR_ASSERT(server->shutdown || !server->listeners);
GPR_ASSERT(server->listeners_destroyed == num_listeners(server));
while (server->listeners) {
l = server->listeners;
server->listeners = l->next;
@ -962,10 +990,6 @@ void grpc_server_destroy(grpc_server *server) {
while ((calld = call_list_remove_head(&server->lists[PENDING_START],
PENDING_START)) != NULL) {
/* TODO(dgq): If we knew the size of the call list (or an upper bound), we
* could allocate all the memory for the closures in advance in a single
* chunk */
gpr_log(GPR_DEBUG, "server destroys call %p", calld->call);
calld->state = ZOMBIED;
grpc_iomgr_closure_init(
&calld->kill_zombie_closure, kill_zombie,
@ -1111,6 +1135,7 @@ static void begin_call(grpc_server *server, call_data *calld,
rc->data.batch.details->deadline = calld->deadline;
r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
r->data.recv_metadata = rc->data.batch.initial_metadata;
r->flags = 0;
r++;
publish = publish_registered_or_batch;
break;
@ -1118,10 +1143,12 @@ static void begin_call(grpc_server *server, call_data *calld,
*rc->data.registered.deadline = calld->deadline;
r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
r->data.recv_metadata = rc->data.registered.initial_metadata;
r->flags = 0;
r++;
if (rc->data.registered.optional_payload) {
r->op = GRPC_IOREQ_RECV_MESSAGE;
r->data.recv_message = rc->data.registered.optional_payload;
r->flags = 0;
r++;
}
publish = publish_registered_or_batch;

@ -58,11 +58,18 @@ typedef enum grpc_stream_op_code {
GRPC_OP_SLICE
} grpc_stream_op_code;
/** Internal bit flag for grpc_begin_message's \a flags signaling the use of
* compression for the message */
#define GRPC_WRITE_INTERNAL_COMPRESS (0x80000000u)
/** Mask of all valid internal flags. */
#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
/* Arguments for GRPC_OP_BEGIN_MESSAGE */
typedef struct grpc_begin_message {
/* How many bytes of data will this message contain */
gpr_uint32 length;
/* Write flags for the message: see grpc.h GRPC_WRITE_xxx */
/* Write flags for the message: see grpc.h GRPC_WRITE_* for the public bits,
* GRPC_WRITE_INTERNAL_* for the internal ones. */
gpr_uint32 flags;
} grpc_begin_message;
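Parking the compression bit in the high bit keeps the public GRPC_WRITE_* mask and the internal mask disjoint, so transports can test it without colliding with user flags. A sketch:

/* Sketch: (GRPC_WRITE_USED_MASK & GRPC_WRITE_INTERNAL_USED_MASK) == 0,
   so the internal bit can be tested in isolation. */
static int begin_message_is_compressed(const grpc_begin_message *msg) {
  return (msg->flags & GRPC_WRITE_INTERNAL_COMPRESS) != 0;
}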

@ -34,6 +34,7 @@
#include <grpc++/channel_arguments.h>
#include <grpc/grpc_security.h>
#include "src/core/channel/channel_args.h"
namespace grpc {
@ -41,6 +42,10 @@ void ChannelArguments::SetSslTargetNameOverride(const grpc::string& name) {
SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG, name);
}
void ChannelArguments::SetCompressionLevel(grpc_compression_level level) {
SetInt(GRPC_COMPRESSION_LEVEL_ARG, level);
}
grpc::string ChannelArguments::GetSslTargetNameOverride() const {
for (unsigned int i = 0; i < args_.size(); i++) {
if (grpc::string(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == args_[i].key) {

@ -57,7 +57,7 @@ Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
buf.AddClientSendClose();
buf.AddClientRecvStatus(context, &status);
call.PerformOps(&buf);
GPR_ASSERT((cq.Pluck(&buf) && buf.got_message) || !status.IsOk());
GPR_ASSERT((cq.Pluck(&buf) && buf.got_message) || !status.ok());
return status;
}

@ -214,8 +214,8 @@ void CallOpBuffer::AddServerSendStatus(
trailing_metadata_count_ = 0;
}
send_status_available_ = true;
send_status_code_ = static_cast<grpc_status_code>(status.code());
send_status_details_ = status.details();
send_status_code_ = static_cast<grpc_status_code>(status.error_code());
send_status_details_ = status.error_message();
}
void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
@ -224,11 +224,13 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
ops[*nops].op = GRPC_OP_SEND_INITIAL_METADATA;
ops[*nops].data.send_initial_metadata.count = initial_metadata_count_;
ops[*nops].data.send_initial_metadata.metadata = initial_metadata_;
ops[*nops].flags = 0;
(*nops)++;
}
if (recv_initial_metadata_) {
ops[*nops].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[*nops].data.recv_initial_metadata = &recv_initial_metadata_arr_;
ops[*nops].flags = 0;
(*nops)++;
}
if (send_message_ || send_message_buffer_) {
@ -245,15 +247,18 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
}
ops[*nops].op = GRPC_OP_SEND_MESSAGE;
ops[*nops].data.send_message = send_buf_;
ops[*nops].flags = 0;
(*nops)++;
}
if (recv_message_ || recv_message_buffer_) {
ops[*nops].op = GRPC_OP_RECV_MESSAGE;
ops[*nops].data.recv_message = &recv_buf_;
ops[*nops].flags = 0;
(*nops)++;
}
if (client_send_close_) {
ops[*nops].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops[*nops].flags = 0;
(*nops)++;
}
if (recv_status_) {
@ -264,6 +269,7 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
ops[*nops].data.recv_status_on_client.status_details = &status_details_;
ops[*nops].data.recv_status_on_client.status_details_capacity =
&status_details_capacity_;
ops[*nops].flags = 0;
(*nops)++;
}
if (send_status_available_) {
@ -275,11 +281,13 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
ops[*nops].data.send_status_from_server.status = send_status_code_;
ops[*nops].data.send_status_from_server.status_details =
send_status_details_.empty() ? nullptr : send_status_details_.c_str();
ops[*nops].flags = 0;
(*nops)++;
}
if (recv_closed_) {
ops[*nops].op = GRPC_OP_RECV_CLOSE_ON_SERVER;
ops[*nops].data.recv_close_on_server.cancelled = &cancelled_buf_;
ops[*nops].flags = 0;
(*nops)++;
}
}

@ -52,6 +52,14 @@
namespace grpc {
class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag {
public:
bool FinalizeResult(void** tag, bool* status) {
delete this;
return false;
}
};
class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
public:
SyncRequest(RpcServiceMethod* method, void* tag)
@ -217,6 +225,9 @@ Server::~Server() {
Shutdown();
}
}
void* got_tag;
bool ok;
GPR_ASSERT(!cq_.Next(&got_tag, &ok));
grpc_server_destroy(server_);
if (thread_pool_owned_) {
delete thread_pool_;
@ -290,7 +301,7 @@ void Server::Shutdown() {
grpc::unique_lock<grpc::mutex> lock(mu_);
if (started_ && !shutdown_) {
shutdown_ = true;
grpc_server_shutdown(server_);
grpc_server_shutdown_and_notify(server_, cq_.cq(), new ShutdownRequest());
cq_.Shutdown();
// Wait for running callbacks to finish.

@ -36,6 +36,6 @@
namespace grpc {
const Status& Status::OK = Status();
const Status& Status::Cancelled = Status(StatusCode::CANCELLED);
const Status& Status::CANCELLED = Status(StatusCode::CANCELLED, "");
} // namespace grpc

@ -205,20 +205,22 @@ namespace Grpc.Core.Tests
() => { Calls.BlockingUnaryCall(call, "ABC", default(CancellationToken)); });
}
[Test]
public void UnknownMethodHandler()
{
var call = new Call<string, string>(ServiceName, NonexistentMethod, channel, Metadata.Empty);
try
{
Calls.BlockingUnaryCall(call, "ABC", default(CancellationToken));
Assert.Fail();
}
catch (RpcException e)
{
Assert.AreEqual(StatusCode.Unimplemented, e.Status.StatusCode);
}
}
// TODO(jtattermusch): temporarily commented out for #1731
// to be uncommented along with PR #1577
// [Test]
// public void UnknownMethodHandler()
// {
// var call = new Call<string, string>(ServiceName, NonexistentMethod, channel, Metadata.Empty);
// try
// {
// Calls.BlockingUnaryCall(call, "ABC", default(CancellationToken));
// Assert.Fail();
// }
// catch (RpcException e)
// {
// Assert.AreEqual(StatusCode.Unimplemented, e.Status.StatusCode);
// }
// }
private static async Task<string> EchoHandler(ServerCallContext context, string request)
{

@ -192,7 +192,5 @@ namespace Grpc.Core.Internal
{
return buffered ? 0 : GRPC_WRITE_BUFFER_HINT;
}
}
}

@ -32,14 +32,15 @@
#endregion
using System;
using System.Collections.Generic;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
internal delegate void OpCompletionDelegate(bool success);
internal delegate void BatchCompletionDelegate(bool success, BatchContextSafeHandle ctx);
internal class CompletionRegistry

@ -60,10 +60,10 @@ namespace Grpc.Core.Internal
static extern GRPCCallError grpcsharp_server_request_call(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_shutdown(ServerSafeHandle server);
static extern void grpcsharp_server_cancel_all_calls(ServerSafeHandle server);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_shutdown_and_notify_callback(ServerSafeHandle server, BatchContextSafeHandle ctx);
static extern void grpcsharp_server_shutdown_and_notify_callback(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_destroy(IntPtr server);
@ -91,17 +91,12 @@ namespace Grpc.Core.Internal
{
grpcsharp_server_start(this);
}
public void Shutdown()
{
grpcsharp_server_shutdown(this);
}
public void ShutdownAndNotify(BatchCompletionDelegate callback)
public void ShutdownAndNotify(CompletionQueueSafeHandle cq, BatchCompletionDelegate callback)
{
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_server_shutdown_and_notify_callback(this, ctx);
grpcsharp_server_shutdown_and_notify_callback(this, cq, ctx);
}
public void RequestCall(CompletionQueueSafeHandle cq, BatchCompletionDelegate callback)
@ -116,5 +111,11 @@ namespace Grpc.Core.Internal
grpcsharp_server_destroy(handle);
return true;
}
// Only to be called after ShutdownAndNotify.
public void CancelAllCalls()
{
grpcsharp_server_cancel_all_calls(this);
}
}
}

@ -143,7 +143,8 @@ namespace Grpc.Core
Preconditions.CheckState(!shutdownRequested);
shutdownRequested = true;
}
handle.ShutdownAndNotify(HandleServerShutdown);
handle.ShutdownAndNotify(GetCompletionQueue(), HandleServerShutdown);
await shutdownTcs.Task;
handle.Dispose();
}
@ -159,8 +160,22 @@ namespace Grpc.Core
}
}
public void Kill()
/// <summary>
/// Requests server shutdown while cancelling all in-progress calls.
/// The returned task finishes when the shutdown procedure is complete.
/// </summary>
public async Task KillAsync()
{
lock (myLock)
{
Preconditions.CheckState(startRequested);
Preconditions.CheckState(!shutdownRequested);
shutdownRequested = true;
}
handle.ShutdownAndNotify(GetCompletionQueue(), HandleServerShutdown);
handle.CancelAllCalls();
await shutdownTcs.Task;
handle.Dispose();
}

@ -417,18 +417,23 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count;
ops[0].data.send_initial_metadata.metadata =
ctx->send_initial_metadata.metadata;
ops[0].flags = 0;
ops[1].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ops[1].data.send_message = ctx->send_message;
ops[1].flags = 0;
ops[2].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops[2].flags = 0;
ops[3].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[3].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
ops[3].flags = 0;
ops[4].op = GRPC_OP_RECV_MESSAGE;
ops[4].data.recv_message = &(ctx->recv_message);
ops[4].flags = 0;
ops[5].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
ops[5].data.recv_status_on_client.trailing_metadata =
@ -440,6 +445,7 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
&(ctx->recv_status_on_client.status_details);
ops[5].data.recv_status_on_client.status_details_capacity =
&(ctx->recv_status_on_client.status_details_capacity);
ops[5].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -456,12 +462,15 @@ grpcsharp_call_start_client_streaming(grpc_call *call,
ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count;
ops[0].data.send_initial_metadata.metadata =
ctx->send_initial_metadata.metadata;
ops[0].flags = 0;
ops[1].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[1].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
ops[1].flags = 0;
ops[2].op = GRPC_OP_RECV_MESSAGE;
ops[2].data.recv_message = &(ctx->recv_message);
ops[2].flags = 0;
ops[3].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
ops[3].data.recv_status_on_client.trailing_metadata =
@ -473,6 +482,7 @@ grpcsharp_call_start_client_streaming(grpc_call *call,
&(ctx->recv_status_on_client.status_details);
ops[3].data.recv_status_on_client.status_details_capacity =
&(ctx->recv_status_on_client.status_details_capacity);
ops[3].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -488,15 +498,19 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count;
ops[0].data.send_initial_metadata.metadata =
ctx->send_initial_metadata.metadata;
ops[0].flags = 0;
ops[1].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ops[1].data.send_message = ctx->send_message;
ops[1].flags = 0;
ops[2].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops[2].flags = 0;
ops[3].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[3].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
ops[3].flags = 0;
ops[4].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
ops[4].data.recv_status_on_client.trailing_metadata =
@ -508,6 +522,7 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
&(ctx->recv_status_on_client.status_details);
ops[4].data.recv_status_on_client.status_details_capacity =
&(ctx->recv_status_on_client.status_details_capacity);
ops[4].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -524,9 +539,11 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call,
ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count;
ops[0].data.send_initial_metadata.metadata =
ctx->send_initial_metadata.metadata;
ops[0].flags = 0;
ops[1].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[1].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
ops[1].flags = 0;
ops[2].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
ops[2].data.recv_status_on_client.trailing_metadata =
@ -538,6 +555,7 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call,
&(ctx->recv_status_on_client.status_details);
ops[2].data.recv_status_on_client.status_details_capacity =
&(ctx->recv_status_on_client.status_details_capacity);
ops[2].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -550,6 +568,7 @@ grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx,
ops[0].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ops[0].data.send_message = ctx->send_message;
ops[0].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -560,6 +579,7 @@ grpcsharp_call_send_close_from_client(grpc_call *call,
/* TODO: don't use magic number */
grpc_op ops[1];
ops[0].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops[0].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -577,6 +597,7 @@ grpcsharp_call_send_status_from_server(grpc_call *call,
gpr_strdup(status_details);
ops[0].data.send_status_from_server.trailing_metadata = NULL;
ops[0].data.send_status_from_server.trailing_metadata_count = 0;
ops[0].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -587,6 +608,7 @@ grpcsharp_call_recv_message(grpc_call *call, grpcsharp_batch_context *ctx) {
grpc_op ops[1];
ops[0].op = GRPC_OP_RECV_MESSAGE;
ops[0].data.recv_message = &(ctx->recv_message);
ops[0].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -597,10 +619,12 @@ grpcsharp_call_start_serverside(grpc_call *call, grpcsharp_batch_context *ctx) {
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
ops[0].data.send_initial_metadata.count = 0;
ops[0].data.send_initial_metadata.metadata = NULL;
ops[0].flags = 0;
ops[1].op = GRPC_OP_RECV_CLOSE_ON_SERVER;
ops[1].data.recv_close_on_server.cancelled =
(&ctx->recv_close_on_server_cancelled);
ops[1].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -624,14 +648,15 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_start(grpc_server *server) {
grpc_server_start(server);
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_shutdown(grpc_server *server) {
grpc_server_shutdown(server);
}
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_server_shutdown_and_notify_callback(grpc_server *server,
grpc_completion_queue *cq,
grpcsharp_batch_context *ctx) {
grpc_server_shutdown_and_notify(server, ctx);
grpc_server_shutdown_and_notify(server, cq, ctx);
}
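The corrected call above tracks the core API change that threads through this entire diff: `grpc_server_shutdown` is gone, and `grpc_server_shutdown_and_notify(server, cq, tag)` posts a completion to `cq` when shutdown finishes. A sketch of the resulting sequence, mirroring the updated end2end fixtures later in this diff (`tag()` and `GRPC_TIMEOUT_SECONDS_TO_DEADLINE` are the test helpers used there):

```c
/* Request shutdown, wait for the notification on the completion queue,
   then destroy the server. */
grpc_server_shutdown_and_notify(server, cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(cq, tag(1000),
                                       GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5))
               .type == GRPC_OP_COMPLETE);
grpc_server_destroy(server);
```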
GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_cancel_all_calls(grpc_server *server) {
grpc_server_cancel_all_calls(server);
}
GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_destroy(grpc_server *server) {

@ -550,6 +550,7 @@ NAN_METHOD(Call::StartBatch) {
}
uint32_t type = keys->Get(i)->Uint32Value();
ops[i].op = static_cast<grpc_op_type>(type);
ops[i].flags = 0;
switch (type) {
case GRPC_OP_SEND_INITIAL_METADATA:
op.reset(new SendMetadataOp());

@ -112,9 +112,17 @@ class NewCallOp : public Op {
}
};
Server::Server(grpc_server *server) : wrapped_server(server) {}
Server::Server(grpc_server *server) : wrapped_server(server) {
shutdown_queue = grpc_completion_queue_create();
grpc_server_register_completion_queue(server, shutdown_queue);
}
Server::~Server() { grpc_server_destroy(wrapped_server); }
Server::~Server() {
this->ShutdownServer();
grpc_completion_queue_shutdown(this->shutdown_queue);
grpc_server_destroy(wrapped_server);
grpc_completion_queue_destroy(this->shutdown_queue);
}
void Server::Init(Handle<Object> exports) {
NanScope();
@ -148,6 +156,16 @@ bool Server::HasInstance(Handle<Value> val) {
return NanHasInstance(fun_tpl, val);
}
void Server::ShutdownServer() {
if (this->wrapped_server != NULL) {
grpc_server_shutdown_and_notify(this->wrapped_server,
this->shutdown_queue,
NULL);
grpc_completion_queue_pluck(this->shutdown_queue, NULL, gpr_inf_future);
this->wrapped_server = NULL;
}
}
NAN_METHOD(Server::New) {
NanScope();
@ -207,6 +225,9 @@ NAN_METHOD(Server::RequestCall) {
return NanThrowTypeError("requestCall can only be called on a Server");
}
Server *server = ObjectWrap::Unwrap<Server>(args.This());
if (server->wrapped_server == NULL) {
return NanThrowError("requestCall cannot be called on a shut down Server");
}
NewCallOp *op = new NewCallOp();
unique_ptr<OpVec> ops(new OpVec());
ops->push_back(unique_ptr<Op>(op));
@ -232,6 +253,9 @@ NAN_METHOD(Server::AddHttp2Port) {
return NanThrowTypeError("addHttp2Port's argument must be a String");
}
Server *server = ObjectWrap::Unwrap<Server>(args.This());
if (server->wrapped_server == NULL) {
return NanThrowError("addHttp2Port cannot be called on a shut down Server");
}
NanReturnValue(NanNew<Number>(grpc_server_add_http2_port(
server->wrapped_server, *NanUtf8String(args[0]))));
}
@ -251,6 +275,10 @@ NAN_METHOD(Server::AddSecureHttp2Port) {
"addSecureHttp2Port's second argument must be ServerCredentials");
}
Server *server = ObjectWrap::Unwrap<Server>(args.This());
if (server->wrapped_server == NULL) {
return NanThrowError(
"addSecureHttp2Port cannot be called on a shut down Server");
}
ServerCredentials *creds = ObjectWrap::Unwrap<ServerCredentials>(
args[1]->ToObject());
NanReturnValue(NanNew<Number>(grpc_server_add_secure_http2_port(
@ -264,17 +292,24 @@ NAN_METHOD(Server::Start) {
return NanThrowTypeError("start can only be called on a Server");
}
Server *server = ObjectWrap::Unwrap<Server>(args.This());
if (server->wrapped_server == NULL) {
return NanThrowError("start cannot be called on a shut down Server");
}
grpc_server_start(server->wrapped_server);
NanReturnUndefined();
}
NAN_METHOD(ShutdownCallback) {
NanReturnUndefined();
}
NAN_METHOD(Server::Shutdown) {
NanScope();
if (!HasInstance(args.This())) {
return NanThrowTypeError("shutdown can only be called on a Server");
}
Server *server = ObjectWrap::Unwrap<Server>(args.This());
grpc_server_shutdown(server->wrapped_server);
server->ShutdownServer();
NanReturnUndefined();
}

@ -61,6 +61,8 @@ class Server : public ::node::ObjectWrap {
Server(const Server &);
Server &operator=(const Server &);
void ShutdownServer();
static NAN_METHOD(New);
static NAN_METHOD(RequestCall);
static NAN_METHOD(AddHttp2Port);
@ -71,6 +73,7 @@ class Server : public ::node::ObjectWrap {
static v8::Persistent<v8::FunctionTemplate> fun_tpl;
grpc_server *wrapped_server;
grpc_completion_queue *shutdown_queue;
};
} // namespace node

@ -100,7 +100,9 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
if (!host || !method) {
[NSException raise:NSInvalidArgumentException format:@"Neither host nor method can be nil."];
}
// TODO(jcanizales): Throw if the requestWriter was already started.
if (requestWriter.state != GRXWriterStateNotStarted) {
[NSException raise:NSInvalidArgumentException format:@"The request writer can't already be started."];
}
if ((self = [super init])) {
static dispatch_once_t initialization;
dispatch_once(&initialization, ^{
@ -319,9 +321,12 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
if (strongSelf) {
[strongSelf->_responseMetadata addEntriesFromDictionary:trailers];
NSMutableDictionary *userInfo = [NSMutableDictionary dictionaryWithDictionary:error.userInfo];
userInfo[kGRPCStatusMetadataKey] = strongSelf->_responseMetadata;
error = [NSError errorWithDomain:error.domain code:error.code userInfo:userInfo];
if (error) {
NSMutableDictionary *userInfo =
[NSMutableDictionary dictionaryWithDictionary:error.userInfo];
userInfo[kGRPCStatusMetadataKey] = strongSelf->_responseMetadata;
error = [NSError errorWithDomain:error.domain code:error.code userInfo:userInfo];
}
[strongSelf finishWithError:error];
}
}];

@ -163,7 +163,7 @@ files:
* [Podspec](https://github.com/grpc/grpc/blob/master/gRPC.podspec) for the Objective-C gRPC runtime
library. This can be tedious to configure manually.
* [Podspec](https://github.com/jcanizales/protobuf/blob/add-podspec/Protobuf.podspec) for the
* [Podspec](https://github.com/google/protobuf/blob/master/Protobuf.podspec) for the
Objective-C Protobuf runtime library.
[Protocol Buffers]:https://developers.google.com/protocol-buffers/

@ -9,12 +9,11 @@ Pre-Alpha : This gRPC PHP implementation is work-in-progress and is not expected
## ENVIRONMENT
Install `php5` and `php5-dev`.
Prerequisites: PHP 5.5 or later, PHPUnit, pecl
To run the tests, additionally install `phpunit`.
Alternatively, build and install PHP 5.5 or later from source with standard
configuration options.
```sh
sudo apt-get install php5 php5-dev phpunit php-pear
```
## Build from Homebrew
@ -48,7 +47,7 @@ $ make check
$ sudo make install
```
Build and install the gRPC C core
Build and install the gRPC C core libraries
```sh
$ cd grpc
@ -56,7 +55,13 @@ $ make
$ sudo make install
```
Build the gRPC PHP extension
Install the gRPC PHP extension
```sh
$ sudo pecl install grpc
```
OR
```sh
$ cd grpc/src/php/ext/grpc
@ -125,4 +130,3 @@ $ ./bin/run_gen_code_test.sh
[linuxbrew]:https://github.com/Homebrew/linuxbrew#installation
[gRPC install script]:https://raw.githubusercontent.com/grpc/homebrew-grpc/master/scripts/install
[Node]:https://github.com/grpc/grpc/tree/master/src/node/examples

@ -34,15 +34,31 @@ set -e
cd $(dirname $0)
default_extension_dir=`php -i | grep extension_dir | sed 's/.*=> //g'`
module_dir=../ext/grpc/modules
if command -v brew >/dev/null && [ -d `brew --prefix`/opt/grpc-php ]
then
# homebrew and the grpc-php formula are installed
extension_dir="-d extension_dir="`brew --prefix`/opt/grpc-php
elif [ ! -e $default_extension_dir/grpc.so ]
then
# the grpc extension is not found in the default PHP extension dir
# try the source modules directory
module_dir=../ext/grpc/modules
if [ ! -d $module_dir ]
then
echo "Please run 'phpize && ./configure && make' from ext/grpc first"
exit 1
fi
# sym-link in system supplied extensions
for f in $default_extension_dir/*.so
do
ln -s $f $module_dir/$(basename $f) &> /dev/null || true
done
extension_dir='-d extension_dir='$module_dir
fi
php \
-d extension_dir=$module_dir \
$extension_dir \
-d extension=grpc.so \
`which phpunit` -v --debug --strict ../tests/unit_tests

@ -0,0 +1,3 @@
Michael Lumish (mlumish@google.com)
Tim Emiola (temiola@google.com)
Stanley Cheung (stanleycheung@google.com)

@ -0,0 +1,32 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

@ -0,0 +1,72 @@
gRPC PHP Extension
==================
# Requirements
* PHP 5.5+
* [gRPC core library](https://github.com/grpc/grpc) 0.9.1
# Installation
## Install PHP 5
```
$ sudo apt-get install git php5 php5-dev php-pear unzip
```
## Compile gRPC Core Library
Clone the gRPC source code repository
```
$ git clone https://github.com/grpc/grpc.git
```
Build and install the Protocol Buffers compiler (protoc)
```
$ # from grpc
$ git checkout --track origin/release-0_9
$ git pull --recurse-submodules && git submodule update --init --recursive
$ cd third_party/protobuf
$ ./autogen.sh
$ ./configure
$ make
$ make check
$ sudo make install
```
Build and install the gRPC C core library
```sh
$ # from grpc
$ make
$ sudo make install
```
## Install the gRPC PHP extension
Quick install
```sh
$ sudo pecl install grpc
```
Note: before a stable release, you may need to do
```sh
$ sudo pecl install grpc-0.5.0
```
OR
Compile from source
```sh
$ # from grpc
$ cd src/php/ext/grpc
$ phpize
$ ./configure
$ make
$ sudo make install
```

@ -397,6 +397,7 @@ PHP_METHOD(Call, startBatch) {
goto cleanup;
}
ops[op_num].op = (grpc_op_type)index;
ops[op_num].flags = 0;
op_num++;
}
error = grpc_call_start_batch(call->wrapped, ops, op_num, call->wrapped);

@ -0,0 +1,82 @@
<?xml version="1.0" encoding="UTF-8"?>
<package packagerversion="1.9.5" version="2.0" xmlns="http://pear.php.net/dtd/package-2.0" xmlns:tasks="http://pear.php.net/dtd/tasks-1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://pear.php.net/dtd/tasks-1.0 http://pear.php.net/dtd/tasks-1.0.xsd http://pear.php.net/dtd/package-2.0 http://pear.php.net/dtd/package-2.0.xsd">
<name>grpc</name>
<channel>pecl.php.net</channel>
<summary>A high performance, open source, general RPC framework that puts mobile and HTTP/2 first.</summary>
<description>Remote Procedure Calls (RPCs) provide a useful abstraction for building distributed applications and services. The libraries in this repository provide a concrete implementation of the gRPC protocol, layered over HTTP/2. These libraries enable communication between clients and servers using any combination of the supported languages.</description>
<lead>
<name>Stanley Cheung</name>
<user>stanleycheung</user>
<email>grpc-packages@google.com</email>
<active>yes</active>
</lead>
<date>2015-06-16</date>
<time>20:12:55</time>
<version>
<release>0.5.0</release>
<api>0.5.0</api>
</version>
<stability>
<release>alpha</release>
<api>alpha</api>
</stability>
<license>BSD</license>
<notes>
First alpha release
</notes>
<contents>
<dir baseinstalldir="/" name="/">
<file baseinstalldir="/" md5sum="6f19828fb869b7b8a590cbb76b4f996d" name="byte_buffer.c" role="src" />
<file baseinstalldir="/" md5sum="c8de0f819499c48adfc8d7f472c0196b" name="byte_buffer.h" role="src" />
<file baseinstalldir="/" md5sum="cb45b62f767ae7b4377761df696649fc" name="call.c" role="src" />
<file baseinstalldir="/" md5sum="26acbf04c30162c2d2aca4688bb2aec8" name="call.h" role="src" />
<file baseinstalldir="/" md5sum="50837fbdb2892795f1871b22e5979762" name="channel.c" role="src" />
<file baseinstalldir="/" md5sum="f1b66029daeced20b47cf00cc6523fc8" name="channel.h" role="src" />
<file baseinstalldir="/" md5sum="81a1193e93d8b6602add8ac360de565b" name="completion_queue.c" role="src" />
<file baseinstalldir="/" md5sum="f10b5bb232d74a6878e829e2e76cdaa2" name="completion_queue.h" role="src" />
<file baseinstalldir="/" md5sum="a9181ed994a072ac5f41e7c9705c170f" name="config.m4" role="src" />
<file baseinstalldir="/" md5sum="8c3f1e11dac623001378bfd53b554f08" name="credentials.c" role="src" />
<file baseinstalldir="/" md5sum="6988d6e97c19c8f8e3eb92371cf8246b" name="credentials.h" role="src" />
<file baseinstalldir="/" md5sum="38a1bc979d810c36ebc2a52d4b7b5319" name="CREDITS" role="doc" />
<file baseinstalldir="/" md5sum="3f35b472bbdef5a788cd90617d7d0847" name="LICENSE" role="doc" />
<file baseinstalldir="/" md5sum="6aaa7a290122d230f2d8c4e4e05da4a9" name="php_grpc.c" role="src" />
<file baseinstalldir="/" md5sum="673b07859d9f69232f8a754c56780686" name="php_grpc.h" role="src" />
<file baseinstalldir="/" md5sum="4d4d3382f8d10cae2e4378468e5516b9" name="README.md" role="doc" />
<file baseinstalldir="/" md5sum="53fda0ee6937f6ddc8e271886018d441" name="server.c" role="src" />
<file baseinstalldir="/" md5sum="4b730f06d14cbbb0642bdbd194749595" name="server.h" role="src" />
<file baseinstalldir="/" md5sum="f6930beafb6c0e061899262f2f077e98" name="server_credentials.c" role="src" />
<file baseinstalldir="/" md5sum="9c4b4cc06356a8a39a16a085a9b85996" name="server_credentials.h" role="src" />
<file baseinstalldir="/" md5sum="c89c623cd17177ebde18313fc5c17122" name="timeval.c" role="src" />
<file baseinstalldir="/" md5sum="496e27a100b4d93ca3fb35c924c5e163" name="timeval.h" role="src" />
</dir>
</contents>
<dependencies>
<required>
<php>
<min>5.5.0</min>
</php>
<pearinstaller>
<min>1.4.0</min>
</pearinstaller>
</required>
</dependencies>
<providesextension>grpc</providesextension>
<extsrcrelease />
<changelog>
<release>
<version>
<release>0.5.0</release>
<api>0.5.0</api>
</version>
<stability>
<release>alpha</release>
<api>alpha</api>
</stability>
<date>2015-06-16</date>
<license>BSD</license>
<notes>
First alpha release
</notes>
</release>
</changelog>
</package>

@ -63,7 +63,8 @@ zend_class_entry *grpc_ce_server;
void free_wrapped_grpc_server(void *object TSRMLS_DC) {
wrapped_grpc_server *server = (wrapped_grpc_server *)object;
if (server->wrapped != NULL) {
grpc_server_shutdown(server->wrapped);
grpc_server_shutdown_and_notify(server->wrapped, completion_queue, NULL);
grpc_completion_queue_pluck(completion_queue, NULL, gpr_inf_future);
grpc_server_destroy(server->wrapped);
}
efree(server);

@ -43,9 +43,19 @@ abstract class AbstractCall {
* Create a new Call wrapper object.
* @param Channel $channel The channel to communicate on
* @param string $method The method to call on the remote server
* @param callback $deserialize A callback function to deserialize
* the response
* @param (optional) long $timeout Timeout in microseconds
*/
public function __construct(Channel $channel, $method, $deserialize) {
$this->call = new Call($channel, $method, Timeval::infFuture());
public function __construct(Channel $channel, $method, $deserialize, $timeout = false) {
if ($timeout) {
$now = Timeval::now();
$delta = new Timeval($timeout);
$deadline = $now->add($delta);
} else {
$deadline = Timeval::infFuture();
}
$this->call = new Call($channel, $method, $deadline);
$this->deserialize = $deserialize;
$this->metadata = null;
}

@ -83,6 +83,21 @@ class BaseStub {
return "https://" . $this->hostname . $service_name;
}
/**
* Extract $timeout from $metadata
* @param $metadata The metadata map
* @return list($metadata_copy, $timeout)
*/
private function _extract_timeout_from_metadata($metadata) {
$timeout = false;
$metadata_copy = $metadata;
if (isset($metadata['timeout'])) {
$timeout = $metadata['timeout'];
unset($metadata_copy['timeout']);
}
return array($metadata_copy, $timeout);
}
/* This class is intended to be subclassed by generated code, so all functions
begin with "_" to avoid name collisions. */
@ -99,8 +114,8 @@ class BaseStub {
$argument,
callable $deserialize,
$metadata = array()) {
$call = new UnaryCall($this->channel, $method, $deserialize);
$actual_metadata = $metadata;
list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata);
$call = new UnaryCall($this->channel, $method, $deserialize, $timeout);
$jwt_aud_uri = $this->_get_jwt_aud_uri($method);
if (is_callable($this->update_metadata)) {
$actual_metadata = call_user_func($this->update_metadata,
@ -126,8 +141,8 @@ class BaseStub {
$arguments,
callable $deserialize,
$metadata = array()) {
$call = new ClientStreamingCall($this->channel, $method, $deserialize);
$actual_metadata = $metadata;
list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata);
$call = new ClientStreamingCall($this->channel, $method, $deserialize, $timeout);
$jwt_aud_uri = $this->_get_jwt_aud_uri($method);
if (is_callable($this->update_metadata)) {
$actual_metadata = call_user_func($this->update_metadata,
@ -152,8 +167,8 @@ class BaseStub {
$argument,
callable $deserialize,
$metadata = array()) {
$call = new ServerStreamingCall($this->channel, $method, $deserialize);
$actual_metadata = $metadata;
list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata);
$call = new ServerStreamingCall($this->channel, $method, $deserialize, $timeout);
$jwt_aud_uri = $this->_get_jwt_aud_uri($method);
if (is_callable($this->update_metadata)) {
$actual_metadata = call_user_func($this->update_metadata,
@ -175,8 +190,8 @@ class BaseStub {
public function _bidiRequest($method,
callable $deserialize,
$metadata = array()) {
$call = new BidiStreamingCall($this->channel, $method, $deserialize);
$actual_metadata = $metadata;
list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata);
$call = new BidiStreamingCall($this->channel, $method, $deserialize, $timeout);
$jwt_aud_uri = $this->_get_jwt_aud_uri($method);
if (is_callable($this->update_metadata)) {
$actual_metadata = call_user_func($this->update_metadata,

@ -270,6 +270,24 @@ function cancelAfterFirstResponse($stub) {
'Call status was not CANCELLED');
}
function timeoutOnSleepingServer($stub) {
$call = $stub->FullDuplexCall(array('timeout' => 500000));
$request = new grpc\testing\StreamingOutputCallRequest();
$request->setResponseType(grpc\testing\PayloadType::COMPRESSABLE);
$response_parameters = new grpc\testing\ResponseParameters();
$response_parameters->setSize(8);
$request->addResponseParameters($response_parameters);
$payload = new grpc\testing\Payload();
$payload->setBody(str_repeat("\0", 9));
$request->setPayload($payload);
$call->write($request);
$response = $call->read();
hardAssert($call->getStatus()->code === Grpc\STATUS_DEADLINE_EXCEEDED,
'Call status was not DEADLINE_EXCEEDED');
}
$args = getopt('', array('server_host:', 'server_port:', 'test_case:',
'server_host_override:', 'oauth_scope:',
'default_service_account:'));
@ -341,6 +359,9 @@ switch ($args['test_case']) {
case 'cancel_after_first_response':
cancelAfterFirstResponse($stub);
break;
case 'timeout_on_sleeping_server':
timeoutOnSleepingServer($stub);
break;
case 'service_account_creds':
serviceAccountCreds($stub, $args);
break;

@ -61,4 +61,26 @@ class TimevalTest extends PHPUnit_Framework_TestCase{
$this->assertLessThan(0, Grpc\Timeval::compare($zero, $now));
$this->assertLessThan(0, Grpc\Timeval::compare($now, $future));
}
public function testNowAndAdd() {
$now = Grpc\Timeval::now();
$delta = new Grpc\Timeval(1000);
$deadline = $now->add($delta);
$this->assertGreaterThan(0, Grpc\Timeval::compare($deadline, $now));
}
public function testNowAndSubtract() {
$now = Grpc\Timeval::now();
$delta = new Grpc\Timeval(1000);
$deadline = $now->subtract($delta);
$this->assertLessThan(0, Grpc\Timeval::compare($deadline, $now));
}
public function testAddAndSubtract() {
$now = Grpc\Timeval::now();
$delta = new Grpc\Timeval(1000);
$deadline = $now->add($delta);
$back_to_now = $deadline->subtract($delta);
$this->assertSame(0, Grpc\Timeval::compare($back_to_now, $now));
}
}
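These tests exercise the new `add`/`subtract` methods on `Timeval`, which the extension layers over the gpr time helpers. A rough C equivalent of `testAddAndSubtract`, assuming this era's clock-less `gpr_now(void)` signature:

```c
#include <grpc/support/log.h>
#include <grpc/support/time.h>

/* Assumed equivalent of the PHP round-trip test: (now + delta) - delta == now. */
static void check_add_and_subtract(void) {
  gpr_timespec now = gpr_now();
  gpr_timespec delta = gpr_time_from_micros(1000);
  gpr_timespec deadline = gpr_time_add(now, delta);
  gpr_timespec back_to_now = gpr_time_sub(deadline, delta);
  GPR_ASSERT(gpr_time_cmp(back_to_now, now) == 0);
}
```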

@ -167,17 +167,13 @@ PyObject *pygrpc_Server_start(Server *self, PyObject *ignored) {
PyObject *pygrpc_Server_shutdown(
Server *self, PyObject *args, PyObject *kwargs) {
PyObject *user_tag = NULL;
PyObject *user_tag;
pygrpc_tag *tag;
static char *keywords[] = {"tag", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", keywords, &user_tag)) {
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O", keywords, &user_tag)) {
return NULL;
}
if (user_tag) {
tag = pygrpc_produce_server_shutdown_tag(user_tag);
grpc_server_shutdown_and_notify(self->c_serv, tag);
} else {
grpc_server_shutdown(self->c_serv);
}
tag = pygrpc_produce_server_shutdown_tag(user_tag);
grpc_server_shutdown_and_notify(self->c_serv, self->cq->c_cq, tag);
Py_RETURN_NONE;
}

@ -123,7 +123,8 @@ PyObject *pygrpc_consume_event(grpc_event event) {
event.success ? Py_True : Py_False);
} else {
result = Py_BuildValue("iOOONO", GRPC_OP_COMPLETE, tag->user_tag,
tag->call, Py_None, pygrpc_consume_ops(tag->ops, tag->nops),
tag->call ? (PyObject*)tag->call : Py_None, Py_None,
pygrpc_consume_ops(tag->ops, tag->nops),
event.success ? Py_True : Py_False);
}
break;
@ -168,6 +169,7 @@ int pygrpc_produce_op(PyObject *op, grpc_op *result) {
return 0;
}
c_op.op = type;
c_op.flags = 0;
switch (type) {
case GRPC_OP_SEND_INITIAL_METADATA:
if (!pygrpc_cast_pylist_to_send_metadata(
@ -195,10 +197,11 @@ int pygrpc_produce_op(PyObject *op, grpc_op *result) {
return 0;
}
if (!PyTuple_Check(PyTuple_GET_ITEM(op, STATUS_INDEX))) {
char buf[64];
snprintf(buf, sizeof(buf), "expected tuple status in op of length %d",
STATUS_TUPLE_SIZE);
PyErr_SetString(PyExc_TypeError, buf);
char *buf;
gpr_asprintf(&buf, "expected tuple status in op of length %d",
STATUS_TUPLE_SIZE);
PyErr_SetString(PyExc_ValueError, buf);
gpr_free(buf);
return 0;
}
c_op.data.send_status_from_server.status = PyInt_AsLong(

@ -100,7 +100,7 @@ class _TagAdapter(collections.namedtuple('_TagAdapter', [
class Call(object):
"""Adapter from old _low.Call interface to new _low.Call."""
def __init__(self, channel, completion_queue, method, host, deadline):
self._internal = channel._internal.create_call(
completion_queue._internal, method, host, deadline)
@ -207,7 +207,7 @@ class CompletionQueue(object):
complete_accepted = ev.success if kind == Event.Kind.COMPLETE_ACCEPTED else None
service_acceptance = ServiceAcceptance(Call._from_internal(ev.call), ev.call_details.method, ev.call_details.host, ev.call_details.deadline) if kind == Event.Kind.SERVICE_ACCEPTED else None
message_bytes = ev.results[0].message if kind == Event.Kind.READ_ACCEPTED else None
status = Status(ev.results[0].status.code, ev.results[0].status.details) if (kind == Event.Kind.FINISH and ev.results[0].status) else Status(_types.StatusCode.CANCELLED if ev.results[0].cancelled else _types.StatusCode.OK, '') if ev.results[0].cancelled is not None else None
status = Status(ev.results[0].status.code, ev.results[0].status.details) if (kind == Event.Kind.FINISH and ev.results[0].status) else Status(_types.StatusCode.CANCELLED if ev.results[0].cancelled else _types.StatusCode.OK, '') if len(ev.results) > 0 and ev.results[0].cancelled is not None else None
metadata = ev.results[0].initial_metadata if (kind in [Event.Kind.SERVICE_ACCEPTED, Event.Kind.METADATA_ACCEPTED]) else (ev.results[0].trailing_metadata if kind == Event.Kind.FINISH else None)
else:
raise RuntimeError('unknown event')
@ -241,7 +241,7 @@ class Server(object):
return self._internal.request_call(self._internal_cq, _TagAdapter(tag, Event.Kind.SERVICE_ACCEPTED))
def stop(self):
return self._internal.shutdown()
return self._internal.shutdown(_TagAdapter(None, Event.Kind.STOP))
class ClientCredentials(object):
@ -253,6 +253,6 @@ class ClientCredentials(object):
class ServerCredentials(object):
"""Adapter from old _low.ServerCredentials interface to new _low.ServerCredentials."""
def __init__(self, root_credentials, pair_sequence):
self._internal = _low.ServerCredentials.ssl(root_credentials, list(pair_sequence))

@ -94,14 +94,6 @@ class EchoTest(unittest.TestCase):
def tearDown(self):
self.server.stop()
# NOTE(nathaniel): Yep, this is weird; it's a consequence of
# grpc_server_destroy's being what has the effect of telling the server's
# completion queue to pump out all pending events/tags immediately rather
# than gracefully completing all outstanding RPCs while accepting no new
# ones.
# TODO(nathaniel): Deallocation of a Python object shouldn't have this kind
# of observable side effect let alone such an important one.
del self.server
self.server_completion_queue.stop()
self.client_completion_queue.stop()
while True:
@ -114,6 +106,7 @@ class EchoTest(unittest.TestCase):
break
self.server_completion_queue = None
self.client_completion_queue = None
del self.server
def _perform_echo_test(self, test_data):
method = 'test method'
@ -316,7 +309,6 @@ class CancellationTest(unittest.TestCase):
def tearDown(self):
self.server.stop()
del self.server
self.server_completion_queue.stop()
self.client_completion_queue.stop()
while True:
@ -327,6 +319,7 @@ class CancellationTest(unittest.TestCase):
event = self.client_completion_queue.get(0)
if event is not None and event.kind is _low.Event.Kind.STOP:
break
del self.server
def testCancellation(self):
method = 'test method'

@ -101,11 +101,8 @@ class Server(_types.Server):
def start(self):
return self.server.start()
def shutdown(self, tag=_NO_TAG):
if tag is _NO_TAG:
return self.server.shutdown()
else:
return self.server.shutdown(tag)
def shutdown(self, tag=None):
return self.server.shutdown(tag)
def request_call(self, completion_queue, tag):
return self.server.request_call(completion_queue.completion_queue, tag)

@ -48,7 +48,6 @@ class InsecureServerInsecureClient(unittest.TestCase):
def tearDown(self):
self.server.shutdown()
del self.client_channel
del self.server
self.client_completion_queue.shutdown()
while self.client_completion_queue.next().type != _types.EventType.QUEUE_SHUTDOWN:
@ -59,6 +58,7 @@ class InsecureServerInsecureClient(unittest.TestCase):
del self.client_completion_queue
del self.server_completion_queue
del self.server
def testEcho(self):
DEADLINE = time.time()+5

@ -1,2 +1,4 @@
-I.
--require spec_helper
--format documentation
--color

@ -507,6 +507,7 @@ static void grpc_run_batch_stack_fill_ops(run_batch_stack *st, VALUE ops_hash) {
NUM2INT(this_op));
};
st->ops[st->op_num].op = (grpc_op_type)NUM2INT(this_op);
st->ops[st->op_num].flags = 0;
st->op_num++;
}
}

@ -142,8 +142,16 @@ grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
MEMZERO(&next_call, next_call_stack, 1);
TypedData_Get_Struct(self, grpc_completion_queue,
&grpc_rb_completion_queue_data_type, next_call.cq);
next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
next_call.tag = ROBJECT(tag);
if (TYPE(timeout) == T_NIL) {
next_call.timeout = gpr_inf_future;
} else {
next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
}
if (TYPE(tag) == T_NIL) {
next_call.tag = NULL;
} else {
next_call.tag = ROBJECT(tag);
}
next_call.event.type = GRPC_QUEUE_TIMEOUT;
rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil,
(void *)&next_call, NULL, NULL);
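The nil handling above means a Ruby caller can pluck anonymously with no deadline, which in core terms reduces to the same call the PHP and Node teardown paths in this diff make (a sketch, with `cq` assumed):

```c
/* Block until the anonymous (NULL-tagged) completion arrives. */
grpc_event ev = grpc_completion_queue_pluck(cq, NULL, gpr_inf_future);
```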

@ -210,7 +210,7 @@ static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
VALUE result;
TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
if (s->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "closed!");
rb_raise(rb_eRuntimeError, "destroyed!");
return Qnil;
} else {
grpc_request_call_stack_init(&st);
@ -259,21 +259,69 @@ static VALUE grpc_rb_server_start(VALUE self) {
grpc_rb_server *s = NULL;
TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
if (s->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "closed!");
rb_raise(rb_eRuntimeError, "destroyed!");
} else {
grpc_server_start(s->wrapped);
}
return Qnil;
}
static VALUE grpc_rb_server_destroy(VALUE self) {
/*
call-seq:
cq = CompletionQueue.new
server = Server.new(cq, {'arg1': 'value1'})
... // do stuff with server
...
... // to shut down the server
server.destroy(cq)
... // to shut down the server with a timeout
server.destroy(cq, timeout)
Destroys server instances. */
static VALUE grpc_rb_server_destroy(int argc, VALUE *argv, VALUE self) {
VALUE cqueue = Qnil;
VALUE timeout = Qnil;
grpc_completion_queue *cq = NULL;
grpc_event ev;
grpc_rb_server *s = NULL;
/* "11" == 1 mandatory args, 1 (timeout) is optional */
rb_scan_args(argc, argv, "11", &cqueue, &timeout);
cq = grpc_rb_get_wrapped_completion_queue(cqueue);
TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
if (s->wrapped != NULL) {
grpc_server_shutdown(s->wrapped);
grpc_server_shutdown_and_notify(s->wrapped, cq, NULL);
ev = grpc_rb_completion_queue_pluck_event(cqueue, Qnil, timeout);
if (!ev.success) {
rb_warn("server shutdown failed, there will be a LEAKED object warning");
return Qnil;
/*
TODO: re-enable the rb_raise below.
At the moment, if the timeout is INFINITE_FUTURE as recommended, the
pluck blocks forever, even though the outstanding server_request_calls
correctly fail on the other thread that they are running on.
It's almost as if calls that fail on the other thread do not get
cleaned up by the shutdown request, even though it caused them to
terminate.
rb_raise(rb_eRuntimeError, "grpc server shutdown did not succeed");
return Qnil;
The workaround is just to use a timeout and return without really
shutting down the server, relying on grpc core's garbage collection
to flag it as a 'LEAKED OBJECT'.
*/
}
grpc_server_destroy(s->wrapped);
s->wrapped = NULL;
s->mark = Qnil;
}
return Qnil;
}
@ -302,7 +350,7 @@ static VALUE grpc_rb_server_add_http2_port(int argc, VALUE *argv, VALUE self) {
TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
if (s->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "closed!");
rb_raise(rb_eRuntimeError, "destroyed!");
return Qnil;
} else if (rb_creds == Qnil) {
recvd_port = grpc_server_add_http2_port(s->wrapped, StringValueCStr(port));
@ -315,7 +363,7 @@ static VALUE grpc_rb_server_add_http2_port(int argc, VALUE *argv, VALUE self) {
creds = grpc_rb_get_wrapped_server_credentials(rb_creds);
recvd_port =
grpc_server_add_secure_http2_port(s->wrapped, StringValueCStr(port),
creds);
if (recvd_port == 0) {
rb_raise(rb_eRuntimeError,
"could not add secure port %s to server, not sure why",
@ -341,7 +389,7 @@ void Init_grpc_server() {
rb_define_method(grpc_rb_cServer, "request_call",
grpc_rb_server_request_call, 3);
rb_define_method(grpc_rb_cServer, "start", grpc_rb_server_start, 0);
rb_define_method(grpc_rb_cServer, "destroy", grpc_rb_server_destroy, 0);
rb_define_method(grpc_rb_cServer, "destroy", grpc_rb_server_destroy, -1);
rb_define_alias(grpc_rb_cServer, "close", "destroy");
rb_define_method(grpc_rb_cServer, "add_http2_port",
grpc_rb_server_add_http2_port,

@ -278,7 +278,9 @@ module GRPC
@stopped = true
end
@pool.stop
@server.close
deadline = from_relative_time(@poll_period)
@server.close(@cq, deadline)
end
# determines if the server has been stopped
@ -410,17 +412,18 @@ module GRPC
# handles calls to the server
def loop_handle_server_calls
fail 'not running' unless @running
request_call_tag = Object.new
loop_tag = Object.new
until stopped?
deadline = from_relative_time(@poll_period)
begin
an_rpc = @server.request_call(@cq, request_call_tag, deadline)
an_rpc = @server.request_call(@cq, loop_tag, deadline)
c = new_active_server_call(an_rpc)
rescue Core::CallError, RuntimeError => e
# can happen during server shutdown
# these might happen for various reasons. The correct behaviour of
# the server is to log them and continue.
GRPC.logger.warn("server call failed: #{e}")
next
end
c = new_active_server_call(an_rpc)
unless c.nil?
mth = an_rpc.method.to_sym
@pool.schedule(c) do |call|

@ -42,11 +42,8 @@ shared_context 'setup: tags' do
let(:sent_message) { 'sent message' }
let(:reply_text) { 'the reply' }
before(:example) do
@server_finished_tag = Object.new
@client_finished_tag = Object.new
@client_metadata_tag = Object.new
@client_tag = Object.new
@server_tag = Object.new
@tag = Object.new
end
def deadline
@ -351,7 +348,7 @@ describe 'the http client/server' do
after(:example) do
@ch.close
@server.close
@server.close(@server_queue, deadline)
end
it_behaves_like 'basic GRPC message delivery is OK' do
@ -377,7 +374,7 @@ describe 'the secure http client/server' do
end
after(:example) do
@server.close
@server.close(@server_queue, deadline)
end
it_behaves_like 'basic GRPC message delivery is OK' do

@ -51,7 +51,7 @@ describe GRPC::ActiveCall do
end
after(:each) do
@server.close
@server.close(@server_queue, deadline)
end
describe 'restricted view methods' do

@ -54,6 +54,7 @@ describe 'ClientStub' do
before(:each) do
Thread.abort_on_exception = true
@server = nil
@server_queue = nil
@method = 'an_rpc_method'
@pass = OK
@fail = INTERNAL
@ -61,7 +62,7 @@ describe 'ClientStub' do
end
after(:each) do
@server.close unless @server.nil?
@server.close(@server_queue) unless @server_queue.nil?
end
describe '#new' do

@ -136,10 +136,6 @@ describe GRPC::RpcServer do
@ch = GRPC::Core::Channel.new(@host, nil)
end
after(:each) do
@server.close
end
describe '#new' do
it 'can be created with just some args' do
opts = { a_channel_arg: 'an_arg' }
@ -344,10 +340,6 @@ describe GRPC::RpcServer do
@srv = RpcServer.new(**server_opts)
end
after(:each) do
@srv.stop
end
it 'should return NOT_FOUND status on unknown methods', server: true do
@srv.handle(EchoService)
t = Thread.new { @srv.run }
@ -527,10 +519,6 @@ describe GRPC::RpcServer do
@srv = RpcServer.new(**server_opts)
end
after(:each) do
@srv.stop
end
it 'should send connect metadata to the client', server: true do
service = EchoService.new
@srv.handle(service)

@ -54,7 +54,7 @@ describe Server do
it 'fails if the server is closed' do
s = Server.new(@cq, nil)
s.close
s.close(@cq)
expect { s.start }.to raise_error(RuntimeError)
end
end
@ -62,19 +62,19 @@ describe Server do
describe '#destroy' do
it 'destroys a server ok' do
s = start_a_server
blk = proc { s.destroy }
blk = proc { s.destroy(@cq) }
expect(&blk).to_not raise_error
end
it 'can be called more than once without error' do
s = start_a_server
begin
blk = proc { s.destroy }
blk = proc { s.destroy(@cq) }
expect(&blk).to_not raise_error
blk.call
expect(&blk).to_not raise_error
ensure
s.close
s.close(@cq)
end
end
end
@ -83,16 +83,16 @@ describe Server do
it 'closes a server ok' do
s = start_a_server
begin
blk = proc { s.close }
blk = proc { s.close(@cq) }
expect(&blk).to_not raise_error
ensure
s.close
s.close(@cq)
end
end
it 'can be called more than once without error' do
s = start_a_server
blk = proc { s.close }
blk = proc { s.close(@cq) }
expect(&blk).to_not raise_error
blk.call
expect(&blk).to_not raise_error
@ -105,14 +105,14 @@ describe Server do
blk = proc do
s = Server.new(@cq, nil)
s.add_http2_port('localhost:0')
s.close
s.close(@cq)
end
expect(&blk).to_not raise_error
end
it 'fails if the server is closed' do
s = Server.new(@cq, nil)
s.close
s.close(@cq)
expect { s.add_http2_port('localhost:0') }.to raise_error(RuntimeError)
end
end
@ -123,14 +123,14 @@ describe Server do
blk = proc do
s = Server.new(@cq, nil)
s.add_http2_port('localhost:0', cert)
s.close
s.close(@cq)
end
expect(&blk).to_not raise_error
end
it 'fails if the server is closed' do
s = Server.new(@cq, nil)
s.close
s.close(@cq)
blk = proc { s.add_http2_port('localhost:0', cert) }
expect(&blk).to raise_error(RuntimeError)
end

@ -53,3 +53,5 @@ RSpec.configure do |config|
include RSpec::LoggingHelper
config.capture_log_messages
end
RSpec::Expectations.configuration.warn_about_potential_false_positives = false

@ -665,7 +665,7 @@ else
endif
endif
$(Q)$(MAKE) -C third_party/openssl clean
$(Q)$(MAKE) -C third_party/openssl build_crypto build_ssl
$(Q)(unset CPPFLAGS; $(MAKE) -C third_party/openssl build_crypto build_ssl)
$(Q)mkdir -p $(LIBDIR)/$(CONFIG)/openssl
$(Q)cp third_party/openssl/libssl.a third_party/openssl/libcrypto.a $(LIBDIR)/$(CONFIG)/openssl

@ -0,0 +1,126 @@
<%!
bad_header_names = ('time.h', 'string.h')
def fix_header_name(name):
split_name = name.split('/')
if split_name[-1] in bad_header_names:
return '/'.join(split_name[:-1] + ['grpc_' + split_name[-1]])
else:
return name
%>
Pod::Spec.new do |s|
s.name = 'gRPC'
s.version = '0.6.0'
s.summary = 'gRPC client library for iOS/OSX'
s.homepage = 'http://www.grpc.io'
s.license = 'New BSD'
s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' }
# s.source = { :git => 'https://github.com/grpc/grpc.git',
# :tag => 'release-0_9_1-objectivec-0.5.1' }
s.ios.deployment_target = '6.0'
s.osx.deployment_target = '10.8'
s.requires_arc = true
# Reactive Extensions library for iOS.
s.subspec 'RxLibrary' do |rs|
rs.source_files = 'src/objective-c/RxLibrary/*.{h,m}',
'src/objective-c/RxLibrary/transformations/*.{h,m}',
'src/objective-c/RxLibrary/private/*.{h,m}'
rs.private_header_files = 'src/objective-c/RxLibrary/private/*.h'
end
# Core cross-platform gRPC library, written in C.
s.subspec 'C-Core' do |cs|
cs.source_files = \
% for lib in libs:
% if lib.name in ("grpc", "gpr"):
% for hdr in lib.get("headers", []):
'${fix_header_name(hdr)}', \
% endfor
% for hdr in lib.get("public_headers", []):
'${fix_header_name(hdr)}', \
% endfor
% for src in lib.src:
'${src}', \
% endfor
% endif
% endfor
cs.private_header_files = \
% for lib in libs:
% if lib.name in ("grpc", "gpr"):
% for hdr in lib.get("headers", []):
'${hdr}', \
% endfor
% endif
% endfor
cs.header_mappings_dir = '.'
# The core library includes its headers as either "src/core/..." or "grpc/...", meaning we have
# to tell Xcode to look for headers under the "include" subdirectory too.
#
# TODO(jcanizales): Instead of doing this, during installation move everything under
# "include/grpc" one directory up. The directory names under PODS_ROOT are implementation
# details of Cocoapods, and have changed in the past, breaking this podspec.
cs.xcconfig = { 'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers/Private/gRPC" ' +
'"$(PODS_ROOT)/Headers/Private/gRPC/include"' }
cs.requires_arc = false
cs.libraries = 'z'
cs.dependency 'OpenSSL', '~> 1.0.200'
end
# This is a workaround for Cocoapods Issue #1437.
# It renames time.h and string.h to grpc_time.h and grpc_string.h.
# It needs to be here (top-level) instead of in the C-Core subspec because Cocoapods doesn't run
# prepare_command's of subspecs.
#
# TODO(jcanizales): Try out Todd Reed's solution at Issue #1437.
s.prepare_command = <<-CMD
DIR_TIME="grpc/support"
BAD_TIME="$DIR_TIME/time.h"
GOOD_TIME="$DIR_TIME/grpc_time.h"
grep -rl "$BAD_TIME" include/grpc src/core | xargs sed -i '' -e s@$BAD_TIME@$GOOD_TIME@g
if [ -f "include/$BAD_TIME" ];
then
mv -f "include/$BAD_TIME" "include/$GOOD_TIME"
fi
DIR_STRING="src/core/support"
BAD_STRING="$DIR_STRING/string.h"
GOOD_STRING="$DIR_STRING/grpc_string.h"
grep -rl "$BAD_STRING" include/grpc src/core | xargs sed -i '' -e s@$BAD_STRING@$GOOD_STRING@g
if [ -f "$BAD_STRING" ];
then
mv -f "$BAD_STRING" "$GOOD_STRING"
fi
CMD
# Objective-C wrapper around the core gRPC library.
s.subspec 'GRPCClient' do |gs|
gs.source_files = 'src/objective-c/GRPCClient/*.{h,m}',
'src/objective-c/GRPCClient/private/*.{h,m}'
gs.private_header_files = 'src/objective-c/GRPCClient/private/*.h'
gs.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w'
gs.dependency 'gRPC/C-Core'
# TODO(jcanizales): Remove this when the prepare_command moves everything under "include/grpc"
# one directory up.
gs.xcconfig = { 'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers/Public/gRPC/include"' }
gs.dependency 'gRPC/RxLibrary'
# Certificates, to be able to establish TLS connections:
gs.resource_bundles = { 'gRPC' => ['etc/roots.pem'] }
end
# RPC library for ProtocolBuffers, based on gRPC
s.subspec 'ProtoRPC' do |ps|
ps.source_files = 'src/objective-c/ProtoRPC/*.{h,m}'
ps.dependency 'gRPC/GRPCClient'
ps.dependency 'gRPC/RxLibrary'
ps.dependency 'Protobuf', '~> 3.0.0-alpha-3'
end
end

@ -143,6 +143,10 @@ void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator,
if (sfd.client) {
grpc_endpoint_destroy(sfd.client);
}
grpc_server_shutdown_and_notify(a.server, a.cq, NULL);
GPR_ASSERT(grpc_completion_queue_pluck(a.cq, NULL,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1))
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(a.server);
grpc_completion_queue_destroy(a.cq);

@ -135,17 +135,21 @@ void test_connect(const char *server_host, const char *client_host, int port,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -161,14 +165,17 @@ void test_connect(const char *server_host, const char *client_host, int port,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -206,7 +213,8 @@ void test_connect(const char *server_host, const char *client_host, int port,
grpc_completion_queue_destroy(client_cq);
/* Destroy server. */
grpc_server_shutdown(server);
grpc_server_shutdown_and_notify(server, server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(server);
grpc_completion_queue_shutdown(server_cq);
drain_cq(server_cq);

@ -84,6 +84,7 @@ END2END_TESTS = {
'request_response_with_payload_and_call_creds': TestOptions(flaky=False, secure=True),
'request_with_large_metadata': default_test_options,
'request_with_payload': default_test_options,
'request_with_flags': default_test_options,
'server_finishes_request': default_test_options,
'simple_delayed_request': default_test_options,
'simple_request': default_test_options,
@ -101,7 +102,7 @@ def main():
'language': 'c',
'secure': 'check' if END2END_FIXTURES[f].secure else 'no',
'src': ['test/core/end2end/fixtures/%s.c' % f],
'platforms': [ 'posix' ] if f.endswith('_posix') else [ 'windows', 'posix' ],
'platforms': [ 'posix' ] if f.endswith('_posix') else END2END_FIXTURES[f].platforms,
}
for f in sorted(END2END_FIXTURES.keys())] + [
{

@ -67,12 +67,14 @@ int main(int argc, char **argv) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(call, ops, op - ops, tag(1)));

@ -76,7 +76,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -125,17 +126,21 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));

@ -75,7 +75,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -141,18 +142,23 @@ static void test_cancel_after_accept(grpc_end2end_test_config config,
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -166,15 +172,19 @@ static void test_cancel_after_accept(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(3)));

@ -75,7 +75,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -141,20 +142,26 @@ static void test_cancel_after_accept_and_writes_closed(
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -168,15 +175,19 @@ static void test_cancel_after_accept_and_writes_closed(
op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(3)));

@ -76,7 +76,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -136,20 +137,26 @@ static void test_cancel_after_invoke(grpc_end2end_test_config config,
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, test_ops, tag(1)));

@ -74,7 +74,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -135,20 +136,26 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config,
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, test_ops, tag(1)));

@ -46,6 +46,8 @@
enum { TIMEOUT = 200000 };
static void *tag(gpr_intptr t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
@ -73,7 +75,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}

@ -61,9 +61,12 @@ static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
return f;
}
static void *tag(gpr_intptr t) { return (void *)t; }
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -93,8 +96,6 @@ static void end_test(grpc_end2end_test_fixture *f) {
grpc_completion_queue_destroy(f->client_cq);
}
static void *tag(gpr_intptr t) { return (void *)t; }
static void test_body(grpc_end2end_test_fixture f) {
grpc_call *c;
grpc_call *s;
@ -124,17 +125,21 @@ static void test_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -148,14 +153,17 @@ static void test_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -62,7 +62,6 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -114,17 +113,21 @@ static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -137,23 +140,27 @@ static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f,
/* should be able to shut down the server early
- and still complete the request */
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
cq_expect_completion(v_server, tag(102), 1);
cq_expect_completion(v_server, tag(1000), 1);
cq_verify(v_server);
cq_expect_completion(v_client, tag(1), 1);
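This file also shows the server-shutdown API change that runs through the rest of the diff: grpc_server_shutdown is replaced by grpc_server_shutdown_and_notify, which takes a completion queue and a tag, and the tag completes on that queue once shutdown has finished. A sketch of the full teardown sequence, assuming the same tag helper and GRPC_TIMEOUT_SECONDS_TO_DEADLINE macro these tests already use:

/* Graceful teardown: request shutdown, wait for the notification tag on
   the server's completion queue, then destroy the server. */
static void graceful_shutdown(grpc_server *server, grpc_completion_queue *cq) {
  grpc_event ev;
  grpc_server_shutdown_and_notify(server, cq, tag(1000));
  ev = grpc_completion_queue_pluck(cq, tag(1000),
                                   GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); /* shutdown actually finished */
  grpc_server_destroy(server);
}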

@ -72,13 +72,6 @@ static void drain_cq(grpc_completion_queue *cq) {
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
@ -86,7 +79,6 @@ static void shutdown_client(grpc_end2end_test_fixture *f) {
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->server_cq);
@ -129,17 +121,21 @@ static void test_early_server_shutdown_finishes_inflight_calls(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->data.send_initial_metadata.metadata = NULL;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -153,15 +149,20 @@ static void test_early_server_shutdown_finishes_inflight_calls(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
/* shutdown and destroy the server */
shutdown_server(&f);
grpc_server_shutdown_and_notify(f.server, f.server_cq, tag(1000));
grpc_server_cancel_all_calls(f.server);
cq_expect_completion(v_server, tag(102), 1);
cq_expect_completion(v_server, tag(1000), 1);
cq_verify(v_server);
grpc_server_destroy(f.server);
cq_expect_completion(v_client, tag(1), 1);
cq_verify(v_client);
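When calls are still in flight, the graceful sequence alone would wait for them to finish; the test above instead pairs the notify with grpc_server_cancel_all_calls so the pending batch (tag 102) and the shutdown tag (tag 1000) both complete promptly. A condensed sketch of that forced-shutdown path, mirroring the sequence in the diff:

/* Forced shutdown: ask for the notification, then cancel whatever is
   still in flight so tag(1000) does not wait on live calls. */
static void forced_shutdown(grpc_server *server, grpc_completion_queue *cq) {
  grpc_event ev;
  grpc_server_shutdown_and_notify(server, cq, tag(1000));
  grpc_server_cancel_all_calls(server); /* aborts in-flight calls */
  ev = grpc_completion_queue_pluck(cq, tag(1000),
                                   GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
  grpc_server_destroy(server); /* only safe after the notification */
}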

@ -72,13 +72,6 @@ static void drain_cq(grpc_completion_queue *cq) {
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
/* don't shutdown, just destroy, to tickle this code edge */
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
@ -86,7 +79,6 @@ static void shutdown_client(grpc_end2end_test_fixture *f) {
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->server_cq);
@ -114,11 +106,14 @@ static void test_early_server_shutdown_finishes_tags(
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.server_cq,
f.server_cq, tag(101)));
grpc_server_shutdown(f.server);
grpc_server_shutdown_and_notify(f.server, f.server_cq, tag(1000));
cq_expect_completion(v_server, tag(101), 0);
cq_expect_completion(v_server, tag(1000), 1);
cq_verify(v_server);
GPR_ASSERT(s == NULL);
grpc_server_destroy(f.server);
end_test(&f);
config.tear_down_data(&f);
cq_verifier_destroy(v_server);

@ -76,7 +76,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}

@ -128,17 +128,21 @@ static void test_early_server_shutdown_finishes_inflight_calls(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->data.send_initial_metadata.metadata = NULL;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -150,29 +154,31 @@ static void test_early_server_shutdown_finishes_inflight_calls(
cq_verify(v_server);
/* shutdown and destroy the server */
grpc_server_shutdown_and_notify(f.server, tag(0xdead));
grpc_server_shutdown_and_notify(f.server, f.server_cq, tag(0xdead));
cq_verify_empty(v_server);
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
cq_expect_completion(v_server, tag(102), 1);
cq_expect_completion(v_server, tag(0xdead), 1);
cq_verify(v_server);
grpc_call_destroy(s);
cq_expect_completion(v_server, tag(0xdead), 1);
cq_verify(v_server);
cq_expect_completion(v_client, tag(1), 1);
cq_verify(v_client);

@ -72,7 +72,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}

@ -74,7 +74,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -126,17 +127,21 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -150,14 +155,17 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -257,8 +265,10 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(c1, ops, op - ops, tag(301)));
@ -269,9 +279,11 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op->data.recv_status_on_client.status = &status1;
op->data.recv_status_on_client.status_details = &details1;
op->data.recv_status_on_client.status_details_capacity = &details_capacity1;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv1;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(c1, ops, op - ops, tag(302)));
@ -279,8 +291,10 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(c2, ops, op - ops, tag(401)));
@ -291,9 +305,11 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op->data.recv_status_on_client.status = &status2;
op->data.recv_status_on_client.status_details = &details2;
op->data.recv_status_on_client.status_details_capacity = &details_capacity2;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv1;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(c2, ops, op - ops, tag(402)));
@ -314,14 +330,17 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s1, ops, op - ops, tag(102)));
@ -345,14 +364,17 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s2, ops, op - ops, tag(202)));

@ -74,7 +74,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -142,20 +143,25 @@ static void test_max_message_length(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -169,6 +175,7 @@ static void test_max_message_length(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -45,6 +45,8 @@
enum { TIMEOUT = 200000 };
static void *tag(gpr_intptr t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
@ -72,7 +74,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}

@ -74,7 +74,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -136,15 +137,18 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -158,9 +162,11 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(101)));
@ -171,15 +177,18 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(2)));
op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -189,6 +198,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s, ops, op - ops, tag(103)));
@ -209,6 +219,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(3)));
@ -217,6 +228,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(104)));

@ -76,7 +76,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -127,17 +128,21 @@ static void simple_request_body(grpc_end2end_test_fixture f, void *rc) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -151,14 +156,17 @@ static void simple_request_body(grpc_end2end_test_fixture f, void *rc) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -74,7 +74,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -156,23 +157,29 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_c;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -187,9 +194,11 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_s;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -199,14 +208,17 @@ static void test_request_response_with_metadata_and_payload(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -74,7 +74,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -142,23 +143,29 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_c;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -173,9 +180,11 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_s;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -185,14 +194,17 @@ static void test_request_response_with_metadata_and_payload(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -74,7 +74,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -134,23 +135,29 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -164,9 +171,11 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -176,14 +185,17 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -88,7 +88,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -209,23 +210,29 @@ static void request_response_with_payload_and_call_creds(
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -246,9 +253,11 @@ static void request_response_with_payload_and_call_creds(
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -258,14 +267,17 @@ static void request_response_with_payload_and_call_creds(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -74,7 +74,8 @@ static void drain_cq(grpc_completion_queue *cq) {
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
@ -141,23 +142,29 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_c;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -173,9 +180,11 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_s;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -185,15 +194,18 @@ static void test_request_response_with_metadata_and_payload(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 2;
op->data.send_status_from_server.trailing_metadata = meta_t;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -0,0 +1,207 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/transport/stream_op.h"
#include "test/core/end2end/cq_verifier.h"
enum { TIMEOUT = 200000 };
static void *tag(gpr_intptr t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_client(&f, client_args);
config.init_server(&f, server_args);
return f;
}
static gpr_timespec n_seconds_time(int n) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
}
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_time());
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->server_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->server_cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->server_cq);
drain_cq(f->server_cq);
grpc_completion_queue_destroy(f->server_cq);
grpc_completion_queue_shutdown(f->client_cq);
drain_cq(f->client_cq);
grpc_completion_queue_destroy(f->client_cq);
}
static void test_invoke_request_with_flags(
grpc_end2end_test_config config, gpr_uint32 *flags_for_op,
grpc_call_error call_start_batch_expected_result) {
grpc_call *c;
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_end2end_test_fixture f =
begin_test(config, "test_invoke_request_with_flags", NULL, NULL);
cq_verifier *v_client = cq_verifier_create(f.client_cq);
cq_verifier *v_server = cq_verifier_create(f.server_cq);
grpc_op ops[6];
grpc_op *op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_byte_buffer *request_payload_recv = NULL;
grpc_call_details call_details;
grpc_status_code status;
char *details = NULL;
size_t details_capacity = 0;
grpc_call_error expectation;
c = grpc_channel_create_call(f.client, f.client_cq, "/foo",
"foo.test.google.fr", deadline);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = flags_for_op[op->op];
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = flags_for_op[op->op];
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = flags_for_op[op->op];
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = flags_for_op[op->op];
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = flags_for_op[op->op];
op++;
expectation = call_start_batch_expected_result;
GPR_ASSERT(expectation == grpc_call_start_batch(c, ops, op - ops, tag(1)));
gpr_free(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
cq_verifier_destroy(v_client);
cq_verifier_destroy(v_server);
grpc_byte_buffer_destroy(request_payload);
grpc_byte_buffer_destroy(request_payload_recv);
end_test(&f);
config.tear_down_data(&f);
}
void grpc_end2end_tests(grpc_end2end_test_config config) {
size_t i;
gpr_uint32 flags_for_op[GRPC_OP_RECV_CLOSE_ON_SERVER+1];
{
/* check that all grpc_op_types fail when their flag value is set to an
* invalid value */
int indices[] = {GRPC_OP_SEND_INITIAL_METADATA, GRPC_OP_SEND_MESSAGE,
GRPC_OP_SEND_CLOSE_FROM_CLIENT,
GRPC_OP_RECV_INITIAL_METADATA,
GRPC_OP_RECV_STATUS_ON_CLIENT};
for (i = 0; i < GPR_ARRAY_SIZE(indices); ++i) {
memset(flags_for_op, 0, sizeof(flags_for_op));
flags_for_op[indices[i]] = 0xDEADBEEF;
test_invoke_request_with_flags(config, flags_for_op,
GRPC_CALL_ERROR_INVALID_FLAGS);
}
}
{
/* check valid operation with allowed flags for GRPC_OP_SEND_MESSAGE */
gpr_uint32 flags[] = {GRPC_WRITE_BUFFER_HINT, GRPC_WRITE_NO_COMPRESS,
GRPC_WRITE_INTERNAL_COMPRESS};
for (i = 0; i < GPR_ARRAY_SIZE(flags); ++i) {
memset(flags_for_op, 0, sizeof(flags_for_op));
flags_for_op[GRPC_OP_SEND_MESSAGE] = flags[i];
test_invoke_request_with_flags(config, flags_for_op, GRPC_CALL_OK);
}
}
}
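The new test above pins down the flag-validation contract introduced by this change: flags are per-op, unrecognized bits are rejected at grpc_call_start_batch time with GRPC_CALL_ERROR_INVALID_FLAGS, and the write flags it exercises (GRPC_WRITE_BUFFER_HINT, GRPC_WRITE_NO_COMPRESS, GRPC_WRITE_INTERNAL_COMPRESS) are accepted on GRPC_OP_SEND_MESSAGE. A minimal client-side sketch of passing one legitimate write flag, in the same fragment style as the tests:

/* A send-message op carrying a buffer hint; every other op in the batch
   keeps op->flags = 0, matching the tests above. */
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = GRPC_WRITE_BUFFER_HINT; /* the transport may coalesce this write */
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));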
