Merge github.com:grpc/grpc into you-complete-me

pull/1731/head
Craig Tiller 10 years ago
commit 48dd6bfb53
  1. 303  Makefile
  2. 3  build.json
  3. 47  doc/connectivity-semantics-and-api.md
  4. 2  gRPC.podspec
  5. 4  include/grpc++/async_unary_call.h
  6. 2  include/grpc++/client_context.h
  7. 2  include/grpc++/impl/client_unary_call.h
  8. 2  include/grpc++/impl/service_type.h
  9. 9  include/grpc++/status.h
  10. 4  include/grpc++/stream.h
  11. 3  include/grpc/grpc.h
  12. 8  src/compiler/cpp_generator.cc
  13. 41  src/core/surface/call.c
  14. 1  src/core/surface/call.h
  15. 3  src/core/surface/server.c
  16. 9  src/core/transport/stream_op.h
  17. 2  src/cpp/client/client_unary_call.cc
  18. 12  src/cpp/common/call.cc
  19. 2  src/cpp/util/status.cc
  20. 24  src/csharp/ext/grpc_csharp_ext.c
  21. 1  src/node/ext/call.cc
  22. 58  src/objective-c/GRPCClient/GRPCCall.h
  23. 48  src/objective-c/GRPCClient/GRPCCall.m
  24. 2  src/objective-c/GRPCClient/private/GRPCWrappedCall.h
  25. 29  src/objective-c/GRPCClient/private/GRPCWrappedCall.m
  26. 3  src/objective-c/GRPCClient/private/NSDictionary+GRPC.h
  27. 13  src/objective-c/GRPCClient/private/NSDictionary+GRPC.m
  28. 16  src/objective-c/GRPCClient/private/NSError+GRPC.h
  29. 11  src/objective-c/GRPCClient/private/NSError+GRPC.m
  30. 24  src/objective-c/README.md
  31. 6  src/objective-c/generated_libraries/RemoteTestClient/RemoteTest.podspec
  32. 6  src/objective-c/generated_libraries/RouteGuideClient/RouteGuide.podspec
  33. 101  src/objective-c/tests/GRPCClientTests.m
  34. 1  src/php/ext/grpc/call.c
  35. 14  src/php/lib/Grpc/AbstractCall.php
  36. 31  src/php/lib/Grpc/BaseStub.php
  37. 21  src/php/tests/interop/interop_client.php
  38. 22  src/php/tests/unit_tests/TimevalTest.php
  39. 8  src/python/src/grpc/_adapter/_c/utility.c
  40. 1  src/ruby/ext/grpc/rb_call.c
  41. 7  test/core/end2end/dualstack_socket_test.c
  42. 3  test/core/end2end/gen_build_json.py
  43. 2  test/core/end2end/no_server_test.c
  44. 4  test/core/end2end/tests/bad_hostname.c
  45. 9  test/core/end2end/tests/cancel_after_accept.c
  46. 10  test/core/end2end/tests/cancel_after_accept_and_writes_closed.c
  47. 6  test/core/end2end/tests/cancel_after_invoke.c
  48. 6  test/core/end2end/tests/cancel_before_invoke.c
  49. 7  test/core/end2end/tests/census_simple_request.c
  50. 7  test/core/end2end/tests/disappearing_server.c
  51. 5  test/core/end2end/tests/early_server_shutdown_finishes_inflight_calls.c
  52. 7  test/core/end2end/tests/graceful_server_shutdown.c
  53. 21  test/core/end2end/tests/max_concurrent_streams.c
  54. 6  test/core/end2end/tests/max_message_length.c
  55. 11  test/core/end2end/tests/ping_pong_streaming.c
  56. 7  test/core/end2end/tests/registered_call.c
  57. 11  test/core/end2end/tests/request_response_with_binary_metadata_and_payload.c
  58. 11  test/core/end2end/tests/request_response_with_metadata_and_payload.c
  59. 11  test/core/end2end/tests/request_response_with_payload.c
  60. 11  test/core/end2end/tests/request_response_with_payload_and_call_creds.c
  61. 11  test/core/end2end/tests/request_response_with_trailing_metadata_and_payload.c
  62. 206  test/core/end2end/tests/request_with_flags.c
  63. 9  test/core/end2end/tests/request_with_large_metadata.c
  64. 9  test/core/end2end/tests/request_with_payload.c
  65. 6  test/core/end2end/tests/server_finishes_request.c
  66. 7  test/core/end2end/tests/simple_delayed_request.c
  67. 7  test/core/end2end/tests/simple_request.c
  68. 7  test/core/end2end/tests/simple_request_with_high_initial_sequence_number.c
  69. 2  test/core/surface/lame_client_test.c
  70. 18  test/cpp/end2end/async_end2end_test.cc
  71. 4  test/cpp/end2end/client_crash_test.cc
  72. 68  test/cpp/end2end/end2end_test.cc
  73. 4  test/cpp/end2end/generic_end2end_test.cc
  74. 4  test/cpp/end2end/mock_test.cc
  75. 6  test/cpp/end2end/thread_stress_test.cc
  76. 8  test/cpp/interop/interop_client.cc
  77. 4  test/cpp/qps/client_sync.cc
  78. 4  test/cpp/qps/driver.cc
  79. 24  test/cpp/qps/qps_worker.cc
  80. 6  test/cpp/util/cli_call.cc
  81. 2  test/cpp/util/cli_call_test.cc
  82. 14  tools/jenkins/grpc_jenkins_slave/Dockerfile
  83. 1  tools/jenkins/run_jenkins.sh
  84. 2  tools/run_tests/build_python.sh
  85. 2  tools/run_tests/run_tests.py
  86. 140  tools/run_tests/tests.json
  87. 100  vsprojects/Grpc.mak

File diff suppressed because one or more lines are too long

@ -1361,6 +1361,9 @@
"grpc",
"gpr_test_util",
"gpr"
],
"platforms": [
"posix"
]
},
{

@ -20,7 +20,7 @@ channel, we use a state machine with four states, defined below:
CONNECTING: The channel is trying to establish a connection and is waiting to
make progress on one of the steps involved in name resolution, TCP connection
establishment or TLS handshake. This is the initial state for all channels upon
establishment or TLS handshake. This may be used as the initial state for channels upon
creation.
READY: The channel has successfully established a connection all the way
@ -34,13 +34,25 @@ retries are done with exponential backoff, channels that fail to connect will
start out spending very little time in this state but as the attempts fail
repeatedly, the channel will spend increasingly large amounts of time in this
state. For many non-fatal failures (e.g., TCP connection attempts timing out
because the server is not yet available), the channel may be stuck in this
state for an indefinitely large amount of time.
FATAL_FAILURE: There has been a fatal failure and the channel will never
attempt to establish a connection again. (e.g., a server presenting an invalid
TLS certificate)
because the server is not yet available), the channel may spend increasingly
large amounts of time in this state.
IDLE: This is the state where the channel is not even trying to create a
connection because of a lack of new or pending RPCs. New channels MAY be created
in this state. Any attempt to start an RPC on the channel will push the channel
out of this state to connecting. When there has been no RPC activity on a channel
for a specified IDLE_TIMEOUT, i.e., no new or pending (active) RPCs for this
period, channels that are READY or CONNECTING switch to IDLE. Additionally,
channels that receive a GOAWAY when there are no active or pending RPCs should
also switch to IDLE to avoid connection overload at servers that are attempting
to shed connections. We will use a default IDLE_TIMEOUT of 300 seconds (5 minutes).
SHUTDOWN: This channel has started shutting down. Any new RPCs should fail
immediately. Pending RPCs may continue running till the application cancels them.
Channels may enter this state either because the application explicitly requested
a shutdown or if a non-recoverable error has happened during attempts to connect
or communicate. (As of 6/12/2015, there are no known errors (while connecting or
Channels that enter this state never leave this state.
The following table lists the legal transitions from one state to another and
@ -52,14 +64,16 @@ corresponding reasons. Empty cells denote disallowed transitions.
<th>CONNECTING</th>
<th>READY</th>
<th>TRANSIENT_FAILURE</th>
<th>FATAL_FAILURE</th>
<th>IDLE</th>
<th>SHUTDOWN</th>
</tr>
<tr>
<th>CONNECTING</th>
<td>Incremental progress during connection establishment</td>
<td>All steps needed to establish a connection succeeded</td>
<td>Any failure in any of the steps needed to establish connection</td>
<td>Fatal failure encountered while attempting a connection.</td>
<td>No RPC activity on channel for IDLE_TIMEOUT</td>
<td>Shutdown triggered by application.</td>
</tr>
<tr>
<th>READY</th>
@ -67,7 +81,8 @@ corresponding reasons. Empty cells denote disallowed transitions.
<td>Incremental successful communication on established channel.</td>
<td>Any failure encountered while expecting successful communication on
established channel.</td>
<td></td>
<td>No RPC activity on channel for IDLE_TIMEOUT <br>OR<br>upon receiving a GOAWAY while there are no pending RPCs.</td>
<td>Shutdown triggered by application.</td>
</tr>
<tr>
<th>TRANSIENT_FAILURE</th>
@ -75,6 +90,15 @@ corresponding reasons. Empty cells denote disallowed transitions.
<td></td>
<td></td>
<td></td>
<td>Shutdown triggered by application.</td>
</tr>
<tr>
<th>IDLE</th>
<td>Any new RPC activity on the channel</td>
<td></td>
<td></td>
<td></td>
<td>Shutdown triggered by application.</td>
</tr>
<tr>
<th>FATAL_FAILURE</th>
@ -82,6 +106,7 @@ corresponding reasons. Empty cells denote disallowed transitions.
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
</table>
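
For reference, the five states and the legal transitions from the table above can be encoded in a few lines. This is a standalone illustrative sketch, not code from this commit; the TRANSIENT_FAILURE to CONNECTING edge follows the exponential-backoff prose earlier in the document, and all names are illustrative.

```cpp
// Standalone sketch: encodes the connectivity state machine described above
// so the transition table can be sanity-checked outside the library.
#include <cassert>

enum class ChannelState { CONNECTING, READY, TRANSIENT_FAILURE, IDLE, SHUTDOWN };

// Returns true if the table above allows a transition from `from` to `to`.
bool LegalTransition(ChannelState from, ChannelState to) {
  if (from == ChannelState::SHUTDOWN) return false;  // terminal state
  if (to == ChannelState::SHUTDOWN) return true;     // application-triggered shutdown
  switch (from) {
    case ChannelState::CONNECTING:
      // Incremental progress, success, failure, or IDLE_TIMEOUT expiry.
      return true;
    case ChannelState::READY:
      // Stays READY, fails transiently, or goes IDLE (timeout / GOAWAY);
      // it never moves straight back to CONNECTING.
      return to != ChannelState::CONNECTING;
    case ChannelState::TRANSIENT_FAILURE:
      // Re-enters CONNECTING once the backoff period is over.
      return to == ChannelState::CONNECTING;
    case ChannelState::IDLE:
      // Any new RPC activity pushes the channel to CONNECTING.
      return to == ChannelState::CONNECTING;
    default:
      return false;
  }
}

int main() {
  assert(LegalTransition(ChannelState::IDLE, ChannelState::CONNECTING));
  assert(!LegalTransition(ChannelState::READY, ChannelState::CONNECTING));
  assert(!LegalTransition(ChannelState::SHUTDOWN, ChannelState::CONNECTING));
  return 0;
}
```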

@ -1,6 +1,6 @@
Pod::Spec.new do |s|
s.name = 'gRPC'
s.version = '0.5.1'
s.version = '0.6.0'
s.summary = 'gRPC client library for iOS/OSX'
s.homepage = 'http://www.grpc.io'
s.license = 'New BSD'

@ -117,7 +117,7 @@ class ServerAsyncResponseWriter GRPC_FINAL
ctx_->sent_initial_metadata_ = true;
}
// The response is dropped if the status is not OK.
if (status.IsOk()) {
if (status.ok()) {
finish_buf_.AddSendMessage(msg);
}
finish_buf_.AddServerSendStatus(&ctx_->trailing_metadata_, status);
@ -125,7 +125,7 @@ class ServerAsyncResponseWriter GRPC_FINAL
}
void FinishWithError(const Status& status, void* tag) {
GPR_ASSERT(!status.IsOk());
GPR_ASSERT(!status.ok());
finish_buf_.Reset(tag);
if (!ctx_->sent_initial_metadata_) {
finish_buf_.AddSendInitialMetadata(&ctx_->initial_metadata_);

@ -41,6 +41,7 @@
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc++/config.h>
#include <grpc++/status.h>
#include <grpc++/time.h>
struct grpc_call;
@ -53,7 +54,6 @@ class ChannelInterface;
class CompletionQueue;
class Credentials;
class RpcMethod;
class Status;
template <class R>
class ClientReader;
template <class W>

@ -35,6 +35,7 @@
#define GRPCXX_IMPL_CLIENT_UNARY_CALL_H
#include <grpc++/config.h>
#include <grpc++/status.h>
namespace grpc {
@ -42,7 +43,6 @@ class ChannelInterface;
class ClientContext;
class CompletionQueue;
class RpcMethod;
class Status;
// Wrapper that performs a blocking unary call
Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,

@ -35,6 +35,7 @@
#define GRPCXX_IMPL_SERVICE_TYPE_H
#include <grpc++/config.h>
#include <grpc++/status.h>
namespace grpc {
@ -44,7 +45,6 @@ class RpcService;
class Server;
class ServerCompletionQueue;
class ServerContext;
class Status;
class SynchronousService {
public:

@ -42,18 +42,17 @@ namespace grpc {
class Status {
public:
Status() : code_(StatusCode::OK) {}
explicit Status(StatusCode code) : code_(code) {}
Status(StatusCode code, const grpc::string& details)
: code_(code), details_(details) {}
// Pre-defined special status objects.
static const Status& OK;
static const Status& Cancelled;
static const Status& CANCELLED;
StatusCode code() const { return code_; }
grpc::string details() const { return details_; }
StatusCode error_code() const { return code_; }
grpc::string error_message() const { return details_; }
bool IsOk() const { return code_ == StatusCode::OK; }
bool ok() const { return code_ == StatusCode::OK; }
private:
StatusCode code_;
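
A short usage sketch of the C++ Status surface after this hunk (illustrative only, assuming the grpc++ headers from this tree): IsOk() is replaced by ok(), error_code()/error_message() join code()/details(), and the predefined cancelled object is now Status::CANCELLED.

```cpp
// Sketch of the renamed grpc::Status accessors introduced by this change.
#include <grpc++/status.h>
#include <cstdio>

void Report(const grpc::Status& status) {
  if (status.ok()) {  // was status.IsOk()
    std::printf("RPC succeeded\n");
    return;
  }
  std::printf("RPC failed: code=%d message=%s\n",
              static_cast<int>(status.error_code()),  // was status.code()
              status.error_message().c_str());        // was status.details()
}

// Predefined objects: grpc::Status::OK and grpc::Status::CANCELLED
// (previously grpc::Status::Cancelled).
```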

@ -615,7 +615,7 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
ctx_->sent_initial_metadata_ = true;
}
// The response is dropped if the status is not OK.
if (status.IsOk()) {
if (status.ok()) {
finish_buf_.AddSendMessage(msg);
}
finish_buf_.AddServerSendStatus(&ctx_->trailing_metadata_, status);
@ -623,7 +623,7 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
}
void FinishWithError(const Status& status, void* tag) {
GPR_ASSERT(!status.IsOk());
GPR_ASSERT(!status.ok());
finish_buf_.Reset(tag);
if (!ctx_->sent_initial_metadata_) {
finish_buf_.AddSendInitialMetadata(&ctx_->initial_metadata_);

@ -155,6 +155,8 @@ typedef enum grpc_call_error {
/* Force compression to be disabled for a particular write
(start_write/add_metadata). Illegal on invoke/accept. */
#define GRPC_WRITE_NO_COMPRESS (0x00000002u)
/* Mask of all valid flags. */
#define GRPC_WRITE_USED_MASK (GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS)
/* A single metadata element */
typedef struct grpc_metadata {
@ -250,6 +252,7 @@ typedef enum {
no arguments) */
typedef struct grpc_op {
grpc_op_type op;
gpr_uint32 flags; /**< Write flags bitset for grpc_begin_messages */
union {
struct {
size_t count;
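
With this hunk, every grpc_op in a batch carries a flags word. A hedged caller-side sketch follows (C core API, compilable as C++; the call, payload, and tag are assumed to be provided by surrounding code and are not part of this diff):

```cpp
// Sketch only: fills a two-op batch, showing where the new flags field goes.
// `call` is an existing grpc_call*, `payload` a grpc_byte_buffer*, `tag` a tag.
#include <grpc/grpc.h>
#include <string.h>

grpc_call_error send_buffered_message(grpc_call *call,
                                      grpc_byte_buffer *payload, void *tag) {
  grpc_op ops[2];
  memset(ops, 0, sizeof(ops));

  ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
  ops[0].data.send_initial_metadata.count = 0;
  ops[0].flags = 0;  // metadata ops currently allow no flags

  ops[1].op = GRPC_OP_SEND_MESSAGE;
  ops[1].data.send_message = payload;
  // Public write flags are only meaningful on message sends; bits outside
  // GRPC_WRITE_USED_MASK are rejected with GRPC_CALL_ERROR_INVALID_FLAGS.
  ops[1].flags = GRPC_WRITE_BUFFER_HINT;

  return grpc_call_start_batch(call, ops, 2, tag);
}
```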

@ -854,7 +854,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer,
printer->Print(" (void) response;\n");
printer->Print(
" return ::grpc::Status("
"::grpc::StatusCode::UNIMPLEMENTED);\n");
"::grpc::StatusCode::UNIMPLEMENTED, \"\");\n");
printer->Print("}\n\n");
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
@ -867,7 +867,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer,
printer->Print(" (void) response;\n");
printer->Print(
" return ::grpc::Status("
"::grpc::StatusCode::UNIMPLEMENTED);\n");
"::grpc::StatusCode::UNIMPLEMENTED, \"\");\n");
printer->Print("}\n\n");
} else if (ServerOnlyStreaming(method)) {
printer->Print(*vars,
@ -880,7 +880,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer,
printer->Print(" (void) writer;\n");
printer->Print(
" return ::grpc::Status("
"::grpc::StatusCode::UNIMPLEMENTED);\n");
"::grpc::StatusCode::UNIMPLEMENTED, \"\");\n");
printer->Print("}\n\n");
} else if (BidiStreaming(method)) {
printer->Print(*vars,
@ -892,7 +892,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer,
printer->Print(" (void) stream;\n");
printer->Print(
" return ::grpc::Status("
"::grpc::StatusCode::UNIMPLEMENTED);\n");
"::grpc::StatusCode::UNIMPLEMENTED, \"\");\n");
printer->Print("}\n\n");
}
}

@ -185,6 +185,7 @@ struct grpc_call {
and a strong upper bound of a count of masters to be calculated. */
gpr_uint8 request_set[GRPC_IOREQ_OP_COUNT];
grpc_ioreq_data request_data[GRPC_IOREQ_OP_COUNT];
gpr_uint32 request_flags[GRPC_IOREQ_OP_COUNT];
reqinfo_master masters[GRPC_IOREQ_OP_COUNT];
/* Dynamic array of ioreq's that have completed: the count of
@ -228,6 +229,7 @@ struct grpc_call {
gpr_slice_buffer incoming_message;
gpr_uint32 incoming_message_length;
gpr_uint32 incoming_message_flags;
grpc_iomgr_closure destroy_closure;
};
@ -670,6 +672,7 @@ static int begin_message(grpc_call *call, grpc_begin_message msg) {
} else if (msg.length > 0) {
call->reading_message = 1;
call->incoming_message_length = msg.length;
call->incoming_message_flags = msg.flags;
return 1;
} else {
finish_message(call);
@ -818,6 +821,7 @@ static void copy_byte_buffer_to_stream_ops(grpc_byte_buffer *byte_buffer,
static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
grpc_ioreq_data data;
gpr_uint32 flags;
grpc_metadata_batch mdb;
size_t i;
GPR_ASSERT(op->send_ops == NULL);
@ -844,8 +848,9 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
case WRITE_STATE_STARTED:
if (is_op_live(call, GRPC_IOREQ_SEND_MESSAGE)) {
data = call->request_data[GRPC_IOREQ_SEND_MESSAGE];
flags = call->request_flags[GRPC_IOREQ_SEND_MESSAGE];
grpc_sopb_add_begin_message(
&call->send_ops, grpc_byte_buffer_length(data.send_message), 0);
&call->send_ops, grpc_byte_buffer_length(data.send_message), flags);
copy_byte_buffer_to_stream_ops(data.send_message, &call->send_ops);
op->send_ops = &call->send_ops;
call->last_send_contains |= 1 << GRPC_IOREQ_SEND_MESSAGE;
@ -979,6 +984,7 @@ static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
have_ops |= 1u << op;
call->request_data[op] = data;
call->request_flags[op] = reqs[i].flags;
call->request_set[op] = set;
}
@ -1189,6 +1195,14 @@ static void finish_batch_with_close(grpc_call *call, int success, void *tag) {
grpc_cq_end_op(call->cq, tag, call, 1);
}
static int are_write_flags_valid(gpr_uint32 flags) {
/* check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set */
const gpr_uint32 allowed_write_positions =
(GRPC_WRITE_USED_MASK | GRPC_WRITE_INTERNAL_USED_MASK);
const gpr_uint32 invalid_positions = ~allowed_write_positions;
return !(flags & invalid_positions);
}
grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
size_t nops, void *tag) {
grpc_ioreq reqs[GRPC_IOREQ_OP_COUNT];
@ -1211,30 +1225,43 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
op = &ops[in];
switch (op->op) {
case GRPC_OP_SEND_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
req = &reqs[out++];
req->op = GRPC_IOREQ_SEND_INITIAL_METADATA;
req->data.send_metadata.count = op->data.send_initial_metadata.count;
req->data.send_metadata.metadata =
op->data.send_initial_metadata.metadata;
req->flags = op->flags;
break;
case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)) {
return GRPC_CALL_ERROR_INVALID_FLAGS;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_SEND_MESSAGE;
req->data.send_message = op->data.send_message;
req->flags = op->flags;
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (!call->is_client) {
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_SEND_CLOSE;
req->flags = op->flags;
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (call->is_client) {
return GRPC_CALL_ERROR_NOT_ON_CLIENT;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_SEND_TRAILING_METADATA;
req->flags = op->flags;
req->data.send_metadata.count =
op->data.send_status_from_server.trailing_metadata_count;
req->data.send_metadata.metadata =
@ -1248,24 +1275,33 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
req->op = GRPC_IOREQ_SEND_CLOSE;
break;
case GRPC_OP_RECV_INITIAL_METADATA:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (!call->is_client) {
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
req->data.recv_metadata = op->data.recv_initial_metadata;
req->flags = op->flags;
break;
case GRPC_OP_RECV_MESSAGE:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_MESSAGE;
req->data.recv_message = op->data.recv_message;
req->flags = op->flags;
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
if (!call->is_client) {
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_STATUS;
req->flags = op->flags;
req->data.recv_status.set_value = set_status_value_directly;
req->data.recv_status.user_data = op->data.recv_status_on_client.status;
req = &reqs[out++];
@ -1283,8 +1319,11 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
finish_func = finish_batch_with_close;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
/* Flag validation: currently allow no flags */
if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS;
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_STATUS;
req->flags = op->flags;
req->data.recv_status.set_value = set_cancelled_value;
req->data.recv_status.user_data =
op->data.recv_close_on_server.cancelled;
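
The validation added above has two rejection paths: bits outside GRPC_WRITE_USED_MASK | GRPC_WRITE_INTERNAL_USED_MASK on a message send, and any non-zero flags on ops that currently allow none. An illustrative sketch of what callers should expect (not from the patch; `call`, `payload`, and `tag` are assumed to exist):

```cpp
// Sketch: illustrates the two rejection paths added in grpc_call_start_batch.
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <string.h>

void expect_invalid_flags(grpc_call *call, grpc_byte_buffer *payload, void *tag) {
  grpc_op op;
  memset(&op, 0, sizeof(op));

  // 1) Unknown bit on a message send: fails are_write_flags_valid().
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message = payload;
  op.flags = 0x00abcdefu;  // not covered by GRPC_WRITE_USED_MASK
  GPR_ASSERT(grpc_call_start_batch(call, &op, 1, tag) ==
             GRPC_CALL_ERROR_INVALID_FLAGS);

  // 2) Any flag on an op that allows none (e.g. initial metadata).
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_INITIAL_METADATA;
  op.data.send_initial_metadata.count = 0;
  op.flags = GRPC_WRITE_BUFFER_HINT;
  GPR_ASSERT(grpc_call_start_batch(call, &op, 1, tag) ==
             GRPC_CALL_ERROR_INVALID_FLAGS);
}
```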

@ -79,6 +79,7 @@ typedef union {
typedef struct {
grpc_ioreq_op op;
grpc_ioreq_data data;
gpr_uint32 flags; /**< A copy of the write flags from grpc_op */
} grpc_ioreq;
typedef void (*grpc_ioreq_completion_func)(grpc_call *call, int success,

@ -1135,6 +1135,7 @@ static void begin_call(grpc_server *server, call_data *calld,
rc->data.batch.details->deadline = calld->deadline;
r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
r->data.recv_metadata = rc->data.batch.initial_metadata;
r->flags = 0;
r++;
publish = publish_registered_or_batch;
break;
@ -1142,10 +1143,12 @@ static void begin_call(grpc_server *server, call_data *calld,
*rc->data.registered.deadline = calld->deadline;
r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
r->data.recv_metadata = rc->data.registered.initial_metadata;
r->flags = 0;
r++;
if (rc->data.registered.optional_payload) {
r->op = GRPC_IOREQ_RECV_MESSAGE;
r->data.recv_message = rc->data.registered.optional_payload;
r->flags = 0;
r++;
}
publish = publish_registered_or_batch;

@ -58,11 +58,18 @@ typedef enum grpc_stream_op_code {
GRPC_OP_SLICE
} grpc_stream_op_code;
/** Internal bit flag for grpc_begin_message's \a flags signaling the use of
* compression for the message */
#define GRPC_WRITE_INTERNAL_COMPRESS (0x80000000u)
/** Mask of all valid internal flags. */
#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
/* Arguments for GRPC_OP_BEGIN_MESSAGE */
typedef struct grpc_begin_message {
/* How many bytes of data will this message contain */
gpr_uint32 length;
/* Write flags for the message: see grpc.h GRPC_WRITE_xxx */
/* Write flags for the message: see grpc.h GRPC_WRITE_* for the public bits,
* GRPC_WRITE_INTERNAL_* for the internal ones. */
gpr_uint32 flags;
} grpc_begin_message;
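
GRPC_WRITE_INTERNAL_COMPRESS deliberately takes the top bit, so the internal mask stays disjoint from the public one. A tiny standalone check, with GRPC_WRITE_BUFFER_HINT assumed to be 0x1 (its value is not shown in this diff):

```cpp
// Standalone sketch: verifies the public and internal write-flag masks are
// disjoint, using the values shown in the hunks above.
#include <cstdint>

constexpr uint32_t kWriteBufferHint = 0x00000001u;        // GRPC_WRITE_BUFFER_HINT (assumed)
constexpr uint32_t kWriteNoCompress = 0x00000002u;        // GRPC_WRITE_NO_COMPRESS
constexpr uint32_t kWriteUsedMask = kWriteBufferHint | kWriteNoCompress;
constexpr uint32_t kWriteInternalCompress = 0x80000000u;  // GRPC_WRITE_INTERNAL_COMPRESS
constexpr uint32_t kWriteInternalUsedMask = kWriteInternalCompress;

static_assert((kWriteUsedMask & kWriteInternalUsedMask) == 0,
              "public and internal write flags must not overlap");

int main() { return 0; }
```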

@ -57,7 +57,7 @@ Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
buf.AddClientSendClose();
buf.AddClientRecvStatus(context, &status);
call.PerformOps(&buf);
GPR_ASSERT((cq.Pluck(&buf) && buf.got_message) || !status.IsOk());
GPR_ASSERT((cq.Pluck(&buf) && buf.got_message) || !status.ok());
return status;
}

@ -214,8 +214,8 @@ void CallOpBuffer::AddServerSendStatus(
trailing_metadata_count_ = 0;
}
send_status_available_ = true;
send_status_code_ = static_cast<grpc_status_code>(status.code());
send_status_details_ = status.details();
send_status_code_ = static_cast<grpc_status_code>(status.error_code());
send_status_details_ = status.error_message();
}
void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
@ -224,11 +224,13 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
ops[*nops].op = GRPC_OP_SEND_INITIAL_METADATA;
ops[*nops].data.send_initial_metadata.count = initial_metadata_count_;
ops[*nops].data.send_initial_metadata.metadata = initial_metadata_;
ops[*nops].flags = 0;
(*nops)++;
}
if (recv_initial_metadata_) {
ops[*nops].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[*nops].data.recv_initial_metadata = &recv_initial_metadata_arr_;
ops[*nops].flags = 0;
(*nops)++;
}
if (send_message_ || send_message_buffer_) {
@ -245,15 +247,18 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
}
ops[*nops].op = GRPC_OP_SEND_MESSAGE;
ops[*nops].data.send_message = send_buf_;
ops[*nops].flags = 0;
(*nops)++;
}
if (recv_message_ || recv_message_buffer_) {
ops[*nops].op = GRPC_OP_RECV_MESSAGE;
ops[*nops].data.recv_message = &recv_buf_;
ops[*nops].flags = 0;
(*nops)++;
}
if (client_send_close_) {
ops[*nops].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops[*nops].flags = 0;
(*nops)++;
}
if (recv_status_) {
@ -264,6 +269,7 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
ops[*nops].data.recv_status_on_client.status_details = &status_details_;
ops[*nops].data.recv_status_on_client.status_details_capacity =
&status_details_capacity_;
ops[*nops].flags = 0;
(*nops)++;
}
if (send_status_available_) {
@ -275,11 +281,13 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) {
ops[*nops].data.send_status_from_server.status = send_status_code_;
ops[*nops].data.send_status_from_server.status_details =
send_status_details_.empty() ? nullptr : send_status_details_.c_str();
ops[*nops].flags = 0;
(*nops)++;
}
if (recv_closed_) {
ops[*nops].op = GRPC_OP_RECV_CLOSE_ON_SERVER;
ops[*nops].data.recv_close_on_server.cancelled = &cancelled_buf_;
ops[*nops].flags = 0;
(*nops)++;
}
}

@ -36,6 +36,6 @@
namespace grpc {
const Status& Status::OK = Status();
const Status& Status::Cancelled = Status(StatusCode::CANCELLED);
const Status& Status::CANCELLED = Status(StatusCode::CANCELLED, "");
} // namespace grpc

@ -417,18 +417,23 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count;
ops[0].data.send_initial_metadata.metadata =
ctx->send_initial_metadata.metadata;
ops[0].flags = 0;
ops[1].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ops[1].data.send_message = ctx->send_message;
ops[1].flags = 0;
ops[2].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops[2].flags = 0;
ops[3].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[3].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
ops[3].flags = 0;
ops[4].op = GRPC_OP_RECV_MESSAGE;
ops[4].data.recv_message = &(ctx->recv_message);
ops[4].flags = 0;
ops[5].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
ops[5].data.recv_status_on_client.trailing_metadata =
@ -440,6 +445,7 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
&(ctx->recv_status_on_client.status_details);
ops[5].data.recv_status_on_client.status_details_capacity =
&(ctx->recv_status_on_client.status_details_capacity);
ops[5].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -456,12 +462,15 @@ grpcsharp_call_start_client_streaming(grpc_call *call,
ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count;
ops[0].data.send_initial_metadata.metadata =
ctx->send_initial_metadata.metadata;
ops[0].flags = 0;
ops[1].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[1].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
ops[1].flags = 0;
ops[2].op = GRPC_OP_RECV_MESSAGE;
ops[2].data.recv_message = &(ctx->recv_message);
ops[2].flags = 0;
ops[3].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
ops[3].data.recv_status_on_client.trailing_metadata =
@ -473,6 +482,7 @@ grpcsharp_call_start_client_streaming(grpc_call *call,
&(ctx->recv_status_on_client.status_details);
ops[3].data.recv_status_on_client.status_details_capacity =
&(ctx->recv_status_on_client.status_details_capacity);
ops[3].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -488,15 +498,19 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count;
ops[0].data.send_initial_metadata.metadata =
ctx->send_initial_metadata.metadata;
ops[0].flags = 0;
ops[1].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ops[1].data.send_message = ctx->send_message;
ops[1].flags = 0;
ops[2].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops[2].flags = 0;
ops[3].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[3].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
ops[3].flags = 0;
ops[4].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
ops[4].data.recv_status_on_client.trailing_metadata =
@ -508,6 +522,7 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
&(ctx->recv_status_on_client.status_details);
ops[4].data.recv_status_on_client.status_details_capacity =
&(ctx->recv_status_on_client.status_details_capacity);
ops[4].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -524,9 +539,11 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call,
ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count;
ops[0].data.send_initial_metadata.metadata =
ctx->send_initial_metadata.metadata;
ops[0].flags = 0;
ops[1].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[1].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
ops[1].flags = 0;
ops[2].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
ops[2].data.recv_status_on_client.trailing_metadata =
@ -538,6 +555,7 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call,
&(ctx->recv_status_on_client.status_details);
ops[2].data.recv_status_on_client.status_details_capacity =
&(ctx->recv_status_on_client.status_details_capacity);
ops[2].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -550,6 +568,7 @@ grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx,
ops[0].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ops[0].data.send_message = ctx->send_message;
ops[0].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -560,6 +579,7 @@ grpcsharp_call_send_close_from_client(grpc_call *call,
/* TODO: don't use magic number */
grpc_op ops[1];
ops[0].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops[0].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -577,6 +597,7 @@ grpcsharp_call_send_status_from_server(grpc_call *call,
gpr_strdup(status_details);
ops[0].data.send_status_from_server.trailing_metadata = NULL;
ops[0].data.send_status_from_server.trailing_metadata_count = 0;
ops[0].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -587,6 +608,7 @@ grpcsharp_call_recv_message(grpc_call *call, grpcsharp_batch_context *ctx) {
grpc_op ops[1];
ops[0].op = GRPC_OP_RECV_MESSAGE;
ops[0].data.recv_message = &(ctx->recv_message);
ops[0].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
@ -597,10 +619,12 @@ grpcsharp_call_start_serverside(grpc_call *call, grpcsharp_batch_context *ctx) {
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
ops[0].data.send_initial_metadata.count = 0;
ops[0].data.send_initial_metadata.metadata = NULL;
ops[0].flags = 0;
ops[1].op = GRPC_OP_RECV_CLOSE_ON_SERVER;
ops[1].data.recv_close_on_server.cancelled =
(&ctx->recv_close_on_server_cancelled);
ops[1].flags = 0;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}

@ -550,6 +550,7 @@ NAN_METHOD(Call::StartBatch) {
}
uint32_t type = keys->Get(i)->Uint32Value();
ops[i].op = static_cast<grpc_op_type>(type);
ops[i].flags = 0;
switch (type) {
case GRPC_OP_SEND_INITIAL_METADATA:
op.reset(new SendMetadataOp());

@ -31,43 +31,55 @@
*
*/
// The gRPC protocol is an RPC protocol on top of HTTP2.
//
// While the most common type of RPC receives only one request message and returns only one response
// message, the protocol also supports RPCs that return multiple individual messages in a streaming
// fashion, RPCs that accept a stream of request messages, or RPCs with both streaming requests and
// responses.
//
// Conceptually, each gRPC call consists of a bidirectional stream of binary messages, with RPCs of
// the "non-streaming type" sending only one message in the corresponding direction (the protocol
// doesn't make any distinction).
//
// Each RPC uses a different HTTP2 stream, and thus multiple simultaneous RPCs can be multiplexed
// transparently on the same TCP connection.
#import <Foundation/Foundation.h>
#import <gRPC/GRXWriter.h>
@class GRPCMethodName;
@class GRPCCall;
// Key used in |NSError|'s |userInfo| dictionary to store the response metadata sent by the server.
extern id const kGRPCStatusMetadataKey;
// The gRPC protocol is an RPC protocol on top of HTTP2.
//
// While the most common type of RPC receives only one request message and
// returns only one response message, the protocol also supports RPCs that
// return multiple individual messages in a streaming fashion, RPCs that
// accept a stream of request messages, or RPCs with both streaming requests
// and responses.
//
// Conceptually, each gRPC call consists of a bidirectional stream of binary
// messages, with RPCs of the "non-streaming type" sending only one message in
// the corresponding direction (the protocol doesn't make any distinction).
//
// Each RPC uses a different HTTP2 stream, and thus multiple simultaneous RPCs
// can be multiplexed transparently on the same TCP connection.
// Represents a single gRPC remote call.
@interface GRPCCall : NSObject<GRXWriter>
// These HTTP2 headers will be passed to the server as part of this call. Each
// HTTP2 header is a name-value pair with string names and either string or binary values.
// These HTTP headers will be passed to the server as part of this call. Each HTTP header is a
// name-value pair with string names and either string or binary values.
//
// The passed dictionary has to use NSString keys, corresponding to the header names. The
// value associated to each can be a NSString object or a NSData object. E.g.:
//
// call.requestMetadata = @{
// @"Authorization": @"Bearer ...",
// @"SomeBinaryHeader": someData
// };
// call.requestMetadata = @{@"Authorization": @"Bearer ..."};
//
// call.requestMetadata[@"SomeBinaryHeader"] = someData;
//
// After the call is started, modifying this won't have any effect.
@property(nonatomic, readwrite) NSMutableDictionary *requestMetadata;
//
// For convenience, the property is initialized to an empty NSMutableDictionary, and the setter
// accepts (and copies) both mutable and immutable dictionaries.
- (NSMutableDictionary *)requestMetadata; // nonatomic
- (void)setRequestMetadata:(NSDictionary *)requestMetadata; // nonatomic, copy
// This isn't populated until the first event is delivered to the handler.
// This dictionary is populated with the HTTP headers received from the server. When the RPC ends,
// the HTTP trailers received are added to the dictionary too. It has the same structure as the
// request metadata dictionary.
//
// The first time this object calls |writeValue| on the writeable passed to |startWithWriteable|,
// the |responseMetadata| dictionary already contains the response headers. When it calls
// |writesFinishedWithError|, the dictionary contains both the response headers and trailers.
@property(atomic, readonly) NSDictionary *responseMetadata;
// The request writer has to write NSData objects into the provided Writeable. The server will

@ -46,9 +46,9 @@
#import "private/NSDictionary+GRPC.h"
#import "private/NSError+GRPC.h"
NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
@interface GRPCCall () <GRXWriteable>
// Makes it readwrite.
@property(atomic, strong) NSDictionary *responseMetadata;
@end
// The following methods of a C gRPC call object aren't reentrant, and thus
@ -82,6 +82,9 @@
// correct ordering.
GRPCDelegateWrapper *_responseWriteable;
id<GRXWriter> _requestWriter;
NSMutableDictionary *_requestMetadata;
NSMutableDictionary *_responseMetadata;
}
@synthesize state = _state;
@ -116,10 +119,27 @@
_callQueue = dispatch_queue_create("org.grpc.call", NULL);
_requestWriter = requestWriter;
_requestMetadata = [NSMutableDictionary dictionary];
_responseMetadata = [NSMutableDictionary dictionary];
}
return self;
}
#pragma mark Metadata
- (NSMutableDictionary *)requestMetadata {
return _requestMetadata;
}
- (void)setRequestMetadata:(NSDictionary *)requestMetadata {
_requestMetadata = [NSMutableDictionary dictionaryWithDictionary:requestMetadata];
}
- (NSDictionary *)responseMetadata {
return _responseMetadata;
}
#pragma mark Finish
- (void)finishWithError:(NSError *)errorOrNil {
@ -277,7 +297,7 @@
// The first one (metadataHandler), when the response headers are received.
// The second one (completionHandler), whenever the RPC finishes for any reason.
- (void)invokeCallWithMetadataHandler:(void(^)(NSDictionary *))metadataHandler
completionHandler:(void(^)(NSError *))completionHandler {
completionHandler:(void(^)(NSError *, NSDictionary *))completionHandler {
// TODO(jcanizales): Add error handlers for async failures
[_wrappedCall startBatchWithOperations:@[[[GRPCOpRecvMetadata alloc]
initWithHandler:metadataHandler]]];
@ -287,16 +307,26 @@
- (void)invokeCall {
__weak GRPCCall *weakSelf = self;
[self invokeCallWithMetadataHandler:^(NSDictionary *metadata) {
// Response metadata received.
[self invokeCallWithMetadataHandler:^(NSDictionary *headers) {
// Response headers received.
GRPCCall *strongSelf = weakSelf;
if (strongSelf) {
strongSelf.responseMetadata = metadata;
[strongSelf->_responseMetadata addEntriesFromDictionary:headers];
[strongSelf startNextRead];
}
} completionHandler:^(NSError *error) {
// TODO(jcanizales): Merge HTTP2 trailers into response metadata.
[weakSelf finishWithError:error];
} completionHandler:^(NSError *error, NSDictionary *trailers) {
GRPCCall *strongSelf = weakSelf;
if (strongSelf) {
[strongSelf->_responseMetadata addEntriesFromDictionary:trailers];
if (error) {
NSMutableDictionary *userInfo =
[NSMutableDictionary dictionaryWithDictionary:error.userInfo];
userInfo[kGRPCStatusMetadataKey] = strongSelf->_responseMetadata;
error = [NSError errorWithDomain:error.domain code:error.code userInfo:userInfo];
}
[strongSelf finishWithError:error];
}
}];
// Now that the RPC has been initiated, request writes can start.
[_requestWriter startWithWriteable:self];

@ -79,7 +79,7 @@ typedef void(^GRPCCompletionHandler)(NSDictionary *);
@interface GRPCOpRecvStatus : NSObject <GRPCOp>
- (instancetype)initWithHandler:(void(^)(NSError *))handler NS_DESIGNATED_INITIALIZER;
- (instancetype)initWithHandler:(void(^)(NSError *, NSDictionary *))handler NS_DESIGNATED_INITIALIZER;
@end

@ -165,9 +165,7 @@
}
- (void)finish {
NSDictionary *metadata = [NSDictionary
grpc_dictionaryFromMetadata:_recvInitialMetadata.metadata
count:_recvInitialMetadata.count];
NSDictionary *metadata = [NSDictionary grpc_dictionaryFromMetadataArray:_recvInitialMetadata];
if (_handler) {
_handler(metadata);
}
@ -209,41 +207,44 @@
@end
@implementation GRPCOpRecvStatus{
void(^_handler)(NSError *);
void(^_handler)(NSError *, NSDictionary *);
grpc_status_code _statusCode;
char *_details;
size_t _detailsCapacity;
grpc_status _status;
grpc_metadata_array _metadata;
}
- (instancetype) init {
return [self initWithHandler:nil];
}
- (instancetype) initWithHandler:(void (^)(NSError *))handler {
- (instancetype) initWithHandler:(void (^)(NSError *, NSDictionary *))handler {
if (self = [super init]) {
_handler = handler;
grpc_metadata_array_init(&_status.metadata);
grpc_metadata_array_init(&_metadata);
}
return self;
}
- (void)getOp:(grpc_op *)op {
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.status = &_status.status;
op->data.recv_status_on_client.status_details = &_status.details;
op->data.recv_status_on_client.status = &_statusCode;
op->data.recv_status_on_client.status_details = &_details;
op->data.recv_status_on_client.status_details_capacity = &_detailsCapacity;
op->data.recv_status_on_client.trailing_metadata = &_status.metadata;
op->data.recv_status_on_client.trailing_metadata = &_metadata;
}
- (void)finish {
if (_handler) {
NSError *error = [NSError grpc_errorFromStatus:&_status];
_handler(error);
NSError *error = [NSError grpc_errorFromStatusCode:_statusCode details:_details];
NSDictionary *trailers = [NSDictionary grpc_dictionaryFromMetadataArray:_metadata];
_handler(error, trailers);
}
}
- (void)dealloc {
grpc_metadata_array_destroy(&_status.metadata);
gpr_free(_status.details);
grpc_metadata_array_destroy(&_metadata);
gpr_free(_details);
}
@end

@ -35,6 +35,7 @@
#include <grpc/grpc.h>
@interface NSDictionary (GRPC)
+ (instancetype)grpc_dictionaryFromMetadata:(struct grpc_metadata *)entries count:(size_t)count;
+ (instancetype)grpc_dictionaryFromMetadataArray:(grpc_metadata_array)array;
+ (instancetype)grpc_dictionaryFromMetadata:(grpc_metadata *)entries count:(size_t)count;
- (grpc_metadata *)grpc_metadataArray;
@end

@ -98,14 +98,18 @@
#pragma mark Category for metadata arrays
@implementation NSDictionary (GRPC)
+ (instancetype)grpc_dictionaryFromMetadataArray:(grpc_metadata_array)array {
return [self grpc_dictionaryFromMetadata:array.metadata count:array.count];
}
+ (instancetype)grpc_dictionaryFromMetadata:(grpc_metadata *)entries count:(size_t)count {
NSMutableDictionary *metadata = [NSMutableDictionary dictionaryWithCapacity:count];
for (grpc_metadata *entry = entries; entry < entries + count; entry++) {
// TODO(jcanizales): Verify in a C library test that it's converting header names to lower case
// automatically.
NSString *name = [NSString stringWithCString:entry->key encoding:NSASCIIStringEncoding];
if (!name) {
// log?
if (!name || metadata[name]) {
// Log if name is nil?
continue;
}
id value;
@ -115,10 +119,7 @@
} else {
value = [NSString grpc_stringFromMetadataValue:entry];
}
if (!metadata[name]) {
metadata[name] = [NSMutableArray array];
}
[metadata[name] addObject:value];
metadata[name] = value;
}
return metadata;
}

@ -32,6 +32,7 @@
*/
#import <Foundation/Foundation.h>
#include <grpc/grpc.h>
// TODO(jcanizales): Make the domain string public.
extern NSString *const kGRPCErrorDomain;
@ -56,17 +57,8 @@ typedef NS_ENUM(NSInteger, GRPCErrorCode) {
GRPCErrorCodeDataLoss = 15
};
// TODO(jcanizales): This is conflating trailing metadata with Status details. Fix it once there's
// a decision on how to codify Status.
#include <grpc/grpc.h>
typedef struct grpc_status {
grpc_status_code status;
char *details;
grpc_metadata_array metadata;
} grpc_status;
@interface NSError (GRPC)
// Returns nil if the status is OK. Otherwise, a NSError whose code is one of
// GRPCErrorCode and whose domain is kGRPCErrorDomain.
+ (instancetype)grpc_errorFromStatus:(struct grpc_status *)status;
// Returns nil if the status code is OK. Otherwise, a NSError whose code is one of |GRPCErrorCode|
// and whose domain is |kGRPCErrorDomain|.
+ (instancetype)grpc_errorFromStatusCode:(grpc_status_code)statusCode details:(char *)details;
@end

@ -35,17 +35,16 @@
#include <grpc.h>
NSString *const kGRPCErrorDomain = @"org.grpc";
NSString * const kGRPCErrorDomain = @"io.grpc";
@implementation NSError (GRPC)
+ (instancetype)grpc_errorFromStatus:(struct grpc_status *)status {
if (status->status == GRPC_STATUS_OK) {
+ (instancetype)grpc_errorFromStatusCode:(grpc_status_code)statusCode details:(char *)details {
if (statusCode == GRPC_STATUS_OK) {
return nil;
}
NSString *message =
[NSString stringWithFormat:@"Code=%i Message='%s'", status->status, status->details];
NSString *message = [NSString stringWithCString:details encoding:NSASCIIStringEncoding];
return [NSError errorWithDomain:kGRPCErrorDomain
code:status->status
code:statusCode
userInfo:@{NSLocalizedDescriptionKey: message}];
}
@end

@ -52,11 +52,11 @@ Pod::Spec.new do |s|
# Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
# You can run this command manually if you later change your protos and need to regenerate.
s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto"
s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto"
# The --objc_out plugin generates a pair of .pbobjc.h/.pbobjc.m files for each .proto file.
s.subspec "Messages" do |ms|
ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}"
ms.source_files = "*.pbobjc.{h,m}"
ms.header_mappings_dir = "."
ms.requires_arc = false
ms.dependency "Protobuf", "~> 3.0.0-alpha-3"
@ -65,7 +65,7 @@ Pod::Spec.new do |s|
# The --objcgrpc_out plugin generates a pair of .pbrpc.h/.pbrpc.m files for each .proto file with
# a service defined.
s.subspec "Services" do |ss|
ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}"
ss.source_files = "*.pbrpc.{h,m}"
ss.header_mappings_dir = "."
ss.requires_arc = true
ss.dependency "gRPC", "~> 0.5"
@ -74,9 +74,21 @@ Pod::Spec.new do |s|
end
```
The file should be named `<Podspec file name>.podspec`. Once your library has a Podspec, Cocoapods
can install it into any XCode project. For that, go into your project's directory and create a
Podfile by running:
The file should be named `<Podspec file name>.podspec`.
Note: If your proto files are in a directory hierarchy, you might want to adjust the _globs_ used in
the sample Podspec above. For example, you could use:
```ruby
s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto"
...
ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}"
...
ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}"
```
Once your library has a Podspec, Cocoapods can install it into any XCode project. For that, go into
your project's directory and create a Podfile by running:
```sh
pod init

@ -7,17 +7,17 @@ Pod::Spec.new do |s|
s.osx.deployment_target = "10.8"
# Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto"
s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto"
s.subspec "Messages" do |ms|
ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}"
ms.source_files = "*.pbobjc.{h,m}"
ms.header_mappings_dir = "."
ms.requires_arc = false
ms.dependency "Protobuf", "~> 3.0.0-alpha-3"
end
s.subspec "Services" do |ss|
ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}"
ss.source_files = "*.pbrpc.{h,m}"
ss.header_mappings_dir = "."
ss.requires_arc = true
ss.dependency "gRPC", "~> 0.5"

@ -7,17 +7,17 @@ Pod::Spec.new do |s|
s.osx.deployment_target = "10.8"
# Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients.
s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto"
s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto"
s.subspec "Messages" do |ms|
ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}"
ms.source_files = "*.pbobjc.{h,m}"
ms.header_mappings_dir = "."
ms.requires_arc = false
ms.dependency "Protobuf", "~> 3.0.0-alpha-3"
end
s.subspec "Services" do |ss|
ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}"
ss.source_files = "*.pbrpc.{h,m}"
ss.header_mappings_dir = "."
ss.requires_arc = true
ss.dependency "gRPC", "~> 0.5"

@ -43,24 +43,38 @@
// These are a few tests similar to InteropTests, but which use the generic gRPC client (GRPCCall)
// rather than a generated proto library on top of it.
static NSString * const kHostAddress = @"grpc-test.sandbox.google.com";
static NSString * const kPackage = @"grpc.testing";
static NSString * const kService = @"TestService";
static GRPCMethodName *kInexistentMethod;
static GRPCMethodName *kEmptyCallMethod;
static GRPCMethodName *kUnaryCallMethod;
@interface GRPCClientTests : XCTestCase
@end
@implementation GRPCClientTests
- (void)testConnectionToRemoteServer {
__weak XCTestExpectation *expectation = [self expectationWithDescription:@"Server reachable."];
- (void)setUp {
// This method isn't implemented by the remote server.
GRPCMethodName *method = [[GRPCMethodName alloc] initWithPackage:@"grpc.testing"
interface:@"TestService"
method:@"Nonexistent"];
kInexistentMethod = [[GRPCMethodName alloc] initWithPackage:kPackage
interface:kService
method:@"Inexistent"];
kEmptyCallMethod = [[GRPCMethodName alloc] initWithPackage:kPackage
interface:kService
method:@"EmptyCall"];
kUnaryCallMethod = [[GRPCMethodName alloc] initWithPackage:kPackage
interface:kService
method:@"UnaryCall"];
}
id<GRXWriter> requestsWriter = [GRXWriter writerWithValue:[NSData data]];
- (void)testConnectionToRemoteServer {
__weak XCTestExpectation *expectation = [self expectationWithDescription:@"Server reachable."];
GRPCCall *call = [[GRPCCall alloc] initWithHost:@"grpc-test.sandbox.google.com"
method:method
requestsWriter:requestsWriter];
GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress
method:kInexistentMethod
requestsWriter:[GRXWriter writerWithValue:[NSData data]]];
id<GRXWriteable> responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) {
XCTFail(@"Received unexpected response: %@", value);
@ -80,15 +94,9 @@
__weak XCTestExpectation *response = [self expectationWithDescription:@"Empty response received."];
__weak XCTestExpectation *completion = [self expectationWithDescription:@"Empty RPC completed."];
GRPCMethodName *method = [[GRPCMethodName alloc] initWithPackage:@"grpc.testing"
interface:@"TestService"
method:@"EmptyCall"];
id<GRXWriter> requestsWriter = [GRXWriter writerWithValue:[NSData data]];
GRPCCall *call = [[GRPCCall alloc] initWithHost:@"grpc-test.sandbox.google.com"
method:method
requestsWriter:requestsWriter];
GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress
method:kEmptyCallMethod
requestsWriter:[GRXWriter writerWithValue:[NSData data]]];
id<GRXWriteable> responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) {
XCTAssertNotNil(value, @"nil value received as response.");
@ -105,34 +113,27 @@
}
- (void)testSimpleProtoRPC {
__weak XCTestExpectation *response = [self expectationWithDescription:@"Response received."];
__weak XCTestExpectation *expectedResponse =
[self expectationWithDescription:@"Expected response."];
__weak XCTestExpectation *response = [self expectationWithDescription:@"Expected response."];
__weak XCTestExpectation *completion = [self expectationWithDescription:@"RPC completed."];
GRPCMethodName *method = [[GRPCMethodName alloc] initWithPackage:@"grpc.testing"
interface:@"TestService"
method:@"UnaryCall"];
RMTSimpleRequest *request = [[RMTSimpleRequest alloc] init];
RMTSimpleRequest *request = [RMTSimpleRequest message];
request.responseSize = 100;
request.fillUsername = YES;
request.fillOauthScope = YES;
id<GRXWriter> requestsWriter = [GRXWriter writerWithValue:[request data]];
GRPCCall *call = [[GRPCCall alloc] initWithHost:@"grpc-test.sandbox.google.com"
method:method
GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress
method:kUnaryCallMethod
requestsWriter:requestsWriter];
id<GRXWriteable> responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) {
XCTAssertNotNil(value, @"nil value received as response.");
[response fulfill];
XCTAssertGreaterThan(value.length, 0, @"Empty response received.");
RMTSimpleResponse *response = [RMTSimpleResponse parseFromData:value error:NULL];
RMTSimpleResponse *responseProto = [RMTSimpleResponse parseFromData:value error:NULL];
// We expect empty strings, not nil:
XCTAssertNotNil(response.username, @"Response's username is nil.");
XCTAssertNotNil(response.oauthScope, @"Response's OAuth scope is nil.");
[expectedResponse fulfill];
XCTAssertNotNil(responseProto.username, @"Response's username is nil.");
XCTAssertNotNil(responseProto.oauthScope, @"Response's OAuth scope is nil.");
[response fulfill];
} completionHandler:^(NSError *errorOrNil) {
XCTAssertNil(errorOrNil, @"Finished with unexpected error: %@", errorOrNil);
[completion fulfill];
@ -143,4 +144,36 @@
[self waitForExpectationsWithTimeout:2. handler:nil];
}
- (void)testMetadata {
__weak XCTestExpectation *expectation = [self expectationWithDescription:@"RPC unauthorized."];
RMTSimpleRequest *request = [RMTSimpleRequest message];
request.fillUsername = YES;
request.fillOauthScope = YES;
id<GRXWriter> requestsWriter = [GRXWriter writerWithValue:[request data]];
GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress
method:kUnaryCallMethod
requestsWriter:requestsWriter];
call.requestMetadata[@"Authorization"] = @"Bearer bogusToken";
id<GRXWriteable> responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) {
XCTFail(@"Received unexpected response: %@", value);
} completionHandler:^(NSError *errorOrNil) {
XCTAssertNotNil(errorOrNil, @"Finished without error!");
XCTAssertEqual(errorOrNil.code, 16, @"Finished with unexpected error: %@", errorOrNil);
XCTAssertEqualObjects(call.responseMetadata, errorOrNil.userInfo[kGRPCStatusMetadataKey],
@"Metadata in the NSError object and call object differ.");
NSString *challengeHeader = call.responseMetadata[@"www-authenticate"];
XCTAssertGreaterThan(challengeHeader.length, 0,
@"No challenge in response headers %@", call.responseMetadata);
[expectation fulfill];
}];
[call startWithWriteable:responsesWriteable];
[self waitForExpectationsWithTimeout:2. handler:nil];
}
@end

@ -397,6 +397,7 @@ PHP_METHOD(Call, startBatch) {
goto cleanup;
}
ops[op_num].op = (grpc_op_type)index;
ops[op_num].flags = 0;
op_num++;
}
error = grpc_call_start_batch(call->wrapped, ops, op_num, call->wrapped);

@ -43,9 +43,19 @@ abstract class AbstractCall {
* Create a new Call wrapper object.
* @param Channel $channel The channel to communicate on
* @param string $method The method to call on the remote server
* @param callback $deserialize A callback function to deserialize
* the response
* @param (optional) long $timeout Timeout in microseconds
*/
public function __construct(Channel $channel, $method, $deserialize) {
$this->call = new Call($channel, $method, Timeval::infFuture());
public function __construct(Channel $channel, $method, $deserialize, $timeout = false) {
if ($timeout) {
$now = Timeval::now();
$delta = new Timeval($timeout);
$deadline = $now->add($delta);
} else {
$deadline = Timeval::infFuture();
}
$this->call = new Call($channel, $method, $deadline);
$this->deserialize = $deserialize;
$this->metadata = null;
}

@ -83,6 +83,21 @@ class BaseStub {
return "https://" . $this->hostname . $service_name;
}
/**
* extract $timeout from $metadata
* @param $metadata The metadata map
* @return list($metadata_copy, $timeout)
*/
private function _extract_timeout_from_metadata($metadata) {
$timeout = false;
$metadata_copy = $metadata;
if (isset($metadata['timeout'])) {
$timeout = $metadata['timeout'];
unset($metadata_copy['timeout']);
}
return array($metadata_copy, $timeout);
}
/* This class is intended to be subclassed by generated code, so all functions
begin with "_" to avoid name collisions. */
@ -99,8 +114,8 @@ class BaseStub {
$argument,
callable $deserialize,
$metadata = array()) {
$call = new UnaryCall($this->channel, $method, $deserialize);
$actual_metadata = $metadata;
list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata);
$call = new UnaryCall($this->channel, $method, $deserialize, $timeout);
$jwt_aud_uri = $this->_get_jwt_aud_uri($method);
if (is_callable($this->update_metadata)) {
$actual_metadata = call_user_func($this->update_metadata,
@ -126,8 +141,8 @@ class BaseStub {
$arguments,
callable $deserialize,
$metadata = array()) {
$call = new ClientStreamingCall($this->channel, $method, $deserialize);
$actual_metadata = $metadata;
list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata);
$call = new ClientStreamingCall($this->channel, $method, $deserialize, $timeout);
$jwt_aud_uri = $this->_get_jwt_aud_uri($method);
if (is_callable($this->update_metadata)) {
$actual_metadata = call_user_func($this->update_metadata,
@ -152,8 +167,8 @@ class BaseStub {
$argument,
callable $deserialize,
$metadata = array()) {
$call = new ServerStreamingCall($this->channel, $method, $deserialize);
$actual_metadata = $metadata;
list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata);
$call = new ServerStreamingCall($this->channel, $method, $deserialize, $timeout);
$jwt_aud_uri = $this->_get_jwt_aud_uri($method);
if (is_callable($this->update_metadata)) {
$actual_metadata = call_user_func($this->update_metadata,
@ -175,8 +190,8 @@ class BaseStub {
public function _bidiRequest($method,
callable $deserialize,
$metadata = array()) {
$call = new BidiStreamingCall($this->channel, $method, $deserialize);
$actual_metadata = $metadata;
list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata);
$call = new BidiStreamingCall($this->channel, $method, $deserialize, $timeout);
$jwt_aud_uri = $this->_get_jwt_aud_uri($method);
if (is_callable($this->update_metadata)) {
$actual_metadata = call_user_func($this->update_metadata,

@ -270,6 +270,24 @@ function cancelAfterFirstResponse($stub) {
'Call status was not CANCELLED');
}
function timeoutOnSleepingServer($stub) {
$call = $stub->FullDuplexCall(array('timeout' => 500000));
$request = new grpc\testing\StreamingOutputCallRequest();
$request->setResponseType(grpc\testing\PayloadType::COMPRESSABLE);
$response_parameters = new grpc\testing\ResponseParameters();
$response_parameters->setSize(8);
$request->addResponseParameters($response_parameters);
$payload = new grpc\testing\Payload();
$payload->setBody(str_repeat("\0", 9));
$request->setPayload($payload);
$call->write($request);
$response = $call->read();
hardAssert($call->getStatus()->code === Grpc\STATUS_DEADLINE_EXCEEDED,
'Call status was not DEADLINE_EXCEEDED');
}
$args = getopt('', array('server_host:', 'server_port:', 'test_case:',
'server_host_override:', 'oauth_scope:',
'default_service_account:'));
@ -341,6 +359,9 @@ switch ($args['test_case']) {
case 'cancel_after_first_response':
cancelAfterFirstResponse($stub);
break;
case 'timeout_on_sleeping_server':
timeoutOnSleepingServer($stub);
break;
case 'service_account_creds':
serviceAccountCreds($stub, $args);
break;

@ -61,4 +61,26 @@ class TimevalTest extends PHPUnit_Framework_TestCase{
$this->assertLessThan(0, Grpc\Timeval::compare($zero, $now));
$this->assertLessThan(0, Grpc\Timeval::compare($now, $future));
}
public function testNowAndAdd() {
$now = Grpc\Timeval::now();
$delta = new Grpc\Timeval(1000);
$deadline = $now->add($delta);
$this->assertGreaterThan(0, Grpc\Timeval::compare($deadline, $now));
}
public function testNowAndSubtract() {
$now = Grpc\Timeval::now();
$delta = new Grpc\Timeval(1000);
$deadline = $now->subtract($delta);
$this->assertLessThan(0, Grpc\Timeval::compare($deadline, $now));
}
public function testAddAndSubtract() {
$now = Grpc\Timeval::now();
$delta = new Grpc\Timeval(1000);
$deadline = $now->add($delta);
$back_to_now = $deadline->subtract($delta);
$this->assertSame(0, Grpc\Timeval::compare($back_to_now, $now));
}
}
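
The new Timeval tests exercise add, subtract, and compare. For reference, a minimal sketch of the same arithmetic using the core gpr_timespec helpers that appear elsewhere in this change; that Grpc\Timeval delegates to exactly these helpers is an assumption, not something this diff shows.

#include <grpc/support/log.h>
#include <grpc/support/time.h>

/* Sketch: gpr_timespec arithmetic mirroring testNowAndAdd/testAddAndSubtract. */
static void timespec_arithmetic_example(void) {
  gpr_timespec now = gpr_now();
  gpr_timespec delta = gpr_time_from_micros(1000);
  gpr_timespec deadline = gpr_time_add(now, delta);        /* now + 1000us */
  gpr_timespec back_to_now = gpr_time_sub(deadline, delta);
  GPR_ASSERT(gpr_time_cmp(deadline, now) > 0);     /* add moves time forward */
  GPR_ASSERT(gpr_time_cmp(back_to_now, now) == 0); /* subtract undoes the add */
}
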

@ -169,6 +169,7 @@ int pygrpc_produce_op(PyObject *op, grpc_op *result) {
return 0;
}
c_op.op = type;
c_op.flags = 0;
switch (type) {
case GRPC_OP_SEND_INITIAL_METADATA:
if (!pygrpc_cast_pylist_to_send_metadata(
@ -196,10 +197,11 @@ int pygrpc_produce_op(PyObject *op, grpc_op *result) {
return 0;
}
if (!PyTuple_Check(PyTuple_GET_ITEM(op, STATUS_INDEX))) {
char buf[64];
snprintf(buf, sizeof(buf), "expected tuple status in op of length %d",
char *buf;
gpr_asprintf(&buf, "expected tuple status in op of length %d",
STATUS_TUPLE_SIZE);
PyErr_SetString(PyExc_TypeError, buf);
PyErr_SetString(PyExc_ValueError, buf);
gpr_free(buf);
return 0;
}
c_op.data.send_status_from_server.status = PyInt_AsLong(

@ -507,6 +507,7 @@ static void grpc_run_batch_stack_fill_ops(run_batch_stack *st, VALUE ops_hash) {
NUM2INT(this_op));
};
st->ops[st->op_num].op = (grpc_op_type)NUM2INT(this_op);
st->ops[st->op_num].flags = 0;
st->op_num++;
}
}
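
Most of the churn below is mechanical: grpc_op gained a flags word, and every wrapper and end2end test now zero-initializes it before grpc_call_start_batch, since the core rejects unknown flag values. A minimal sketch of the pattern, matching the test code that follows (the helper name is illustrative):

#include <grpc/grpc.h>
#include <grpc/support/log.h>

/* Sketch: every op sets flags explicitly (0 unless a GRPC_WRITE_* flag is
 * intended) before the batch is started. */
static void start_close_batch(grpc_call *call, void *tag_value) {
  grpc_op ops[2];
  grpc_op *op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0; /* no write flags requested */
  op++;
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  op->flags = 0;
  op++;
  GPR_ASSERT(GRPC_CALL_OK ==
             grpc_call_start_batch(call, ops, (size_t)(op - ops), tag_value));
}
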

@ -135,17 +135,21 @@ void test_connect(const char *server_host, const char *client_host, int port,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -161,14 +165,17 @@ void test_connect(const char *server_host, const char *client_host, int port,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -84,6 +84,7 @@ END2END_TESTS = {
'request_response_with_payload_and_call_creds': TestOptions(flaky=False, secure=True),
'request_with_large_metadata': default_test_options,
'request_with_payload': default_test_options,
'request_with_flags': default_test_options,
'server_finishes_request': default_test_options,
'simple_delayed_request': default_test_options,
'simple_request': default_test_options,
@ -101,7 +102,7 @@ def main():
'language': 'c',
'secure': 'check' if END2END_FIXTURES[f].secure else 'no',
'src': ['test/core/end2end/fixtures/%s.c' % f],
'platforms': [ 'posix' ] if f.endswith('_posix') else [ 'windows', 'posix' ],
'platforms': [ 'posix' ] if f.endswith('_posix') else END2END_FIXTURES[f].platforms,
}
for f in sorted(END2END_FIXTURES.keys())] + [
{

@ -67,12 +67,14 @@ int main(int argc, char **argv) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(call, ops, op - ops, tag(1)));

@ -126,17 +126,21 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));

@ -142,18 +142,23 @@ static void test_cancel_after_accept(grpc_end2end_test_config config,
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -167,15 +172,19 @@ static void test_cancel_after_accept(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(3)));

@ -142,20 +142,26 @@ static void test_cancel_after_accept_and_writes_closed(
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -169,15 +175,19 @@ static void test_cancel_after_accept_and_writes_closed(
op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(3)));

@ -137,20 +137,26 @@ static void test_cancel_after_invoke(grpc_end2end_test_config config,
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, test_ops, tag(1)));

@ -136,20 +136,26 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config,
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, test_ops, tag(1)));

@ -125,17 +125,21 @@ static void test_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -149,14 +153,17 @@ static void test_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -113,17 +113,21 @@ static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -141,14 +145,17 @@ static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -121,17 +121,21 @@ static void test_early_server_shutdown_finishes_inflight_calls(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->data.send_initial_metadata.metadata = NULL;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -145,6 +149,7 @@ static void test_early_server_shutdown_finishes_inflight_calls(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -128,17 +128,21 @@ static void test_early_server_shutdown_finishes_inflight_calls(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->data.send_initial_metadata.metadata = NULL;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -156,14 +160,17 @@ static void test_early_server_shutdown_finishes_inflight_calls(
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -127,17 +127,21 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -151,14 +155,17 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -258,8 +265,10 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(c1, ops, op - ops, tag(301)));
@ -270,9 +279,11 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op->data.recv_status_on_client.status = &status1;
op->data.recv_status_on_client.status_details = &details1;
op->data.recv_status_on_client.status_details_capacity = &details_capacity1;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv1;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(c1, ops, op - ops, tag(302)));
@ -280,8 +291,10 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(c2, ops, op - ops, tag(401)));
@ -292,9 +305,11 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op->data.recv_status_on_client.status = &status2;
op->data.recv_status_on_client.status_details = &details2;
op->data.recv_status_on_client.status_details_capacity = &details_capacity2;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv1;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(c2, ops, op - ops, tag(402)));
@ -315,14 +330,17 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s1, ops, op - ops, tag(102)));
@ -346,14 +364,17 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s2, ops, op - ops, tag(202)));

@ -143,20 +143,25 @@ static void test_max_message_length(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -170,6 +175,7 @@ static void test_max_message_length(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -137,15 +137,18 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -159,9 +162,11 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(101)));
@ -172,15 +177,18 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(2)));
op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -190,6 +198,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(s, ops, op - ops, tag(103)));
@ -210,6 +219,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(3)));
@ -218,6 +228,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(104)));

@ -128,17 +128,21 @@ static void simple_request_body(grpc_end2end_test_fixture f, void *rc) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -152,14 +156,17 @@ static void simple_request_body(grpc_end2end_test_fixture f, void *rc) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -157,23 +157,29 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_c;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -188,9 +194,11 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_s;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -200,14 +208,17 @@ static void test_request_response_with_metadata_and_payload(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -143,23 +143,29 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_c;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -174,9 +180,11 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_s;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -186,14 +194,17 @@ static void test_request_response_with_metadata_and_payload(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -135,23 +135,29 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -165,9 +171,11 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -177,14 +185,17 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -210,23 +210,29 @@ static void request_response_with_payload_and_call_creds(
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -247,9 +253,11 @@ static void request_response_with_payload_and_call_creds(
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -259,14 +267,17 @@ static void request_response_with_payload_and_call_creds(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -142,23 +142,29 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_c;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -174,9 +180,11 @@ static void test_request_response_with_metadata_and_payload(
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_s;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -186,15 +194,18 @@ static void test_request_response_with_metadata_and_payload(
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = response_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 2;
op->data.send_status_from_server.trailing_metadata = meta_t;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -0,0 +1,206 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/transport/stream_op.h"
#include "test/core/end2end/cq_verifier.h"
enum { TIMEOUT = 200000 };
static void *tag(gpr_intptr t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_client(&f, client_args);
config.init_server(&f, server_args);
return f;
}
static gpr_timespec n_seconds_time(int n) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
}
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_time());
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown(f->server);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->server_cq);
drain_cq(f->server_cq);
grpc_completion_queue_destroy(f->server_cq);
grpc_completion_queue_shutdown(f->client_cq);
drain_cq(f->client_cq);
grpc_completion_queue_destroy(f->client_cq);
}
static void test_invoke_request_with_flags(
grpc_end2end_test_config config, gpr_uint32 *flags_for_op,
grpc_call_error call_start_batch_expected_result) {
grpc_call *c;
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_end2end_test_fixture f =
begin_test(config, "test_invoke_request_with_flags", NULL, NULL);
cq_verifier *v_client = cq_verifier_create(f.client_cq);
cq_verifier *v_server = cq_verifier_create(f.server_cq);
grpc_op ops[6];
grpc_op *op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_byte_buffer *request_payload_recv = NULL;
grpc_call_details call_details;
grpc_status_code status;
char *details = NULL;
size_t details_capacity = 0;
grpc_call_error expectation;
c = grpc_channel_create_call(f.client, f.client_cq, "/foo",
"foo.test.google.fr", deadline);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = flags_for_op[op->op];
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = flags_for_op[op->op];
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = flags_for_op[op->op];
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = flags_for_op[op->op];
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = flags_for_op[op->op];
op++;
expectation = call_start_batch_expected_result;
GPR_ASSERT(expectation == grpc_call_start_batch(c, ops, op - ops, tag(1)));
gpr_free(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
cq_verifier_destroy(v_client);
cq_verifier_destroy(v_server);
grpc_byte_buffer_destroy(request_payload);
grpc_byte_buffer_destroy(request_payload_recv);
end_test(&f);
config.tear_down_data(&f);
}
void grpc_end2end_tests(grpc_end2end_test_config config) {
size_t i;
gpr_uint32 flags_for_op[GRPC_OP_RECV_CLOSE_ON_SERVER+1];
{
/* check that all grpc_op_types fail when their flag value is set to an
* invalid value */
int indices[] = {GRPC_OP_SEND_INITIAL_METADATA, GRPC_OP_SEND_MESSAGE,
GRPC_OP_SEND_CLOSE_FROM_CLIENT,
GRPC_OP_RECV_INITIAL_METADATA,
GRPC_OP_RECV_STATUS_ON_CLIENT};
for (i = 0; i < GPR_ARRAY_SIZE(indices); ++i) {
memset(flags_for_op, 0, sizeof(flags_for_op));
flags_for_op[indices[i]] = 0xDEADBEEF;
test_invoke_request_with_flags(config, flags_for_op,
GRPC_CALL_ERROR_INVALID_FLAGS);
}
}
{
/* check that valid GRPC_WRITE_* flags are accepted on GRPC_OP_SEND_MESSAGE */
gpr_uint32 flags[] = {GRPC_WRITE_BUFFER_HINT, GRPC_WRITE_NO_COMPRESS,
GRPC_WRITE_INTERNAL_COMPRESS};
for (i = 0; i < GPR_ARRAY_SIZE(flags); ++i) {
memset(flags_for_op, 0, sizeof(flags_for_op));
flags_for_op[GRPC_OP_SEND_MESSAGE] = flags[i];
test_invoke_request_with_flags(config, flags_for_op, GRPC_CALL_OK);
}
}
}
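
The new request_with_flags test checks both directions: an out-of-range flags value on any op makes grpc_call_start_batch fail with GRPC_CALL_ERROR_INVALID_FLAGS, while the GRPC_WRITE_* values on a send-message op succeed. A usage sketch of the valid case (helper name and payload variable are illustrative):

#include <grpc/grpc.h>

/* Sketch: fill one send-message op with an explicit, valid write flag. */
static void fill_buffered_send_op(grpc_op *op, grpc_byte_buffer *payload) {
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message = payload;
  op->flags = GRPC_WRITE_BUFFER_HINT; /* hint: the transport may delay flushing */
}
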

@ -142,20 +142,25 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) {
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 1;
op->data.send_initial_metadata.metadata = &meta;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -169,9 +174,11 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -181,11 +188,13 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -133,20 +133,25 @@ static void test_invoke_request_with_payload(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -160,9 +165,11 @@ static void test_invoke_request_with_payload(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
@ -172,11 +179,13 @@ static void test_invoke_request_with_payload(grpc_end2end_test_config config) {
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103)));

@ -129,15 +129,18 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -151,14 +154,17 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -121,17 +121,21 @@ static void simple_delayed_request_body(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -147,14 +151,17 @@ static void simple_delayed_request_body(grpc_end2end_test_config config,
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -129,17 +129,21 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -153,14 +157,17 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -129,17 +129,21 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
@ -153,14 +157,17 @@ static void simple_request_body(grpc_end2end_test_fixture f) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));

@ -68,12 +68,14 @@ int main(int argc, char **argv) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(call, ops, op - ops, tag(1)));

@ -157,7 +157,7 @@ class AsyncEnd2endTest : public ::testing::Test {
client_ok(4);
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
}
}
@ -218,7 +218,7 @@ TEST_F(AsyncEnd2endTest, AsyncNextRpc) {
verify_timed_ok(&cli_cq_, 4, true);
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
}
// Two pings and a final pong.
@ -272,7 +272,7 @@ TEST_F(AsyncEnd2endTest, SimpleClientStreaming) {
client_ok(10);
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
}
// One ping, two pongs.
@ -323,7 +323,7 @@ TEST_F(AsyncEnd2endTest, SimpleServerStreaming) {
cli_stream->Finish(&recv_status, tag(9));
client_ok(9);
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
}
// One ping, one pong.
@ -376,7 +376,7 @@ TEST_F(AsyncEnd2endTest, SimpleBidiStreaming) {
cli_stream->Finish(&recv_status, tag(10));
client_ok(10);
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
}
// Metadata tests
@ -420,7 +420,7 @@ TEST_F(AsyncEnd2endTest, ClientInitialMetadataRpc) {
client_ok(4);
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
}
TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) {
@ -467,7 +467,7 @@ TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) {
client_ok(6);
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
}
TEST_F(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
@ -507,7 +507,7 @@ TEST_F(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
response_reader->Finish(&recv_response, &recv_status, tag(5));
client_ok(5);
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
EXPECT_EQ(meta1.second, server_trailing_metadata.find(meta1.first)->second);
EXPECT_EQ(meta2.second, server_trailing_metadata.find(meta2.first)->second);
@ -580,7 +580,7 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) {
response_reader->Finish(&recv_response, &recv_status, tag(6));
client_ok(6);
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
EXPECT_EQ(meta5.second, server_trailing_metadata.find(meta5.first)->second);
EXPECT_EQ(meta6.second, server_trailing_metadata.find(meta6.first)->second);

@ -119,7 +119,7 @@ TEST_F(CrashTest, KillAfterWrite) {
EXPECT_FALSE(stream->Read(&response));
EXPECT_FALSE(stream->Finish().IsOk());
EXPECT_FALSE(stream->Finish().ok());
}
TEST_F(CrashTest, KillBeforeWrite) {
@ -142,7 +142,7 @@ TEST_F(CrashTest, KillBeforeWrite) {
EXPECT_FALSE(stream->Write(request));
EXPECT_FALSE(stream->Read(&response));
EXPECT_FALSE(stream->Finish().IsOk());
EXPECT_FALSE(stream->Finish().ok());
}
} // namespace
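
The C++ test edits above and below track a rename of the Status accessors: IsOk() becomes ok(), code() becomes error_code(), details() becomes error_message(), and Status::Cancelled becomes Status::CANCELLED. A short sketch of the new spellings (the helper is illustrative, not part of the change):

#include <cstdio>
#include <grpc++/status.h>

// Sketch: the renamed accessors on grpc::Status.
void log_if_failed(const grpc::Status& s) {
  if (!s.ok()) {
    std::fprintf(stderr, "rpc failed: %d: %s\n",
                 static_cast<int>(s.error_code()), s.error_message().c_str());
  }
}
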

@ -101,13 +101,13 @@ class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service {
gpr_now(),
gpr_time_from_micros(request->param().client_cancel_after_us())));
}
return Status::Cancelled;
return Status::CANCELLED;
} else if (request->has_param() &&
request->param().server_cancel_after_us()) {
gpr_sleep_until(gpr_time_add(
gpr_now(),
gpr_time_from_micros(request->param().server_cancel_after_us())));
return Status::Cancelled;
return Status::CANCELLED;
} else {
EXPECT_FALSE(context->IsCancelled());
}
@ -232,7 +232,7 @@ static void SendRpc(grpc::cpp::test::util::TestService::Stub* stub,
ClientContext context;
Status s = stub->Echo(&context, request, &response);
EXPECT_EQ(response.message(), request.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
}
@ -265,7 +265,7 @@ TEST_F(End2endTest, RpcDeadlineExpires) {
std::chrono::system_clock::now() + std::chrono::microseconds(10);
context.set_deadline(deadline);
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(StatusCode::DEADLINE_EXCEEDED, s.code());
EXPECT_EQ(StatusCode::DEADLINE_EXCEEDED, s.error_code());
}
// Set a long but finite deadline.
@ -281,7 +281,7 @@ TEST_F(End2endTest, RpcLongDeadline) {
context.set_deadline(deadline);
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(response.message(), request.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
// Ask server to echo back the deadline it sees.
@ -298,7 +298,7 @@ TEST_F(End2endTest, EchoDeadline) {
context.set_deadline(deadline);
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(response.message(), request.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
gpr_timespec sent_deadline;
Timepoint2Timespec(deadline, &sent_deadline);
// Allow 1 second error.
@ -317,7 +317,7 @@ TEST_F(End2endTest, EchoDeadlineForNoDeadlineRpc) {
ClientContext context;
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(response.message(), request.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
EXPECT_EQ(response.param().request_deadline(), gpr_inf_future.tv_sec);
}
@ -329,9 +329,9 @@ TEST_F(End2endTest, UnimplementedRpc) {
ClientContext context;
Status s = stub_->Unimplemented(&context, request, &response);
EXPECT_FALSE(s.IsOk());
EXPECT_EQ(s.code(), grpc::StatusCode::UNIMPLEMENTED);
EXPECT_EQ(s.details(), "");
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.error_code(), grpc::StatusCode::UNIMPLEMENTED);
EXPECT_EQ(s.error_message(), "");
EXPECT_EQ(response.message(), "");
}
@ -347,7 +347,7 @@ TEST_F(End2endTest, RequestStreamOneRequest) {
stream->WritesDone();
Status s = stream->Finish();
EXPECT_EQ(response.message(), request.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
TEST_F(End2endTest, RequestStreamTwoRequests) {
@ -363,7 +363,7 @@ TEST_F(End2endTest, RequestStreamTwoRequests) {
stream->WritesDone();
Status s = stream->Finish();
EXPECT_EQ(response.message(), "hellohello");
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
TEST_F(End2endTest, ResponseStream) {
@ -383,7 +383,7 @@ TEST_F(End2endTest, ResponseStream) {
EXPECT_FALSE(stream->Read(&response));
Status s = stream->Finish();
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
TEST_F(End2endTest, BidiStream) {
@ -414,7 +414,7 @@ TEST_F(End2endTest, BidiStream) {
EXPECT_FALSE(stream->Read(&response));
Status s = stream->Finish();
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
// Talk to the two services with the same name but different package names.
@ -433,7 +433,7 @@ TEST_F(End2endTest, DiffPackageServices) {
ClientContext context;
Status s = stub->Echo(&context, request, &response);
EXPECT_EQ(response.message(), request.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
std::unique_ptr<grpc::cpp::test::util::duplicate::TestService::Stub>
dup_pkg_stub(
@ -441,7 +441,7 @@ TEST_F(End2endTest, DiffPackageServices) {
ClientContext context2;
s = dup_pkg_stub->Echo(&context2, request, &response);
EXPECT_EQ("no package", response.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
// rpc and stream should fail on bad credentials.
@ -459,16 +459,16 @@ TEST_F(End2endTest, BadCredentials) {
Status s = stub->Echo(&context, request, &response);
EXPECT_EQ("", response.message());
EXPECT_FALSE(s.IsOk());
EXPECT_EQ(StatusCode::UNKNOWN, s.code());
EXPECT_EQ("Rpc sent on a lame channel.", s.details());
EXPECT_FALSE(s.ok());
EXPECT_EQ(StatusCode::UNKNOWN, s.error_code());
EXPECT_EQ("Rpc sent on a lame channel.", s.error_message());
ClientContext context2;
auto stream = stub->BidiStream(&context2);
s = stream->Finish();
EXPECT_FALSE(s.IsOk());
EXPECT_EQ(StatusCode::UNKNOWN, s.code());
EXPECT_EQ("Rpc sent on a lame channel.", s.details());
EXPECT_FALSE(s.ok());
EXPECT_EQ(StatusCode::UNKNOWN, s.error_code());
EXPECT_EQ("Rpc sent on a lame channel.", s.error_message());
}
void CancelRpc(ClientContext* context, int delay_us, TestServiceImpl* service) {
@ -491,8 +491,8 @@ TEST_F(End2endTest, ClientCancelsRpc) {
std::thread cancel_thread(CancelRpc, &context, kCancelDelayUs, &service_);
Status s = stub_->Echo(&context, request, &response);
cancel_thread.join();
EXPECT_EQ(StatusCode::CANCELLED, s.code());
EXPECT_EQ(s.details(), "Cancelled");
EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
EXPECT_EQ(s.error_message(), "Cancelled");
}
// Server cancels rpc after 1ms
@ -505,8 +505,8 @@ TEST_F(End2endTest, ServerCancelsRpc) {
ClientContext context;
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(StatusCode::CANCELLED, s.code());
EXPECT_TRUE(s.details().empty());
EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
EXPECT_TRUE(s.error_message().empty());
}
// Client cancels request stream after sending two messages
@ -524,7 +524,7 @@ TEST_F(End2endTest, ClientCancelsRequestStream) {
context.TryCancel();
Status s = stream->Finish();
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.code());
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
EXPECT_EQ(response.message(), "");
}
@ -558,7 +558,7 @@ TEST_F(End2endTest, ClientCancelsResponseStream) {
Status s = stream->Finish();
// The final status could be either of CANCELLED or OK depending on
// who won the race.
EXPECT_GE(grpc::StatusCode::CANCELLED, s.code());
EXPECT_GE(grpc::StatusCode::CANCELLED, s.error_code());
}
// Client cancels bidi stream after sending some messages
@ -591,7 +591,7 @@ TEST_F(End2endTest, ClientCancelsBidi) {
}
Status s = stream->Finish();
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.code());
EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
}
TEST_F(End2endTest, RpcMaxMessageSize) {
@ -602,7 +602,7 @@ TEST_F(End2endTest, RpcMaxMessageSize) {
ClientContext context;
Status s = stub_->Echo(&context, request, &response);
EXPECT_FALSE(s.IsOk());
EXPECT_FALSE(s.ok());
}
bool MetadataContains(const std::multimap<grpc::string, grpc::string>& metadata,
@ -632,7 +632,7 @@ TEST_F(End2endTest, SetPerCallCredentials) {
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(request.message(), response.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
"fake_token"));
@ -652,8 +652,8 @@ TEST_F(End2endTest, InsecurePerCallCredentials) {
request.mutable_param()->set_echo_metadata(true);
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(StatusCode::CANCELLED, s.code());
EXPECT_EQ("Failed to set credentials to rpc.", s.details());
EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
EXPECT_EQ("Failed to set credentials to rpc.", s.error_message());
}
TEST_F(End2endTest, OverridePerCallCredentials) {
@ -684,7 +684,7 @@ TEST_F(End2endTest, OverridePerCallCredentials) {
GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
"fake_selector1"));
EXPECT_EQ(request.message(), response.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
} // namespace testing

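Note: besides the ok() rename, the end2end hunks above move the accessors code() and details() to error_code() and error_message(), and the Status::Cancelled constant is now spelled Status::CANCELLED. A small sketch of both sides written against the new names (the function names here are hypothetical and only illustrate the renamed API):

    #include <cstdio>
    #include <grpc++/status.h>

    // Client side: report a failed RPC with the renamed accessors.
    void LogFailure(const grpc::Status& s) {
      if (!s.ok()) {
        std::fprintf(stderr, "rpc failed: code=%d message=%s\n",
                     static_cast<int>(s.error_code()),   // was s.code()
                     s.error_message().c_str());         // was s.details()
      }
    }

    // Server side: a handler that cancels now returns the renamed constant.
    grpc::Status CancelHandler() {
      return grpc::Status::CANCELLED;  // was grpc::Status::Cancelled
    }
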
@ -190,7 +190,7 @@ class GenericEnd2endTest : public ::testing::Test {
client_ok(9);
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
}
}
@ -273,7 +273,7 @@ TEST_F(GenericEnd2endTest, SimpleBidiStreaming) {
client_ok(10);
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.IsOk());
EXPECT_TRUE(recv_status.ok());
}
} // namespace

@ -168,7 +168,7 @@ class FakeClient {
request.set_message("hello world");
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(request.message(), response.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
void DoBidiStream() {
@ -199,7 +199,7 @@ class FakeClient {
EXPECT_FALSE(stream->Read(&response));
Status s = stream->Finish();
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
void ResetStub(TestService::StubInterface* stub) { stub_ = stub; }

@ -99,13 +99,13 @@ class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service {
gpr_now(),
gpr_time_from_micros(request->param().client_cancel_after_us())));
}
return Status::Cancelled;
return Status::CANCELLED;
} else if (request->has_param() &&
request->param().server_cancel_after_us()) {
gpr_sleep_until(gpr_time_add(
gpr_now(),
gpr_time_from_micros(request->param().server_cancel_after_us())));
return Status::Cancelled;
return Status::CANCELLED;
} else {
EXPECT_FALSE(context->IsCancelled());
}
@ -219,7 +219,7 @@ static void SendRpc(grpc::cpp::test::util::TestService::Stub* stub,
ClientContext context;
Status s = stub->Echo(&context, request, &response);
EXPECT_EQ(response.message(), request.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
}
}

@ -65,11 +65,11 @@ InteropClient::InteropClient(std::shared_ptr<ChannelInterface> channel)
: channel_(channel) {}
void InteropClient::AssertOkOrPrintErrorStatus(const Status& s) {
if (s.IsOk()) {
if (s.ok()) {
return;
}
gpr_log(GPR_INFO, "Error status code: %d, message: %s", s.code(),
s.details().c_str());
gpr_log(GPR_INFO, "Error status code: %d, message: %s", s.error_code(),
s.error_message().c_str());
GPR_ASSERT(0);
}
@ -321,7 +321,7 @@ void InteropClient::DoCancelAfterBegin() {
gpr_log(GPR_INFO, "Trying to cancel...");
context.TryCancel();
Status s = stream->Finish();
GPR_ASSERT(s.code() == StatusCode::CANCELLED);
GPR_ASSERT(s.error_code() == StatusCode::CANCELLED);
gpr_log(GPR_INFO, "Canceling streaming done.");
}

@ -103,7 +103,7 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient {
grpc::Status s =
stub->UnaryCall(&context, request_, &responses_[thread_idx]);
histogram->Add((Timer::Now() - start) * 1e9);
return s.IsOk();
return s.ok();
}
};
@ -124,7 +124,7 @@ class SynchronousStreamingClient GRPC_FINAL : public SynchronousClient {
for (auto stream = stream_.begin(); stream != stream_.end(); stream++) {
if (*stream) {
(*stream)->WritesDone();
EXPECT_TRUE((*stream)->Finish().IsOk());
EXPECT_TRUE((*stream)->Finish().ok());
}
}
}

@ -241,11 +241,11 @@ std::unique_ptr<ScenarioResult> RunScenario(
for (auto client = clients.begin(); client != clients.end(); client++) {
GPR_ASSERT(client->stream->WritesDone());
GPR_ASSERT(client->stream->Finish().IsOk());
GPR_ASSERT(client->stream->Finish().ok());
}
for (auto server = servers.begin(); server != servers.end(); server++) {
GPR_ASSERT(server->stream->WritesDone());
GPR_ASSERT(server->stream->Finish().IsOk());
GPR_ASSERT(server->stream->Finish().ok());
}
return result;
}

@ -100,7 +100,7 @@ class WorkerImpl GRPC_FINAL : public Worker::Service {
GRPC_OVERRIDE {
InstanceGuard g(this);
if (!g.Acquired()) {
return Status(RESOURCE_EXHAUSTED);
return Status(StatusCode::RESOURCE_EXHAUSTED, "");
}
grpc_profiler_start("qps_client.prof");
@ -114,7 +114,7 @@ class WorkerImpl GRPC_FINAL : public Worker::Service {
GRPC_OVERRIDE {
InstanceGuard g(this);
if (!g.Acquired()) {
return Status(RESOURCE_EXHAUSTED);
return Status(StatusCode::RESOURCE_EXHAUSTED, "");
}
grpc_profiler_start("qps_server.prof");
@ -159,22 +159,22 @@ class WorkerImpl GRPC_FINAL : public Worker::Service {
ServerReaderWriter<ClientStatus, ClientArgs>* stream) {
ClientArgs args;
if (!stream->Read(&args)) {
return Status(INVALID_ARGUMENT);
return Status(StatusCode::INVALID_ARGUMENT, "");
}
if (!args.has_setup()) {
return Status(INVALID_ARGUMENT);
return Status(StatusCode::INVALID_ARGUMENT, "");
}
auto client = CreateClient(args.setup());
if (!client) {
return Status(INVALID_ARGUMENT);
return Status(StatusCode::INVALID_ARGUMENT, "");
}
ClientStatus status;
if (!stream->Write(status)) {
return Status(UNKNOWN);
return Status(StatusCode::UNKNOWN, "");
}
while (stream->Read(&args)) {
if (!args.has_mark()) {
return Status(INVALID_ARGUMENT);
return Status(StatusCode::INVALID_ARGUMENT, "");
}
*status.mutable_stats() = client->Mark();
stream->Write(status);
@ -187,23 +187,23 @@ class WorkerImpl GRPC_FINAL : public Worker::Service {
ServerReaderWriter<ServerStatus, ServerArgs>* stream) {
ServerArgs args;
if (!stream->Read(&args)) {
return Status(INVALID_ARGUMENT);
return Status(StatusCode::INVALID_ARGUMENT, "");
}
if (!args.has_setup()) {
return Status(INVALID_ARGUMENT);
return Status(StatusCode::INVALID_ARGUMENT, "");
}
auto server = CreateServer(args.setup(), server_port_);
if (!server) {
return Status(INVALID_ARGUMENT);
return Status(StatusCode::INVALID_ARGUMENT, "");
}
ServerStatus status;
status.set_port(server_port_);
if (!stream->Write(status)) {
return Status(UNKNOWN);
return Status(StatusCode::UNKNOWN, "");
}
while (stream->Read(&args)) {
if (!args.has_mark()) {
return Status(INVALID_ARGUMENT);
return Status(StatusCode::INVALID_ARGUMENT, "");
}
*status.mutable_stats() = server->Mark();
stream->Write(status);

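Note: the qps_worker hunks above also reflect a change to Status construction: a bare status code such as Status(INVALID_ARGUMENT) is replaced by the two-argument form taking a StatusCode and a message, e.g. Status(StatusCode::INVALID_ARGUMENT, ""). A sketch of a handler using the new form (the validation helper is hypothetical):

    #include <grpc++/status.h>

    using grpc::Status;
    using grpc::StatusCode;

    // Hypothetical validation step showing the two-argument constructor.
    Status ValidateArgs(bool has_setup) {
      if (!has_setup) {
        // A status code alone is no longer enough; a message (possibly empty)
        // is supplied explicitly.
        return Status(StatusCode::INVALID_ARGUMENT, "missing setup");
      }
      return Status::OK;
    }
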
@ -86,7 +86,7 @@ void CliCall::Call(std::shared_ptr<grpc::ChannelInterface> channel,
cq.Next(&got_tag, &ok);
GPR_ASSERT(ok);
if (status.IsOk()) {
if (status.ok()) {
std::cout << "RPC finished with OK status." << std::endl;
std::vector<grpc::Slice> slices;
recv_buffer.Dump(&slices);
@ -97,8 +97,8 @@ void CliCall::Call(std::shared_ptr<grpc::ChannelInterface> channel,
slices[i].size());
}
} else {
std::cout << "RPC finished with status code " << status.code()
<< " details: " << status.details() << std::endl;
std::cout << "RPC finished with status code " << status.error_code()
<< " details: " << status.error_message() << std::endl;
}
}

@ -108,7 +108,7 @@ TEST_F(CliCallTest, SimpleRpc) {
ClientContext context;
Status s = stub_->Echo(&context, request, &response);
EXPECT_EQ(response.message(), request.message());
EXPECT_TRUE(s.IsOk());
EXPECT_TRUE(s.ok());
const grpc::string kMethod("/grpc.cpp.test.util.TestService/Echo");
grpc::string request_bin, response_bin, expected_response_bin;

@ -115,5 +115,19 @@ RUN apt-get update && apt-get install -y \
# Install Python packages from PyPI
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
##################
# PHP dependencies
# Install dependencies
RUN /bin/bash -l -c "echo 'deb http://packages.dotdeb.org wheezy-php55 all' \
>> /etc/apt/sources.list.d/dotdeb.list"
RUN /bin/bash -l -c "echo 'deb-src http://packages.dotdeb.org wheezy-php55 all' \
>> /etc/apt/sources.list.d/dotdeb.list"
RUN wget http://www.dotdeb.org/dotdeb.gpg -O- | apt-key add -
RUN apt-get update && apt-get install -y \
git php5 php5-dev phpunit unzip
# Define the default command.
CMD ["bash"]

@ -68,6 +68,7 @@ then
else
echo "Docker exited with failure, keeping container $DOCKER_CID."
echo "You can SSH to the worker and use 'docker start CID' and 'docker exec -i -t CID bash' to debug the problem."
exit 1
fi
elif [ "$platform" == "windows" ]

@ -38,5 +38,5 @@ rm -rf python2.7_virtual_environment
virtualenv -p /usr/bin/python2.7 python2.7_virtual_environment
source python2.7_virtual_environment/bin/activate
pip install -r src/python/requirements.txt
CFLAGS="-I$root/include -std=c89" LDFLAGS=-L$root/libs/$CONFIG pip install src/python/src
CFLAGS="-I$root/include -std=c89 -Werror" LDFLAGS=-L$root/libs/$CONFIG pip install src/python/src
pip install src/python/interop

@ -123,7 +123,7 @@ class CLanguage(object):
if travis and target['flaky']:
continue
if self.platform == 'windows':
binary = 'vsprojects\\test_bin\\%s.exe' % (target['name'])
binary = 'vsprojects/test_bin/%s.exe' % (target['name'])
else:
binary = 'bins/%s/%s' % (config.build_config, target['name'])
out.append(config.job_spec([binary], [binary]))

@ -353,7 +353,6 @@
"language": "c",
"name": "httpcli_test",
"platforms": [
"windows",
"posix"
]
},
@ -920,6 +919,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_fake_security_request_with_flags_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -1181,6 +1189,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_fullstack_request_with_flags_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -1419,6 +1436,14 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_fullstack_uds_posix_request_with_flags_test",
"platforms": [
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -1651,6 +1676,14 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_fullstack_with_poll_request_with_flags_test",
"platforms": [
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -1906,6 +1939,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_simple_ssl_fullstack_request_with_flags_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -2144,6 +2186,14 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test",
"platforms": [
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -2399,6 +2449,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -2660,6 +2719,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_socket_pair_request_with_flags_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -2921,6 +2989,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -3182,6 +3259,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_socket_pair_with_grpc_trace_request_with_flags_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -3434,6 +3520,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_fullstack_request_with_flags_unsecure_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -3664,6 +3759,14 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_fullstack_uds_posix_request_with_flags_unsecure_test",
"platforms": [
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -3888,6 +3991,14 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_fullstack_with_poll_request_with_flags_unsecure_test",
"platforms": [
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -4134,6 +4245,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_socket_pair_request_with_flags_unsecure_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -4386,6 +4506,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -4638,6 +4767,15 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
