Merge branch 'batch-metadata' of github.com:ctiller/grpc into one-pass

pull/1369/head
Craig Tiller 10 years ago
commit 7982da73ab
56 changed files (changed line count in parentheses):

1. BUILD (4)
2. Makefile (84)
3. build.json (31)
4. include/grpc/grpc.h (9)
5. src/core/channel/call_op_string.c (51)
6. src/core/channel/census_filter.c (24)
7. src/core/channel/channel_stack.c (57)
8. src/core/channel/channel_stack.h (31)
9. src/core/channel/client_channel.c (99)
10. src/core/channel/connected_channel.c (77)
11. src/core/channel/http_client_filter.c (77)
12. src/core/channel/http_server_filter.c (237)
13. src/core/channel/metadata_buffer.c (149)
14. src/core/channel/metadata_buffer.h (70)
15. src/core/security/auth.c (61)
16. src/core/surface/call.c (225)
17. src/core/surface/call.h (25)
18. src/core/surface/channel.c (52)
19. src/core/surface/client.c (28)
20. src/core/surface/lame_client.c (42)
21. src/core/surface/server.c (40)
22. src/core/transport/chttp2/stream_encoder.c (42)
23. src/core/transport/chttp2_transport.c (106)
24. src/core/transport/metadata.c (2)
25. src/core/transport/stream_op.c (198)
26. src/core/transport/stream_op.h (54)
27. src/python/src/grpc/_adapter/_low_test.py (5)
28. templates/Makefile.template (6)
29. test/core/channel/metadata_buffer_test.c (201)
30. test/core/end2end/gen_build_json.py (1)
31. test/core/end2end/tests/cancel_before_invoke_legacy.c (4)
32. test/core/end2end/tests/cancel_test_helpers.h (2)
33. test/core/end2end/tests/request_response_with_binary_metadata_and_payload.c (19)
34. test/core/end2end/tests/request_response_with_binary_metadata_and_payload_legacy.c (20)
35. test/core/end2end/tests/request_response_with_metadata_and_payload.c (6)
36. test/core/end2end/tests/request_response_with_metadata_and_payload_legacy.c (8)
37. test/core/end2end/tests/request_response_with_trailing_metadata_and_payload_legacy.c (12)
38. test/core/surface/lame_client_test.c (2)
39. test/core/transport/chttp2/stream_encoder_test.c (80)
40. test/core/transport/chttp2_transport_end2end_test.c (119)
41. test/core/transport/transport_end2end_tests.c (931)
42. test/core/transport/transport_end2end_tests.h (68)
43. test/cpp/end2end/end2end_test.cc (2)
44. tools/run_tests/tests.json (46)
45. vsprojects/vs2010/Grpc.mak (20)
46. vsprojects/vs2010/grpc.vcxproj (3)
47. vsprojects/vs2010/grpc.vcxproj.filters (6)
48. vsprojects/vs2010/grpc_test_util.vcxproj (2)
49. vsprojects/vs2010/grpc_unsecure.vcxproj (3)
50. vsprojects/vs2010/grpc_unsecure.vcxproj.filters (6)
51. vsprojects/vs2013/Grpc.mak (18)
52. vsprojects/vs2013/grpc.vcxproj (3)
53. vsprojects/vs2013/grpc.vcxproj.filters (6)
54. vsprojects/vs2013/grpc_test_util.vcxproj (2)
55. vsprojects/vs2013/grpc_unsecure.vcxproj (3)
56. vsprojects/vs2013/grpc_unsecure.vcxproj.filters (6)

@ -147,7 +147,6 @@ cc_library(
"src/core/channel/http_client_filter.h",
"src/core/channel/http_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/metadata_buffer.h",
"src/core/channel/noop_filter.h",
"src/core/compression/algorithm.h",
"src/core/compression/message_compress.h",
@ -257,7 +256,6 @@ cc_library(
"src/core/channel/http_client_filter.c",
"src/core/channel/http_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/metadata_buffer.c",
"src/core/channel/noop_filter.c",
"src/core/compression/algorithm.c",
"src/core/compression/message_compress.c",
@ -377,7 +375,6 @@ cc_library(
"src/core/channel/http_client_filter.h",
"src/core/channel/http_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/metadata_buffer.h",
"src/core/channel/noop_filter.h",
"src/core/compression/algorithm.h",
"src/core/compression/message_compress.h",
@ -468,7 +465,6 @@ cc_library(
"src/core/channel/http_client_filter.c",
"src/core/channel/http_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/metadata_buffer.c",
"src/core/channel/noop_filter.c",
"src/core/compression/algorithm.c",
"src/core/compression/message_compress.c",

File diff suppressed because one or more lines are too long

@ -98,7 +98,6 @@
"src/core/channel/http_client_filter.h",
"src/core/channel/http_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/metadata_buffer.h",
"src/core/channel/noop_filter.h",
"src/core/compression/algorithm.h",
"src/core/compression/message_compress.h",
@ -190,7 +189,6 @@
"src/core/channel/http_client_filter.c",
"src/core/channel/http_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/metadata_buffer.c",
"src/core/channel/noop_filter.c",
"src/core/compression/algorithm.c",
"src/core/compression/message_compress.c",
@ -443,7 +441,6 @@
"test/core/end2end/data/test_root_cert.c",
"test/core/iomgr/endpoint_tests.c",
"test/core/statistics/census_log_tests.c",
"test/core/transport/transport_end2end_tests.c",
"test/core/util/grpc_profiler.c",
"test/core/util/parse_hexstring.c",
"test/core/util/port_posix.c",
@ -938,20 +935,6 @@
"gpr"
]
},
{
"name": "chttp2_transport_end2end_test",
"build": "test",
"language": "c",
"src": [
"test/core/transport/chttp2_transport_end2end_test.c"
],
"deps": [
"grpc_test_util",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "dualstack_socket_test",
"build": "test",
@ -1586,20 +1569,6 @@
"gpr"
]
},
{
"name": "metadata_buffer_test",
"build": "test",
"language": "c",
"src": [
"test/core/channel/metadata_buffer_test.c"
],
"deps": [
"grpc_test_util",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "multi_init_test",
"build": "test",

@ -186,6 +186,13 @@ typedef struct grpc_metadata {
const char *key;
const char *value;
size_t value_length;
/* The following fields are reserved for grpc internal use.
There is no need to initialize them, and they will be set to garbage during
calls to grpc. */
struct {
void *obfuscated[3];
} internal_data;
} grpc_metadata;
typedef enum grpc_completion_type {
@ -295,7 +302,7 @@ typedef struct grpc_op {
union {
struct {
size_t count;
const grpc_metadata *metadata;
grpc_metadata *metadata;
} send_initial_metadata;
grpc_byte_buffer *send_message;
struct {
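
The grpc.h hunk above is the only public-API change in this batch: grpc_metadata gains a reserved internal_data block (presumably so the library can link elements into a metadata batch without extra allocations), and send_initial_metadata.metadata loses its const qualifier because grpc now writes into those reserved fields. A minimal sketch of how an application still populates the struct, using made-up header names; only key, value and value_length are assigned:

#include <string.h>

#include <grpc/grpc.h>

/* Illustrative only: populate an initial-metadata array the way an
   application hands it to send_initial_metadata above. Per the new comment
   in grpc.h, only key, value and value_length are assigned; internal_data
   is reserved for grpc and is deliberately left untouched. The header names
   here are made up. */
static void make_example_initial_metadata(grpc_metadata md[2]) {
  md[0].key = "x-example-client";
  md[0].value = "demo";
  md[0].value_length = strlen(md[0].value);

  md[1].key = "x-example-trace-id";
  md[1].value = "12345";
  md[1].value_length = strlen(md[1].value);
}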

@ -43,12 +43,27 @@
static void put_metadata(gpr_strvec *b, grpc_mdelem *md) {
gpr_strvec_add(b, gpr_strdup(" key="));
gpr_strvec_add(b, gpr_hexdump((char *)GPR_SLICE_START_PTR(md->key->slice),
GPR_SLICE_LENGTH(md->key->slice), GPR_HEXDUMP_PLAINTEXT));
gpr_strvec_add(
b, gpr_hexdump((char *)GPR_SLICE_START_PTR(md->key->slice),
GPR_SLICE_LENGTH(md->key->slice), GPR_HEXDUMP_PLAINTEXT));
gpr_strvec_add(b, gpr_strdup(" value="));
gpr_strvec_add(b, gpr_hexdump((char *)GPR_SLICE_START_PTR(md->value->slice),
GPR_SLICE_LENGTH(md->value->slice), GPR_HEXDUMP_PLAINTEXT));
GPR_SLICE_LENGTH(md->value->slice),
GPR_HEXDUMP_PLAINTEXT));
}
static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
grpc_linked_mdelem *m;
for (m = md.list.head; m != NULL; m = m->next) {
put_metadata(b, m->md);
}
if (gpr_time_cmp(md.deadline, gpr_inf_future) != 0) {
char *tmp;
gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec,
md.deadline.tv_nsec);
gpr_strvec_add(b, tmp);
}
}
char *grpc_call_op_string(grpc_call_op *op) {
@ -69,16 +84,7 @@ char *grpc_call_op_string(grpc_call_op *op) {
switch (op->type) {
case GRPC_SEND_METADATA:
gpr_strvec_add(&b, gpr_strdup("SEND_METADATA"));
put_metadata(&b, op->data.metadata);
break;
case GRPC_SEND_DEADLINE:
gpr_asprintf(&tmp, "SEND_DEADLINE %d.%09d", op->data.deadline.tv_sec,
op->data.deadline.tv_nsec);
gpr_strvec_add(&b, tmp);
break;
case GRPC_SEND_START:
gpr_asprintf(&tmp, "SEND_START pollset=%p", op->data.start.pollset);
gpr_strvec_add(&b, tmp);
put_metadata_list(&b, op->data.metadata);
break;
case GRPC_SEND_MESSAGE:
gpr_strvec_add(&b, gpr_strdup("SEND_MESSAGE"));
@ -94,15 +100,7 @@ char *grpc_call_op_string(grpc_call_op *op) {
break;
case GRPC_RECV_METADATA:
gpr_strvec_add(&b, gpr_strdup("RECV_METADATA"));
put_metadata(&b, op->data.metadata);
break;
case GRPC_RECV_DEADLINE:
gpr_asprintf(&tmp, "RECV_DEADLINE %d.%09d", op->data.deadline.tv_sec,
op->data.deadline.tv_nsec);
gpr_strvec_add(&b, tmp);
break;
case GRPC_RECV_END_OF_INITIAL_METADATA:
gpr_strvec_add(&b, gpr_strdup("RECV_END_OF_INITIAL_METADATA"));
put_metadata_list(&b, op->data.metadata);
break;
case GRPC_RECV_MESSAGE:
gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
@ -113,12 +111,21 @@ char *grpc_call_op_string(grpc_call_op *op) {
case GRPC_RECV_FINISH:
gpr_strvec_add(&b, gpr_strdup("RECV_FINISH"));
break;
case GRPC_RECV_SYNTHETIC_STATUS:
gpr_asprintf(&tmp, "RECV_SYNTHETIC_STATUS status=%d message='%s'",
op->data.synthetic_status.status,
op->data.synthetic_status.message);
gpr_strvec_add(&b, tmp);
break;
case GRPC_CANCEL_OP:
gpr_strvec_add(&b, gpr_strdup("CANCEL_OP"));
break;
}
gpr_asprintf(&tmp, " flags=0x%08x", op->flags);
gpr_strvec_add(&b, tmp);
if (op->bind_pollset) {
gpr_strvec_add(&b, gpr_strdup("bind_pollset"));
}
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);

@ -62,11 +62,13 @@ static void init_rpc_stats(census_rpc_stats* stats) {
static void extract_and_annotate_method_tag(grpc_call_op* op, call_data* calld,
channel_data* chand) {
if (op->data.metadata->key == chand->path_str) {
gpr_log(GPR_DEBUG,
(const char*)GPR_SLICE_START_PTR(op->data.metadata->value->slice));
census_add_method_tag(calld->op_id, (const char*)GPR_SLICE_START_PTR(
op->data.metadata->value->slice));
grpc_linked_mdelem* m;
for (m = op->data.metadata.list.head; m != NULL; m = m->next) {
if (m->md->key == chand->path_str) {
gpr_log(GPR_DEBUG, "%s", (const char*)GPR_SLICE_START_PTR(m->md->value->slice));
census_add_method_tag(
calld->op_id, (const char*)GPR_SLICE_START_PTR(m->md->value->slice));
}
}
}
@ -178,11 +180,11 @@ static void destroy_channel_elem(grpc_channel_element* elem) {
}
const grpc_channel_filter grpc_client_census_filter = {
client_call_op, channel_op, sizeof(call_data),
client_init_call_elem, client_destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "census-client"};
client_call_op, channel_op, sizeof(call_data), client_init_call_elem,
client_destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, "census-client"};
const grpc_channel_filter grpc_server_census_filter = {
server_call_op, channel_op, sizeof(call_data),
server_init_call_elem, server_destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "census-server"};
server_call_op, channel_op, sizeof(call_data), server_init_call_elem,
server_destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, "census-server"};

@ -77,9 +77,9 @@ size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
return size;
}
#define CHANNEL_ELEMS_FROM_STACK(stk) \
((grpc_channel_element *)( \
(char *)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack))))
#define CHANNEL_ELEMS_FROM_STACK(stk) \
((grpc_channel_element *)((char *)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE( \
sizeof(grpc_channel_stack))))
#define CALL_ELEMS_FROM_STACK(stk) \
((grpc_call_element *)((char *)(stk) + \
@ -183,6 +183,9 @@ void grpc_call_stack_destroy(grpc_call_stack *stack) {
void grpc_call_next_op(grpc_call_element *elem, grpc_call_op *op) {
grpc_call_element *next_elem = elem + op->dir;
if (op->type == GRPC_SEND_METADATA || op->type == GRPC_RECV_METADATA) {
grpc_metadata_batch_assert_ok(&op->data.metadata);
}
next_elem->filter->call_op(next_elem, elem, op);
}
@ -193,42 +196,17 @@ void grpc_channel_next_op(grpc_channel_element *elem, grpc_channel_op *op) {
grpc_channel_stack *grpc_channel_stack_from_top_element(
grpc_channel_element *elem) {
return (grpc_channel_stack *)((char *)(elem) -
ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_channel_stack)));
return (grpc_channel_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_channel_stack)));
}
grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
return (grpc_call_stack *)((char *)(elem) - ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_call_stack)));
return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_call_stack)));
}
static void do_nothing(void *user_data, grpc_op_error error) {}
void grpc_call_element_recv_metadata(grpc_call_element *cur_elem,
grpc_mdelem *mdelem) {
grpc_call_op metadata_op;
metadata_op.type = GRPC_RECV_METADATA;
metadata_op.dir = GRPC_CALL_UP;
metadata_op.done_cb = do_nothing;
metadata_op.user_data = NULL;
metadata_op.flags = 0;
metadata_op.data.metadata = mdelem;
grpc_call_next_op(cur_elem, &metadata_op);
}
void grpc_call_element_send_metadata(grpc_call_element *cur_elem,
grpc_mdelem *mdelem) {
grpc_call_op metadata_op;
metadata_op.type = GRPC_SEND_METADATA;
metadata_op.dir = GRPC_CALL_DOWN;
metadata_op.done_cb = do_nothing;
metadata_op.user_data = NULL;
metadata_op.flags = 0;
metadata_op.data.metadata = mdelem;
grpc_call_next_op(cur_elem, &metadata_op);
}
void grpc_call_element_send_cancel(grpc_call_element *cur_elem) {
grpc_call_op cancel_op;
cancel_op.type = GRPC_CANCEL_OP;
@ -236,6 +214,7 @@ void grpc_call_element_send_cancel(grpc_call_element *cur_elem) {
cancel_op.done_cb = do_nothing;
cancel_op.user_data = NULL;
cancel_op.flags = 0;
cancel_op.bind_pollset = NULL;
grpc_call_next_op(cur_elem, &cancel_op);
}
@ -246,5 +225,19 @@ void grpc_call_element_send_finish(grpc_call_element *cur_elem) {
finish_op.done_cb = do_nothing;
finish_op.user_data = NULL;
finish_op.flags = 0;
finish_op.bind_pollset = NULL;
grpc_call_next_op(cur_elem, &finish_op);
}
void grpc_call_element_recv_status(grpc_call_element *cur_elem,
grpc_status_code status,
const char *message) {
grpc_call_op op;
op.type = GRPC_RECV_SYNTHETIC_STATUS;
op.dir = GRPC_CALL_UP;
op.done_cb = do_nothing;
op.user_data = NULL;
op.data.synthetic_status.status = status;
op.data.synthetic_status.message = message;
grpc_call_next_op(cur_elem, &op);
}

@ -62,10 +62,6 @@ typedef struct grpc_call_element grpc_call_element;
typedef enum {
/* send metadata to the channels peer */
GRPC_SEND_METADATA,
/* send a deadline */
GRPC_SEND_DEADLINE,
/* start a connection (corresponds to start_invoke/accept) */
GRPC_SEND_START,
/* send a message to the channels peer */
GRPC_SEND_MESSAGE,
/* send a pre-formatted message to the channels peer */
@ -76,16 +72,14 @@ typedef enum {
GRPC_REQUEST_DATA,
/* metadata was received from the channels peer */
GRPC_RECV_METADATA,
/* receive a deadline */
GRPC_RECV_DEADLINE,
/* the end of the first batch of metadata was received */
GRPC_RECV_END_OF_INITIAL_METADATA,
/* a message was received from the channels peer */
GRPC_RECV_MESSAGE,
/* half-close was received from the channels peer */
GRPC_RECV_HALF_CLOSE,
/* full close was received from the channels peer */
GRPC_RECV_FINISH,
/* a status has been synthesized locally */
GRPC_RECV_SYNTHETIC_STATUS,
/* the call has been abnormally terminated */
GRPC_CANCEL_OP
} grpc_call_op_type;
@ -109,14 +103,16 @@ typedef struct {
/* Argument data, matching up with grpc_call_op_type names */
union {
struct {
grpc_pollset *pollset;
} start;
grpc_byte_buffer *message;
grpc_mdelem *metadata;
gpr_timespec deadline;
grpc_metadata_batch metadata;
struct {
grpc_status_code status;
const char *message;
} synthetic_status;
} data;
grpc_pollset *bind_pollset;
/* Must be called when processing of this call-op is complete.
Signature chosen to match transport flow control callbacks */
void (*done_cb)(void *user_data, grpc_op_error error);
@ -291,16 +287,15 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_call_op *op);
void grpc_call_element_send_metadata(grpc_call_element *cur_elem,
grpc_mdelem *elem);
void grpc_call_element_recv_metadata(grpc_call_element *cur_elem,
grpc_mdelem *elem);
void grpc_call_element_send_cancel(grpc_call_element *cur_elem);
void grpc_call_element_send_finish(grpc_call_element *cur_elem);
void grpc_call_element_recv_status(grpc_call_element *cur_elem,
grpc_status_code status,
const char *message);
extern int grpc_trace_channel;
#define GRPC_CALL_LOG_OP(sev, elem, op) \
if (grpc_trace_channel) grpc_call_log_op(sev, elem, op)
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H */
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H */
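
Taken together, these channel_stack.h changes collapse the old per-element metadata, deadline and start ops into a single batch-carrying GRPC_SEND_METADATA/GRPC_RECV_METADATA op, plus an explicit bind_pollset field and a locally synthesized status op. A rough sketch (not part of this commit, and assuming only the types shown above) of how a pass-through filter sees metadata under the new shape:

#include "src/core/channel/channel_stack.h"

/* Sketch only: a pass-through filter under the new op shape. A single
   GRPC_SEND_METADATA / GRPC_RECV_METADATA op now carries a whole
   grpc_metadata_batch, which can be walked in place via list.head. */
static void passthrough_call_op(grpc_call_element *elem,
                                grpc_call_element *from_elem,
                                grpc_call_op *op) {
  grpc_linked_mdelem *l;
  (void)from_elem; /* not needed in this sketch */
  switch (op->type) {
    case GRPC_SEND_METADATA:
    case GRPC_RECV_METADATA:
      for (l = op->data.metadata.list.head; l != NULL; l = l->next) {
        /* inspect l->md->key / l->md->value here; the batch keeps ownership
           and there is no per-element done_cb any more */
      }
      grpc_call_next_op(elem, op);
      break;
    default:
      /* pass control up or down the stack depending on op->dir */
      grpc_call_next_op(elem, op);
      break;
  }
}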

@ -38,7 +38,6 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/child_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/metadata_buffer.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
@ -70,9 +69,6 @@ typedef struct {
int transport_setup_initiated;
grpc_channel_args *args;
/* metadata cache */
grpc_mdelem *cancel_status;
} channel_data;
typedef enum {
@ -86,20 +82,16 @@ struct call_data {
/* owning element */
grpc_call_element *elem;
gpr_uint8 got_first_send;
call_state state;
grpc_metadata_buffer pending_metadata;
gpr_timespec deadline;
union {
struct {
/* our child call stack */
grpc_child_call *child_call;
} active;
struct {
void (*on_complete)(void *user_data, grpc_op_error error);
void *on_complete_user_data;
gpr_uint32 start_flags;
grpc_pollset *pollset;
} waiting;
grpc_call_op waiting_op;
} s;
};
@ -127,20 +119,6 @@ static void complete_activate(grpc_call_element *elem, grpc_call_op *op) {
GPR_ASSERT(calld->state == CALL_ACTIVE);
/* sending buffered metadata down the stack before the start call */
grpc_metadata_buffer_flush(&calld->pending_metadata, child_elem);
if (gpr_time_cmp(calld->deadline, gpr_inf_future) != 0) {
grpc_call_op dop;
dop.type = GRPC_SEND_DEADLINE;
dop.dir = GRPC_CALL_DOWN;
dop.flags = 0;
dop.data.deadline = calld->deadline;
dop.done_cb = do_nothing;
dop.user_data = NULL;
child_elem->filter->call_op(child_elem, elem, &dop);
}
/* continue the start call down the stack, this needs to happen after metadata
are flushed*/
child_elem->filter->call_op(child_elem, elem, op);
@ -152,6 +130,7 @@ static void start_rpc(grpc_call_element *elem, grpc_call_op *op) {
gpr_mu_lock(&chand->mu);
if (calld->state == CALL_CANCELLED) {
gpr_mu_unlock(&chand->mu);
grpc_metadata_batch_destroy(&op->data.metadata);
op->done_cb(op->user_data, GRPC_OP_ERROR);
return;
}
@ -184,10 +163,7 @@ static void start_rpc(grpc_call_element *elem, grpc_call_op *op) {
gpr_realloc(chand->waiting_children,
chand->waiting_child_capacity * sizeof(call_data *));
}
calld->s.waiting.on_complete = op->done_cb;
calld->s.waiting.on_complete_user_data = op->user_data;
calld->s.waiting.start_flags = op->flags;
calld->s.waiting.pollset = op->data.start.pollset;
calld->s.waiting_op = *op;
chand->waiting_children[chand->waiting_child_count++] = calld;
gpr_mu_unlock(&chand->mu);
@ -212,15 +188,8 @@ static void remove_waiting_child(channel_data *chand, call_data *calld) {
static void send_up_cancelled_ops(grpc_call_element *elem) {
grpc_call_op finish_op;
channel_data *chand = elem->channel_data;
/* send up a synthesized status */
finish_op.type = GRPC_RECV_METADATA;
finish_op.dir = GRPC_CALL_UP;
finish_op.flags = 0;
finish_op.data.metadata = grpc_mdelem_ref(chand->cancel_status);
finish_op.done_cb = do_nothing;
finish_op.user_data = NULL;
grpc_call_next_op(elem, &finish_op);
grpc_call_element_recv_status(elem, GRPC_STATUS_CANCELLED, "Cancelled");
/* send up a finish */
finish_op.type = GRPC_RECV_FINISH;
finish_op.dir = GRPC_CALL_UP;
@ -243,12 +212,12 @@ static void cancel_rpc(grpc_call_element *elem, grpc_call_op *op) {
child_elem->filter->call_op(child_elem, elem, op);
return; /* early out */
case CALL_WAITING:
grpc_metadata_batch_destroy(&calld->s.waiting_op.data.metadata);
remove_waiting_child(chand, calld);
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&chand->mu);
send_up_cancelled_ops(elem);
calld->s.waiting.on_complete(calld->s.waiting.on_complete_user_data,
GRPC_OP_ERROR);
calld->s.waiting_op.done_cb(calld->s.waiting_op.user_data, GRPC_OP_ERROR);
return; /* early out */
case CALL_CREATED:
calld->state = CALL_CANCELLED;
@ -271,15 +240,13 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
switch (op->type) {
case GRPC_SEND_METADATA:
grpc_metadata_buffer_queue(&calld->pending_metadata, op);
break;
case GRPC_SEND_DEADLINE:
calld->deadline = op->data.deadline;
op->done_cb(op->user_data, GRPC_OP_OK);
break;
case GRPC_SEND_START:
/* filter out the start event to find which child to send on */
start_rpc(elem, op);
if (!calld->got_first_send) {
/* filter out the start event to find which child to send on */
calld->got_first_send = 1;
start_rpc(elem, op);
} else {
grpc_call_next_op(elem, op);
}
break;
case GRPC_CANCEL_OP:
cancel_rpc(elem, op);
@ -382,12 +349,6 @@ static void channel_op(grpc_channel_element *elem,
}
}
static void error_bad_on_complete(void *arg, grpc_op_error error) {
gpr_log(GPR_ERROR,
"Waiting finished but not started? Bad on_complete callback");
abort();
}
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
const void *server_transport_data) {
@ -398,23 +359,22 @@ static void init_call_elem(grpc_call_element *elem,
calld->elem = elem;
calld->state = CALL_CREATED;
calld->deadline = gpr_inf_future;
calld->s.waiting.on_complete = error_bad_on_complete;
calld->s.waiting.on_complete_user_data = NULL;
grpc_metadata_buffer_init(&calld->pending_metadata);
calld->got_first_send = 0;
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element *elem) {
call_data *calld = elem->call_data;
/* if the metadata buffer is not flushed, destroy it here. */
grpc_metadata_buffer_destroy(&calld->pending_metadata, GRPC_OP_OK);
/* if the call got activated, we need to destroy the child stack also, and
remove it from the in-flight requests tracked by the child_entry we
picked */
if (calld->state == CALL_ACTIVE) {
grpc_child_call_destroy(calld->s.active.child_call);
}
if (calld->state == CALL_WAITING) {
grpc_metadata_batch_destroy(&calld->s.waiting_op.data.metadata);
}
}
/* Constructor for channel_data */
@ -423,7 +383,6 @@ static void init_channel_elem(grpc_channel_element *elem,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
channel_data *chand = elem->channel_data;
char temp[GPR_LTOA_MIN_BUFSIZE];
GPR_ASSERT(!is_first);
GPR_ASSERT(is_last);
@ -437,10 +396,6 @@ static void init_channel_elem(grpc_channel_element *elem,
chand->transport_setup = NULL;
chand->transport_setup_initiated = 0;
chand->args = grpc_channel_args_copy(args);
gpr_ltoa(GRPC_STATUS_CANCELLED, temp);
chand->cancel_status =
grpc_mdelem_from_strings(metadata_context, "grpc-status", temp);
}
/* Destructor for channel_data */
@ -455,7 +410,6 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
grpc_channel_args_destroy(chand->args);
grpc_mdelem_unref(chand->cancel_status);
gpr_mu_destroy(&chand->mu);
GPR_ASSERT(chand->waiting_child_count == 0);
@ -463,9 +417,10 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_client_channel_filter = {
call_op, channel_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "client-channel", };
call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
"client-channel",
};
grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
grpc_channel_stack *channel_stack, grpc_transport *transport,
@ -520,13 +475,7 @@ grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
call_ops = gpr_malloc(sizeof(grpc_call_op) * waiting_child_count);
for (i = 0; i < waiting_child_count; i++) {
call_ops[i].type = GRPC_SEND_START;
call_ops[i].dir = GRPC_CALL_DOWN;
call_ops[i].flags = waiting_children[i]->s.waiting.start_flags;
call_ops[i].done_cb = waiting_children[i]->s.waiting.on_complete;
call_ops[i].user_data =
waiting_children[i]->s.waiting.on_complete_user_data;
call_ops[i].data.start.pollset = waiting_children[i]->s.waiting.pollset;
call_ops[i] = waiting_children[i]->s.waiting_op;
if (!prepare_activate(waiting_children[i]->elem, chand->active_child)) {
waiting_children[i] = NULL;
call_ops[i].done_cb(call_ops[i].user_data, GRPC_OP_ERROR);

@ -60,7 +60,6 @@ typedef struct connected_channel_call_data {
gpr_uint32 max_message_length;
gpr_uint32 incoming_message_length;
gpr_uint8 reading_message;
gpr_uint8 got_metadata_boundary;
gpr_uint8 got_read_close;
gpr_slice_buffer incoming_message;
gpr_uint32 outgoing_buffer_length_estimate;
@ -120,27 +119,20 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
if (op->bind_pollset) {
grpc_transport_add_to_pollset(chand->transport, op->bind_pollset);
}
switch (op->type) {
case GRPC_SEND_METADATA:
grpc_sopb_add_metadata(&calld->outgoing_sopb, op->data.metadata);
grpc_sopb_add_flow_ctl_cb(&calld->outgoing_sopb, op->done_cb,
op->user_data);
break;
case GRPC_SEND_DEADLINE:
grpc_sopb_add_deadline(&calld->outgoing_sopb, op->data.deadline);
grpc_sopb_add_flow_ctl_cb(&calld->outgoing_sopb, op->done_cb,
op->user_data);
break;
case GRPC_SEND_START:
grpc_transport_add_to_pollset(chand->transport, op->data.start.pollset);
grpc_sopb_add_metadata_boundary(&calld->outgoing_sopb);
end_bufferable_op(op, chand, calld, 0);
break;
case GRPC_SEND_MESSAGE:
grpc_sopb_add_begin_message(&calld->outgoing_sopb,
grpc_byte_buffer_length(op->data.message),
op->flags);
/* fall-through */
/* fall-through */
case GRPC_SEND_PREFORMATTED_MESSAGE:
copy_byte_buffer_to_stream_ops(op->data.message, &calld->outgoing_sopb);
calld->outgoing_buffer_length_estimate +=
@ -200,7 +192,6 @@ static void init_call_elem(grpc_call_element *elem,
grpc_sopb_init(&calld->outgoing_sopb);
calld->reading_message = 0;
calld->got_metadata_boundary = 0;
calld->got_read_close = 0;
calld->outgoing_buffer_length_estimate = 0;
calld->max_message_length = chand->max_message_length;
@ -259,9 +250,9 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_connected_channel_filter = {
call_op, channel_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "connected", };
call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem, "connected",
};
static gpr_slice alloc_recv_buffer(void *user_data, grpc_transport *transport,
grpc_stream *stream, size_t size_hint) {
@ -307,8 +298,8 @@ static void finish_message(channel_data *chand, call_data *calld) {
call_op.type = GRPC_RECV_MESSAGE;
call_op.done_cb = do_nothing;
/* TODO(ctiller): this could be a lot faster if coded directly */
call_op.data.message = grpc_byte_buffer_create(
calld->incoming_message.slices, calld->incoming_message.count);
call_op.data.message = grpc_byte_buffer_create(calld->incoming_message.slices,
calld->incoming_message.count);
gpr_slice_buffer_reset_and_unref(&calld->incoming_message);
/* disable window updates until we get a request more from above */
@ -320,6 +311,19 @@ static void finish_message(channel_data *chand, call_data *calld) {
grpc_call_next_op(elem, &call_op);
}
static void got_metadata(grpc_call_element *elem,
grpc_metadata_batch metadata) {
grpc_call_op op;
op.type = GRPC_RECV_METADATA;
op.dir = GRPC_CALL_UP;
op.flags = 0;
op.data.metadata = metadata;
op.done_cb = do_nothing;
op.user_data = NULL;
grpc_call_next_op(elem, &op);
}
/* Handle incoming stream ops from the transport, translating them into
call_ops to pass up the call stack */
static void recv_batch(void *user_data, grpc_transport *transport,
@ -339,40 +343,12 @@ static void recv_batch(void *user_data, grpc_transport *transport,
stream_op = ops + i;
switch (stream_op->type) {
case GRPC_OP_FLOW_CTL_CB:
gpr_log(GPR_ERROR,
"should not receive flow control ops from transport");
abort();
stream_op->data.flow_ctl_cb.cb(stream_op->data.flow_ctl_cb.arg, 1);
break;
case GRPC_NO_OP:
break;
case GRPC_OP_METADATA:
call_op.type = GRPC_RECV_METADATA;
call_op.dir = GRPC_CALL_UP;
call_op.flags = 0;
call_op.data.metadata = stream_op->data.metadata;
call_op.done_cb = do_nothing;
call_op.user_data = NULL;
grpc_call_next_op(elem, &call_op);
break;
case GRPC_OP_DEADLINE:
call_op.type = GRPC_RECV_DEADLINE;
call_op.dir = GRPC_CALL_UP;
call_op.flags = 0;
call_op.data.deadline = stream_op->data.deadline;
call_op.done_cb = do_nothing;
call_op.user_data = NULL;
grpc_call_next_op(elem, &call_op);
break;
case GRPC_OP_METADATA_BOUNDARY:
if (!calld->got_metadata_boundary) {
calld->got_metadata_boundary = 1;
call_op.type = GRPC_RECV_END_OF_INITIAL_METADATA;
call_op.dir = GRPC_CALL_UP;
call_op.flags = 0;
call_op.done_cb = do_nothing;
call_op.user_data = NULL;
grpc_call_next_op(elem, &call_op);
}
got_metadata(elem, stream_op->data.metadata);
break;
case GRPC_OP_BEGIN_MESSAGE:
/* can't begin a message when we're still reading a message */
@ -495,7 +471,8 @@ static void transport_closed(void *user_data, grpc_transport *transport) {
const grpc_transport_callbacks connected_channel_transport_callbacks = {
alloc_recv_buffer, accept_stream, recv_batch,
transport_goaway, transport_closed, };
transport_goaway, transport_closed,
};
grpc_transport_setup_result grpc_connected_channel_bind_transport(
grpc_channel_stack *channel_stack, grpc_transport *transport) {

@ -35,7 +35,10 @@
#include <grpc/support/log.h>
typedef struct call_data {
int sent_headers;
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
grpc_linked_mdelem te_trailers;
grpc_linked_mdelem content_type;
} call_data;
typedef struct channel_data {
@ -49,6 +52,18 @@ typedef struct channel_data {
/* used to silence 'variable not used' warnings */
static void ignore_unused(void *ignored) {}
static grpc_mdelem *client_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
channel_data *channeld = elem->channel_data;
if (md == channeld->status) {
return NULL;
} else if (md->key == channeld->status->key) {
grpc_call_element_send_cancel(elem);
return NULL;
}
return md;
}
/* Called either:
- in response to an API call (or similar) from above, to send something
- a network event (or similar) from below, to receive something
@ -61,42 +76,23 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
channel_data *channeld = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
ignore_unused(calld);
switch (op->type) {
case GRPC_SEND_METADATA:
if (!calld->sent_headers) {
/* Send : prefixed headers, which have to be before any application
* layer headers. */
calld->sent_headers = 1;
grpc_call_element_send_metadata(elem, grpc_mdelem_ref(channeld->method));
grpc_call_element_send_metadata(elem, grpc_mdelem_ref(channeld->scheme));
}
grpc_call_next_op(elem, op);
break;
case GRPC_SEND_START:
if (!calld->sent_headers) {
/* Send : prefixed headers, if we haven't already */
calld->sent_headers = 1;
grpc_call_element_send_metadata(elem, grpc_mdelem_ref(channeld->method));
grpc_call_element_send_metadata(elem, grpc_mdelem_ref(channeld->scheme));
}
/* Send non : prefixed headers */
grpc_call_element_send_metadata(elem, grpc_mdelem_ref(channeld->te_trailers));
grpc_call_element_send_metadata(elem, grpc_mdelem_ref(channeld->content_type));
/* Send : prefixed headers, which have to be before any application
* layer headers. */
grpc_metadata_batch_add_head(&op->data.metadata, &calld->method,
grpc_mdelem_ref(channeld->method));
grpc_metadata_batch_add_head(&op->data.metadata, &calld->scheme,
grpc_mdelem_ref(channeld->scheme));
grpc_metadata_batch_add_tail(&op->data.metadata, &calld->te_trailers,
grpc_mdelem_ref(channeld->te_trailers));
grpc_metadata_batch_add_tail(&op->data.metadata, &calld->content_type,
grpc_mdelem_ref(channeld->content_type));
grpc_call_next_op(elem, op);
break;
case GRPC_RECV_METADATA:
if (op->data.metadata == channeld->status) {
grpc_mdelem_unref(op->data.metadata);
op->done_cb(op->user_data, GRPC_OP_OK);
} else if (op->data.metadata->key == channeld->status->key) {
grpc_mdelem_unref(op->data.metadata);
op->done_cb(op->user_data, GRPC_OP_OK);
grpc_call_element_send_cancel(elem);
} else {
grpc_call_next_op(elem, op);
}
grpc_metadata_batch_filter(&op->data.metadata, client_filter, elem);
grpc_call_next_op(elem, op);
break;
default:
/* pass control up or down the stack depending on op->dir */
@ -124,16 +120,7 @@ static void channel_op(grpc_channel_element *elem,
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
const void *server_transport_data) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
ignore_unused(channeld);
/* initialize members */
calld->sent_headers = 0;
}
const void *server_transport_data) {}
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element *elem) {
@ -194,6 +181,6 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_http_client_filter = {
call_op, channel_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "http-client"};
call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
"http-client"};

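The rewritten client filter also shows the new allocation-free pattern for injecting headers: per-call grpc_linked_mdelem slots are declared in call_data, and ref'd channel-owned elements are spliced straight into the outgoing batch with grpc_metadata_batch_add_head/add_tail instead of one GRPC_SEND_METADATA op per element. A hypothetical filter that adds a single header would follow the same pattern (all names below are illustrative, not from this commit):

#include "src/core/channel/channel_stack.h"
#include "src/core/transport/stream_op.h" /* grpc_metadata_batch helpers */

typedef struct example_call_data {
  grpc_linked_mdelem example_link; /* holds the element while it is linked
                                      into this call's metadata batch */
} example_call_data;

typedef struct example_channel_data {
  grpc_mdelem *example_header; /* created once, e.g. in init_channel_elem */
} example_channel_data;

static void example_call_op(grpc_call_element *elem,
                            grpc_call_element *from_elem, grpc_call_op *op) {
  example_call_data *calld = elem->call_data;
  example_channel_data *channeld = elem->channel_data;
  (void)from_elem;
  if (op->type == GRPC_SEND_METADATA) {
    /* one ref goes to the batch; channeld keeps its own ref for later calls */
    grpc_metadata_batch_add_tail(&op->data.metadata, &calld->example_link,
                                 grpc_mdelem_ref(channeld->example_header));
  }
  grpc_call_next_op(elem, op);
}
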
@ -38,8 +38,6 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
typedef enum { NOT_RECEIVED, POST, GET } known_method_type;
typedef struct {
grpc_mdelem *path;
grpc_mdelem *content_type;
@ -47,16 +45,17 @@ typedef struct {
} gettable;
typedef struct call_data {
known_method_type seen_method;
gpr_uint8 got_initial_metadata;
gpr_uint8 seen_path;
gpr_uint8 seen_post;
gpr_uint8 sent_status;
gpr_uint8 seen_scheme;
gpr_uint8 seen_te_trailers;
grpc_mdelem *path;
grpc_linked_mdelem status;
} call_data;
typedef struct channel_data {
grpc_mdelem *te_trailers;
grpc_mdelem *method_get;
grpc_mdelem *method_post;
grpc_mdelem *http_scheme;
grpc_mdelem *https_scheme;
@ -78,38 +77,70 @@ typedef struct channel_data {
/* used to silence 'variable not used' warnings */
static void ignore_unused(void *ignored) {}
/* Handle 'GET': not technically grpc, so probably a web browser hitting
us */
static void payload_done(void *elem, grpc_op_error error) {
if (error == GRPC_OP_OK) {
grpc_call_element_send_finish(elem);
}
}
static void handle_get(grpc_call_element *elem) {
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
channel_data *channeld = elem->channel_data;
call_data *calld = elem->call_data;
grpc_call_op op;
size_t i;
for (i = 0; i < channeld->gettable_count; i++) {
if (channeld->gettables[i].path == calld->path) {
grpc_call_element_send_metadata(elem,
grpc_mdelem_ref(channeld->status_ok));
grpc_call_element_send_metadata(
elem, grpc_mdelem_ref(channeld->gettables[i].content_type));
op.type = GRPC_SEND_PREFORMATTED_MESSAGE;
op.dir = GRPC_CALL_DOWN;
op.flags = 0;
op.data.message = channeld->gettables[i].content;
op.done_cb = payload_done;
op.user_data = elem;
grpc_call_next_op(elem, &op);
/* Check if it is one of the headers we care about. */
if (md == channeld->te_trailers || md == channeld->method_post ||
md == channeld->http_scheme || md == channeld->https_scheme ||
md == channeld->grpc_scheme || md == channeld->content_type) {
/* swallow it */
if (md == channeld->method_post) {
calld->seen_post = 1;
} else if (md->key == channeld->http_scheme->key) {
calld->seen_scheme = 1;
} else if (md == channeld->te_trailers) {
calld->seen_te_trailers = 1;
}
/* TODO(klempner): Track that we've seen all the headers we should
require */
return NULL;
} else if (md->key == channeld->content_type->key) {
if (strncmp(grpc_mdstr_as_c_string(md->value), "application/grpc+", 17) ==
0) {
/* Although the C implementation doesn't (currently) generate them,
any custom +-suffix is explicitly valid. */
/* TODO(klempner): We should consider preallocating common values such
as +proto or +json, or at least stashing them if we see them. */
/* TODO(klempner): Should we be surfacing this to application code? */
} else {
/* TODO(klempner): We're currently allowing this, but we shouldn't
see it without a proxy so log for now. */
gpr_log(GPR_INFO, "Unexpected content-type %s",
channeld->content_type->key);
}
return NULL;
} else if (md->key == channeld->te_trailers->key ||
md->key == channeld->method_post->key ||
md->key == channeld->http_scheme->key ||
md->key == channeld->content_type->key) {
gpr_log(GPR_ERROR, "Invalid %s: header: '%s'",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value));
/* swallow it and error everything out. */
/* TODO(klempner): We ought to generate more descriptive error messages
on the wire here. */
grpc_call_element_send_cancel(elem);
return NULL;
} else if (md->key == channeld->path_key) {
if (calld->seen_path) {
gpr_log(GPR_ERROR, "Received :path twice");
return NULL;
}
calld->seen_path = 1;
return md;
} else if (md->key == channeld->host_key) {
/* translate host to :authority since :authority may be
omitted */
grpc_mdelem *authority = grpc_mdelem_from_metadata_strings(
channeld->mdctx, grpc_mdstr_ref(channeld->authority_key),
grpc_mdstr_ref(md->value));
grpc_mdelem_unref(md);
return authority;
} else {
return md;
}
grpc_call_element_send_metadata(elem,
grpc_mdelem_ref(channeld->status_not_found));
grpc_call_element_send_finish(elem);
}
/* Called either:
@ -126,115 +157,41 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
switch (op->type) {
case GRPC_RECV_METADATA:
/* Check if it is one of the headers we care about. */
if (op->data.metadata == channeld->te_trailers ||
op->data.metadata == channeld->method_get ||
op->data.metadata == channeld->method_post ||
op->data.metadata == channeld->http_scheme ||
op->data.metadata == channeld->https_scheme ||
op->data.metadata == channeld->grpc_scheme ||
op->data.metadata == channeld->content_type) {
/* swallow it */
if (op->data.metadata == channeld->method_get) {
calld->seen_method = GET;
} else if (op->data.metadata == channeld->method_post) {
calld->seen_method = POST;
} else if (op->data.metadata->key == channeld->http_scheme->key) {
calld->seen_scheme = 1;
} else if (op->data.metadata == channeld->te_trailers) {
calld->seen_te_trailers = 1;
}
/* TODO(klempner): Track that we've seen all the headers we should
require */
grpc_mdelem_unref(op->data.metadata);
op->done_cb(op->user_data, GRPC_OP_OK);
} else if (op->data.metadata->key == channeld->content_type->key) {
if (strncmp(grpc_mdstr_as_c_string(op->data.metadata->value),
"application/grpc+", 17) == 0) {
/* Although the C implementation doesn't (currently) generate them,
any
custom +-suffix is explicitly valid. */
/* TODO(klempner): We should consider preallocating common values such
as +proto or +json, or at least stashing them if we see them. */
/* TODO(klempner): Should we be surfacing this to application code? */
grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
if (!calld->got_initial_metadata) {
calld->got_initial_metadata = 1;
/* Have we seen the required http2 transport headers?
(:method, :scheme, content-type, with :path and :authority covered
at the channel level right now) */
if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
calld->seen_path) {
grpc_call_next_op(elem, op);
} else {
/* TODO(klempner): We're currently allowing this, but we shouldn't
see it without a proxy so log for now. */
gpr_log(GPR_INFO, "Unexpected content-type %s",
channeld->content_type->key);
}
grpc_mdelem_unref(op->data.metadata);
op->done_cb(op->user_data, GRPC_OP_OK);
} else if (op->data.metadata->key == channeld->te_trailers->key ||
op->data.metadata->key == channeld->method_post->key ||
op->data.metadata->key == channeld->http_scheme->key ||
op->data.metadata->key == channeld->content_type->key) {
gpr_log(GPR_ERROR, "Invalid %s: header: '%s'",
grpc_mdstr_as_c_string(op->data.metadata->key),
grpc_mdstr_as_c_string(op->data.metadata->value));
/* swallow it and error everything out. */
/* TODO(klempner): We ought to generate more descriptive error messages
on the wire here. */
grpc_mdelem_unref(op->data.metadata);
op->done_cb(op->user_data, GRPC_OP_OK);
grpc_call_element_send_cancel(elem);
} else if (op->data.metadata->key == channeld->path_key) {
if (calld->path != NULL) {
gpr_log(GPR_ERROR, "Received :path twice");
grpc_mdelem_unref(calld->path);
if (!calld->seen_post) {
gpr_log(GPR_ERROR, "Missing :method header");
}
if (!calld->seen_scheme) {
gpr_log(GPR_ERROR, "Missing :scheme header");
}
if (!calld->seen_te_trailers) {
gpr_log(GPR_ERROR, "Missing te trailers header");
}
/* Error this call out */
grpc_metadata_batch_destroy(&op->data.metadata);
op->done_cb(op->user_data, GRPC_OP_OK);
grpc_call_element_send_cancel(elem);
}
calld->path = op->data.metadata;
op->done_cb(op->user_data, GRPC_OP_OK);
} else if (op->data.metadata->key == channeld->host_key) {
/* translate host to :authority since :authority may be
omitted */
grpc_mdelem *authority = grpc_mdelem_from_metadata_strings(
channeld->mdctx, grpc_mdstr_ref(channeld->authority_key),
grpc_mdstr_ref(op->data.metadata->value));
grpc_mdelem_unref(op->data.metadata);
op->data.metadata = authority;
/* pass the event up */
grpc_call_next_op(elem, op);
} else {
/* pass the event up */
grpc_call_next_op(elem, op);
}
break;
case GRPC_RECV_END_OF_INITIAL_METADATA:
/* Have we seen the required http2 transport headers?
(:method, :scheme, content-type, with :path and :authority covered
at the channel level right now) */
if (calld->seen_method == POST && calld->seen_scheme &&
calld->seen_te_trailers && calld->path) {
grpc_call_element_recv_metadata(elem, calld->path);
calld->path = NULL;
grpc_call_next_op(elem, op);
} else if (calld->seen_method == GET) {
handle_get(elem);
} else {
if (calld->seen_method == NOT_RECEIVED) {
gpr_log(GPR_ERROR, "Missing :method header");
}
if (!calld->seen_scheme) {
gpr_log(GPR_ERROR, "Missing :scheme header");
}
if (!calld->seen_te_trailers) {
gpr_log(GPR_ERROR, "Missing te trailers header");
}
/* Error this call out */
op->done_cb(op->user_data, GRPC_OP_OK);
grpc_call_element_send_cancel(elem);
}
break;
case GRPC_SEND_START:
case GRPC_SEND_METADATA:
/* If we haven't sent status 200 yet, we need to do so because it needs to
come before any non : prefixed metadata. */
if (!calld->sent_status) {
calld->sent_status = 1;
/* status is reffed by grpc_call_element_send_metadata */
grpc_call_element_send_metadata(elem,
grpc_mdelem_ref(channeld->status_ok));
grpc_metadata_batch_add_head(&op->data.metadata, &calld->status,
grpc_mdelem_ref(channeld->status_ok));
}
grpc_call_next_op(elem, op);
break;
@ -272,25 +229,11 @@ static void init_call_elem(grpc_call_element *elem,
ignore_unused(channeld);
/* initialize members */
calld->path = NULL;
calld->sent_status = 0;
calld->seen_scheme = 0;
calld->seen_method = NOT_RECEIVED;
calld->seen_te_trailers = 0;
memset(calld, 0, sizeof(*calld));
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element *elem) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
ignore_unused(channeld);
if (calld->path) {
grpc_mdelem_unref(calld->path);
}
}
static void destroy_call_elem(grpc_call_element *elem) {}
/* Constructor for channel_data */
static void init_channel_elem(grpc_channel_element *elem,
@ -314,7 +257,6 @@ static void init_channel_elem(grpc_channel_element *elem,
channeld->status_not_found =
grpc_mdelem_from_strings(mdctx, ":status", "404");
channeld->method_post = grpc_mdelem_from_strings(mdctx, ":method", "POST");
channeld->method_get = grpc_mdelem_from_strings(mdctx, ":method", "GET");
channeld->http_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "http");
channeld->https_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "https");
channeld->grpc_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "grpc");
@ -369,7 +311,6 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
grpc_mdelem_unref(channeld->status_ok);
grpc_mdelem_unref(channeld->status_not_found);
grpc_mdelem_unref(channeld->method_post);
grpc_mdelem_unref(channeld->method_get);
grpc_mdelem_unref(channeld->http_scheme);
grpc_mdelem_unref(channeld->https_scheme);
grpc_mdelem_unref(channeld->grpc_scheme);

@ -1,149 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/channel/metadata_buffer.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include <string.h>
#define INITIAL_ELEM_CAP 8
/* One queued call; we track offsets to string data in a shared buffer to
reduce allocations. See grpc_metadata_buffer_impl for the memory use
strategy */
typedef struct {
grpc_mdelem *md;
void (*cb)(void *user_data, grpc_op_error error);
void *user_data;
gpr_uint32 flags;
} qelem;
/* Memory layout:
grpc_metadata_buffer_impl
followed by an array of qelem */
struct grpc_metadata_buffer_impl {
/* number of elements in q */
size_t elems;
/* capacity of q */
size_t elem_cap;
};
#define ELEMS(buffer) ((qelem *)((buffer) + 1))
void grpc_metadata_buffer_init(grpc_metadata_buffer *buffer) {
/* start buffer as NULL, indicating no elements */
*buffer = NULL;
}
void grpc_metadata_buffer_destroy(grpc_metadata_buffer *buffer,
grpc_op_error error) {
size_t i;
qelem *qe;
if (*buffer) {
for (i = 0; i < (*buffer)->elems; i++) {
qe = &ELEMS(*buffer)[i];
grpc_mdelem_unref(qe->md);
qe->cb(qe->user_data, error);
}
gpr_free(*buffer);
}
}
void grpc_metadata_buffer_queue(grpc_metadata_buffer *buffer,
grpc_call_op *op) {
grpc_metadata_buffer_impl *impl = *buffer;
qelem *qe;
size_t bytes;
GPR_ASSERT(op->type == GRPC_SEND_METADATA || op->type == GRPC_RECV_METADATA);
if (!impl) {
/* this is the first element: allocate enough space to hold the
header object and the initial element capacity of qelems */
bytes =
sizeof(grpc_metadata_buffer_impl) + INITIAL_ELEM_CAP * sizeof(qelem);
impl = gpr_malloc(bytes);
/* initialize the header object */
impl->elems = 0;
impl->elem_cap = INITIAL_ELEM_CAP;
} else if (impl->elems == impl->elem_cap) {
/* more qelems than what we can deal with: grow by doubling size */
impl->elem_cap *= 2;
bytes = sizeof(grpc_metadata_buffer_impl) + impl->elem_cap * sizeof(qelem);
impl = gpr_realloc(impl, bytes);
}
/* append an element to the queue */
qe = &ELEMS(impl)[impl->elems];
impl->elems++;
qe->md = op->data.metadata;
qe->cb = op->done_cb;
qe->user_data = op->user_data;
qe->flags = op->flags;
/* header object may have changed location: store it back */
*buffer = impl;
}
void grpc_metadata_buffer_flush(grpc_metadata_buffer *buffer,
grpc_call_element *elem) {
grpc_metadata_buffer_impl *impl = *buffer;
grpc_call_op op;
qelem *qe;
size_t i;
if (!impl) {
/* nothing to send */
return;
}
/* construct call_op's, and push them down the stack */
op.type = GRPC_SEND_METADATA;
op.dir = GRPC_CALL_DOWN;
for (i = 0; i < impl->elems; i++) {
qe = &ELEMS(impl)[i];
op.done_cb = qe->cb;
op.user_data = qe->user_data;
op.flags = qe->flags;
op.data.metadata = qe->md;
grpc_call_next_op(elem, &op);
}
/* free data structures and reset to NULL: we can only flush once */
gpr_free(impl);
*buffer = NULL;
}

@ -1,70 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_CHANNEL_METADATA_BUFFER_H
#define GRPC_INTERNAL_CORE_CHANNEL_METADATA_BUFFER_H
#include "src/core/channel/channel_stack.h"
/* Utility code to buffer GRPC_SEND_METADATA calls and pass them down the stack
all at once at some otherwise-determined time. Useful for implementing
filters that want to queue metadata until a START event chooses some
underlying filter stack to send an rpc on. */
/* Clients should declare a member of grpc_metadata_buffer. This may at some
point become a typedef for a struct, but for now a pointer suffices */
typedef struct grpc_metadata_buffer_impl grpc_metadata_buffer_impl;
typedef grpc_metadata_buffer_impl *grpc_metadata_buffer;
/* Initializes the metadata buffer. Allocates no memory. */
void grpc_metadata_buffer_init(grpc_metadata_buffer *buffer);
/* Destroy the metadata buffer. */
void grpc_metadata_buffer_destroy(grpc_metadata_buffer *buffer,
grpc_op_error error);
/* Append a call to the end of a metadata buffer: may allocate memory */
void grpc_metadata_buffer_queue(grpc_metadata_buffer *buffer, grpc_call_op *op);
/* Flush all queued operations from the metadata buffer to the element below
self */
void grpc_metadata_buffer_flush(grpc_metadata_buffer *buffer,
grpc_call_element *self);
/* Count the number of queued elements in the buffer. */
size_t grpc_metadata_buffer_count(const grpc_metadata_buffer *buffer);
/* Extract elements as a grpc_metadata*, for presentation to applications.
The returned buffer must be freed with
grpc_metadata_buffer_cleanup_elements.
Clears the metadata buffer (this is a one-shot operation) */
grpc_metadata *grpc_metadata_buffer_extract_elements(
grpc_metadata_buffer *buffer);
void grpc_metadata_buffer_cleanup_elements(void *elements, grpc_op_error error);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_METADATA_BUFFER_H */

@ -44,12 +44,15 @@
#include "src/core/security/credentials.h"
#include "src/core/surface/call.h"
#define MAX_CREDENTIALS_METADATA_COUNT 4
/* We can have a per-call credentials. */
typedef struct {
grpc_credentials *creds;
grpc_mdstr *host;
grpc_mdstr *method;
grpc_call_op op;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
} call_data;
/* We can have a per-channel credentials. */
@ -62,30 +65,8 @@ typedef struct {
grpc_mdstr *status_key;
} channel_data;
static void do_nothing(void *ignored, grpc_op_error error) {}
static void bubbleup_error(grpc_call_element *elem, const char *error_msg) {
grpc_call_op finish_op;
channel_data *channeld = elem->channel_data;
char status[GPR_LTOA_MIN_BUFSIZE];
gpr_log(GPR_ERROR, "%s", error_msg);
finish_op.type = GRPC_RECV_METADATA;
finish_op.dir = GRPC_CALL_UP;
finish_op.flags = 0;
finish_op.data.metadata = grpc_mdelem_from_metadata_strings(
channeld->md_ctx, grpc_mdstr_ref(channeld->error_msg_key),
grpc_mdstr_from_string(channeld->md_ctx, error_msg));
finish_op.done_cb = do_nothing;
finish_op.user_data = NULL;
grpc_call_next_op(elem, &finish_op);
gpr_ltoa(GRPC_STATUS_UNAUTHENTICATED, status);
finish_op.data.metadata = grpc_mdelem_from_metadata_strings(
channeld->md_ctx, grpc_mdstr_ref(channeld->status_key),
grpc_mdstr_from_string(channeld->md_ctx, status));
grpc_call_next_op(elem, &finish_op);
grpc_call_element_recv_status(elem, GRPC_STATUS_UNAUTHENTICATED, error_msg);
grpc_call_element_send_cancel(elem);
}
@ -93,11 +74,15 @@ static void on_credentials_metadata(void *user_data, grpc_mdelem **md_elems,
size_t num_md,
grpc_credentials_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data;
grpc_call_op op = calld->op;
size_t i;
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
for (i = 0; i < num_md; i++) {
grpc_call_element_send_metadata(elem, grpc_mdelem_ref(md_elems[i]));
grpc_metadata_batch_add_tail(&op.data.metadata, &calld->md_links[i],
grpc_mdelem_ref(md_elems[i]));
}
grpc_call_next_op(elem, &((call_data *)elem->call_data)->op);
grpc_call_next_op(elem, &op);
}
static char *build_service_url(const char *url_scheme, call_data *calld) {
@ -159,6 +144,7 @@ static void on_host_checked(void *user_data, grpc_security_status status) {
gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.",
grpc_mdstr_as_c_string(calld->host));
bubbleup_error(elem, error_msg);
grpc_metadata_batch_destroy(&calld->op.data.metadata);
gpr_free(error_msg);
calld->op.done_cb(calld->op.user_data, GRPC_OP_ERROR);
}
@ -174,21 +160,22 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
grpc_linked_mdelem *l;
switch (op->type) {
case GRPC_SEND_METADATA:
/* Pointer comparison is OK for md_elems created from the same context. */
if (op->data.metadata->key == channeld->authority_string) {
if (calld->host != NULL) grpc_mdstr_unref(calld->host);
calld->host = grpc_mdstr_ref(op->data.metadata->value);
} else if (op->data.metadata->key == channeld->path_string) {
if (calld->method != NULL) grpc_mdstr_unref(calld->method);
calld->method = grpc_mdstr_ref(op->data.metadata->value);
for (l = op->data.metadata.list.head; l; l = l->next) {
grpc_mdelem *md = l->md;
/* Pointer comparison is OK for md_elems created from the same context.
*/
if (md->key == channeld->authority_string) {
if (calld->host != NULL) grpc_mdstr_unref(calld->host);
calld->host = grpc_mdstr_ref(md->value);
} else if (md->key == channeld->path_string) {
if (calld->method != NULL) grpc_mdstr_unref(calld->method);
calld->method = grpc_mdstr_ref(md->value);
}
}
grpc_call_next_op(elem, op);
break;
case GRPC_SEND_START:
if (calld->host != NULL) {
grpc_security_status status;
const char *call_host = grpc_mdstr_as_c_string(calld->host);
@ -202,6 +189,7 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
"Invalid host %s set in :authority metadata.",
call_host);
bubbleup_error(elem, error_msg);
grpc_metadata_batch_destroy(&calld->op.data.metadata);
gpr_free(error_msg);
op->done_cb(op->user_data, GRPC_OP_ERROR);
}
@ -210,7 +198,6 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
}
send_security_metadata(elem, op);
break;
default:
/* pass control up or down the stack depending on op->dir */
grpc_call_next_op(elem, op);

@ -33,7 +33,6 @@
#include "src/core/surface/call.h"
#include "src/core/channel/channel_stack.h"
#include "src/core/channel/metadata_buffer.h"
#include "src/core/iomgr/alarm.h"
#include "src/core/support/string.h"
#include "src/core/surface/byte_buffer_queue.h"
@ -41,6 +40,7 @@
#include "src/core/surface/completion_queue.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
@ -68,8 +68,10 @@ typedef struct {
} completed_request;
/* See request_set in grpc_call below for a description */
#define REQSET_EMPTY 255
#define REQSET_DONE 254
#define REQSET_EMPTY 'X'
#define REQSET_DONE 'Y'
#define MAX_SEND_INITIAL_METADATA_COUNT 3
typedef struct {
/* Overall status of the operation: starts OK, may degrade to
@ -92,6 +94,8 @@ typedef enum {
/* Status came from the application layer overriding whatever
the wire says */
STATUS_FROM_API_OVERRIDE = 0,
/* Status was created by some internal channel stack operation */
STATUS_FROM_CORE,
/* Status came from 'the wire' - or somewhere below the surface
layer */
STATUS_FROM_WIRE,
@ -204,12 +208,18 @@ struct grpc_call {
/* Call refcount - to keep the call alive during asynchronous operations */
gpr_refcount internal_refcount;
grpc_linked_mdelem send_initial_metadata[MAX_SEND_INITIAL_METADATA_COUNT];
grpc_linked_mdelem status_link;
grpc_linked_mdelem details_link;
size_t send_initial_metadata_count;
gpr_timespec send_deadline;
/* Data that the legacy api needs to track. To be deleted at some point
soon */
legacy_state *legacy_state;
};
#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call)+1))
#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call) + 1))
#define CALL_FROM_CALL_STACK(call_stack) (((grpc_call *)(call_stack)) - 1)
#define CALL_ELEM_FROM_CALL(call, idx) \
grpc_call_stack_element(CALL_STACK_FROM_CALL(call), idx)
@ -226,9 +236,13 @@ struct grpc_call {
static void do_nothing(void *ignored, grpc_op_error also_ignored) {}
static send_action choose_send_action(grpc_call *call);
static void enact_send_action(grpc_call *call, send_action sa);
static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
const void *server_transport_data) {
const void *server_transport_data,
grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count,
gpr_timespec send_deadline) {
size_t i;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_call *call =
@ -245,6 +259,12 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
call->request_set[GRPC_IOREQ_SEND_TRAILING_METADATA] = REQSET_DONE;
call->request_set[GRPC_IOREQ_SEND_STATUS] = REQSET_DONE;
}
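/* Editorial note: the call takes ownership of any metadata the channel layer
   pre-seeds for the first send batch (e.g. :path/:authority on clients) and
   records the send deadline; a finite deadline also arms the deadline alarm
   further below. */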
GPR_ASSERT(add_initial_metadata_count < MAX_SEND_INITIAL_METADATA_COUNT);
for (i = 0; i < add_initial_metadata_count; i++) {
call->send_initial_metadata[i].md = add_initial_metadata[i];
}
call->send_initial_metadata_count = add_initial_metadata_count;
call->send_deadline = send_deadline;
grpc_channel_internal_ref(channel);
call->metadata_context = grpc_channel_get_metadata_context(channel);
/* one ref is dropped in response to destroy, the other in
@ -252,6 +272,9 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
gpr_ref_init(&call->internal_refcount, 2);
grpc_call_stack_init(channel_stack, server_transport_data,
CALL_STACK_FROM_CALL(call));
if (gpr_time_cmp(send_deadline, gpr_inf_future) != 0) {
set_deadline_alarm(call, send_deadline);
}
return call;
}
@ -284,6 +307,9 @@ static void destroy_call(void *call, int ignored_success) {
for (i = 0; i < GPR_ARRAY_SIZE(c->buffered_metadata); i++) {
gpr_free(c->buffered_metadata[i].metadata);
}
for (i = 0; i < c->send_initial_metadata_count; i++) {
grpc_mdelem_unref(c->send_initial_metadata[i].md);
}
if (c->legacy_state) {
destroy_legacy_state(c->legacy_state);
}
@ -342,6 +368,7 @@ static void request_more_data(grpc_call *call) {
op.flags = 0;
op.done_cb = do_nothing;
op.user_data = NULL;
op.bind_pollset = NULL;
grpc_call_execute_op(call, &op);
}
@ -587,15 +614,29 @@ static send_action choose_send_action(grpc_call *call) {
return SEND_NOTHING;
}
static void send_metadata(grpc_call *call, grpc_mdelem *elem) {
grpc_call_op op;
op.type = GRPC_SEND_METADATA;
op.dir = GRPC_CALL_DOWN;
op.flags = GRPC_WRITE_BUFFER_HINT;
op.data.metadata = elem;
op.done_cb = do_nothing;
op.user_data = NULL;
grpc_call_execute_op(call, &op);
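/* Editorial note: the helper below links the application's grpc_metadata
   array into an intrusive grpc_mdelem_list without allocating; each element's
   internal_data field is reused as grpc_linked_mdelem storage (the assert
   checks that the two layouts have the same size). */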
static grpc_mdelem_list chain_metadata_from_app(grpc_call *call, size_t count,
grpc_metadata *metadata) {
size_t i;
grpc_mdelem_list out;
if (count == 0) {
out.head = out.tail = NULL;
return out;
}
for (i = 0; i < count; i++) {
grpc_metadata *md = &metadata[i];
grpc_metadata *next_md = (i == count - 1) ? NULL : &metadata[i + 1];
grpc_metadata *prev_md = (i == 0) ? NULL : &metadata[i - 1];
grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
assert(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data));
l->md = grpc_mdelem_from_string_and_buffer(call->metadata_context, md->key,
(const gpr_uint8 *)md->value,
md->value_length);
l->next = next_md ? (grpc_linked_mdelem *)&next_md->internal_data : NULL;
l->prev = prev_md ? (grpc_linked_mdelem *)&prev_md->internal_data : NULL;
}
out.head = (grpc_linked_mdelem *)&(metadata[0].internal_data);
out.tail = (grpc_linked_mdelem *)&(metadata[count - 1].internal_data);
return out;
}
static void enact_send_action(grpc_call *call, send_action sa) {
@ -614,19 +655,21 @@ static void enact_send_action(grpc_call *call, send_action sa) {
/* fallthrough */
case SEND_INITIAL_METADATA:
data = call->request_data[GRPC_IOREQ_SEND_INITIAL_METADATA];
for (i = 0; i < data.send_metadata.count; i++) {
const grpc_metadata *md = &data.send_metadata.metadata[i];
send_metadata(call,
grpc_mdelem_from_string_and_buffer(
call->metadata_context, md->key,
(const gpr_uint8 *)md->value, md->value_length));
}
op.type = GRPC_SEND_START;
op.type = GRPC_SEND_METADATA;
op.dir = GRPC_CALL_DOWN;
op.flags = flags;
op.data.start.pollset = grpc_cq_pollset(call->cq);
op.data.metadata.list = chain_metadata_from_app(
call, data.send_metadata.count, data.send_metadata.metadata);
op.data.metadata.garbage.head = op.data.metadata.garbage.tail = NULL;
op.data.metadata.deadline = call->send_deadline;
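/* Editorial note: channel-provided elements (e.g. :path/:authority) are
   linked at the head of the batch so they precede the application's initial
   metadata; their references move into the batch, hence the count reset
   below. */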
for (i = 0; i < call->send_initial_metadata_count; i++) {
grpc_metadata_batch_link_head(&op.data.metadata,
&call->send_initial_metadata[i]);
}
call->send_initial_metadata_count = 0;
op.done_cb = finish_start_step;
op.user_data = call;
op.bind_pollset = grpc_cq_pollset(call->cq);
grpc_call_execute_op(call, &op);
break;
case SEND_BUFFERED_MESSAGE:
@ -640,37 +683,42 @@ static void enact_send_action(grpc_call *call, send_action sa) {
op.data.message = data.send_message;
op.done_cb = finish_write_step;
op.user_data = call;
op.bind_pollset = NULL;
grpc_call_execute_op(call, &op);
break;
case SEND_TRAILING_METADATA_AND_FINISH:
/* send trailing metadata */
data = call->request_data[GRPC_IOREQ_SEND_TRAILING_METADATA];
for (i = 0; i < data.send_metadata.count; i++) {
const grpc_metadata *md = &data.send_metadata.metadata[i];
send_metadata(call,
grpc_mdelem_from_string_and_buffer(
call->metadata_context, md->key,
(const gpr_uint8 *)md->value, md->value_length));
}
op.type = GRPC_SEND_METADATA;
op.dir = GRPC_CALL_DOWN;
op.flags = flags;
op.data.metadata.list = chain_metadata_from_app(
call, data.send_metadata.count, data.send_metadata.metadata);
op.data.metadata.garbage.head = op.data.metadata.garbage.tail = NULL;
op.data.metadata.deadline = call->send_deadline;
op.bind_pollset = NULL;
/* send status */
/* TODO(ctiller): cache common status values */
data = call->request_data[GRPC_IOREQ_SEND_STATUS];
gpr_ltoa(data.send_status.code, status_str);
send_metadata(
call,
grpc_metadata_batch_add_tail(
&op.data.metadata, &call->status_link,
grpc_mdelem_from_metadata_strings(
call->metadata_context,
grpc_mdstr_ref(grpc_channel_get_status_string(call->channel)),
grpc_mdstr_from_string(call->metadata_context, status_str)));
if (data.send_status.details) {
send_metadata(
call,
grpc_metadata_batch_add_tail(
&op.data.metadata, &call->details_link,
grpc_mdelem_from_metadata_strings(
call->metadata_context,
grpc_mdstr_ref(grpc_channel_get_message_string(call->channel)),
grpc_mdstr_from_string(call->metadata_context,
data.send_status.details)));
}
op.done_cb = do_nothing;
op.user_data = NULL;
grpc_call_execute_op(call, &op);
/* fallthrough: see choose_send_action for details */
case SEND_FINISH:
op.type = GRPC_SEND_FINISH;
@ -678,6 +726,7 @@ static void enact_send_action(grpc_call *call, send_action sa) {
op.flags = 0;
op.done_cb = finish_finish_step;
op.user_data = call;
op.bind_pollset = NULL;
grpc_call_execute_op(call, &op);
break;
}
@ -831,6 +880,7 @@ grpc_call_error grpc_call_cancel(grpc_call *c) {
op.flags = 0;
op.done_cb = do_nothing;
op.user_data = NULL;
op.bind_pollset = NULL;
elem = CALL_ELEM_FROM_CALL(c, 0);
elem->filter->call_op(elem, NULL, &op);
@ -875,9 +925,7 @@ static void call_alarm(void *arg, int success) {
grpc_call_internal_unref(call, 1);
}
void grpc_call_set_deadline(grpc_call_element *elem, gpr_timespec deadline) {
grpc_call *call = CALL_FROM_TOP_ELEM(elem);
static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline) {
if (call->have_alarm) {
gpr_log(GPR_ERROR, "Attempt to set deadline alarm twice");
}
@ -886,11 +934,15 @@ void grpc_call_set_deadline(grpc_call_element *elem, gpr_timespec deadline) {
grpc_alarm_init(&call->alarm, deadline, call_alarm, call, gpr_now());
}
static void set_read_state(grpc_call *call, read_state state) {
lock(call);
static void set_read_state_locked(grpc_call *call, read_state state) {
GPR_ASSERT(call->read_state < state);
call->read_state = state;
finish_read_ops(call);
}
static void set_read_state(grpc_call *call, read_state state) {
lock(call);
set_read_state_locked(call, state);
unlock(call);
}
@ -914,7 +966,7 @@ static gpr_uint32 decode_status(grpc_mdelem *md) {
gpr_uint32 status;
void *user_data = grpc_mdelem_get_user_data(md, destroy_status);
if (user_data) {
status = ((gpr_uint32)(gpr_intptr) user_data) - STATUS_OFFSET;
status = ((gpr_uint32)(gpr_intptr)user_data) - STATUS_OFFSET;
} else {
if (!gpr_parse_bytes_to_uint32(grpc_mdstr_as_c_string(md->value),
GPR_SLICE_LENGTH(md->value->slice),
@ -936,52 +988,81 @@ void grpc_call_recv_message(grpc_call_element *elem,
unlock(call);
}
void grpc_call_recv_metadata(grpc_call_element *elem, grpc_mdelem *md) {
void grpc_call_recv_synthetic_status(grpc_call_element *elem,
grpc_status_code status,
const char *message) {
grpc_call *call = CALL_FROM_TOP_ELEM(elem);
grpc_mdstr *key = md->key;
lock(call);
set_status_code(call, STATUS_FROM_CORE, status);
set_status_details(call, STATUS_FROM_CORE,
grpc_mdstr_from_string(call->metadata_context, message));
unlock(call);
}
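/* Editorial note: the function below publishes a received metadata batch to
   the surface. grpc-status/grpc-message update the call status, every other
   element is exposed through the buffered metadata arrays (with a ref kept in
   owned_metadata), a batch deadline arms the deadline alarm, and any
   remaining refs plus the garbage list are dropped under a single mdctx lock.
   It returns nonzero when the batch was initial (not trailing) metadata. */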
int grpc_call_recv_metadata(grpc_call_element *elem, grpc_metadata_batch *md) {
grpc_call *call = CALL_FROM_TOP_ELEM(elem);
grpc_linked_mdelem *l;
grpc_metadata_array *dest;
grpc_metadata *mdusr;
int is_trailing;
grpc_mdctx *mdctx = call->metadata_context;
lock(call);
if (key == grpc_channel_get_status_string(call->channel)) {
set_status_code(call, STATUS_FROM_WIRE, decode_status(md));
grpc_mdelem_unref(md);
} else if (key == grpc_channel_get_message_string(call->channel)) {
set_status_details(call, STATUS_FROM_WIRE, grpc_mdstr_ref(md->value));
grpc_mdelem_unref(md);
} else {
dest = &call->buffered_metadata[call->read_state >=
READ_STATE_GOT_INITIAL_METADATA];
if (dest->count == dest->capacity) {
dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
dest->metadata =
gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
}
mdusr = &dest->metadata[dest->count++];
mdusr->key = grpc_mdstr_as_c_string(md->key);
mdusr->value = grpc_mdstr_as_c_string(md->value);
mdusr->value_length = GPR_SLICE_LENGTH(md->value->slice);
if (call->owned_metadata_count == call->owned_metadata_capacity) {
call->owned_metadata_capacity = GPR_MAX(
call->owned_metadata_capacity + 8, call->owned_metadata_capacity * 2);
call->owned_metadata =
gpr_realloc(call->owned_metadata,
sizeof(grpc_mdelem *) * call->owned_metadata_capacity);
is_trailing = call->read_state >= READ_STATE_GOT_INITIAL_METADATA;
for (l = md->list.head; l; l = l->next) {
grpc_mdelem *md = l->md;
grpc_mdstr *key = md->key;
if (key == grpc_channel_get_status_string(call->channel)) {
set_status_code(call, STATUS_FROM_WIRE, decode_status(md));
} else if (key == grpc_channel_get_message_string(call->channel)) {
set_status_details(call, STATUS_FROM_WIRE, grpc_mdstr_ref(md->value));
} else {
dest = &call->buffered_metadata[is_trailing];
if (dest->count == dest->capacity) {
dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
dest->metadata =
gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
}
mdusr = &dest->metadata[dest->count++];
mdusr->key = grpc_mdstr_as_c_string(md->key);
mdusr->value = grpc_mdstr_as_c_string(md->value);
mdusr->value_length = GPR_SLICE_LENGTH(md->value->slice);
if (call->owned_metadata_count == call->owned_metadata_capacity) {
call->owned_metadata_capacity =
GPR_MAX(call->owned_metadata_capacity + 8,
call->owned_metadata_capacity * 2);
call->owned_metadata =
gpr_realloc(call->owned_metadata,
sizeof(grpc_mdelem *) * call->owned_metadata_capacity);
}
call->owned_metadata[call->owned_metadata_count++] = md;
l->md = 0;
}
call->owned_metadata[call->owned_metadata_count++] = md;
}
if (gpr_time_cmp(md->deadline, gpr_inf_future) != 0) {
set_deadline_alarm(call, md->deadline);
}
if (!is_trailing) {
set_read_state_locked(call, READ_STATE_GOT_INITIAL_METADATA);
}
unlock(call);
grpc_mdctx_lock(mdctx);
for (l = md->list.head; l; l = l->next) {
if (l->md) grpc_mdctx_locked_mdelem_unref(mdctx, l->md);
}
for (l = md->garbage.head; l; l = l->next) {
grpc_mdctx_locked_mdelem_unref(mdctx, l->md);
}
grpc_mdctx_unlock(mdctx);
return !is_trailing;
}
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) {
return CALL_STACK_FROM_CALL(call);
}
void grpc_call_initial_metadata_complete(grpc_call_element *surface_element) {
grpc_call *call = grpc_call_from_top_element(surface_element);
set_read_state(call, READ_STATE_GOT_INITIAL_METADATA);
}
/*
* BATCH API IMPLEMENTATION
*/

@ -35,7 +35,6 @@
#define GRPC_INTERNAL_CORE_SURFACE_CALL_H
#include "src/core/channel/channel_stack.h"
#include "src/core/channel/metadata_buffer.h"
#include <grpc/grpc.h>
/* Primitive operation types - grpc_op's get rewritten into these */
@ -67,7 +66,7 @@ typedef union {
} recv_status_details;
struct {
size_t count;
const grpc_metadata *metadata;
grpc_metadata *metadata;
} send_metadata;
grpc_byte_buffer *send_message;
struct {
@ -86,7 +85,10 @@ typedef void (*grpc_ioreq_completion_func)(grpc_call *call,
void *user_data);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
const void *server_transport_data);
const void *server_transport_data,
grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count,
gpr_timespec send_deadline);
void grpc_call_set_completion_queue(grpc_call *call, grpc_completion_queue *cq);
grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call);
@ -96,8 +98,9 @@ void grpc_call_internal_unref(grpc_call *call, int allow_immediate_deletion);
/* Helpers for grpc_client, grpc_server filters to publish received data to
the completion queue/surface layer */
void grpc_call_recv_metadata(grpc_call_element *surface_element,
grpc_mdelem *md);
/* receive metadata - returns 1 if this was initial metadata */
int grpc_call_recv_metadata(grpc_call_element *surface_element,
grpc_metadata_batch *md);
void grpc_call_recv_message(grpc_call_element *surface_element,
grpc_byte_buffer *message);
void grpc_call_read_closed(grpc_call_element *surface_element);
@ -108,14 +111,12 @@ grpc_call_error grpc_call_start_ioreq_and_call_back(
grpc_call *call, const grpc_ioreq *reqs, size_t nreqs,
grpc_ioreq_completion_func on_complete, void *user_data);
/* Called when it's known that the initial batch of metadata is complete */
void grpc_call_initial_metadata_complete(grpc_call_element *surface_element);
void grpc_call_set_deadline(grpc_call_element *surface_element,
gpr_timespec deadline);
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call);
void grpc_call_recv_synthetic_status(grpc_call_element *elem,
grpc_status_code status,
const char *message);
/* Given the top call_element, get the call object. */
grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element);
@ -128,4 +129,4 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
#define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \
if (grpc_trace_batch) grpc_call_log_batch(sev, call, ops, nops, tag)
#endif /* GRPC_INTERNAL_CORE_SURFACE_CALL_H */
#endif /* GRPC_INTERNAL_CORE_SURFACE_CALL_H */

@ -62,7 +62,7 @@ struct grpc_channel {
registered_call *registered_calls;
};
#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c)+1))
#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))
#define CHANNEL_FROM_CHANNEL_STACK(channel_stack) \
(((grpc_channel *)(channel_stack)) - 1)
#define CHANNEL_FROM_TOP_ELEM(top_elem) \
@ -91,44 +91,25 @@ grpc_channel *grpc_channel_create_from_filters(
return channel;
}
static void do_nothing(void *ignored, grpc_op_error error) {}
static grpc_call *grpc_channel_create_call_internal(
grpc_channel *channel, grpc_completion_queue *cq, grpc_mdelem *path_mdelem,
grpc_mdelem *authority_mdelem, gpr_timespec deadline) {
grpc_call *call;
grpc_call_op op;
grpc_mdelem *send_metadata[2];
if (!channel->is_client) {
gpr_log(GPR_ERROR, "Cannot create a call on the server.");
return NULL;
}
GPR_ASSERT(channel->is_client);
call = grpc_call_create(channel, cq, NULL);
send_metadata[0] = path_mdelem;
send_metadata[1] = authority_mdelem;
/* Add :path and :authority headers. */
op.type = GRPC_SEND_METADATA;
op.dir = GRPC_CALL_DOWN;
op.flags = 0;
op.data.metadata = path_mdelem;
op.done_cb = do_nothing;
op.user_data = NULL;
grpc_call_execute_op(call, &op);
op.data.metadata = authority_mdelem;
grpc_call_execute_op(call, &op);
if (0 != gpr_time_cmp(deadline, gpr_inf_future)) {
op.type = GRPC_SEND_DEADLINE;
op.dir = GRPC_CALL_DOWN;
op.flags = 0;
op.data.deadline = deadline;
op.done_cb = do_nothing;
op.user_data = NULL;
grpc_call_execute_op(call, &op);
}
return grpc_call_create(channel, cq, NULL, send_metadata,
GPR_ARRAY_SIZE(send_metadata), deadline);
}
return call;
grpc_call *grpc_channel_create_call_old(grpc_channel *channel,
const char *method, const char *host,
gpr_timespec absolute_deadline) {
return grpc_channel_create_call(channel, NULL, method, host,
absolute_deadline);
}
grpc_call *grpc_channel_create_call(grpc_channel *channel,
@ -146,13 +127,6 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
deadline);
}
grpc_call *grpc_channel_create_call_old(grpc_channel *channel,
const char *method, const char *host,
gpr_timespec absolute_deadline) {
return grpc_channel_create_call(channel, NULL, method, host,
absolute_deadline);
}
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
const char *host) {
registered_call *rc = gpr_malloc(sizeof(registered_call));

@ -39,28 +39,17 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
typedef struct {
void *unused;
} call_data;
typedef struct { void *unused; } call_data;
typedef struct {
void *unused;
} channel_data;
typedef struct { void *unused; } channel_data;
static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
grpc_call_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
switch (op->type) {
case GRPC_SEND_DEADLINE:
grpc_call_set_deadline(elem, op->data.deadline);
grpc_call_next_op(elem, op);
break;
case GRPC_RECV_METADATA:
grpc_call_recv_metadata(elem, op->data.metadata);
break;
case GRPC_RECV_DEADLINE:
gpr_log(GPR_ERROR, "Deadline received by client (ignored)");
grpc_call_recv_metadata(elem, &op->data.metadata);
break;
case GRPC_RECV_MESSAGE:
grpc_call_recv_message(elem, op->data.message);
@ -72,8 +61,9 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
case GRPC_RECV_FINISH:
grpc_call_stream_closed(elem);
break;
case GRPC_RECV_END_OF_INITIAL_METADATA:
grpc_call_initial_metadata_complete(elem);
case GRPC_RECV_SYNTHETIC_STATUS:
grpc_call_recv_synthetic_status(elem, op->data.synthetic_status.status,
op->data.synthetic_status.message);
break;
default:
GPR_ASSERT(op->dir == GRPC_CALL_DOWN);
@ -114,6 +104,6 @@ static void init_channel_elem(grpc_channel_element *elem,
static void destroy_channel_elem(grpc_channel_element *elem) {}
const grpc_channel_filter grpc_client_surface_filter = {
call_op, channel_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "client", };
call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem, "client",
};

@ -42,28 +42,20 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
typedef struct {
void *unused;
} call_data;
typedef struct { void *unused; } call_data;
typedef struct {
grpc_mdelem *status;
grpc_mdelem *message;
} channel_data;
typedef struct { void *unused; } channel_data;
static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
grpc_call_op *op) {
channel_data *channeld = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
switch (op->type) {
case GRPC_SEND_START:
grpc_call_recv_metadata(elem, grpc_mdelem_ref(channeld->status));
grpc_call_recv_metadata(elem, grpc_mdelem_ref(channeld->message));
grpc_call_stream_closed(elem);
break;
case GRPC_SEND_METADATA:
grpc_mdelem_unref(op->data.metadata);
grpc_metadata_batch_destroy(&op->data.metadata);
grpc_call_recv_synthetic_status(elem, GRPC_STATUS_UNKNOWN,
"Rpc sent on a lame channel.");
grpc_call_stream_closed(elem);
break;
default:
break;
@ -94,29 +86,17 @@ static void destroy_call_elem(grpc_call_element *elem) {}
static void init_channel_elem(grpc_channel_element *elem,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
channel_data *channeld = elem->channel_data;
char status[12];
GPR_ASSERT(is_first);
GPR_ASSERT(is_last);
channeld->message = grpc_mdelem_from_strings(mdctx, "grpc-message",
"Rpc sent on a lame channel.");
gpr_ltoa(GRPC_STATUS_UNKNOWN, status);
channeld->status = grpc_mdelem_from_strings(mdctx, "grpc-status", status);
}
static void destroy_channel_elem(grpc_channel_element *elem) {
channel_data *channeld = elem->channel_data;
grpc_mdelem_unref(channeld->message);
grpc_mdelem_unref(channeld->status);
}
static void destroy_channel_elem(grpc_channel_element *elem) {}
static const grpc_channel_filter lame_filter = {
call_op, channel_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "lame-client", };
call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
"lame-client",
};
grpc_channel *grpc_lame_client_channel_create(void) {
static const grpc_channel_filter *filters[] = {&lame_filter};

@ -411,29 +411,32 @@ static void read_closed(grpc_call_element *elem) {
gpr_mu_unlock(&chand->server->mu);
}
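/* Editorial note: server_filter is the callback handed to
   grpc_metadata_batch_filter. It captures :path and :authority into call data
   and returns NULL so those elements are moved to the batch's garbage list
   (and unreffed later) rather than surfaced to the application. */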
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (md->key == chand->path_key) {
calld->path = grpc_mdstr_ref(md->value);
return NULL;
} else if (md->key == chand->authority_key) {
calld->host = grpc_mdstr_ref(md->value);
return NULL;
}
return md;
}
static void call_op(grpc_call_element *elem, grpc_call_element *from_elemn,
grpc_call_op *op) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_mdelem *md;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
switch (op->type) {
case GRPC_RECV_METADATA:
md = op->data.metadata;
if (md->key == chand->path_key) {
calld->path = grpc_mdstr_ref(md->value);
grpc_mdelem_unref(md);
} else if (md->key == chand->authority_key) {
calld->host = grpc_mdstr_ref(md->value);
grpc_mdelem_unref(md);
} else {
grpc_call_recv_metadata(elem, md);
grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
if (grpc_call_recv_metadata(elem, &op->data.metadata)) {
calld->deadline = op->data.metadata.deadline;
start_new_rpc(elem);
}
break;
case GRPC_RECV_END_OF_INITIAL_METADATA:
start_new_rpc(elem);
grpc_call_initial_metadata_complete(elem);
break;
case GRPC_RECV_MESSAGE:
grpc_call_recv_message(elem, op->data.message);
op->done_cb(op->user_data, GRPC_OP_OK);
@ -444,10 +447,6 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elemn,
case GRPC_RECV_FINISH:
stream_closed(elem);
break;
case GRPC_RECV_DEADLINE:
grpc_call_set_deadline(elem, op->data.deadline);
((call_data *)elem->call_data)->deadline = op->data.deadline;
break;
default:
GPR_ASSERT(op->dir == GRPC_CALL_DOWN);
grpc_call_next_op(elem, op);
@ -464,7 +463,8 @@ static void channel_op(grpc_channel_element *elem,
case GRPC_ACCEPT_CALL:
/* create a call */
grpc_call_create(chand->channel, NULL,
op->data.accept_call.transport_server_data);
op->data.accept_call.transport_server_data, NULL, 0,
gpr_inf_future);
break;
case GRPC_TRANSPORT_CLOSED:
/* if the transport is closed for a server channel, we destroy the

@ -43,7 +43,7 @@
#include "src/core/transport/chttp2/timeout_encoding.h"
#include "src/core/transport/chttp2/varint.h"
#define HASH_FRAGMENT_1(x) ((x) & 255)
#define HASH_FRAGMENT_1(x) ((x)&255)
#define HASH_FRAGMENT_2(x) ((x >> 8) & 255)
#define HASH_FRAGMENT_3(x) ((x >> 16) & 255)
#define HASH_FRAGMENT_4(x) ((x >> 24) & 255)
@ -479,10 +479,9 @@ gpr_uint32 grpc_chttp2_preencode(grpc_stream_op *inops, size_t *inops_count,
/* skip */
curop++;
break;
case GRPC_OP_FLOW_CTL_CB:
case GRPC_OP_DEADLINE:
case GRPC_OP_METADATA:
case GRPC_OP_METADATA_BOUNDARY:
grpc_metadata_batch_assert_ok(&op->data.metadata);
case GRPC_OP_FLOW_CTL_CB:
/* these just get copied as they don't impact the number of flow
controlled bytes */
grpc_sopb_append(outops, op, 1);
@ -529,6 +528,12 @@ exit_loop:
*inops_count -= curop;
memmove(inops, inops + curop, *inops_count * sizeof(grpc_stream_op));
for (curop = 0; curop < *inops_count; curop++) {
if (inops[curop].type == GRPC_OP_METADATA) {
grpc_metadata_batch_assert_ok(&inops[curop].data.metadata);
}
}
return flow_controlled_bytes_taken;
}
@ -543,6 +548,7 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
gpr_uint32 curop = 0;
gpr_uint32 unref_op;
grpc_mdctx *mdctx = compressor->mdctx;
grpc_linked_mdelem *l;
int need_unref = 0;
GPR_ASSERT(stream_id != 0);
@ -566,19 +572,19 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
curop++;
break;
case GRPC_OP_METADATA:
/* Encode a metadata element; store the returned value, representing
/* Encode a metadata batch; store the returned values, representing
a metadata element that needs to be unreffed back into the metadata
slot. THIS MAY NOT BE THE SAME ELEMENT (if a decoder table slot got
updated). After this loop, we'll do a batch unref of elements. */
op->data.metadata = hpack_enc(compressor, op->data.metadata, &st);
need_unref |= op->data.metadata != NULL;
curop++;
break;
case GRPC_OP_DEADLINE:
deadline_enc(compressor, op->data.deadline, &st);
curop++;
break;
case GRPC_OP_METADATA_BOUNDARY:
need_unref |= op->data.metadata.garbage.head != NULL;
grpc_metadata_batch_assert_ok(&op->data.metadata);
for (l = op->data.metadata.list.head; l; l = l->next) {
l->md = hpack_enc(compressor, l->md, &st);
need_unref |= l->md != NULL;
}
if (gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future) != 0) {
deadline_enc(compressor, op->data.metadata.deadline, &st);
}
ensure_frame_type(&st, HEADER, 0);
finish_frame(&st, 1, 0);
st.last_was_header = 0; /* force a new header frame */
@ -614,8 +620,12 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
for (unref_op = 0; unref_op < curop; unref_op++) {
op = &ops[unref_op];
if (op->type != GRPC_OP_METADATA) continue;
if (!op->data.metadata) continue;
grpc_mdctx_locked_mdelem_unref(mdctx, op->data.metadata);
for (l = op->data.metadata.list.head; l; l = l->next) {
if (l->md) grpc_mdctx_locked_mdelem_unref(mdctx, l->md);
}
for (l = op->data.metadata.garbage.head; l; l = l->next) {
grpc_mdctx_locked_mdelem_unref(mdctx, l->md);
}
}
grpc_mdctx_unlock(mdctx);
}

@ -68,10 +68,10 @@ int grpc_http_trace = 0;
typedef struct transport transport;
typedef struct stream stream;
#define IF_TRACING(stmt) \
if (!(grpc_http_trace)) \
; \
else \
#define IF_TRACING(stmt) \
if (!(grpc_http_trace)) \
; \
else \
stmt
/* streams are kept in various linked lists depending on what things need to
@ -292,6 +292,12 @@ struct stream {
stream_link links[STREAM_LIST_COUNT];
gpr_uint8 included[STREAM_LIST_COUNT];
/* incoming metadata */
grpc_linked_mdelem *incoming_metadata;
size_t incoming_metadata_count;
size_t incoming_metadata_capacity;
gpr_timespec incoming_deadline;
/* sops from application */
grpc_stream_op_buffer outgoing_sopb;
/* sops that have passed flow control to be written */
@ -577,7 +583,7 @@ static int init_stream(grpc_transport *gt, grpc_stream *gs,
lock(t);
s->id = 0;
} else {
s->id = (gpr_uint32)(gpr_uintptr) server_data;
s->id = (gpr_uint32)(gpr_uintptr)server_data;
t->incoming_stream = s;
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
}
@ -593,6 +599,10 @@ static int init_stream(grpc_transport *gt, grpc_stream *gs,
s->cancelled = 0;
s->allow_window_updates = 0;
s->published_close = 0;
s->incoming_metadata_count = 0;
s->incoming_metadata_capacity = 0;
s->incoming_metadata = NULL;
s->incoming_deadline = gpr_inf_future;
memset(&s->links, 0, sizeof(s->links));
memset(&s->included, 0, sizeof(s->included));
grpc_sopb_init(&s->outgoing_sopb);
@ -698,7 +708,8 @@ static void stream_list_add_tail(transport *t, stream *s, stream_list_id id) {
}
static void stream_list_join(transport *t, stream *s, stream_list_id id) {
if (id == PENDING_CALLBACKS) GPR_ASSERT(t->cb != NULL || t->error_state == ERROR_STATE_NONE);
if (id == PENDING_CALLBACKS)
GPR_ASSERT(t->cb != NULL || t->error_state == ERROR_STATE_NONE);
if (s->included[id]) {
return;
}
@ -760,7 +771,7 @@ static void unlock(transport *t) {
if (t->error_state == ERROR_STATE_SEEN && !t->writing) {
call_closed = 1;
t->calling_back = 1;
t->cb = NULL; /* no more callbacks */
t->cb = NULL; /* no more callbacks */
t->error_state = ERROR_STATE_NOTIFIED;
}
if (t->num_pending_goaways) {
@ -782,8 +793,7 @@ static void unlock(transport *t) {
/* perform some callbacks if necessary */
for (i = 0; i < num_goaways; i++) {
cb->goaway(t->cb_user_data, &t->base, goaways[i].status,
goaways[i].debug);
cb->goaway(t->cb_user_data, &t->base, goaways[i].status, goaways[i].debug);
}
if (perform_callbacks) {
@ -1058,6 +1068,17 @@ static void finalize_cancellations(transport *t) {
}
}
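/* Editorial note: headers decoded by hpack are accumulated per-stream via
   add_incoming_metadata; add_metadata_batch further below packages them into
   a single GRPC_OP_METADATA stream op once a metadata boundary (or stream
   close) is reached. */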
static void add_incoming_metadata(transport *t, stream *s, grpc_mdelem *elem) {
if (s->incoming_metadata_capacity == s->incoming_metadata_count) {
s->incoming_metadata_capacity =
GPR_MAX(8, 2 * s->incoming_metadata_capacity);
s->incoming_metadata =
gpr_realloc(s->incoming_metadata, sizeof(*s->incoming_metadata) *
s->incoming_metadata_capacity);
}
s->incoming_metadata[s->incoming_metadata_count++].md = elem;
}
static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
grpc_status_code local_status,
grpc_chttp2_error_code error_code,
@ -1077,9 +1098,18 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
stream_list_join(t, s, CANCELLED);
gpr_ltoa(local_status, buffer);
grpc_sopb_add_metadata(
&s->parser.incoming_sopb,
add_incoming_metadata(
t, s,
grpc_mdelem_from_strings(t->metadata_context, "grpc-status", buffer));
switch (local_status) {
case GRPC_STATUS_CANCELLED:
add_incoming_metadata(
t, s, grpc_mdelem_from_strings(t->metadata_context,
"grpc-message", "Cancelled"));
break;
default:
break;
}
stream_list_join(t, s, PENDING_CALLBACKS);
}
@ -1255,11 +1285,10 @@ static void on_header(void *tp, grpc_mdelem *md) {
}
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
grpc_sopb_add_deadline(&s->parser.incoming_sopb,
gpr_time_add(gpr_now(), *cached_timeout));
s->incoming_deadline = gpr_time_add(gpr_now(), *cached_timeout);
grpc_mdelem_unref(md);
} else {
grpc_sopb_add_metadata(&s->parser.incoming_sopb, md);
add_incoming_metadata(t, s, md);
}
}
@ -1304,7 +1333,7 @@ static int init_header_frame_parser(transport *t, int is_continuation) {
t->incoming_stream = NULL;
/* if stream is accepted, we set incoming_stream in init_stream */
t->cb->accept_stream(t->cb_user_data, &t->base,
(void *)(gpr_uintptr) t->incoming_stream_id);
(void *)(gpr_uintptr)t->incoming_stream_id);
s = t->incoming_stream;
if (!s) {
gpr_log(GPR_ERROR, "stream not accepted");
@ -1435,6 +1464,35 @@ static int is_window_update_legal(gpr_int64 window_update, gpr_int64 window) {
return window + window_update < MAX_WINDOW;
}
static void free_md(void *p, grpc_op_error result) { gpr_free(p); }
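/* Editorial note: add_metadata_batch turns the accumulated incoming_metadata
   array into a grpc_metadata_batch: the array entries are linked into a
   doubly-linked list in place, the decoded deadline is attached, and free_md
   is queued as a flow-control callback so the array itself is released
   later. */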
static void add_metadata_batch(transport *t, stream *s) {
grpc_metadata_batch b;
size_t i;
b.list.head = &s->incoming_metadata[0];
b.list.tail = &s->incoming_metadata[s->incoming_metadata_count - 1];
b.garbage.head = b.garbage.tail = NULL;
b.deadline = s->incoming_deadline;
for (i = 1; i < s->incoming_metadata_count; i++) {
s->incoming_metadata[i].prev = &s->incoming_metadata[i - 1];
s->incoming_metadata[i - 1].next = &s->incoming_metadata[i];
}
s->incoming_metadata[0].prev = NULL;
s->incoming_metadata[s->incoming_metadata_count - 1].next = NULL;
grpc_sopb_add_metadata(&s->parser.incoming_sopb, b);
grpc_sopb_add_flow_ctl_cb(&s->parser.incoming_sopb, free_md,
s->incoming_metadata);
/* reset */
s->incoming_deadline = gpr_inf_future;
s->incoming_metadata = NULL;
s->incoming_metadata_count = 0;
s->incoming_metadata_capacity = 0;
}
static int parse_frame_slice(transport *t, gpr_slice slice, int is_last) {
grpc_chttp2_parse_state st;
size_t i;
@ -1449,8 +1507,7 @@ static int parse_frame_slice(transport *t, gpr_slice slice, int is_last) {
stream_list_join(t, t->incoming_stream, PENDING_CALLBACKS);
}
if (st.metadata_boundary) {
grpc_sopb_add_metadata_boundary(
&t->incoming_stream->parser.incoming_sopb);
add_metadata_batch(t, t->incoming_stream);
stream_list_join(t, t->incoming_stream, PENDING_CALLBACKS);
}
if (st.ack_settings) {
@ -1580,8 +1637,8 @@ static int process_read(transport *t, gpr_slice slice) {
"Connect string mismatch: expected '%c' (%d) got '%c' (%d) "
"at byte %d",
CLIENT_CONNECT_STRING[t->deframe_state],
(int)(gpr_uint8) CLIENT_CONNECT_STRING[t->deframe_state],
*cur, (int)*cur, t->deframe_state);
(int)(gpr_uint8)CLIENT_CONNECT_STRING[t->deframe_state], *cur,
(int)*cur, t->deframe_state);
drop_connection(t);
return 0;
}
@ -1778,17 +1835,20 @@ static int prepare_callbacks(transport *t) {
int n = 0;
while ((s = stream_list_remove_head(t, PENDING_CALLBACKS))) {
int execute = 1;
grpc_sopb_swap(&s->parser.incoming_sopb, &s->callback_sopb);
s->callback_state = compute_state(s->sent_write_closed, s->read_closed);
if (s->callback_state == GRPC_STREAM_CLOSED) {
remove_from_stream_map(t, s);
if (s->published_close) {
execute = 0;
} else if (s->incoming_metadata_count) {
add_metadata_batch(t, s);
}
s->published_close = 1;
}
grpc_sopb_swap(&s->parser.incoming_sopb, &s->callback_sopb);
if (execute) {
stream_list_add_tail(t, s, EXECUTING_CALLBACKS);
n = 1;
@ -1825,9 +1885,9 @@ static void add_to_pollset(grpc_transport *gt, grpc_pollset *pollset) {
*/
static const grpc_transport_vtable vtable = {
sizeof(stream), init_stream, send_batch, set_allow_window_updates,
add_to_pollset, destroy_stream, abort_stream, goaway,
close_transport, send_ping, destroy_transport};
sizeof(stream), init_stream, send_batch, set_allow_window_updates,
add_to_pollset, destroy_stream, abort_stream, goaway, close_transport,
send_ping, destroy_transport};
void grpc_create_chttp2_transport(grpc_transport_setup_callback setup,
void *arg,

@ -120,7 +120,7 @@ static void unlock(grpc_mdctx *ctx) {
if (ctx->refs == 0) {
/* uncomment if you're having trouble diagnosing an mdelem leak to make
things clearer (slows down destruction a lot, however) */
/* gc_mdtab(ctx); */
gc_mdtab(ctx);
if (ctx->mdtab_count && ctx->mdtab_count == ctx->mdtab_free) {
discard_metadata(ctx);
}

@ -33,11 +33,11 @@
#include "src/core/transport/stream_op.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <string.h>
/* Exponential growth function: Given x, return a larger x.
Currently we grow by 1.5 times upon reallocation. */
#define GROW(x) (3 * (x) / 2)
@ -79,33 +79,46 @@ void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops) {
gpr_slice_unref(ops[i].data.slice);
break;
case GRPC_OP_METADATA:
grpc_mdelem_unref(ops[i].data.metadata);
grpc_metadata_batch_destroy(&ops[i].data.metadata);
break;
case GRPC_OP_FLOW_CTL_CB:
ops[i].data.flow_ctl_cb.cb(ops[i].data.flow_ctl_cb.arg, GRPC_OP_ERROR);
break;
case GRPC_NO_OP:
case GRPC_OP_DEADLINE:
case GRPC_OP_METADATA_BOUNDARY:
case GRPC_OP_BEGIN_MESSAGE:
break;
}
}
}
static void assert_contained_metadata_ok(grpc_stream_op *ops, size_t nops) {
#ifndef NDEBUG
size_t i;
for (i = 0; i < nops; i++) {
if (ops[i].type == GRPC_OP_METADATA) {
grpc_metadata_batch_assert_ok(&ops[i].data.metadata);
}
}
#endif
}
static void expandto(grpc_stream_op_buffer *sopb, size_t new_capacity) {
sopb->capacity = new_capacity;
assert_contained_metadata_ok(sopb->ops, sopb->nops);
if (sopb->ops == sopb->inlined_ops) {
sopb->ops = gpr_malloc(sizeof(grpc_stream_op) * new_capacity);
memcpy(sopb->ops, sopb->inlined_ops, sopb->nops * sizeof(grpc_stream_op));
} else {
sopb->ops = gpr_realloc(sopb->ops, sizeof(grpc_stream_op) * new_capacity);
}
assert_contained_metadata_ok(sopb->ops, sopb->nops);
}
static grpc_stream_op *add(grpc_stream_op_buffer *sopb) {
grpc_stream_op *out;
assert_contained_metadata_ok(sopb->ops, sopb->nops);
if (sopb->nops == sopb->capacity) {
expandto(sopb, GROW(sopb->capacity));
}
@ -116,6 +129,7 @@ static grpc_stream_op *add(grpc_stream_op_buffer *sopb) {
void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb) {
add(sopb)->type = GRPC_NO_OP;
assert_contained_metadata_ok(sopb->ops, sopb->nops);
}
void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
@ -124,30 +138,24 @@ void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
op->type = GRPC_OP_BEGIN_MESSAGE;
op->data.begin_message.length = length;
op->data.begin_message.flags = flags;
assert_contained_metadata_ok(sopb->ops, sopb->nops);
}
void grpc_sopb_add_metadata_boundary(grpc_stream_op_buffer *sopb) {
grpc_stream_op *op = add(sopb);
op->type = GRPC_OP_METADATA_BOUNDARY;
}
void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb, grpc_mdelem *md) {
void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb,
grpc_metadata_batch b) {
grpc_stream_op *op = add(sopb);
grpc_metadata_batch_assert_ok(&b);
op->type = GRPC_OP_METADATA;
op->data.metadata = md;
}
void grpc_sopb_add_deadline(grpc_stream_op_buffer *sopb,
gpr_timespec deadline) {
grpc_stream_op *op = add(sopb);
op->type = GRPC_OP_DEADLINE;
op->data.deadline = deadline;
op->data.metadata = b;
grpc_metadata_batch_assert_ok(&op->data.metadata);
assert_contained_metadata_ok(sopb->ops, sopb->nops);
}
void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice) {
grpc_stream_op *op = add(sopb);
op->type = GRPC_OP_SLICE;
op->data.slice = slice;
assert_contained_metadata_ok(sopb->ops, sopb->nops);
}
void grpc_sopb_add_flow_ctl_cb(grpc_stream_op_buffer *sopb,
@ -157,6 +165,7 @@ void grpc_sopb_add_flow_ctl_cb(grpc_stream_op_buffer *sopb,
op->type = GRPC_OP_FLOW_CTL_CB;
op->data.flow_ctl_cb.cb = cb;
op->data.flow_ctl_cb.arg = arg;
assert_contained_metadata_ok(sopb->ops, sopb->nops);
}
void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
@ -164,10 +173,161 @@ void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
size_t orig_nops = sopb->nops;
size_t new_nops = orig_nops + nops;
assert_contained_metadata_ok(ops, nops);
assert_contained_metadata_ok(sopb->ops, sopb->nops);
if (new_nops > sopb->capacity) {
expandto(sopb, GPR_MAX(GROW(sopb->capacity), new_nops));
}
memcpy(sopb->ops + orig_nops, ops, sizeof(grpc_stream_op) * nops);
sopb->nops = new_nops;
assert_contained_metadata_ok(sopb->ops, sopb->nops);
}
static void assert_valid_list(grpc_mdelem_list *list) {
#ifndef NDEBUG
grpc_linked_mdelem *l;
GPR_ASSERT((list->head == NULL) == (list->tail == NULL));
if (!list->head) return;
GPR_ASSERT(list->head->prev == NULL);
GPR_ASSERT(list->tail->next == NULL);
GPR_ASSERT((list->head == list->tail) == (list->head->next == NULL));
for (l = list->head; l; l = l->next) {
GPR_ASSERT(l->md);
GPR_ASSERT((l->prev == NULL) == (l == list->head));
GPR_ASSERT((l->next == NULL) == (l == list->tail));
if (l->next) GPR_ASSERT(l->next->prev == l);
if (l->prev) GPR_ASSERT(l->prev->next == l);
}
#endif
}
#ifndef NDEBUG
void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd) {
assert_valid_list(&comd->list);
assert_valid_list(&comd->garbage);
}
#endif
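/* Editorial note: a freshly initialized batch is empty with an infinite
   deadline. Destroying a batch unrefs every element still held in both lists;
   the link storage itself belongs to whoever supplied it. */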
void grpc_metadata_batch_init(grpc_metadata_batch *comd) {
comd->list.head = comd->list.tail = comd->garbage.head = comd->garbage.tail =
NULL;
comd->deadline = gpr_inf_future;
}
void grpc_metadata_batch_destroy(grpc_metadata_batch *comd) {
grpc_linked_mdelem *l;
for (l = comd->list.head; l; l = l->next) {
grpc_mdelem_unref(l->md);
}
for (l = comd->garbage.head; l; l = l->next) {
grpc_mdelem_unref(l->md);
}
}
void grpc_metadata_batch_add_head(grpc_metadata_batch *comd,
grpc_linked_mdelem *storage,
grpc_mdelem *elem_to_add) {
GPR_ASSERT(elem_to_add);
storage->md = elem_to_add;
grpc_metadata_batch_link_head(comd, storage);
}
static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
assert_valid_list(list);
GPR_ASSERT(storage->md);
storage->prev = NULL;
storage->next = list->head;
if (list->head != NULL) {
list->head->prev = storage;
} else {
list->tail = storage;
}
list->head = storage;
assert_valid_list(list);
}
void grpc_metadata_batch_link_head(grpc_metadata_batch *comd,
grpc_linked_mdelem *storage) {
link_head(&comd->list, storage);
}
void grpc_metadata_batch_add_tail(grpc_metadata_batch *comd,
grpc_linked_mdelem *storage,
grpc_mdelem *elem_to_add) {
GPR_ASSERT(elem_to_add);
storage->md = elem_to_add;
grpc_metadata_batch_link_tail(comd, storage);
}
static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
assert_valid_list(list);
GPR_ASSERT(storage->md);
storage->prev = list->tail;
storage->next = NULL;
if (list->tail != NULL) {
list->tail->next = storage;
} else {
list->head = storage;
}
list->tail = storage;
assert_valid_list(list);
}
void grpc_metadata_batch_link_tail(grpc_metadata_batch *comd,
grpc_linked_mdelem *storage) {
link_tail(&comd->list, storage);
}
void grpc_metadata_batch_merge(grpc_metadata_batch *target,
grpc_metadata_batch *add) {
grpc_linked_mdelem *l;
grpc_linked_mdelem *next;
for (l = add->list.head; l; l = next) {
next = l->next;
link_tail(&target->list, l);
}
for (l = add->garbage.head; l; l = next) {
next = l->next;
link_tail(&target->garbage, l);
}
}
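/* Editorial note: grpc_metadata_batch_filter applies 'filter' to every
   element in order. A NULL result unlinks the element and parks it on the
   garbage list, keeping its ref so it can be unreffed later (e.g. under the
   transport's mdctx lock); a different non-NULL result replaces the element
   and unrefs the original. */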
void grpc_metadata_batch_filter(grpc_metadata_batch *comd,
grpc_mdelem *(*filter)(void *user_data,
grpc_mdelem *elem),
void *user_data) {
grpc_linked_mdelem *l;
grpc_linked_mdelem *next;
assert_valid_list(&comd->list);
assert_valid_list(&comd->garbage);
for (l = comd->list.head; l; l = next) {
grpc_mdelem *orig = l->md;
grpc_mdelem *filt = filter(user_data, orig);
next = l->next;
if (filt == NULL) {
if (l->prev) {
l->prev->next = l->next;
}
if (l->next) {
l->next->prev = l->prev;
}
if (comd->list.head == l) {
comd->list.head = l->next;
}
if (comd->list.tail == l) {
comd->list.tail = l->prev;
}
assert_valid_list(&comd->list);
link_head(&comd->garbage, l);
} else if (filt != orig) {
grpc_mdelem_unref(orig);
l->md = filt;
}
}
assert_valid_list(&comd->list);
assert_valid_list(&comd->garbage);
}

@ -50,8 +50,6 @@ typedef enum grpc_stream_op_code {
Must be ignored by receivers */
GRPC_NO_OP,
GRPC_OP_METADATA,
GRPC_OP_DEADLINE,
GRPC_OP_METADATA_BOUNDARY,
/* Begin a message/metadata element/status - as defined by
grpc_message_type. */
GRPC_OP_BEGIN_MESSAGE,
@ -76,6 +74,51 @@ typedef struct grpc_flow_ctl_cb {
void *arg;
} grpc_flow_ctl_cb;
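/* Editorial note: one metadata element plus intrusive prev/next pointers.
   The storage is supplied by the caller (often embedded in call/stream data
   or in grpc_metadata.internal_data), so batches can be built without
   per-element allocation. */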
typedef struct grpc_linked_mdelem {
grpc_mdelem *md;
struct grpc_linked_mdelem *next;
struct grpc_linked_mdelem *prev;
} grpc_linked_mdelem;
typedef struct grpc_mdelem_list {
grpc_linked_mdelem *head;
grpc_linked_mdelem *tail;
} grpc_mdelem_list;
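/* Editorial note: a batch of metadata to send or deliver: 'list' holds the
   elements in order, 'garbage' holds elements that were filtered out but
   still need an unref, and 'deadline' is gpr_inf_future when no deadline
   accompanies the batch. */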
typedef struct grpc_metadata_batch {
grpc_mdelem_list list;
grpc_mdelem_list garbage;
gpr_timespec deadline;
} grpc_metadata_batch;
void grpc_metadata_batch_init(grpc_metadata_batch *comd);
void grpc_metadata_batch_destroy(grpc_metadata_batch *comd);
void grpc_metadata_batch_merge(grpc_metadata_batch *target,
grpc_metadata_batch *add);
void grpc_metadata_batch_link_head(grpc_metadata_batch *comd,
grpc_linked_mdelem *storage);
void grpc_metadata_batch_link_tail(grpc_metadata_batch *comd,
grpc_linked_mdelem *storage);
void grpc_metadata_batch_add_head(grpc_metadata_batch *comd,
grpc_linked_mdelem *storage,
grpc_mdelem *elem_to_add);
void grpc_metadata_batch_add_tail(grpc_metadata_batch *comd,
grpc_linked_mdelem *storage,
grpc_mdelem *elem_to_add);
void grpc_metadata_batch_filter(grpc_metadata_batch *comd,
grpc_mdelem *(*filter)(void *user_data,
grpc_mdelem *elem),
void *user_data);
#ifndef NDEBUG
void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd);
#else
#define grpc_metadata_batch_assert_ok(comd) do {} while (0)
#endif
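/* Editorial usage sketch (hypothetical names, not part of this change):
   a filter appends one element using storage it owns, so no allocation is
   needed per element:

     typedef struct { grpc_linked_mdelem user_agent_link; } call_data;

     static void add_user_agent(grpc_metadata_batch *batch, call_data *calld,
                                grpc_mdelem *elem) {
       grpc_metadata_batch_add_tail(batch, &calld->user_agent_link, elem);
     }

   The batch takes over the element reference; the link storage must outlive
   the batch. */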
/* Represents a single operation performed on a stream/transport */
typedef struct grpc_stream_op {
/* the operation to be applied */
@ -84,8 +127,7 @@ typedef struct grpc_stream_op {
associated op-code */
union {
grpc_begin_message begin_message;
grpc_mdelem *metadata;
gpr_timespec deadline;
grpc_metadata_batch metadata;
gpr_slice slice;
grpc_flow_ctl_cb flow_ctl_cb;
} data;
@ -118,9 +160,7 @@ void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb);
/* Append a GRPC_OP_BEGIN to a buffer */
void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
gpr_uint32 flags);
void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb, grpc_mdelem *metadata);
void grpc_sopb_add_deadline(grpc_stream_op_buffer *sopb, gpr_timespec deadline);
void grpc_sopb_add_metadata_boundary(grpc_stream_op_buffer *sopb);
void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb, grpc_metadata_batch metadata);
/* Append a GRPC_SLICE to a buffer - does not ref/unref the slice */
void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice);
/* Append a GRPC_OP_FLOW_CTL_CB to a buffer */

@ -43,7 +43,6 @@ _BYTE_SEQUENCE_SEQUENCE = tuple(
bytes(bytearray((row + column) % 256 for column in range(row)))
for row in range(_STREAM_LENGTH))
class LonelyClientTest(unittest.TestCase):
def testLonelyClient(self):
@ -296,7 +295,6 @@ class EchoTest(unittest.TestCase):
def testManyManyByteEchoes(self):
self._perform_echo_test(_BYTE_SEQUENCE_SEQUENCE)
class CancellationTest(unittest.TestCase):
def setUp(self):
@ -392,7 +390,8 @@ class CancellationTest(unittest.TestCase):
finish_event = self.client_completion_queue.get(_FUTURE)
self.assertEqual(_low.Event.Kind.FINISH, finish_event.kind)
self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), finish_event.status)
self.assertEqual(_low.Status(_low.Code.CANCELLED, 'Cancelled'),
finish_event.status)
server_timeout_none_event = self.server_completion_queue.get(0)
self.assertIsNone(server_timeout_none_event)

@ -150,9 +150,9 @@ CC_asan = clang
CXX_asan = clang++
LD_asan = clang
LDXX_asan = clang++
CPPFLAGS_asan = -O1 -fsanitize=address -fno-omit-frame-pointer
CPPFLAGS_asan = -O0 -fsanitize=address -fno-omit-frame-pointer
LDFLAGS_asan = -fsanitize=address
DEFINES_asan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=5
DEFINES_asan = GRPC_TEST_SLOWDOWN_BUILD_FACTOR=5
VALID_CONFIG_msan = 1
REQUIRE_CUSTOM_LIBRARIES_msan = 1
@ -160,7 +160,7 @@ CC_msan = clang
CXX_msan = clang++-libc++
LD_msan = clang
LDXX_msan = clang++-libc++
CPPFLAGS_msan = -O1 -fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1
CPPFLAGS_msan = -O0 -fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1
OPENSSL_CFLAGS_msan = -DPURIFY
LDFLAGS_msan = -fsanitize=memory -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1
DEFINES_msan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=20

@ -1,201 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/channel/metadata_buffer.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"
#include <string.h>
#include <stdio.h>
/* construct a buffer with some prefix followed by an integer converted to
a string */
static gpr_slice construct_buffer(size_t prefix_length, size_t index) {
gpr_slice buffer = gpr_slice_malloc(prefix_length + GPR_LTOA_MIN_BUFSIZE);
memset(GPR_SLICE_START_PTR(buffer), 'a', prefix_length);
GPR_SLICE_SET_LENGTH(
buffer,
prefix_length +
gpr_ltoa(index, (char *)GPR_SLICE_START_PTR(buffer) + prefix_length));
return buffer;
}
static void do_nothing(void *ignored, grpc_op_error also_ignored) {}
/* we need a fake channel & call stack, which is defined here */
/* a fake channel needs to track some information about the test */
typedef struct {
size_t key_prefix_len;
size_t value_prefix_len;
} channel_data;
static void fail_call_op(grpc_call_element *elem, grpc_call_element *from_elem,
grpc_call_op *op) {
abort();
}
/* verify that the metadata passed on during flush is the same as we expect */
static void expect_call_op(grpc_call_element *elem,
grpc_call_element *from_elem, grpc_call_op *op) {
size_t *n = elem->call_data;
channel_data *cd = elem->channel_data;
gpr_slice key = construct_buffer(cd->key_prefix_len, *n);
gpr_slice value = construct_buffer(cd->value_prefix_len, *n);
GPR_ASSERT(op->type == GRPC_SEND_METADATA);
GPR_ASSERT(op->dir == GRPC_CALL_DOWN);
GPR_ASSERT(op->flags == *n);
GPR_ASSERT(op->done_cb == do_nothing);
GPR_ASSERT(op->user_data == (void *)(gpr_uintptr) * n);
GPR_ASSERT(0 == gpr_slice_cmp(op->data.metadata->key->slice, key));
GPR_ASSERT(0 == gpr_slice_cmp(op->data.metadata->value->slice, value));
++*n;
gpr_slice_unref(key);
gpr_slice_unref(value);
grpc_mdelem_unref(op->data.metadata);
}
static void fail_channel_op(grpc_channel_element *elem,
grpc_channel_element *from_elem,
grpc_channel_op *op) {
abort();
}
static void init_call_elem(grpc_call_element *elem,
const void *transport_server_data) {
*(size_t *)elem->call_data = 0;
}
static void destroy_call_elem(grpc_call_element *elem) {}
static void init_channel_elem(grpc_channel_element *elem,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {}
static void destroy_channel_elem(grpc_channel_element *elem) {}
static const grpc_channel_filter top_filter = {
fail_call_op, fail_channel_op, sizeof(size_t),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "top_filter"};
static const grpc_channel_filter bottom_filter = {
expect_call_op, fail_channel_op, sizeof(size_t),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "bottom_filter"};
static const grpc_channel_filter *filters[2] = {&top_filter, &bottom_filter};
/* run a test with differently sized keys, and values, some number of times. */
static void test_case(size_t key_prefix_len, size_t value_prefix_len,
size_t num_calls) {
size_t i;
size_t got_calls;
grpc_metadata_buffer buffer;
grpc_channel_stack *stk;
grpc_call_stack *call;
grpc_mdctx *mdctx;
gpr_log(GPR_INFO, "Test %d calls, {key,value}_prefix_len = {%d, %d}",
(int)num_calls, (int)key_prefix_len, (int)value_prefix_len);
mdctx = grpc_mdctx_create();
grpc_metadata_buffer_init(&buffer);
/* queue metadata elements */
for (i = 0; i < num_calls; i++) {
grpc_call_op op;
gpr_slice key = construct_buffer(key_prefix_len, i);
gpr_slice value = construct_buffer(value_prefix_len, i);
op.type = GRPC_SEND_METADATA;
op.dir = GRPC_CALL_DOWN;
op.flags = i;
op.data.metadata = grpc_mdelem_from_slices(mdctx, key, value);
op.done_cb = do_nothing;
op.user_data = (void *)(gpr_uintptr) i;
grpc_metadata_buffer_queue(&buffer, &op);
}
/* construct a test channel, call stack */
stk = gpr_malloc(grpc_channel_stack_size(filters, 2));
grpc_channel_stack_init(filters, 2, NULL, mdctx, stk);
for (i = 0; i < 2; i++) {
channel_data *cd =
(channel_data *)grpc_channel_stack_element(stk, i)->channel_data;
cd->key_prefix_len = key_prefix_len;
cd->value_prefix_len = value_prefix_len;
}
call = gpr_malloc(stk->call_stack_size);
grpc_call_stack_init(stk, NULL, call);
/* flush out metadata, verifying each element (see expect_call_op) */
grpc_metadata_buffer_flush(&buffer, grpc_call_stack_element(call, 0));
/* verify expect_call_op was called an appropriate number of times */
got_calls = *(size_t *)grpc_call_stack_element(call, 1)->call_data;
GPR_ASSERT(num_calls == got_calls);
/* clean up the things */
grpc_call_stack_destroy(call);
gpr_free(call);
grpc_channel_stack_destroy(stk);
gpr_free(stk);
grpc_metadata_buffer_destroy(&buffer, GRPC_OP_OK);
grpc_mdctx_unref(mdctx);
}
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
test_case(0, 0, 0);
test_case(0, 0, 1);
test_case(0, 0, 2);
test_case(0, 0, 10000);
test_case(10, 10, 1);
test_case(10, 10, 2);
test_case(10, 10, 10000);
test_case(100, 100, 1);
test_case(100, 100, 2);
test_case(100, 100, 10000);
return 0;
}

@ -138,6 +138,7 @@ def main():
'build': 'test',
'language': 'c',
'src': [],
'flaky': 'invoke_large_request' in t,
'deps': [
'end2end_fixture_%s' % f,
'end2end_test_%s' % t,

@ -118,8 +118,8 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config) {
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_invoke_old(c, f.client_cq, tag(2), tag(3), 0));
cq_expect_client_metadata_read(v_client, tag(2), NULL);
cq_expect_finished_with_status(v_client, tag(3), GRPC_STATUS_CANCELLED, NULL,
NULL);
cq_expect_finished_with_status(v_client, tag(3), GRPC_STATUS_CANCELLED,
"Cancelled", NULL);
cq_verify(v_client);
grpc_call_destroy(c);

@ -46,7 +46,7 @@ static grpc_call_error wait_for_deadline(grpc_call *call) {
}
static const cancellation_mode cancellation_modes[] = {
{"cancel", grpc_call_cancel, GRPC_STATUS_CANCELLED, ""},
{"cancel", grpc_call_cancel, GRPC_STATUS_CANCELLED, "Cancelled"},
{"deadline", wait_for_deadline, GRPC_STATUS_DEADLINE_EXCEEDED,
"Deadline Exceeded"},
};

@ -115,14 +115,23 @@ static void test_request_response_with_metadata_and_payload(
grpc_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_metadata meta_c[2] = {
{"key1-bin", "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc", 13},
{"key2-bin", "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d",
14}};
{"key1-bin",
"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc",
13,
{{NULL, NULL, NULL}}},
{"key2-bin",
"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d",
14,
{{NULL, NULL, NULL}}}};
grpc_metadata meta_s[2] = {
{"key3-bin",
"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee", 15},
"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee",
15,
{{NULL, NULL, NULL}}},
{"key4-bin",
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", 16}};
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
16,
{{NULL, NULL, NULL}}}};
grpc_end2end_test_fixture f = begin_test(config, __FUNCTION__, NULL, NULL);
cq_verifier *v_client = cq_verifier_create(f.client_cq);
cq_verifier *v_server = cq_verifier_create(f.server_cq);

@ -116,17 +116,25 @@ static void test_request_response_with_metadata_and_payload(
gpr_timespec deadline = five_seconds_time();
/* staggered lengths to ensure we hit various branches in base64 encode/decode
*/
grpc_metadata meta1 = {
"key1-bin", "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc", 13};
grpc_metadata meta1 = {"key1-bin",
"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc",
13,
{{NULL, NULL, NULL}}};
grpc_metadata meta2 = {
"key2-bin", "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d",
14};
"key2-bin",
"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d",
14,
{{NULL, NULL, NULL}}};
grpc_metadata meta3 = {
"key3-bin",
"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee", 15};
"\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee",
15,
{{NULL, NULL, NULL}}};
grpc_metadata meta4 = {
"key4-bin",
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", 16};
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
16,
{{NULL, NULL, NULL}}};
grpc_end2end_test_fixture f = begin_test(config, __FUNCTION__, NULL, NULL);
cq_verifier *v_client = cq_verifier_create(f.client_cq);
cq_verifier *v_server = cq_verifier_create(f.server_cq);

@ -114,8 +114,10 @@ static void test_request_response_with_metadata_and_payload(
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_metadata meta_c[2] = {{"key1", "val1", 4}, {"key2", "val2", 4}};
grpc_metadata meta_s[2] = {{"key3", "val3", 4}, {"key4", "val4", 4}};
grpc_metadata meta_c[2] = {{"key1", "val1", 4, {{NULL, NULL, NULL}}},
{"key2", "val2", 4, {{NULL, NULL, NULL}}}};
grpc_metadata meta_s[2] = {{"key3", "val3", 4, {{NULL, NULL, NULL}}},
{"key4", "val4", 4, {{NULL, NULL, NULL}}}};
grpc_end2end_test_fixture f = begin_test(config, __FUNCTION__, NULL, NULL);
cq_verifier *v_client = cq_verifier_create(f.client_cq);
cq_verifier *v_server = cq_verifier_create(f.server_cq);

@ -114,10 +114,10 @@ static void test_request_response_with_metadata_and_payload(
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_metadata meta1 = {"key1", "val1", 4};
grpc_metadata meta2 = {"key2", "val2", 4};
grpc_metadata meta3 = {"key3", "val3", 4};
grpc_metadata meta4 = {"key4", "val4", 4};
grpc_metadata meta1 = {"key1", "val1", 4, {{NULL, NULL, NULL}}};
grpc_metadata meta2 = {"key2", "val2", 4, {{NULL, NULL, NULL}}};
grpc_metadata meta3 = {"key3", "val3", 4, {{NULL, NULL, NULL}}};
grpc_metadata meta4 = {"key4", "val4", 4, {{NULL, NULL, NULL}}};
grpc_end2end_test_fixture f = begin_test(config, __FUNCTION__, NULL, NULL);
cq_verifier *v_client = cq_verifier_create(f.client_cq);
cq_verifier *v_server = cq_verifier_create(f.server_cq);

@ -114,12 +114,12 @@ static void test_request_response_with_metadata_and_payload(
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_metadata meta1 = {"key1", "val1", 4};
grpc_metadata meta2 = {"key2", "val2", 4};
grpc_metadata meta3 = {"key3", "val3", 4};
grpc_metadata meta4 = {"key4", "val4", 4};
grpc_metadata meta5 = {"key5", "val5", 4};
grpc_metadata meta6 = {"key6", "val6", 4};
grpc_metadata meta1 = {"key1", "val1", 4, {{NULL, NULL, NULL}}};
grpc_metadata meta2 = {"key2", "val2", 4, {{NULL, NULL, NULL}}};
grpc_metadata meta3 = {"key3", "val3", 4, {{NULL, NULL, NULL}}};
grpc_metadata meta4 = {"key4", "val4", 4, {{NULL, NULL, NULL}}};
grpc_metadata meta5 = {"key5", "val5", 4, {{NULL, NULL, NULL}}};
grpc_metadata meta6 = {"key6", "val6", 4, {{NULL, NULL, NULL}}};
grpc_end2end_test_fixture f = begin_test(config, __FUNCTION__, NULL, NULL);
cq_verifier *v_client = cq_verifier_create(f.client_cq);
cq_verifier *v_server = cq_verifier_create(f.server_cq);

@ -42,7 +42,7 @@ static void *tag(gpr_intptr x) { return (void *)x; }
int main(int argc, char **argv) {
grpc_channel *chan;
grpc_call *call;
grpc_metadata md = {"a", "b", 1};
grpc_metadata md = {"a", "b", 1, {{NULL, NULL, NULL}}};
grpc_completion_queue *cq;
cq_verifier *cqv;
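Every aggregate grpc_metadata initializer in the hunks above gains a trailing {{NULL, NULL, NULL}}, which suggests the public struct now carries a reserved member holding three pointers; writing the braces out keeps the test data free of missing-field-initializer warnings. A minimal sketch under that assumption (the type name grpc_metadata_example and the member name internal_data are stand-ins, not names from the commit):
#include <stddef.h>
typedef struct {
  const char *key;
  const char *value;
  size_t value_length;
  struct { void *obfuscated[3]; } internal_data; /* assumed reserved member */
} grpc_metadata_example;
/* Spelled out, as the updated tests do... */
static grpc_metadata_example md_explicit = {"key1", "val1", 4,
                                            {{NULL, NULL, NULL}}};
/* ...which is equivalent to letting C zero-fill the trailing member, at the
   cost of a -Wmissing-field-initializers warning on some compilers. */
static grpc_metadata_example md_short = {"key1", "val1", 4};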

@ -50,6 +50,10 @@ grpc_chttp2_hpack_compressor g_compressor;
int g_failure = 0;
grpc_stream_op_buffer g_sopb;
void **to_delete = NULL;
int num_to_delete = 0;
int cap_to_delete = 0;
static gpr_slice create_test_slice(size_t length) {
gpr_slice slice = gpr_slice_malloc(length);
size_t i;
@ -130,47 +134,71 @@ static void test_small_data_framing(void) {
verify_sopb(10, 0, 5, "000005 0000 deadbeef 00000000ff");
}
static void add_sopb_header(const char *key, const char *value) {
grpc_sopb_add_metadata(&g_sopb,
grpc_mdelem_from_strings(g_mdctx, key, value));
static void add_sopb_headers(int n, ...) {
int i;
grpc_metadata_batch b;
va_list l;
grpc_linked_mdelem *e = gpr_malloc(sizeof(*e) * n);
grpc_metadata_batch_init(&b);
va_start(l, n);
for (i = 0; i < n; i++) {
char *key = va_arg(l, char *);
char *value = va_arg(l, char *);
if (i) {
e[i - 1].next = &e[i];
e[i].prev = &e[i - 1];
}
e[i].md = grpc_mdelem_from_strings(g_mdctx, key, value);
}
e[0].prev = NULL;
e[n - 1].next = NULL;
va_end(l);
b.list.head = &e[0];
b.list.tail = &e[n - 1];
if (cap_to_delete == num_to_delete) {
cap_to_delete = GPR_MAX(2 * cap_to_delete, 1000);
to_delete = gpr_realloc(to_delete, sizeof(*to_delete) * cap_to_delete);
}
to_delete[num_to_delete++] = e;
grpc_sopb_add_metadata(&g_sopb, b);
}
static void test_basic_headers(void) {
int i;
add_sopb_header("a", "a");
add_sopb_headers(1, "a", "a");
verify_sopb(0, 0, 0, "000005 0104 deadbeef 40 0161 0161");
add_sopb_header("a", "a");
add_sopb_headers(1, "a", "a");
verify_sopb(0, 0, 0, "000001 0104 deadbeef be");
add_sopb_header("a", "a");
add_sopb_headers(1, "a", "a");
verify_sopb(0, 0, 0, "000001 0104 deadbeef be");
add_sopb_header("a", "a");
add_sopb_header("b", "c");
add_sopb_headers(2, "a", "a", "b", "c");
verify_sopb(0, 0, 0, "000006 0104 deadbeef be 40 0162 0163");
add_sopb_header("a", "a");
add_sopb_header("b", "c");
add_sopb_headers(2, "a", "a", "b", "c");
verify_sopb(0, 0, 0, "000002 0104 deadbeef bf be");
add_sopb_header("a", "d");
add_sopb_headers(1, "a", "d");
verify_sopb(0, 0, 0, "000004 0104 deadbeef 7f 00 0164");
/* flush out what's there to make a few values look very popular */
for (i = 0; i < 350; i++) {
add_sopb_header("a", "a");
add_sopb_header("b", "c");
add_sopb_header("a", "d");
add_sopb_headers(3, "a", "a", "b", "c", "a", "d");
verify_sopb(0, 0, 0, "000003 0104 deadbeef c0 bf be");
}
add_sopb_header("a", "a");
add_sopb_header("k", "v");
add_sopb_headers(2, "a", "a", "k", "v");
verify_sopb(0, 0, 0, "000006 0104 deadbeef c0 00 016b 0176");
add_sopb_header("a", "v");
add_sopb_headers(1, "a", "v");
/* this could be 000004 0104 deadbeef 0f 30 0176 also */
verify_sopb(0, 0, 0, "000004 0104 deadbeef 0f 2f 0176");
}
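The strings handed to verify_sopb in this test read as hex dumps of HTTP/2 HEADERS frames followed by their HPACK payload; the one-byte re-encodings such as be and bf fall out of HPACK's indexed-header representation. A short aside under that reading (not part of the diff; the frame layout and index arithmetic follow the HTTP/2 and HPACK specs):
#include <stdio.h>
int main(void) {
  /* "000001 0104 deadbeef be" is a 9-octet frame header (3-byte length
     0x000001, type 0x01 = HEADERS, flags 0x04 = END_HEADERS, stream id
     0xdeadbeef) followed by one HPACK octet. An indexed header field is
     encoded as 0x80 | index; the static table occupies indices 1..61, so the
     most recently inserted dynamic entry is index 62, which is what the
     expectations above imply. */
  int newest_dynamic_entry = 62;
  printf("%02x\n", (unsigned)(0x80 | newest_dynamic_entry));       /* be: "a: a" resent */
  printf("%02x\n", (unsigned)(0x80 | (newest_dynamic_entry + 1))); /* bf: the next-older entry */
  return 0;
}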
@ -190,7 +218,7 @@ static void test_decode_table_overflow(void) {
for (i = 0; i < 114; i++) {
if (i > 0) {
add_sopb_header("aa", "ba");
add_sopb_headers(1, "aa", "ba");
}
encode_int_to_str(i, key);
@ -198,25 +226,27 @@ static void test_decode_table_overflow(void) {
if (i + 61 >= 127) {
gpr_asprintf(&expect,
"000009 0104 deadbeef ff%02x 40 02%02x%02x 02%02x%02x",
"000002 0104 deadbeef ff%02x 000007 0104 deadbeef 40 "
"02%02x%02x 02%02x%02x",
i + 61 - 127, key[0], key[1], value[0], value[1]);
} else if (i > 0) {
gpr_asprintf(&expect,
"000008 0104 deadbeef %02x 40 02%02x%02x 02%02x%02x",
"000001 0104 deadbeef %02x 000007 0104 deadbeef 40 "
"02%02x%02x 02%02x%02x",
0x80 + 61 + i, key[0], key[1], value[0], value[1]);
} else {
gpr_asprintf(&expect, "000007 0104 deadbeef 40 02%02x%02x 02%02x%02x",
key[0], key[1], value[0], value[1]);
}
add_sopb_header(key, value);
add_sopb_headers(1, key, value);
verify_sopb(0, 0, 0, expect);
gpr_free(expect);
}
/* if the above passes, then we must have just knocked this pair out of the
decoder stack, and so we'll be forced to re-encode it */
add_sopb_header("aa", "ba");
add_sopb_headers(1, "aa", "ba");
verify_sopb(0, 0, 0, "000007 0104 deadbeef 40 026161 026261");
}
@ -260,7 +290,7 @@ static void test_decode_random_headers_inner(int max_len) {
randstr(st.key, max_len);
randstr(st.value, max_len);
add_sopb_header(st.key, st.value);
add_sopb_headers(1, st.key, st.value);
gpr_slice_buffer_init(&output);
GPR_ASSERT(0 ==
grpc_chttp2_preencode(g_sopb.ops, &g_sopb.nops, 0, &encops));
@ -314,6 +344,7 @@ static void run_test(void (*test)(), const char *name) {
}
int main(int argc, char **argv) {
int i;
grpc_test_init(argc, argv);
TEST(test_small_data_framing);
TEST(test_basic_headers);
@ -329,5 +360,8 @@ int main(int argc, char **argv) {
TEST(test_decode_random_headers_55);
TEST(test_decode_random_headers_89);
TEST(test_decode_random_headers_144);
for (i = 0; i < num_to_delete; i++) {
gpr_free(to_delete[i]);
}
return g_failure;
}

@ -1,119 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "transport_end2end_tests.h"
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <sys/types.h>
#include "test/core/util/test_config.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/endpoint_pair.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/log.h>
/* Wrapper to create an http2 transport pair */
static int create_http2_transport_for_test(
grpc_transport_setup_callback client_setup_transport,
void *client_setup_arg,
grpc_transport_setup_callback server_setup_transport,
void *server_setup_arg, size_t slice_size, grpc_mdctx *mdctx) {
grpc_endpoint_pair p = grpc_iomgr_create_endpoint_pair(1);
grpc_create_chttp2_transport(client_setup_transport, client_setup_arg, NULL,
p.client, NULL, 0, mdctx, 1);
grpc_create_chttp2_transport(server_setup_transport, server_setup_arg, NULL,
p.server, NULL, 0, mdctx, 0);
return 0;
}
static int create_http2_transport_for_test_small_slices(
grpc_transport_setup_callback client_setup_transport,
void *client_setup_arg,
grpc_transport_setup_callback server_setup_transport,
void *server_setup_arg, grpc_mdctx *mdctx) {
return create_http2_transport_for_test(
client_setup_transport, client_setup_arg, server_setup_transport,
server_setup_arg, 1, mdctx);
}
static int create_http2_transport_for_test_medium_slices(
grpc_transport_setup_callback client_setup_transport,
void *client_setup_arg,
grpc_transport_setup_callback server_setup_transport,
void *server_setup_arg, grpc_mdctx *mdctx) {
return create_http2_transport_for_test(
client_setup_transport, client_setup_arg, server_setup_transport,
server_setup_arg, 8192, mdctx);
}
static int create_http2_transport_for_test_large_slices(
grpc_transport_setup_callback client_setup_transport,
void *client_setup_arg,
grpc_transport_setup_callback server_setup_transport,
void *server_setup_arg, grpc_mdctx *mdctx) {
return create_http2_transport_for_test(
client_setup_transport, client_setup_arg, server_setup_transport,
server_setup_arg, 1024 * 1024, mdctx);
}
/* All configurations to be tested */
grpc_transport_test_config fixture_configs[] = {
{"chttp2_on_socketpair/small",
create_http2_transport_for_test_small_slices},
{"chttp2_on_socketpair/medium",
create_http2_transport_for_test_medium_slices},
{"chttp2_on_socketpair/large",
create_http2_transport_for_test_large_slices},
};
/* Driver function: run the test suite for each test configuration */
int main(int argc, char **argv) {
size_t i;
grpc_test_init(argc, argv);
grpc_iomgr_init();
for (i = 0; i < sizeof(fixture_configs) / sizeof(*fixture_configs); i++) {
grpc_transport_end2end_tests(&fixture_configs[i]);
}
grpc_iomgr_shutdown();
gpr_log(GPR_INFO, "exiting");
return 0;
}

@ -1,931 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/transport/transport_end2end_tests.h"
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include "src/core/support/string.h"
#include "src/core/transport/transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "test/core/util/test_config.h"
static grpc_mdctx *g_metadata_context;
static gpr_once g_pending_ops_init = GPR_ONCE_INIT;
static gpr_mu g_mu;
static gpr_cv g_cv;
static int g_pending_ops;
/* Defines a suite of tests that all GRPC transports should be able to pass */
/******************************************************************************
* Testing framework
*/
/* Forward declarations */
typedef struct test_fixture test_fixture;
/* User data passed to the transport and handed to each callback */
typedef struct test_user_data { test_fixture *fixture; } test_user_data;
/* A message we expect to receive (forms a singly linked list with next) */
typedef struct expected_message {
/* The next message expected */
struct expected_message *next;
/* The (owned) data that we expect to receive */
gpr_uint8 *data;
/* The length of the expected message */
size_t length;
/* How many bytes of the expected message have we received? */
size_t read_pos;
/* Have we received the GRPC_OP_BEGIN for this message */
int begun;
} expected_message;
/* Metadata we expect to receive */
typedef struct expected_metadata {
struct expected_metadata *next;
struct expected_metadata *prev;
grpc_mdelem *metadata;
} expected_metadata;
/* Tracks a stream for a test. Forms a doubly-linked list with (prev, next) */
typedef struct test_stream {
/* The owning fixture */
test_fixture *fixture;
/* The transport client stream */
grpc_stream *client_stream;
/* The transport server stream */
grpc_stream *server_stream;
/* Linked lists of messages expected on client and server */
expected_message *client_expected_messages;
expected_message *server_expected_messages;
expected_metadata *client_expected_metadata;
expected_metadata *server_expected_metadata;
/* Test streams are linked in the fixture */
struct test_stream *next;
struct test_stream *prev;
} test_stream;
/* A test_fixture tracks all transport state and expectations for a test */
struct test_fixture {
gpr_mu mu;
gpr_cv cv; /* broadcast when expectation state has changed */
/* The transport instances */
grpc_transport *client_transport;
grpc_transport *server_transport;
/* User data for the transport instances - pointers to these are passed
to the transport. */
test_user_data client_ud;
test_user_data server_ud;
/* A pointer to the head of the tracked streams list, or NULL if no streams
are open */
test_stream *streams;
};
static void expect_metadata(test_stream *s, int from_client, const char *key,
const char *value);
/* Convert some number of seconds into a gpr_timespec that many seconds in the
future */
static gpr_timespec deadline_from_seconds(double deadline_seconds) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(deadline_seconds);
}
/* Init a test_user_data instance */
static void init_user_data(test_user_data *ud, test_fixture *f,
grpc_transport_test_config *config, int is_client) {
ud->fixture = f;
}
/* Implements the alloc_recv_buffer transport callback */
static gpr_slice alloc_recv_buffer(void *user_data, grpc_transport *transport,
grpc_stream *stream, size_t size_hint) {
return gpr_slice_malloc(size_hint);
}
static void pending_ops_cleanup(void) {
gpr_mu_destroy(&g_mu);
gpr_cv_destroy(&g_cv);
}
static void pending_ops_init(void) {
gpr_mu_init(&g_mu);
gpr_cv_init(&g_cv);
atexit(pending_ops_cleanup);
}
static void use_pending_ops(void) {
gpr_once_init(&g_pending_ops_init, pending_ops_init);
}
static void add_pending_op(void) {
use_pending_ops();
gpr_mu_lock(&g_mu);
g_pending_ops++;
gpr_mu_unlock(&g_mu);
}
static void end_pending_op(void) {
gpr_mu_lock(&g_mu);
g_pending_ops--;
gpr_cv_broadcast(&g_cv);
gpr_mu_unlock(&g_mu);
}
static void wait_pending_ops(void) {
use_pending_ops();
gpr_mu_lock(&g_mu);
while (g_pending_ops > 0) {
gpr_cv_wait(&g_cv, &g_mu, gpr_inf_future);
}
gpr_mu_unlock(&g_mu);
}
/* Implements the create_stream transport callback */
static void create_stream(void *user_data, grpc_transport *transport,
const void *server_data) {
test_user_data *ud = user_data;
test_fixture *f = ud->fixture;
test_stream *stream;
GPR_ASSERT(ud == &f->server_ud);
GPR_ASSERT(transport == f->server_transport);
gpr_mu_lock(&f->mu);
/* Search streams for the peer to this stream */
if (!f->streams) goto done;
/* found the expecting stream */
stream = f->streams;
stream->server_stream = gpr_malloc(grpc_transport_stream_size(transport));
grpc_transport_init_stream(transport, stream->server_stream, server_data);
done:
/* wakeup begin_stream, and maybe wait_and_verify */
gpr_cv_broadcast(&f->cv);
gpr_mu_unlock(&f->mu);
}
/* Search fixture streams for the test_stream instance holding a given transport
stream */
static test_stream *find_test_stream(test_fixture *f, grpc_stream *stream) {
test_stream *s;
GPR_ASSERT(f->streams);
s = f->streams;
do {
if (s->client_stream == stream || s->server_stream == stream) {
return s;
}
} while (s != f->streams);
GPR_ASSERT(0 && "found");
return NULL;
}
/* Stringify a grpc_stream_state for debugging */
static const char *state_name(grpc_stream_state state) {
switch (state) {
case GRPC_STREAM_OPEN:
return "GRPC_STREAM_OPEN";
case GRPC_STREAM_RECV_CLOSED:
return "GRPC_STREAM_RECV_CLOSED";
case GRPC_STREAM_SEND_CLOSED:
return "GRPC_STREAM_SEND_CLOSED";
case GRPC_STREAM_CLOSED:
return "GRPC_STREAM_CLOSED";
}
GPR_ASSERT(0 && "reachable");
return NULL;
}
typedef struct {
grpc_transport *transport;
grpc_stream *stream;
} destroy_stream_args;
static void destroy_stream(void *p) {
destroy_stream_args *a = p;
grpc_transport_destroy_stream(a->transport, a->stream);
gpr_free(a->stream);
gpr_free(a);
end_pending_op();
}
static void recv_batch(void *user_data, grpc_transport *transport,
grpc_stream *stream, grpc_stream_op *ops,
size_t ops_count, grpc_stream_state final_state) {
test_user_data *ud = user_data;
test_fixture *f = ud->fixture;
test_stream *s;
/* Pointer to the root pointer of either client or server expected messages;
not a simple pointer as we may need to manipulate the list (on receipt
of messages) */
expected_message **expect_root_message;
expected_metadata **expect_root_metadata;
expected_metadata *emd;
size_t i, j;
char *hexstr1, *hexstr2;
int repeats = 0;
gpr_mu_lock(&f->mu);
s = find_test_stream(f, stream);
expect_root_message = s->client_stream == stream
? &s->client_expected_messages
: &s->server_expected_messages;
expect_root_metadata = s->client_stream == stream
? &s->client_expected_metadata
: &s->server_expected_metadata;
/* Debug log */
gpr_log(GPR_DEBUG, "recv_batch: %d ops on %s final_state=%s", ops_count,
s->client_stream == stream ? "client" : "server",
state_name(final_state));
#define CLEAR_REPEATS \
if (repeats) { \
gpr_log(GPR_DEBUG, " + %d more", repeats); \
repeats = 0; \
}
for (i = 0; i < ops_count; i++) {
switch (ops[i].type) {
case GRPC_NO_OP:
CLEAR_REPEATS;
gpr_log(GPR_DEBUG, " [%02d] GRPC_NO_OP", i);
break;
case GRPC_OP_METADATA_BOUNDARY:
CLEAR_REPEATS;
gpr_log(GPR_DEBUG, " [%02d] GRPC_OP_METADATA_BOUNDARY", i);
break;
case GRPC_OP_METADATA:
CLEAR_REPEATS;
hexstr1 =
gpr_hexdump(grpc_mdstr_as_c_string(ops[i].data.metadata->key),
GPR_SLICE_LENGTH(ops[i].data.metadata->key->slice),
GPR_HEXDUMP_PLAINTEXT);
hexstr2 =
gpr_hexdump(grpc_mdstr_as_c_string(ops[i].data.metadata->value),
GPR_SLICE_LENGTH(ops[i].data.metadata->value->slice),
GPR_HEXDUMP_PLAINTEXT);
gpr_log(GPR_DEBUG, " [%02d] GRPC_OP_METADATA key=%s value=%s", i,
hexstr1, hexstr2);
gpr_free(hexstr1);
gpr_free(hexstr2);
break;
case GRPC_OP_BEGIN_MESSAGE:
CLEAR_REPEATS;
gpr_log(GPR_DEBUG, " [%02d] GRPC_OP_BEGIN_MESSAGE len=%d", i,
ops[i].data.begin_message.length);
break;
case GRPC_OP_DEADLINE:
CLEAR_REPEATS;
gpr_log(GPR_DEBUG, " [%02d] GRPC_OP_DEADLINE value=%d.%09d", i,
ops[i].data.deadline.tv_sec, ops[i].data.deadline.tv_nsec);
break;
case GRPC_OP_SLICE:
if (i && ops[i - 1].type == GRPC_OP_SLICE &&
GPR_SLICE_LENGTH(ops[i - 1].data.slice) ==
GPR_SLICE_LENGTH(ops[i].data.slice)) {
repeats++;
} else {
CLEAR_REPEATS;
gpr_log(GPR_DEBUG, " [%02d] GRPC_OP_SLICE len=%d", i,
GPR_SLICE_LENGTH(ops[i].data.slice));
}
break;
case GRPC_OP_FLOW_CTL_CB:
CLEAR_REPEATS;
gpr_log(GPR_DEBUG, " [%02d] GRPC_OP_FLOW_CTL_CB", i);
break;
}
}
CLEAR_REPEATS;
/* Iterate over operations, and verify them against expectations */
for (i = 0; i < ops_count; i++) {
switch (ops[i].type) {
case GRPC_NO_OP:
break;
case GRPC_OP_METADATA_BOUNDARY:
break;
case GRPC_OP_METADATA:
GPR_ASSERT(*expect_root_metadata && "must be expecting metadata");
emd = *expect_root_metadata;
if (emd == NULL) {
gpr_log(GPR_ERROR, "metadata not found");
abort();
}
do {
if (emd->metadata == ops[i].data.metadata) {
if (emd == *expect_root_metadata) {
if (emd->next == emd) {
*expect_root_metadata = NULL;
} else {
*expect_root_metadata = emd->next;
}
}
emd->next->prev = emd->prev;
emd->prev->next = emd->next;
grpc_mdelem_unref(emd->metadata);
grpc_mdelem_unref(ops[i].data.metadata);
gpr_free(emd);
emd = NULL;
break;
}
emd = emd->next;
} while (emd != *expect_root_metadata);
if (emd) {
gpr_log(GPR_ERROR, "metadata not found");
abort();
}
break;
case GRPC_OP_BEGIN_MESSAGE:
GPR_ASSERT(*expect_root_message && "must be expecting a message");
GPR_ASSERT((*expect_root_message)->read_pos == 0 &&
"must be at the start of a message");
GPR_ASSERT((*expect_root_message)->begun == 0 &&
"can only BEGIN a message once");
GPR_ASSERT((*expect_root_message)->length ==
ops[i].data.begin_message.length &&
"message lengths must match");
(*expect_root_message)->begun = 1;
break;
case GRPC_OP_SLICE:
GPR_ASSERT(*expect_root_message && "must be expecting a message");
GPR_ASSERT((*expect_root_message)->begun == 1 &&
"must have begun a message");
GPR_ASSERT((*expect_root_message)->read_pos +
GPR_SLICE_LENGTH(ops[i].data.slice) <=
(*expect_root_message)->length &&
"must not send more data than expected");
for (j = 0; j < GPR_SLICE_LENGTH(ops[i].data.slice); j++) {
GPR_ASSERT((*expect_root_message)
->data[(*expect_root_message)->read_pos + j] ==
GPR_SLICE_START_PTR(ops[i].data.slice)[j] &&
"must send the correct message");
}
(*expect_root_message)->read_pos += GPR_SLICE_LENGTH(ops[i].data.slice);
if ((*expect_root_message)->read_pos ==
(*expect_root_message)->length) {
expected_message *great_success = *expect_root_message;
*expect_root_message = great_success->next;
gpr_free(great_success->data);
gpr_free(great_success);
}
gpr_slice_unref(ops[i].data.slice);
break;
case GRPC_OP_FLOW_CTL_CB:
GPR_ASSERT(0 && "allowed");
break;
case GRPC_OP_DEADLINE:
GPR_ASSERT(0 && "implemented");
break;
}
}
/* If the stream has become fully closed then we must destroy the transport
part of the stream */
if (final_state == GRPC_STREAM_CLOSED) {
destroy_stream_args *dsa = gpr_malloc(sizeof(destroy_stream_args));
gpr_thd_id id;
dsa->transport = transport;
dsa->stream = stream;
/* start a thread after incrementing a pending op counter (so we can wait
at test completion) */
add_pending_op();
gpr_thd_new(&id, destroy_stream, dsa, NULL);
if (stream == s->client_stream) {
GPR_ASSERT(s->client_expected_messages == NULL &&
"must receive all expected messages");
s->client_stream = NULL;
} else {
GPR_ASSERT(s->server_expected_messages == NULL &&
"must receive all expected messages");
s->server_stream = NULL;
}
/* And if both the client and the server report fully closed, we can
unlink the stream object entirely */
if (s->client_stream == NULL && s->server_stream == NULL) {
s->next->prev = s->prev;
s->prev->next = s->next;
if (s == f->streams) {
if (s->next == f->streams) {
f->streams = NULL;
} else {
f->streams = s->next;
}
}
}
}
/* wakeup wait_and_verify */
gpr_cv_broadcast(&f->cv);
gpr_mu_unlock(&f->mu);
}
static void close_transport(void *user_data, grpc_transport *transport) {}
static void recv_goaway(void *user_data, grpc_transport *transport,
grpc_status_code status, gpr_slice debug) {
gpr_slice_unref(debug);
}
static grpc_transport_callbacks transport_callbacks = {
alloc_recv_buffer, create_stream, recv_batch, recv_goaway, close_transport};
/* Helper for tests to create a stream.
Arguments:
s - uninitialized test_stream struct to begin
f - test fixture to associate this stream with
method, host, deadline_seconds - header fields for the stream */
static void begin_stream(test_stream *s, test_fixture *f, const char *method,
const char *host, double deadline_seconds) {
/* Deadline to initiate the stream (prevents the tests from hanging
forever) */
gpr_timespec deadline = deadline_from_seconds(10.0);
grpc_stream_op_buffer sopb;
grpc_sopb_init(&sopb);
gpr_mu_lock(&f->mu);
s->fixture = f;
s->client_stream =
gpr_malloc(grpc_transport_stream_size(f->client_transport));
/* server stream will be set once it's received by the peer transport */
s->server_stream = NULL;
s->client_expected_messages = NULL;
s->server_expected_messages = NULL;
s->client_expected_metadata = NULL;
s->server_expected_metadata = NULL;
if (f->streams) {
s->next = f->streams;
s->prev = s->next->prev;
s->next->prev = s->prev->next = s;
} else {
s->next = s->prev = s;
}
f->streams = s;
gpr_mu_unlock(&f->mu);
GPR_ASSERT(0 == grpc_transport_init_stream(f->client_transport,
s->client_stream, NULL));
#define ADDMD(k, v) \
do { \
grpc_mdelem *md = grpc_mdelem_from_strings(g_metadata_context, (k), (v)); \
grpc_sopb_add_metadata(&sopb, md); \
expect_metadata(s, 1, (k), (v)); \
} while (0)
ADDMD(":path", method);
ADDMD(":authority", host);
ADDMD(":method", "POST");
grpc_transport_send_batch(f->client_transport, s->client_stream, sopb.ops,
sopb.nops, 0);
sopb.nops = 0;
grpc_sopb_destroy(&sopb);
/* wait for the server side stream to be created */
gpr_mu_lock(&f->mu);
while (s->server_stream == NULL) {
GPR_ASSERT(0 == gpr_cv_wait(&f->cv, &f->mu, deadline));
}
gpr_mu_unlock(&f->mu);
}
static grpc_transport_setup_result setup_transport(
test_fixture *f, grpc_transport **set_transport, void *user_data,
grpc_transport *transport) {
grpc_transport_setup_result result;
gpr_mu_lock(&f->mu);
*set_transport = transport;
gpr_cv_broadcast(&f->cv);
gpr_mu_unlock(&f->mu);
result.callbacks = &transport_callbacks;
result.user_data = user_data;
return result;
}
static grpc_transport_setup_result setup_server_transport(
void *arg, grpc_transport *transport, grpc_mdctx *mdctx) {
test_fixture *f = arg;
return setup_transport(f, &f->server_transport, &f->server_ud, transport);
}
static grpc_transport_setup_result setup_client_transport(
void *arg, grpc_transport *transport, grpc_mdctx *mdctx) {
test_fixture *f = arg;
return setup_transport(f, &f->client_transport, &f->client_ud, transport);
}
/* Begin a test
Arguments:
f - uninitialized test_fixture struct
config - test configuration for this test
name - the name of this test */
static void begin_test(test_fixture *f, grpc_transport_test_config *config,
const char *name) {
gpr_timespec timeout = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(100);
gpr_log(GPR_INFO, "BEGIN: %s/%s", name, config->name);
gpr_mu_init(&f->mu);
gpr_cv_init(&f->cv);
f->streams = NULL;
init_user_data(&f->client_ud, f, config, 1);
init_user_data(&f->server_ud, f, config, 0);
f->client_transport = NULL;
f->server_transport = NULL;
GPR_ASSERT(0 ==
config->create_transport(setup_client_transport, f,
setup_server_transport, f,
g_metadata_context));
gpr_mu_lock(&f->mu);
while (!f->client_transport || !f->server_transport) {
GPR_ASSERT(gpr_cv_wait(&f->cv, &f->mu, timeout));
}
gpr_mu_unlock(&f->mu);
}
/* Enumerate expected messages on a stream */
static void enumerate_expected_messages(
test_stream *s, expected_message *root, const char *stream_tag,
void (*cb)(void *user, const char *fmt, ...), void *user) {
expected_message *msg;
for (msg = root; msg; msg = msg->next) {
cb(user,
"Waiting for message to finish: "
"length=%zu read_pos=%zu begun=%d",
msg->length, msg->read_pos, msg->begun);
}
}
/* Walk through everything that is still waiting to happen, and call 'cb' with
userdata 'user' for that expectation. */
static void enumerate_expectations(test_fixture *f,
void (*cb)(void *user, const char *fmt, ...),
void *user) {
test_stream *stream;
if (f->streams) {
stream = f->streams;
do {
cb(user,
"Waiting for request to close: "
"client=%p, server=%p",
stream->client_stream, stream->server_stream);
enumerate_expected_messages(stream, stream->client_expected_messages,
"client", cb, user);
enumerate_expected_messages(stream, stream->server_expected_messages,
"server", cb, user);
stream = stream->next;
} while (stream != f->streams);
}
}
/* Callback for enumerate_expectations, that increments an integer each time
an expectation is seen */
static void increment_expectation_count(void *p, const char *fmt, ...) {
++*(int *)p;
}
/* Returns the count of pending expectations in a fixture. Requires mu taken */
static int count_expectations(test_fixture *f) {
int n = 0;
enumerate_expectations(f, increment_expectation_count, &n);
return n;
}
/* Callback for enumerate_expectations that adds an expectation to the log */
static void dump_expectation(void *p, const char *fmt, ...) {
char *str;
va_list args;
va_start(args, fmt);
gpr_asprintf(&str, fmt, args);
gpr_log(GPR_INFO, "EXPECTED: %s", str);
gpr_free(str);
va_end(args);
}
/* Add all pending expectations to the log */
static void dump_expectations(test_fixture *f) {
enumerate_expectations(f, dump_expectation, NULL);
}
/* Wait until all expectations are completed, or crash */
static void wait_and_verify(test_fixture *f) {
gpr_timespec deadline = deadline_from_seconds(10.0);
gpr_mu_lock(&f->mu);
while (count_expectations(f) > 0) {
gpr_log(GPR_INFO, "waiting for expectations to complete");
if (gpr_cv_wait(&f->cv, &f->mu, deadline)) {
gpr_log(GPR_ERROR, "Timeout waiting for expectation completion");
dump_expectations(f);
gpr_mu_unlock(&f->mu);
abort();
}
}
gpr_mu_unlock(&f->mu);
}
/* Finish a test */
static void end_test(test_fixture *f) {
wait_and_verify(f);
grpc_transport_close(f->client_transport);
grpc_transport_close(f->server_transport);
grpc_transport_destroy(f->client_transport);
grpc_transport_destroy(f->server_transport);
wait_pending_ops();
}
/* Generate a test slice filled with {0,1,2,3,...,255,0,1,2,3,4,...} */
static gpr_slice generate_test_data(size_t length) {
gpr_slice slice = gpr_slice_malloc(length);
size_t i;
for (i = 0; i < length; i++) {
GPR_SLICE_START_PTR(slice)[i] = i;
}
return slice;
}
/* Add an expected message to the end of the list rooted at 'root' */
static void append_expected_message(expected_message **root,
expected_message *message) {
expected_message *end;
if (!*root) {
*root = message;
return;
}
for (end = *root; end->next; end = end->next)
;
end->next = message;
}
/* Add an expected message on stream 's'.
If from_client==1, expect it on the server; otherwise expect it on the client.
Variadic parameters are a NULL-terminated list of pointers to slices that
should be expected as payload */
static void expect_message(test_stream *s, int from_client,
/* gpr_slice* */...) {
va_list args;
gpr_slice *slice;
size_t capacity = 32;
size_t length = 0;
gpr_uint8 *buffer = gpr_malloc(capacity);
expected_message *e;
va_start(args, from_client);
while ((slice = va_arg(args, gpr_slice *))) {
while (GPR_SLICE_LENGTH(*slice) + length > capacity) {
capacity *= 2;
buffer = gpr_realloc(buffer, capacity);
}
memcpy(buffer + length, GPR_SLICE_START_PTR(*slice),
GPR_SLICE_LENGTH(*slice));
length += GPR_SLICE_LENGTH(*slice);
}
va_end(args);
e = gpr_malloc(sizeof(expected_message));
e->data = buffer;
e->length = length;
e->read_pos = 0;
e->begun = 0;
e->next = NULL;
gpr_mu_lock(&s->fixture->mu);
append_expected_message(
from_client ? &s->server_expected_messages : &s->client_expected_messages,
e);
gpr_mu_unlock(&s->fixture->mu);
}
static void expect_metadata(test_stream *s, int from_client, const char *key,
const char *value) {
expected_metadata *e = gpr_malloc(sizeof(expected_metadata));
expected_metadata **root =
from_client ? &s->server_expected_metadata : &s->client_expected_metadata;
e->metadata = grpc_mdelem_from_strings(g_metadata_context, key, value);
gpr_mu_lock(&s->fixture->mu);
if (!*root) {
*root = e;
e->next = e->prev = e;
} else {
e->next = *root;
e->prev = e->next->prev;
e->next->prev = e->prev->next = e;
}
gpr_mu_unlock(&s->fixture->mu);
}
/******************************************************************************
* Actual unit tests
*/
/* Test that we can create, begin, and end a test */
static void test_no_op(grpc_transport_test_config *config) {
test_fixture f;
begin_test(&f, config, __FUNCTION__);
end_test(&f);
}
/* Test that a request can be initiated and terminated normally */
static void test_simple_request(grpc_transport_test_config *config) {
test_fixture f;
test_stream s;
begin_test(&f, config, __FUNCTION__);
begin_stream(&s, &f, "/Test", "foo.google.com", 10);
grpc_transport_send_batch(f.client_transport, s.client_stream, NULL, 0, 1);
grpc_transport_send_batch(f.server_transport, s.server_stream, NULL, 0, 1);
end_test(&f);
}
/* Test that a request can be aborted by the client */
static void test_can_abort_client(grpc_transport_test_config *config) {
test_fixture f;
test_stream s;
begin_test(&f, config, __FUNCTION__);
begin_stream(&s, &f, "/Test", "foo.google.com", 10);
expect_metadata(&s, 0, "grpc-status", "1");
expect_metadata(&s, 1, "grpc-status", "1");
grpc_transport_abort_stream(f.client_transport, s.client_stream,
GRPC_STATUS_CANCELLED);
end_test(&f);
}
/* Test that a request can be aborted by the server */
static void test_can_abort_server(grpc_transport_test_config *config) {
test_fixture f;
test_stream s;
begin_test(&f, config, __FUNCTION__);
begin_stream(&s, &f, "/Test", "foo.google.com", 10);
expect_metadata(&s, 0, "grpc-status", "1");
expect_metadata(&s, 1, "grpc-status", "1");
grpc_transport_abort_stream(f.server_transport, s.server_stream,
GRPC_STATUS_CANCELLED);
end_test(&f);
}
/* Test that a request can be sent with payload */
static void test_request_with_data(grpc_transport_test_config *config,
size_t message_length) {
test_fixture f;
test_stream s;
gpr_slice data = generate_test_data(message_length);
grpc_stream_op_buffer sopb;
grpc_sopb_init(&sopb);
begin_test(&f, config, __FUNCTION__);
gpr_log(GPR_INFO, "message_length = %d", message_length);
begin_stream(&s, &f, "/Test", "foo.google.com", 10);
expect_message(&s, 1, &data, NULL);
grpc_sopb_add_begin_message(&sopb, message_length, 0);
grpc_sopb_add_slice(&sopb, data);
grpc_transport_set_allow_window_updates(f.server_transport, s.server_stream,
1);
grpc_transport_send_batch(f.client_transport, s.client_stream, sopb.ops,
sopb.nops, 1);
sopb.nops = 0;
grpc_transport_send_batch(f.server_transport, s.server_stream, NULL, 0, 1);
end_test(&f);
grpc_sopb_destroy(&sopb);
}
/* Increment an integer pointed to by x - used for verifying flow control */
static void increment_int(void *x, grpc_op_error error) { ++*(int *)x; }
/* Test that flow control callbacks are made at appropriate times */
static void test_request_with_flow_ctl_cb(grpc_transport_test_config *config,
size_t message_length) {
test_fixture f;
test_stream s;
int flow_ctl_called = 0;
gpr_slice data = generate_test_data(message_length);
grpc_stream_op_buffer sopb;
grpc_sopb_init(&sopb);
begin_test(&f, config, __FUNCTION__);
gpr_log(GPR_INFO, "length=%d", message_length);
begin_stream(&s, &f, "/Test", "foo.google.com", 10);
expect_message(&s, 1, &data, NULL);
grpc_sopb_add_begin_message(&sopb, message_length, 0);
grpc_sopb_add_slice(&sopb, data);
grpc_sopb_add_flow_ctl_cb(&sopb, increment_int, &flow_ctl_called);
grpc_transport_set_allow_window_updates(f.server_transport, s.server_stream,
1);
grpc_transport_send_batch(f.client_transport, s.client_stream, sopb.ops,
sopb.nops, 1);
sopb.nops = 0;
grpc_transport_send_batch(f.server_transport, s.server_stream, NULL, 0, 1);
end_test(&f);
GPR_ASSERT(flow_ctl_called == 1);
grpc_sopb_destroy(&sopb);
}
/* Set an event on ping response */
static void ping_cb(void *p) { gpr_event_set(p, (void *)1); }
/* Test that pinging gets a response */
static void test_ping(grpc_transport_test_config *config) {
test_fixture f;
gpr_event ev;
begin_test(&f, config, __FUNCTION__);
gpr_event_init(&ev);
grpc_transport_ping(f.client_transport, ping_cb, &ev);
GPR_ASSERT(gpr_event_wait(&ev, deadline_from_seconds(10)));
end_test(&f);
}
/******************************************************************************
* Test driver
*/
static const size_t interesting_message_lengths[] = {
1, 100, 10000, 100000, 1000000,
};
void grpc_transport_end2end_tests(grpc_transport_test_config *config) {
unsigned i;
g_metadata_context = grpc_mdctx_create();
test_no_op(config);
test_simple_request(config);
test_can_abort_client(config);
test_can_abort_server(config);
test_ping(config);
for (i = 0; i < GPR_ARRAY_SIZE(interesting_message_lengths); i++) {
test_request_with_data(config, interesting_message_lengths[i]);
test_request_with_flow_ctl_cb(config, interesting_message_lengths[i]);
}
grpc_mdctx_unref(g_metadata_context);
gpr_log(GPR_INFO, "tests completed ok");
}

@ -1,68 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_TEST_CORE_TRANSPORT_TRANSPORT_END2END_TESTS_H
#define GRPC_TEST_CORE_TRANSPORT_TRANSPORT_END2END_TESTS_H
#include "src/core/transport/transport.h"
/* Defines a suite of tests that all GRPC transports should be able to pass */
/* A test configuration has a name and a factory method */
typedef struct grpc_transport_test_config {
/* The name of this configuration */
char *name;
/* Create a transport
Returns 0 on success
Arguments:
OUT: client - the created client half of the transport
IN: client_callbacks - callback structure to be used by the client
transport
IN: client_user_data - user data pointer to be passed into each client
callback
OUT: server - the created server half of the transport
IN: server_callbacks - callback structure to be used by the server
transport
IN: server_user_data - user data pointer to be passed into each
server */
int (*create_transport)(grpc_transport_setup_callback client_setup,
void *client_arg,
grpc_transport_setup_callback server_setup,
void *server_arg, grpc_mdctx *mdctx);
} grpc_transport_test_config;
/* Run the test suite on one configuration */
void grpc_transport_end2end_tests(grpc_transport_test_config *config);
#endif /* GRPC_TEST_CORE_TRANSPORT_TRANSPORT_END2END_TESTS_H */

@ -474,7 +474,7 @@ TEST_F(End2endTest, ClientCancelsRpc) {
Status s = stub_->Echo(&context, request, &response);
cancel_thread.join();
EXPECT_EQ(StatusCode::CANCELLED, s.code());
EXPECT_TRUE(s.details().empty());
EXPECT_EQ(s.details(), "Cancelled");
}
// Server cancels rpc after 1ms

@ -145,15 +145,6 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "chttp2_transport_end2end_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -468,15 +459,6 @@
"posix"
]
},
{
"flaky": false,
"language": "c",
"name": "metadata_buffer_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c",
@ -820,7 +802,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_fake_security_invoke_large_request_test",
"platforms": [
@ -1036,7 +1018,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_fake_security_invoke_large_request_legacy_test",
"platforms": [
@ -1270,7 +1252,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_fullstack_invoke_large_request_test",
"platforms": [
@ -1486,7 +1468,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_fullstack_invoke_large_request_legacy_test",
"platforms": [
@ -1720,7 +1702,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_fullstack_uds_invoke_large_request_test",
"platforms": [
@ -1936,7 +1918,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_fullstack_uds_invoke_large_request_legacy_test",
"platforms": [
@ -2170,7 +2152,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_simple_ssl_fullstack_invoke_large_request_test",
"platforms": [
@ -2386,7 +2368,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_simple_ssl_fullstack_invoke_large_request_legacy_test",
"platforms": [
@ -2620,7 +2602,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test",
"platforms": [
@ -2836,7 +2818,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_legacy_test",
"platforms": [
@ -3070,7 +3052,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_socket_pair_invoke_large_request_test",
"platforms": [
@ -3286,7 +3268,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_socket_pair_invoke_large_request_legacy_test",
"platforms": [
@ -3520,7 +3502,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test",
"platforms": [
@ -3736,7 +3718,7 @@
]
},
{
"flaky": false,
"flaky": true,
"language": "c",
"name": "chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_legacy_test",
"platforms": [

@ -53,10 +53,10 @@ grpc_test_util:
$(OUT_DIR):
mkdir $(OUT_DIR)
buildtests: alarm_heap_test.exe alarm_list_test.exe alarm_test.exe alpn_test.exe bin_encoder_test.exe census_hash_table_test.exe census_statistics_multiple_writers_circular_buffer_test.exe census_statistics_multiple_writers_test.exe census_statistics_performance_test.exe census_statistics_quick_test.exe census_statistics_small_log_test.exe census_stats_store_test.exe census_stub_test.exe census_trace_store_test.exe census_window_stats_test.exe chttp2_status_conversion_test.exe chttp2_stream_encoder_test.exe chttp2_stream_map_test.exe chttp2_transport_end2end_test.exe dualstack_socket_test.exe echo_test.exe fd_posix_test.exe fling_stream_test.exe fling_test.exe gpr_cancellable_test.exe gpr_cmdline_test.exe gpr_env_test.exe gpr_file_test.exe gpr_histogram_test.exe gpr_host_port_test.exe gpr_log_test.exe gpr_slice_buffer_test.exe gpr_slice_test.exe gpr_string_test.exe gpr_sync_test.exe gpr_thd_test.exe gpr_time_test.exe gpr_tls_test.exe gpr_useful_test.exe grpc_base64_test.exe grpc_byte_buffer_reader_test.exe grpc_channel_stack_test.exe grpc_completion_queue_test.exe grpc_credentials_test.exe grpc_json_token_test.exe grpc_stream_op_test.exe hpack_parser_test.exe hpack_table_test.exe httpcli_format_request_test.exe httpcli_parser_test.exe httpcli_test.exe json_rewrite_test.exe json_test.exe lame_client_test.exe message_compress_test.exe metadata_buffer_test.exe multi_init_test.exe murmur_hash_test.exe no_server_test.exe poll_kick_posix_test.exe resolve_address_test.exe secure_endpoint_test.exe sockaddr_utils_test.exe tcp_client_posix_test.exe tcp_posix_test.exe tcp_server_posix_test.exe time_averaged_stats_test.exe time_test.exe timeout_encoding_test.exe timers_test.exe transport_metadata_test.exe transport_security_test.exe
buildtests: alarm_heap_test.exe alarm_list_test.exe alarm_test.exe alpn_test.exe bin_encoder_test.exe census_hash_table_test.exe census_statistics_multiple_writers_circular_buffer_test.exe census_statistics_multiple_writers_test.exe census_statistics_performance_test.exe census_statistics_quick_test.exe census_statistics_small_log_test.exe census_stats_store_test.exe census_stub_test.exe census_trace_store_test.exe census_window_stats_test.exe chttp2_status_conversion_test.exe chttp2_stream_encoder_test.exe chttp2_stream_map_test.exe dualstack_socket_test.exe echo_test.exe fd_posix_test.exe fling_stream_test.exe fling_test.exe gpr_cancellable_test.exe gpr_cmdline_test.exe gpr_env_test.exe gpr_file_test.exe gpr_histogram_test.exe gpr_host_port_test.exe gpr_log_test.exe gpr_slice_buffer_test.exe gpr_slice_test.exe gpr_string_test.exe gpr_sync_test.exe gpr_thd_test.exe gpr_time_test.exe gpr_tls_test.exe gpr_useful_test.exe grpc_base64_test.exe grpc_byte_buffer_reader_test.exe grpc_channel_stack_test.exe grpc_completion_queue_test.exe grpc_credentials_test.exe grpc_json_token_test.exe grpc_stream_op_test.exe hpack_parser_test.exe hpack_table_test.exe httpcli_format_request_test.exe httpcli_parser_test.exe httpcli_test.exe json_rewrite_test.exe json_test.exe lame_client_test.exe message_compress_test.exe multi_init_test.exe murmur_hash_test.exe no_server_test.exe poll_kick_posix_test.exe resolve_address_test.exe secure_endpoint_test.exe sockaddr_utils_test.exe tcp_client_posix_test.exe tcp_posix_test.exe tcp_server_posix_test.exe time_averaged_stats_test.exe time_test.exe timeout_encoding_test.exe timers_test.exe transport_metadata_test.exe transport_security_test.exe
echo All tests built.
test: alarm_heap_test alarm_list_test alarm_test alpn_test bin_encoder_test census_hash_table_test census_statistics_multiple_writers_circular_buffer_test census_statistics_multiple_writers_test census_statistics_performance_test census_statistics_quick_test census_statistics_small_log_test census_stats_store_test census_stub_test census_trace_store_test census_window_stats_test chttp2_status_conversion_test chttp2_stream_encoder_test chttp2_stream_map_test chttp2_transport_end2end_test dualstack_socket_test echo_test fd_posix_test fling_stream_test fling_test gpr_cancellable_test gpr_cmdline_test gpr_env_test gpr_file_test gpr_histogram_test gpr_host_port_test gpr_log_test gpr_slice_buffer_test gpr_slice_test gpr_string_test gpr_sync_test gpr_thd_test gpr_time_test gpr_tls_test gpr_useful_test grpc_base64_test grpc_byte_buffer_reader_test grpc_channel_stack_test grpc_completion_queue_test grpc_credentials_test grpc_json_token_test grpc_stream_op_test hpack_parser_test hpack_table_test httpcli_format_request_test httpcli_parser_test httpcli_test json_rewrite_test json_test lame_client_test message_compress_test metadata_buffer_test multi_init_test murmur_hash_test no_server_test poll_kick_posix_test resolve_address_test secure_endpoint_test sockaddr_utils_test tcp_client_posix_test tcp_posix_test tcp_server_posix_test time_averaged_stats_test time_test timeout_encoding_test timers_test transport_metadata_test transport_security_test
test: alarm_heap_test alarm_list_test alarm_test alpn_test bin_encoder_test census_hash_table_test census_statistics_multiple_writers_circular_buffer_test census_statistics_multiple_writers_test census_statistics_performance_test census_statistics_quick_test census_statistics_small_log_test census_stats_store_test census_stub_test census_trace_store_test census_window_stats_test chttp2_status_conversion_test chttp2_stream_encoder_test chttp2_stream_map_test dualstack_socket_test echo_test fd_posix_test fling_stream_test fling_test gpr_cancellable_test gpr_cmdline_test gpr_env_test gpr_file_test gpr_histogram_test gpr_host_port_test gpr_log_test gpr_slice_buffer_test gpr_slice_test gpr_string_test gpr_sync_test gpr_thd_test gpr_time_test gpr_tls_test gpr_useful_test grpc_base64_test grpc_byte_buffer_reader_test grpc_channel_stack_test grpc_completion_queue_test grpc_credentials_test grpc_json_token_test grpc_stream_op_test hpack_parser_test hpack_table_test httpcli_format_request_test httpcli_parser_test httpcli_test json_rewrite_test json_test lame_client_test message_compress_test multi_init_test murmur_hash_test no_server_test poll_kick_posix_test resolve_address_test secure_endpoint_test sockaddr_utils_test tcp_client_posix_test tcp_posix_test tcp_server_posix_test time_averaged_stats_test time_test timeout_encoding_test timers_test transport_metadata_test transport_security_test
echo All tests ran.
test_gpr: gpr_cancellable_test gpr_cmdline_test gpr_env_test gpr_file_test gpr_histogram_test gpr_host_port_test gpr_log_test gpr_slice_buffer_test gpr_slice_test gpr_string_test gpr_sync_test gpr_thd_test gpr_time_test gpr_tls_test gpr_useful_test
@ -206,14 +206,6 @@ chttp2_stream_map_test: chttp2_stream_map_test.exe
echo Running chttp2_stream_map_test
$(OUT_DIR)\chttp2_stream_map_test.exe
chttp2_transport_end2end_test.exe: grpc_test_util
echo Building chttp2_transport_end2end_test
$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ ..\..\test\core\transport\chttp2_transport_end2end_test.c
$(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_transport_end2end_test.exe" Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\chttp2_transport_end2end_test.obj
chttp2_transport_end2end_test: chttp2_transport_end2end_test.exe
echo Running chttp2_transport_end2end_test
$(OUT_DIR)\chttp2_transport_end2end_test.exe
dualstack_socket_test.exe: grpc_test_util
echo Building dualstack_socket_test
$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ ..\..\test\core\end2end\dualstack_socket_test.c
@ -590,14 +582,6 @@ message_compress_test: message_compress_test.exe
echo Running message_compress_test
$(OUT_DIR)\message_compress_test.exe
metadata_buffer_test.exe: grpc_test_util
echo Building metadata_buffer_test
$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ ..\..\test\core\channel\metadata_buffer_test.c
$(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\metadata_buffer_test.exe" Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\metadata_buffer_test.obj
metadata_buffer_test: metadata_buffer_test.exe
echo Running metadata_buffer_test
$(OUT_DIR)\metadata_buffer_test.exe
multi_init_test.exe: grpc_test_util
echo Building multi_init_test
$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ ..\..\test\core\surface\multi_init_test.c

@ -109,7 +109,6 @@
<ClInclude Include="..\..\src\core\channel\http_client_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_server_filter.h" />
<ClInclude Include="..\..\src\core\channel\metadata_buffer.h" />
<ClInclude Include="..\..\src\core\channel\noop_filter.h" />
<ClInclude Include="..\..\src\core\compression\algorithm.h" />
<ClInclude Include="..\..\src\core\compression\message_compress.h" />
@ -252,8 +251,6 @@
</ClCompile>
<ClCompile Include="..\..\src\core\channel\http_server_filter.c">
</ClCompile>
<ClCompile Include="..\..\src\core\channel\metadata_buffer.c">
</ClCompile>
<ClCompile Include="..\..\src\core\channel\noop_filter.c">
</ClCompile>
<ClCompile Include="..\..\src\core\compression\algorithm.c">

@ -94,9 +94,6 @@
<ClCompile Include="..\..\src\core\channel\http_server_filter.c">
<Filter>src\core\channel</Filter>
</ClCompile>
<ClCompile Include="..\..\src\core\channel\metadata_buffer.c">
<Filter>src\core\channel</Filter>
</ClCompile>
<ClCompile Include="..\..\src\core\channel\noop_filter.c">
<Filter>src\core\channel</Filter>
</ClCompile>
@ -452,9 +449,6 @@
<ClInclude Include="..\..\src\core\channel\http_server_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\channel\metadata_buffer.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\channel\noop_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>

@ -88,8 +88,6 @@
</ClCompile>
<ClCompile Include="..\..\test\core\statistics\census_log_tests.c">
</ClCompile>
<ClCompile Include="..\..\test\core\transport\transport_end2end_tests.c">
</ClCompile>
<ClCompile Include="..\..\test\core\util\grpc_profiler.c">
</ClCompile>
<ClCompile Include="..\..\test\core\util\parse_hexstring.c">

@ -93,7 +93,6 @@
<ClInclude Include="..\..\src\core\channel\http_client_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_server_filter.h" />
<ClInclude Include="..\..\src\core\channel\metadata_buffer.h" />
<ClInclude Include="..\..\src\core\channel\noop_filter.h" />
<ClInclude Include="..\..\src\core\compression\algorithm.h" />
<ClInclude Include="..\..\src\core\compression\message_compress.h" />
@ -198,8 +197,6 @@
</ClCompile>
<ClCompile Include="..\..\src\core\channel\http_server_filter.c">
</ClCompile>
<ClCompile Include="..\..\src\core\channel\metadata_buffer.c">
</ClCompile>
<ClCompile Include="..\..\src\core\channel\noop_filter.c">
</ClCompile>
<ClCompile Include="..\..\src\core\compression\algorithm.c">

@ -37,9 +37,6 @@
<ClCompile Include="..\..\src\core\channel\http_server_filter.c">
<Filter>src\core\channel</Filter>
</ClCompile>
<ClCompile Include="..\..\src\core\channel\metadata_buffer.c">
<Filter>src\core\channel</Filter>
</ClCompile>
<ClCompile Include="..\..\src\core\channel\noop_filter.c">
<Filter>src\core\channel</Filter>
</ClCompile>
@ -347,9 +344,6 @@
<ClInclude Include="..\..\src\core\channel\http_server_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\channel\metadata_buffer.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\channel\noop_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>

@ -55,7 +55,7 @@ $(OUT_DIR):
buildtests: buildtests_c buildtests_cxx
buildtests_c: alarm_heap_test.exe alarm_list_test.exe alarm_test.exe alpn_test.exe bin_encoder_test.exe census_hash_table_test.exe census_statistics_multiple_writers_circular_buffer_test.exe census_statistics_multiple_writers_test.exe census_statistics_performance_test.exe census_statistics_quick_test.exe census_statistics_small_log_test.exe census_stub_test.exe census_window_stats_test.exe chttp2_status_conversion_test.exe chttp2_stream_encoder_test.exe chttp2_stream_map_test.exe chttp2_transport_end2end_test.exe echo_client.exe echo_server.exe echo_test.exe fd_posix_test.exe fling_client.exe fling_server.exe fling_stream_test.exe fling_test.exe gpr_cancellable_test.exe gpr_cmdline_test.exe gpr_env_test.exe gpr_file_test.exe gpr_histogram_test.exe gpr_host_port_test.exe gpr_log_test.exe gpr_slice_buffer_test.exe gpr_slice_test.exe gpr_string_test.exe gpr_sync_test.exe gpr_thd_test.exe gpr_time_test.exe gpr_tls_test.exe gpr_useful_test.exe grpc_base64_test.exe grpc_byte_buffer_reader_test.exe grpc_channel_stack_test.exe grpc_completion_queue_test.exe grpc_credentials_test.exe grpc_json_token_test.exe grpc_stream_op_test.exe hpack_parser_test.exe hpack_table_test.exe httpcli_format_request_test.exe httpcli_parser_test.exe httpcli_test.exe json_rewrite.exe json_rewrite_test.exe json_test.exe lame_client_test.exe message_compress_test.exe metadata_buffer_test.exe multi_init_test.exe murmur_hash_test.exe no_server_test.exe poll_kick_posix_test.exe resolve_address_test.exe secure_endpoint_test.exe sockaddr_utils_test.exe tcp_client_posix_test.exe tcp_posix_test.exe tcp_server_posix_test.exe time_averaged_stats_test.exe time_test.exe timeout_encoding_test.exe timers_test.exe transport_metadata_test.exe transport_security_test.exe
buildtests_c: alarm_heap_test.exe alarm_list_test.exe alarm_test.exe alpn_test.exe bin_encoder_test.exe census_hash_table_test.exe census_statistics_multiple_writers_circular_buffer_test.exe census_statistics_multiple_writers_test.exe census_statistics_performance_test.exe census_statistics_quick_test.exe census_statistics_small_log_test.exe census_stub_test.exe census_window_stats_test.exe chttp2_status_conversion_test.exe chttp2_stream_encoder_test.exe chttp2_stream_map_test.exe echo_client.exe echo_server.exe echo_test.exe fd_posix_test.exe fling_client.exe fling_server.exe fling_stream_test.exe fling_test.exe gpr_cancellable_test.exe gpr_cmdline_test.exe gpr_env_test.exe gpr_file_test.exe gpr_histogram_test.exe gpr_host_port_test.exe gpr_log_test.exe gpr_slice_buffer_test.exe gpr_slice_test.exe gpr_string_test.exe gpr_sync_test.exe gpr_thd_test.exe gpr_time_test.exe gpr_tls_test.exe gpr_useful_test.exe grpc_base64_test.exe grpc_byte_buffer_reader_test.exe grpc_channel_stack_test.exe grpc_completion_queue_test.exe grpc_credentials_test.exe grpc_json_token_test.exe grpc_stream_op_test.exe hpack_parser_test.exe hpack_table_test.exe httpcli_format_request_test.exe httpcli_parser_test.exe httpcli_test.exe json_rewrite.exe json_rewrite_test.exe json_test.exe lame_client_test.exe message_compress_test.exe multi_init_test.exe murmur_hash_test.exe no_server_test.exe poll_kick_posix_test.exe resolve_address_test.exe secure_endpoint_test.exe sockaddr_utils_test.exe tcp_client_posix_test.exe tcp_posix_test.exe tcp_server_posix_test.exe time_averaged_stats_test.exe time_test.exe timeout_encoding_test.exe timers_test.exe transport_metadata_test.exe transport_security_test.exe
echo All tests built.
buildtests_cxx:
@ -205,14 +205,6 @@ chttp2_stream_map_test: chttp2_stream_map_test.exe
echo Running chttp2_stream_map_test
$(OUT_DIR)\chttp2_stream_map_test.exe
chttp2_transport_end2end_test.exe: grpc_test_util $(OUT_DIR)
echo Building chttp2_transport_end2end_test
$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ ..\..\test\core\transport\chttp2_transport_end2end_test.c
$(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_transport_end2end_test.exe" Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\chttp2_transport_end2end_test.obj
chttp2_transport_end2end_test: chttp2_transport_end2end_test.exe
echo Running chttp2_transport_end2end_test
$(OUT_DIR)\chttp2_transport_end2end_test.exe
echo_client.exe: grpc_test_util $(OUT_DIR)
echo Building echo_client
$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ ..\..\test\core\echo\client.c
@ -581,14 +573,6 @@ message_compress_test: message_compress_test.exe
echo Running message_compress_test
$(OUT_DIR)\message_compress_test.exe
metadata_buffer_test.exe: grpc_test_util $(OUT_DIR)
echo Building metadata_buffer_test
$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ ..\..\test\core\channel\metadata_buffer_test.c
$(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\metadata_buffer_test.exe" Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\metadata_buffer_test.obj
metadata_buffer_test: metadata_buffer_test.exe
echo Running metadata_buffer_test
$(OUT_DIR)\metadata_buffer_test.exe
multi_init_test.exe: grpc_test_util $(OUT_DIR)
echo Building multi_init_test
$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ ..\..\test\core\surface\multi_init_test.c

@ -111,7 +111,6 @@
<ClInclude Include="..\..\src\core\channel\http_client_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_server_filter.h" />
<ClInclude Include="..\..\src\core\channel\metadata_buffer.h" />
<ClInclude Include="..\..\src\core\channel\noop_filter.h" />
<ClInclude Include="..\..\src\core\compression\algorithm.h" />
<ClInclude Include="..\..\src\core\compression\message_compress.h" />
@ -254,8 +253,6 @@
</ClCompile>
<ClCompile Include="..\..\src\core\channel\http_server_filter.c">
</ClCompile>
<ClCompile Include="..\..\src\core\channel\metadata_buffer.c">
</ClCompile>
<ClCompile Include="..\..\src\core\channel\noop_filter.c">
</ClCompile>
<ClCompile Include="..\..\src\core\compression\algorithm.c">

@ -94,9 +94,6 @@
<ClCompile Include="..\..\src\core\channel\http_server_filter.c">
<Filter>src\core\channel</Filter>
</ClCompile>
<ClCompile Include="..\..\src\core\channel\metadata_buffer.c">
<Filter>src\core\channel</Filter>
</ClCompile>
<ClCompile Include="..\..\src\core\channel\noop_filter.c">
<Filter>src\core\channel</Filter>
</ClCompile>
@ -452,9 +449,6 @@
<ClInclude Include="..\..\src\core\channel\http_server_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\channel\metadata_buffer.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\channel\noop_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>

@ -90,8 +90,6 @@
</ClCompile>
<ClCompile Include="..\..\test\core\statistics\census_log_tests.c">
</ClCompile>
<ClCompile Include="..\..\test\core\transport\transport_end2end_tests.c">
</ClCompile>
<ClCompile Include="..\..\test\core\util\grpc_profiler.c">
</ClCompile>
<ClCompile Include="..\..\test\core\util\parse_hexstring.c">

@ -95,7 +95,6 @@
<ClInclude Include="..\..\src\core\channel\http_client_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_server_filter.h" />
<ClInclude Include="..\..\src\core\channel\metadata_buffer.h" />
<ClInclude Include="..\..\src\core\channel\noop_filter.h" />
<ClInclude Include="..\..\src\core\compression\algorithm.h" />
<ClInclude Include="..\..\src\core\compression\message_compress.h" />
@ -200,8 +199,6 @@
</ClCompile>
<ClCompile Include="..\..\src\core\channel\http_server_filter.c">
</ClCompile>
<ClCompile Include="..\..\src\core\channel\metadata_buffer.c">
</ClCompile>
<ClCompile Include="..\..\src\core\channel\noop_filter.c">
</ClCompile>
<ClCompile Include="..\..\src\core\compression\algorithm.c">

@ -37,9 +37,6 @@
<ClCompile Include="..\..\src\core\channel\http_server_filter.c">
<Filter>src\core\channel</Filter>
</ClCompile>
<ClCompile Include="..\..\src\core\channel\metadata_buffer.c">
<Filter>src\core\channel</Filter>
</ClCompile>
<ClCompile Include="..\..\src\core\channel\noop_filter.c">
<Filter>src\core\channel</Filter>
</ClCompile>
@ -347,9 +344,6 @@
<ClInclude Include="..\..\src\core\channel\http_server_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\channel\metadata_buffer.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\channel\noop_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>
