Merge branch 'merge-write' of github.com:ctiller/grpc into grand-unified-closures

reviewable/pr8008/r2
Craig Tiller 9 years ago
commit ce475062ad
  1. include/grpc++/impl/codegen/client_context.h (4 changes)
  2. include/grpc++/impl/codegen/server_context.h (4 changes)
  3. src/core/ext/client_config/subchannel.c (4 changes)
  4. src/core/ext/transport/chttp2/transport/chttp2_plugin.c (3 changes)
  5. src/core/ext/transport/chttp2/transport/chttp2_transport.c (1966 changes)
  6. src/core/ext/transport/chttp2/transport/frame.h (4 changes)
  7. src/core/ext/transport/chttp2/transport/frame_data.c (51 changes)
  8. src/core/ext/transport/chttp2/transport/frame_data.h (9 changes)
  9. src/core/ext/transport/chttp2/transport/frame_goaway.c (18 changes)
  10. src/core/ext/transport/chttp2/transport/frame_goaway.h (9 changes)
  11. src/core/ext/transport/chttp2/transport/frame_ping.c (13 changes)
  12. src/core/ext/transport/chttp2/transport/frame_ping.h (8 changes)
  13. src/core/ext/transport/chttp2/transport/frame_rst_stream.c (38 changes)
  14. src/core/ext/transport/chttp2/transport/frame_rst_stream.h (9 changes)
  15. src/core/ext/transport/chttp2/transport/frame_settings.c (23 changes)
  16. src/core/ext/transport/chttp2/transport/frame_settings.h (9 changes)
  17. src/core/ext/transport/chttp2/transport/frame_window_update.c (34 changes)
  18. src/core/ext/transport/chttp2/transport/frame_window_update.h (5 changes)
  19. src/core/ext/transport/chttp2/transport/hpack_parser.c (484 changes)
  20. src/core/ext/transport/chttp2/transport/hpack_parser.h (17 changes)
  21. src/core/ext/transport/chttp2/transport/internal.h (634 changes)
  22. src/core/ext/transport/chttp2/transport/parsing.c (857 changes)
  23. src/core/ext/transport/chttp2/transport/stream_lists.c (359 changes)
  24. src/core/ext/transport/chttp2/transport/writing.c (441 changes)
  25. src/core/lib/iomgr/closure.c (6 changes)
  26. src/core/lib/iomgr/closure.h (5 changes)
  27. src/core/lib/iomgr/combiner.c (316 changes)
  28. src/core/lib/iomgr/combiner.h (9 changes)
  29. src/core/lib/iomgr/error.c (2 changes)
  30. src/core/lib/iomgr/exec_ctx.c (69 changes)
  31. src/core/lib/iomgr/exec_ctx.h (14 changes)
  32. src/core/lib/iomgr/tcp_posix.c (4 changes)
  33. src/core/lib/iomgr/workqueue_posix.c (4 changes)
  34. src/core/lib/profiling/basic_timers.c (11 changes)
  35. src/core/lib/profiling/timers.h (7 changes)
  36. src/core/lib/security/context/security_context.c (6 changes)
  37. src/core/lib/security/context/security_context.h (12 changes)
  38. src/core/lib/surface/call.c (106 changes)
  39. src/core/lib/surface/call.h (32 changes)
  40. src/core/lib/surface/channel.c (18 changes)
  41. src/core/lib/surface/completion_queue.c (115 changes)
  42. src/core/lib/surface/completion_queue.h (3 changes)
  43. src/core/lib/surface/init.c (3 changes)
  44. src/core/lib/surface/server.c (24 changes)
  45. src/core/lib/transport/connectivity_state.c (3 changes)
  46. src/core/lib/transport/transport.c (35 changes)
  47. src/core/lib/transport/transport.h (9 changes)
  48. src/core/lib/transport/transport_op_string.c (77 changes)
  49. test/core/bad_client/tests/large_metadata.c (2 changes)
  50. test/core/end2end/fuzzers/client_fuzzer_corpus/0083d5addbeca55271ed7ef93c8016bf7ca76903 (binary)
  51. test/core/end2end/fuzzers/client_fuzzer_corpus/07b0bed3226eefac4a84000ec584e4ce06ebf1bf (binary)
  52. test/core/end2end/fuzzers/client_fuzzer_corpus/07cec5c8d9c856a910c6fb57da2ae954f44beed0 (binary)
  53. test/core/end2end/fuzzers/client_fuzzer_corpus/0c27c9999302b39bf2256a90b0cdb767fb2b6fe3 (binary)
  54. test/core/end2end/fuzzers/client_fuzzer_corpus/0d407f099f8418de3dd94bd2146c858a8c6575ad (binary)
  55. test/core/end2end/fuzzers/client_fuzzer_corpus/0d4d486aa9fd6e9c10cc9ca8967e922cadddb2fe (binary)
  56. test/core/end2end/fuzzers/client_fuzzer_corpus/0d9ba07b57eb0e076b187c4455f662db085e730b (binary)
  57. test/core/end2end/fuzzers/client_fuzzer_corpus/0f6b989cec08ef9da603dc83704d85900bd22f1f (binary)
  58. test/core/end2end/fuzzers/client_fuzzer_corpus/10b25b0726cb6d820165699e5a453691c7a9c343 (binary)
  59. test/core/end2end/fuzzers/client_fuzzer_corpus/1231c6d007d9e43d169122348363e20d9f25ee93 (binary)
  60. test/core/end2end/fuzzers/client_fuzzer_corpus/13a9b61e431c20734c19bb36d85883b6a501284e (binary)
  61. test/core/end2end/fuzzers/client_fuzzer_corpus/1698ec182fad9d973b84615da3a683ecdf2d0b3b (binary)
  62. test/core/end2end/fuzzers/client_fuzzer_corpus/1859e2ee759e20fe195f67615a1576ce2b7d5bbd (binary)
  63. test/core/end2end/fuzzers/client_fuzzer_corpus/1a9017db5ad8a9dc6cfe72305da1683a87a73452 (binary)
  64. test/core/end2end/fuzzers/client_fuzzer_corpus/1bd90335afc9e0a1e6a9296e3cc27c03c1201886 (binary)
  65. test/core/end2end/fuzzers/client_fuzzer_corpus/1be157b0fc79f0e7e1e05dfa3cbbe1ad71528bc2 (binary)
  66. test/core/end2end/fuzzers/client_fuzzer_corpus/2185f411bdb1edc610f16ffc86836ae366193e03 (binary)
  67. test/core/end2end/fuzzers/client_fuzzer_corpus/22661803bd1c7198df4be6e08924ef6a48af9cd4 (binary)
  68. test/core/end2end/fuzzers/client_fuzzer_corpus/2717067bbc0e9bfc1d90d15cddf6154800a25ec6 (binary)
  69. test/core/end2end/fuzzers/client_fuzzer_corpus/2825cfc19c9371f4fe70851283c68d49470d4d55 (binary)
  70. test/core/end2end/fuzzers/client_fuzzer_corpus/29303c16f3afa18c2c0b84e77e587535a705a74c (binary)
  71. test/core/end2end/fuzzers/client_fuzzer_corpus/2b5eb5aac77af905877bd98ec2c4d746b247abb6 (binary)
  72. test/core/end2end/fuzzers/client_fuzzer_corpus/2cc43573f271ecd332551c1fb34ebc8645eaefe8 (binary)
  73. test/core/end2end/fuzzers/client_fuzzer_corpus/2feb41037f5dd34e9f3465a2fbf1a6d355c8ce9d (binary)
  74. test/core/end2end/fuzzers/client_fuzzer_corpus/300998021c7f743ff49d9cc192343ffd43eb47f2 (binary)
  75. test/core/end2end/fuzzers/client_fuzzer_corpus/310b2aff5e2ec78b6004630bed39d49f8d13bb21 (binary)
  76. test/core/end2end/fuzzers/client_fuzzer_corpus/3bb052abecc1b916cc869b9aad29c9dd55a95068 (binary)
  77. test/core/end2end/fuzzers/client_fuzzer_corpus/3c5fa483ebfabe6e684831ce7c413176bc998c33 (binary)
  78. test/core/end2end/fuzzers/client_fuzzer_corpus/3c6444b64ace5cd6c145614ad4412382271a6120 (binary)
  79. test/core/end2end/fuzzers/client_fuzzer_corpus/4045d25f065bb1d70a8b9c3751f7453d4b0625b9 (binary)
  80. test/core/end2end/fuzzers/client_fuzzer_corpus/407cedf992b14edac6e19f7d440ab73c88e72465 (binary)
  81. test/core/end2end/fuzzers/client_fuzzer_corpus/411966ea7d9164fc432eeab55a55248ad808bb01 (binary)
  82. test/core/end2end/fuzzers/client_fuzzer_corpus/415dde26637ed3c0e803111c532a1a9ea9c49092 (binary)
  83. test/core/end2end/fuzzers/client_fuzzer_corpus/4fc34239f220392581520aa8cebc659daa65a7a6 (binary)
  84. test/core/end2end/fuzzers/client_fuzzer_corpus/52939682304314f04897deddfbc9c7afa8ee50a9 (binary)
  85. test/core/end2end/fuzzers/client_fuzzer_corpus/5369926a559827d08bccf264876d592c7cae660d (binary)
  86. test/core/end2end/fuzzers/client_fuzzer_corpus/53ef530f65b0cff2e338a51b469c224f53b628d7 (binary)
  87. test/core/end2end/fuzzers/client_fuzzer_corpus/560c1057487e6b0d2d457748c3ad8434423eb263 (binary)
  88. test/core/end2end/fuzzers/client_fuzzer_corpus/564f203f678fb333c7b1f8f4df79237589ce346d (binary)
  89. test/core/end2end/fuzzers/client_fuzzer_corpus/56b0ac0636c57838f63415082b3ae2ec7a93f017 (binary)
  90. test/core/end2end/fuzzers/client_fuzzer_corpus/58bcbd601894835bb3312d2a0bc56f2e0f65984c (binary)
  91. test/core/end2end/fuzzers/client_fuzzer_corpus/598d346f284bcff26d1de997c4ba5c4794c90b68 (binary)
  92. test/core/end2end/fuzzers/client_fuzzer_corpus/5c14b48da74ab06b3cc20c4fe355e24f7dd7852a (binary)
  93. test/core/end2end/fuzzers/client_fuzzer_corpus/5de72e607205dc17a45df703ec4e9b63c36821ec (binary)
  94. test/core/end2end/fuzzers/client_fuzzer_corpus/5e25cf639ba8ea37543d944f5efa94824c6272ff (binary)
  95. test/core/end2end/fuzzers/client_fuzzer_corpus/5f247d7b6753f7a8798cf952f49f303c532e017c (binary)
  96. test/core/end2end/fuzzers/client_fuzzer_corpus/63a1cb41d219394c9bab947202921506f3574ad0 (binary)
  97. test/core/end2end/fuzzers/client_fuzzer_corpus/650f74738d3961af2d1fe85ad8fc8212ea13cbbf (binary)
  98. test/core/end2end/fuzzers/client_fuzzer_corpus/65dff388749da6a44926b491cdc555f61d708171 (binary)
  99. test/core/end2end/fuzzers/client_fuzzer_corpus/676adbb1e5b3f4f9e3cba51d3d4ef963ba4ea7e3 (binary)
  100. test/core/end2end/fuzzers/client_fuzzer_corpus/67f160446ded73c408f4e5a0665731b642b6edd4 (binary)
Some files were not shown because too many files have changed in this diff.

@@ -307,6 +307,10 @@ class ClientContext {
};
static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
// Should be used for framework-level extensions only.
// Applications never need to call this method.
grpc_call* c_call() { return call_; }
private:
// Disallow copy and assign.
ClientContext(const ClientContext&);

@@ -166,6 +166,10 @@ class ServerContext {
async_notify_when_done_tag_ = tag;
}
// Should be used for framework-level extensions only.
// Applications never need to call this method.
grpc_call* c_call() { return call_; }
private:
friend class ::grpc::testing::InteropServerContextInspector;
friend class ::grpc::ServerInterface;

@@ -219,8 +219,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"SUBCHANNEL: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
old_val + delta, reason);
"SUBCHANNEL: %p %s 0x%08" PRIxPTR " -> 0x%08" PRIxPTR " [%s]", c,
purpose, old_val, old_val + delta, reason);
#endif
return old_val;
}
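
A note on the format-string fix above: gpr_atm is an intptr_t-sized integer, so printing it with a plain %x is wrong on 64-bit builds. A minimal standalone sketch of the portable pattern the new code adopts (plain C, no gRPC dependencies; the variable names are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  intptr_t old_val = 0x1234; /* stand-in for a gpr_atm refcount word */
  intptr_t delta = 2;
  /* PRIxPTR expands to the correct length modifier for pointer-sized
     integers on the target platform, so one format string is safe on
     both 32- and 64-bit builds. */
  printf("REF: 0x%08" PRIxPTR " -> 0x%08" PRIxPTR "\n", (uintptr_t)old_val,
         (uintptr_t)(old_val + delta));
  return 0;
}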

@@ -36,14 +36,11 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/metadata.h"
extern int grpc_http_write_state_trace;
void grpc_chttp2_plugin_init(void) {
grpc_chttp2_base64_encode_and_huffman_compress =
grpc_chttp2_base64_encode_and_huffman_compress_impl;
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
grpc_register_tracer("http_write_state", &grpc_http_write_state_trace);
}
void grpc_chttp2_plugin_shutdown(void) {}
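
For the hunk above: grpc_register_tracer associates a name with an int flag that the GRPC_TRACE environment variable can switch on; the diff registers the new http_write_state tracer next to the existing http and flowctl ones. A hedged standalone mimic of that registration pattern (the registry below is invented; only grpc_register_tracer's shape and the flag name come from the diff):

#include <string.h>

/* Minimal mimic of the tracer registry: each named tracer points at an
   int flag that tracing code checks before logging. */
typedef struct {
  const char *name;
  int *flag;
} tracer;

static tracer g_tracers[16];
static int g_num_tracers = 0;

static void register_tracer(const char *name, int *flag) {
  g_tracers[g_num_tracers].name = name;
  g_tracers[g_num_tracers].flag = flag;
  g_num_tracers++;
}

/* Enable every tracer named in a comma-separated spec, the way the
   GRPC_TRACE environment variable is interpreted. */
static void enable_from_spec(const char *spec) {
  for (int i = 0; i < g_num_tracers; i++) {
    if (strstr(spec, g_tracers[i].name) != NULL) *g_tracers[i].flag = 1;
  }
}

int grpc_http_write_state_trace; /* the flag this diff adds */

int main(void) {
  register_tracer("http_write_state", &grpc_http_write_state_trace);
  enable_from_spec("http_write_state,flowctl");
  return grpc_http_write_state_trace ? 0 : 1;
}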

File diff suppressed because it is too large.

@@ -40,8 +40,8 @@
#include "src/core/lib/iomgr/error.h"
/* defined in internal.h */
typedef struct grpc_chttp2_stream_parsing grpc_chttp2_stream_parsing;
typedef struct grpc_chttp2_transport_parsing grpc_chttp2_transport_parsing;
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
#define GRPC_CHTTP2_FRAME_DATA 0
#define GRPC_CHTTP2_FRAME_HEADER 1

@@ -51,16 +51,11 @@ grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser) {
void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser) {
grpc_byte_stream *bs;
if (parser->parsing_frame) {
if (parser->parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"),
1);
}
while (
(bs = grpc_chttp2_incoming_frame_queue_pop(&parser->incoming_frames))) {
grpc_byte_stream_destroy(exec_ctx, bs);
exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"));
}
GRPC_ERROR_UNREF(parser->error);
}
grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
@@ -145,10 +140,10 @@ void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
stats->data_bytes += write_bytes;
}
grpc_error *grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -158,7 +153,8 @@ grpc_error *grpc_chttp2_data_parser_parse(
char *msg;
if (is_last && p->is_last_frame) {
stream_parsing->received_close = 1;
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
GRPC_ERROR_NONE);
}
if (cur == end) {
@@ -171,7 +167,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
return GRPC_ERROR_REF(p->error);
fh_0:
case GRPC_CHTTP2_DATA_FH_0:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_type = *cur;
switch (p->frame_type) {
case 0:
@@ -184,7 +180,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
gpr_asprintf(&msg, "Bad GRPC frame type 0x%02x", p->frame_type);
p->error = GRPC_ERROR_CREATE(msg);
p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID,
(intptr_t)stream_parsing->id);
(intptr_t)s->id);
gpr_free(msg);
msg = gpr_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
p->error =
@@ -201,7 +197,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_1:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size = ((uint32_t)*cur) << 24;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_2;
@@ -209,7 +205,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_2:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 16;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_3;
@@ -217,7 +213,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_3:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_4;
@@ -225,7 +221,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_4:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur);
p->state = GRPC_CHTTP2_DATA_FRAME;
++cur;
@@ -234,35 +230,32 @@ grpc_error *grpc_chttp2_data_parser_parse(
message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
p->parsing_frame = incoming_byte_stream =
grpc_chttp2_incoming_byte_stream_create(
exec_ctx, transport_parsing, stream_parsing, p->frame_size,
message_flags, &p->incoming_frames);
grpc_chttp2_incoming_byte_stream_create(exec_ctx, t, s, p->frame_size,
message_flags);
/* fallthrough */
case GRPC_CHTTP2_DATA_FRAME:
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
if (cur == end) {
return GRPC_ERROR_NONE;
}
uint32_t remaining = (uint32_t)(end - cur);
if (remaining == p->frame_size) {
stream_parsing->stats.incoming.data_bytes += p->frame_size;
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE, 1);
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
p->state = GRPC_CHTTP2_DATA_FH_0;
return GRPC_ERROR_NONE;
} else if (remaining > p->frame_size) {
stream_parsing->stats.incoming.data_bytes += p->frame_size;
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg),
(size_t)(cur + p->frame_size - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE, 1);
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
cur += p->frame_size;
goto fh_0; /* loop */
@@ -272,7 +265,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
p->frame_size -= remaining;
stream_parsing->stats.incoming.data_bytes += remaining;
s->stats.incoming.data_bytes += remaining;
return GRPC_ERROR_NONE;
}
}
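
The FH_0 through FH_4 states above accumulate the 5-byte gRPC message prefix (one compressed-flag byte, then a big-endian 32-bit length) one byte at a time, which is what lets a prefix that straddles slice boundaries parse correctly. A hedged standalone sketch of that accumulation, with all names invented:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical reduction of the parser's FH_* states: how many prefix
   bytes have been consumed so far. */
typedef struct {
  int bytes_seen;  /* 0..5 */
  uint8_t flags;   /* first prefix byte: bit 0 = compressed */
  uint32_t length; /* big-endian 32-bit message length */
} msg_prefix_parser;

/* Feed one slice; returns bytes consumed, stopping once the 5-byte
   prefix is complete. Mirrors how the real parser resumes mid-prefix
   on the next slice. */
static size_t feed(msg_prefix_parser *p, const uint8_t *buf, size_t len) {
  size_t i = 0;
  while (i < len && p->bytes_seen < 5) {
    uint8_t b = buf[i++];
    if (p->bytes_seen == 0) {
      p->flags = b;
      p->length = 0;
    } else {
      /* bytes 1..4: big-endian length, high byte first */
      p->length = (p->length << 8) | b;
    }
    p->bytes_seen++;
  }
  return i;
}

int main(void) {
  /* Prefix split across two "slices": flag=0, length=0x00010203. */
  const uint8_t slice_a[] = {0x00, 0x00, 0x01};
  const uint8_t slice_b[] = {0x02, 0x03};
  msg_prefix_parser p = {0, 0, 0};
  feed(&p, slice_a, sizeof(slice_a));
  feed(&p, slice_b, sizeof(slice_b));
  printf("compressed=%d length=%u\n", p.flags & 1, p.length); /* 66051 */
  return 0;
}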

@@ -69,7 +69,6 @@ typedef struct {
grpc_error *error;
int is_frame_compressed;
grpc_chttp2_incoming_frame_queue incoming_frames;
grpc_chttp2_incoming_byte_stream *parsing_frame;
} grpc_chttp2_data_parser;
@@ -92,10 +91,10 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
/* handle a slice of a data frame - is_last indicates the last slice of a
frame */
grpc_error *grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
uint32_t write_bytes, int is_eof,

@@ -67,10 +67,11 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_goaway_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -148,12 +149,9 @@ grpc_error *grpc_chttp2_goaway_parser_parse(
p->debug_pos += (uint32_t)(end - cur);
p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
if (is_last) {
transport_parsing->goaway_received = 1;
transport_parsing->goaway_last_stream_index = p->last_stream_id;
gpr_slice_unref(transport_parsing->goaway_text);
transport_parsing->goaway_error = (grpc_status_code)p->error_code;
transport_parsing->goaway_text =
gpr_slice_new(p->debug_data, p->debug_length, gpr_free);
grpc_chttp2_add_incoming_goaway(
exec_ctx, t, (uint32_t)p->error_code,
gpr_slice_new(p->debug_data, p->debug_length, gpr_free));
p->debug_data = NULL;
}
return GRPC_ERROR_NONE;

@@ -65,10 +65,11 @@ void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p);
void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
grpc_error *grpc_chttp2_goaway_parser_begin_frame(
grpc_chttp2_goaway_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_goaway_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
gpr_slice debug_data,

@@ -73,10 +73,10 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_ping_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -91,10 +91,11 @@ grpc_error *grpc_chttp2_ping_parser_parse(
if (p->byte == 8) {
GPR_ASSERT(is_last);
if (p->is_ack) {
grpc_chttp2_ack_ping(exec_ctx, transport_parsing, p->opaque_8bytes);
grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
gpr_slice_buffer_add(&transport_parsing->qbuf,
gpr_slice_buffer_add(&t->qbuf,
grpc_chttp2_ping_create(1, p->opaque_8bytes));
grpc_chttp2_initiate_write(exec_ctx, t, false, "ping response");
}
}

@@ -48,9 +48,9 @@ gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes);
grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_ping_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */

@@ -39,6 +39,8 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
gpr_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
grpc_transport_one_way_stats *stats) {
@@ -83,10 +85,11 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_rst_stream_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -97,19 +100,28 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(
cur++;
p->byte++;
}
stream_parsing->stats.incoming.framing_bytes += (uint64_t)(end - cur);
s->stats.incoming.framing_bytes += (uint64_t)(end - cur);
if (p->byte == 4) {
GPR_ASSERT(is_last);
stream_parsing->received_close = 1;
if (stream_parsing->forced_close_error == GRPC_ERROR_NONE) {
stream_parsing->forced_close_error = grpc_error_set_int(
GRPC_ERROR_CREATE("RST_STREAM"), GRPC_ERROR_INT_HTTP2_ERROR,
(intptr_t)((((uint32_t)p->reason_bytes[0]) << 24) |
(((uint32_t)p->reason_bytes[1]) << 16) |
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]))));
uint32_t reason = (((uint32_t)p->reason_bytes[0]) << 24) |
(((uint32_t)p->reason_bytes[1]) << 16) |
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]));
grpc_error *error = GRPC_ERROR_NONE;
if (reason != GRPC_CHTTP2_NO_ERROR) {
error = grpc_error_set_int(GRPC_ERROR_CREATE("RST_STREAM"),
GRPC_ERROR_INT_HTTP2_ERROR, reason);
grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
(grpc_chttp2_error_code)reason, s->deadline);
char *status_details;
gpr_asprintf(&status_details, "Received RST_STREAM with error code %d",
reason);
gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
gpr_free(status_details);
grpc_chttp2_fake_status(exec_ctx, t, s, status_code, &slice_details);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, error);
}
return GRPC_ERROR_NONE;
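
The rewritten handler above assembles the four reason bytes in network order and, for any code other than NO_ERROR, fakes a terminal status on the stream. A hedged sketch of just that assembly and the detail message the diff builds (grpc_chttp2_http2_error_to_grpc_status and grpc_chttp2_fake_status are the real calls used above; this standalone version only prints):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* The four reason bytes arrive on the wire in network (big-endian)
     order; p->reason_bytes[] in the parser above. */
  uint8_t reason_bytes[4] = {0x00, 0x00, 0x00, 0x08}; /* CANCEL = 8 */
  uint32_t reason = (((uint32_t)reason_bytes[0]) << 24) |
                    (((uint32_t)reason_bytes[1]) << 16) |
                    (((uint32_t)reason_bytes[2]) << 8) |
                    ((uint32_t)reason_bytes[3]);
  if (reason != 0 /* GRPC_CHTTP2_NO_ERROR */) {
    /* The real code maps this through
       grpc_chttp2_http2_error_to_grpc_status() and attaches the detail
       string below via grpc_chttp2_fake_status(). */
    printf("Received RST_STREAM with error code %u\n", reason);
  }
  return 0;
}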

@@ -49,9 +49,10 @@ gpr_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_rst_stream_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */

@@ -143,10 +143,10 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
}
}
grpc_error *grpc_chttp2_settings_parser_parse(
grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
const uint8_t *cur = GPR_SLICE_START_PTR(slice);
const uint8_t *end = GPR_SLICE_END_PTR(slice);
@@ -162,11 +162,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
if (cur == end) {
parser->state = GRPC_CHTTP2_SPS_ID0;
if (is_last) {
transport_parsing->settings_updated = 1;
memcpy(parser->target_settings, parser->incoming_settings,
GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
gpr_slice_buffer_add(&transport_parsing->qbuf,
grpc_chttp2_settings_ack_create());
gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
}
return GRPC_ERROR_NONE;
}
@@ -226,9 +224,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
break;
case GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE:
grpc_chttp2_goaway_append(
transport_parsing->last_incoming_stream_id, sp->error_value,
t->last_incoming_stream_id, sp->error_value,
gpr_slice_from_static_string("HTTP2 settings error"),
&transport_parsing->qbuf);
&t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
grpc_error *err = GRPC_ERROR_CREATE(msg);
@@ -238,18 +236,17 @@ grpc_error *grpc_chttp2_settings_parser_parse(
}
if (parser->id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE &&
parser->incoming_settings[parser->id] != parser->value) {
transport_parsing->initial_window_update =
t->initial_window_update =
(int64_t)parser->value - parser->incoming_settings[parser->id];
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "adding %d for initial_window change",
(int)transport_parsing->initial_window_update);
(int)t->initial_window_update);
}
}
parser->incoming_settings[parser->id] = parser->value;
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "CHTTP2:%s: got setting %d = %d",
transport_parsing->is_client ? "CLI" : "SVR", parser->id,
parser->value);
t->is_client ? "CLI" : "SVR", parser->id, parser->value);
}
} else if (grpc_http_trace) {
gpr_log(GPR_ERROR, "CHTTP2: Ignoring unknown setting %d (value %d)",

@@ -95,9 +95,10 @@ gpr_slice grpc_chttp2_settings_ack_create(void);
grpc_error *grpc_chttp2_settings_parser_begin_frame(
grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
uint32_t *settings);
grpc_error *grpc_chttp2_settings_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */

@@ -80,9 +80,8 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
}
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -94,8 +93,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
p->byte++;
}
if (stream_parsing != NULL) {
stream_parsing->stats.incoming.framing_bytes += (uint32_t)(end - cur);
if (s != NULL) {
s->stats.incoming.framing_bytes += (uint32_t)(end - cur);
}
if (p->byte == 4) {
@@ -109,17 +108,26 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
}
GPR_ASSERT(is_last);
if (transport_parsing->incoming_stream_id != 0) {
if (stream_parsing != NULL) {
GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", transport_parsing,
stream_parsing, outgoing_window,
if (t->incoming_stream_id != 0) {
if (s != NULL) {
bool was_zero = s->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", t, s, outgoing_window,
received_update);
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
bool is_zero = s->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"stream.read_flow_control");
}
}
} else {
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", transport_parsing,
outgoing_window, received_update);
bool was_zero = t->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", t, outgoing_window,
received_update);
bool is_zero = t->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_initiate_write(exec_ctx, t, false,
"new_global_flow_control");
}
}
}
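
The was_zero/is_zero bracketing above is what restarts stalled writes: a WINDOW_UPDATE only needs to kick off write activity when it moves a window from non-positive back to positive. A minimal standalone sketch of that edge detection (it stands in for the GRPC_CHTTP2_FLOW_CREDIT_* macros followed by grpc_chttp2_become_writable or grpc_chttp2_initiate_write shown above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a transport or stream flow-control window. */
static int64_t outgoing_window = -3; /* currently stalled */

/* Apply a WINDOW_UPDATE credit; report whether writes should resume. */
static bool credit_window(int64_t received_update) {
  bool was_zero = outgoing_window <= 0;
  outgoing_window += received_update;
  bool is_zero = outgoing_window <= 0;
  /* Only the non-positive -> positive transition matters: crediting an
     already-open window must not schedule redundant writes. */
  return was_zero && !is_zero;
}

int main(void) {
  printf("resume after +2: %d\n", credit_window(2)); /* 0: still stalled */
  printf("resume after +5: %d\n", credit_window(5)); /* 1: unstalled */
  printf("resume after +1: %d\n", credit_window(1)); /* 0: already open */
  return 0;
}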

@@ -51,8 +51,7 @@ gpr_slice grpc_chttp2_window_update_create(uint32_t id, uint32_t window_delta,
grpc_error *grpc_chttp2_window_update_parser_begin_frame(
grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */

File diff suppressed because it is too large.

@@ -45,7 +45,8 @@
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
typedef grpc_error *(*grpc_chttp2_hpack_parser_state)(
grpc_chttp2_hpack_parser *p, const uint8_t *beg, const uint8_t *end);
grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *beg,
const uint8_t *end);
typedef struct {
char *str;
@@ -55,7 +56,7 @@ typedef struct {
struct grpc_chttp2_hpack_parser {
/* user specified callback for each header output */
void (*on_header)(void *user_data, grpc_mdelem *md);
void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem *md);
void *on_header_user_data;
grpc_error *last_error;
@@ -104,15 +105,17 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
/* returns 1 on success, 0 on error */
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *beg,
const uint8_t *end);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
grpc_error *grpc_chttp2_header_parser_parse(
grpc_exec_ctx *exec_ctx, void *hpack_parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */

@@ -53,31 +53,25 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/transport_impl.h"
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
/* streams are kept in various linked lists depending on what things need to
happen to them... this enum labels each list */
typedef enum {
GRPC_CHTTP2_LIST_ALL_STREAMS,
GRPC_CHTTP2_LIST_CHECK_READ_OPS,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE,
GRPC_CHTTP2_LIST_WRITABLE,
GRPC_CHTTP2_LIST_WRITING,
GRPC_CHTTP2_LIST_WRITTEN,
GRPC_CHTTP2_LIST_PARSING_SEEN,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
/* streams waiting for the outgoing window in the writing path, they will be
* merged to the stalled list or writable list under transport lock. */
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
STREAM_LIST_COUNT /* must be last */
} grpc_chttp2_stream_list_id;
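
The enum above drives an intrusive multi-list scheme: each stream embeds one link pair per list plus a per-list membership flag (the links[] and included[] arrays that appear later in this header), so adds, removals, and membership tests are O(1) with no allocation. A hedged standalone sketch of that layout, with all names invented:

#include <stdbool.h>
#include <stddef.h>

#define LIST_COUNT 3 /* stands in for STREAM_LIST_COUNT */

typedef struct stream stream;
typedef struct {
  stream *next;
  stream *prev;
} stream_link;

struct stream {
  int id;
  stream_link links[LIST_COUNT]; /* one link pair per list */
  bool included[LIST_COUNT];     /* O(1) membership test */
};

typedef struct {
  stream *head[LIST_COUNT];
  stream *tail[LIST_COUNT];
} transport;

/* Append s to list `which` iff it is not already there; returns whether
   it was newly added (mirroring the bool return this diff introduces). */
static bool list_add(transport *t, stream *s, int which) {
  if (s->included[which]) return false;
  s->included[which] = true;
  s->links[which].next = NULL;
  s->links[which].prev = t->tail[which];
  if (t->tail[which] != NULL) {
    t->tail[which]->links[which].next = s;
  } else {
    t->head[which] = s;
  }
  t->tail[which] = s;
  return true;
}

int main(void) {
  static transport t; /* zero-initialized */
  static stream s1;
  s1.id = 1;
  /* second add is a no-op thanks to the included[] flag */
  return list_add(&t, &s1, 0) && !list_add(&t, &s1, 0) ? 0 : 1;
}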
typedef enum {
GRPC_CHTTP2_WRITE_STATE_IDLE,
GRPC_CHTTP2_WRITE_STATE_WRITING,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_TO_COME,
} grpc_chttp2_write_state;
/* deframer state for the overall http2 stream of bytes */
typedef enum {
/* prefix: one entry per http2 connection prefix byte */
@@ -152,6 +146,12 @@ typedef struct grpc_chttp2_outstanding_ping {
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
typedef struct grpc_chttp2_write_cb {
size_t call_at_byte;
grpc_closure *closure;
struct grpc_chttp2_write_cb *next;
} grpc_chttp2_write_cb;
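
grpc_chttp2_write_cb, added above, lets a closure run once a given byte offset of a stream's outgoing data has been flushed to the wire: call_at_byte is compared against the stream's running sending_bytes counter (both fields appear later in this header). A hedged sketch of that bookkeeping; everything beyond the struct's own fields is invented:

#include <stddef.h>
#include <stdio.h>

typedef struct write_cb {
  size_t call_at_byte;   /* fire once this many bytes have been sent */
  void (*closure)(void); /* stand-in for a grpc_closure* */
  struct write_cb *next;
} write_cb;

static void notify(void) { printf("write complete\n"); }

/* After a write flushes `just_sent` more bytes, run every callback whose
   threshold has now been reached; returns the surviving list. */
static write_cb *advance(write_cb *cbs, size_t *sending_bytes,
                         size_t just_sent) {
  *sending_bytes += just_sent;
  write_cb **pp = &cbs;
  while (*pp != NULL) {
    if ((*pp)->call_at_byte <= *sending_bytes) {
      write_cb *done = *pp;
      *pp = done->next;
      done->closure();
    } else {
      pp = &(*pp)->next;
    }
  }
  return cbs;
}

int main(void) {
  write_cb cb = {10, notify, NULL};
  size_t sent = 0;
  write_cb *list = &cb;
  list = advance(list, &sent, 6); /* nothing fires yet */
  list = advance(list, &sent, 6); /* threshold 10 passed: fires */
  return list == NULL ? 0 : 1;
}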
/* forward declared in frame_data.h */
struct grpc_chttp2_incoming_byte_stream {
grpc_byte_stream base;
@@ -178,12 +178,71 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_closure finished_action;
};
typedef struct {
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
grpc_endpoint *ep;
char *peer_string;
grpc_combiner *combiner;
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** has a check_read_ops been scheduled */
bool check_read_ops_scheduled;
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
uint8_t closed;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
/** maps stream id to grpc_chttp2_stream objects */
grpc_chttp2_stream_map stream_map;
grpc_closure write_action_begin_locked;
grpc_closure write_action;
grpc_closure write_action_end;
grpc_closure write_action_end_locked;
grpc_closure read_action_begin;
grpc_closure read_action_locked;
grpc_closure read_action_flush_locked;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
grpc_chttp2_stream **accepting_stream;
struct {
/* accept stream callback */
void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
/** connectivity tracking */
grpc_connectivity_state_tracker state_tracker;
} channel_callback;
/** data to write now */
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
int64_t outgoing_window;
/** is this a client? */
uint8_t is_client;
/** data to write next write */
gpr_slice_buffer qbuf;
/** window available for us to send to peer */
int64_t outgoing_window;
/** window available to announce to peer */
int64_t announce_incoming_window;
/** how much window would we like to have for incoming_window */
@@ -194,8 +253,6 @@ typedef struct {
/** have we sent a goaway */
uint8_t sent_goaway;
/** is this transport a client? */
uint8_t is_client;
/** are the local settings dirty and need to be sent? */
uint8_t dirtied_local_settings;
/** have local settings been sent? */
@@ -220,39 +277,6 @@ typedef struct {
/** next payload for an outgoing ping */
uint64_t ping_counter;
/** concurrent stream count: updated when not parsing,
so this is a strict over-estimation on the client */
uint32_t concurrent_stream_count;
} grpc_chttp2_transport_global;
typedef struct {
/** data to write now */
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
int64_t outgoing_window;
/** is this a client? */
uint8_t is_client;
/** callback for when writing is done */
grpc_closure done_cb;
} grpc_chttp2_transport_writing;
struct grpc_chttp2_transport_parsing {
/** is this transport a client? (boolean) */
uint8_t is_client;
/** were settings updated? */
uint8_t settings_updated;
/** was a settings ack received? */
uint8_t settings_ack_received;
/** was a goaway frame received? */
uint8_t goaway_received;
/** initial window change */
int64_t initial_window_update;
/** data to write later - after parsing */
gpr_slice_buffer qbuf;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
/** simple one shot parsers */
@@ -265,13 +289,12 @@ struct grpc_chttp2_transport_parsing {
/** parser for goaway frames */
grpc_chttp2_goaway_parser goaway_parser;
/** initial window change */
int64_t initial_window_update;
/** window available for peer to send to us */
int64_t incoming_window;
/** next stream id available at the time of beginning parsing */
uint32_t next_stream_id;
uint32_t last_incoming_stream_id;
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
uint8_t incoming_frame_type;
@@ -282,135 +305,31 @@ struct grpc_chttp2_transport_parsing {
uint32_t incoming_frame_size;
uint32_t incoming_stream_id;
/* current max frame size */
uint32_t max_frame_size;
/* active parser */
void *parser_data;
grpc_chttp2_stream_parsing *incoming_stream;
grpc_chttp2_stream *incoming_stream;
grpc_error *(*parser)(grpc_exec_ctx *exec_ctx, void *parser_user_data,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
/* received settings */
uint32_t settings[GRPC_CHTTP2_NUM_SETTINGS];
/* last settings that were sent */
uint32_t last_sent_settings[GRPC_CHTTP2_NUM_SETTINGS];
/* goaway data */
grpc_status_code goaway_error;
uint32_t goaway_last_stream_index;
gpr_slice goaway_text;
int64_t outgoing_window;
grpc_chttp2_write_cb *write_cb_pool;
};
typedef enum {
/** no writing activity allowed */
GRPC_CHTTP2_WRITES_CORKED,
/** no writing activity */
GRPC_CHTTP2_WRITING_INACTIVE,
/** write has been requested and scheduled against the workqueue */
GRPC_CHTTP2_WRITE_SCHEDULED,
/** write has been initiated after being reaped from the workqueue */
GRPC_CHTTP2_WRITING,
/** write has been initiated, AND another write needs to be started once it's
done */
GRPC_CHTTP2_WRITING_STALE_WITH_POLLER,
GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
} grpc_chttp2_write_state;
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
grpc_endpoint *ep;
char *peer_string;
/** when this drops to zero it's safe to shutdown the endpoint */
gpr_refcount shutdown_ep_refs;
struct {
grpc_combiner *combiner;
/** is a thread currently in the global lock */
bool global_active;
/** is a thread currently parsing */
bool parsing_active;
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** has a check_read_ops been scheduled */
bool check_read_ops_scheduled;
} executor;
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
uint8_t closed;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
/** global state for reading/writing */
grpc_chttp2_transport_global global;
/** state only accessible by the chain of execution that
set writing_state >= GRPC_WRITING, and only by the writing closure
chain. */
grpc_chttp2_transport_writing writing;
/** state only accessible by the chain of execution that
set parsing_active=1 */
grpc_chttp2_transport_parsing parsing;
/** maps stream id to grpc_chttp2_stream objects;
owned by the parsing thread when parsing */
grpc_chttp2_stream_map parsing_stream_map;
/** streams created by the client (possibly during parsing);
merged with parsing_stream_map during unlock when no
parsing is occurring */
grpc_chttp2_stream_map new_stream_map;
/** closure to execute writing */
grpc_closure writing_action;
/** closure to start reading from the endpoint */
grpc_closure reading_action;
grpc_closure reading_action_locked;
grpc_closure post_parse_locked;
/** closure to actually do parsing */
grpc_closure parsing_action;
/** closure to initiate writing */
grpc_closure initiate_writing;
/** closure to finish writing */
grpc_closure terminate_writing;
/** closure to flush read state up the stack */
grpc_closure initiate_read_flush_locked;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
grpc_chttp2_stream **accepting_stream;
struct {
/* accept stream callback */
void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
/** connectivity tracking */
grpc_connectivity_state_tracker state_tracker;
} channel_callback;
grpc_closure destroy_stream;
void *destroy_stream_arg;
/** Transport op to be applied post-parsing */
grpc_transport_op *post_parsing_op;
};
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
@@ -420,20 +339,21 @@ typedef struct {
As the upper layer offers more bytes, this value increases.
As bytes are read, this value decreases. */
uint32_t max_recv_bytes;
/** The number of bytes the upper layer has offered to read but we have
not yet announced to HTTP2 flow control.
As the upper layers offer to read more bytes, this value increases.
As we advertise incoming flow control window, this value decreases. */
uint32_t unannounced_incoming_window_for_parse;
uint32_t unannounced_incoming_window_for_writing;
/** things the upper layers would like to send */
grpc_metadata_batch *send_initial_metadata;
grpc_closure *send_initial_metadata_finished;
grpc_byte_stream *send_message;
grpc_closure *send_message_finished;
grpc_metadata_batch *send_trailing_metadata;
grpc_closure *send_trailing_metadata_finished;
grpc_byte_stream *fetching_send_message;
uint32_t fetched_send_message_length;
gpr_slice fetching_slice;
int64_t fetching_slice_end_offset;
bool complete_fetch_covered_by_poller;
grpc_closure complete_fetch;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *recv_initial_metadata_ready;
grpc_byte_stream **recv_message;
@@ -458,90 +378,41 @@ typedef struct {
/** Has this stream seen an error.
If true, then pending incoming frames can be thrown away. */
bool seen_error;
bool exceeded_metadata_size;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
/** the error that resulted in this stream being write-closed */
grpc_error *write_closed_error;
bool published_initial_metadata;
bool published_trailing_metadata;
bool published_metadata[2];
bool final_metadata_requested;
grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
grpc_chttp2_incoming_frame_queue incoming_frames;
gpr_timespec deadline;
} grpc_chttp2_stream_global;
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
uint8_t fetching;
bool sent_initial_metadata;
uint8_t sent_message;
uint8_t sent_trailing_metadata;
uint8_t read_closed;
/** send this initial metadata */
grpc_metadata_batch *send_initial_metadata;
grpc_byte_stream *send_message;
grpc_metadata_batch *send_trailing_metadata;
int64_t outgoing_window;
/** how much window should we announce? */
uint32_t announce_window;
gpr_slice_buffer flow_controlled_buffer;
gpr_slice fetching_slice;
size_t stream_fetched;
grpc_closure finished_fetch;
/** stats gathered during the write */
grpc_transport_one_way_stats stats;
} grpc_chttp2_stream_writing;
struct grpc_chttp2_stream_parsing {
/** saw some stream level error */
grpc_error *forced_close_error;
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
/** has this stream received a close */
uint8_t received_close;
/** how many header frames have we received? */
uint8_t header_frames_received;
/** which metadata did we get (on this parse) */
uint8_t got_metadata_on_parse[2];
/** should we raise the seen_error flag in transport_global */
bool seen_error;
bool exceeded_metadata_size;
/** window available for peer to send to us */
int64_t incoming_window;
/** parsing state for data frames */
grpc_chttp2_data_parser data_parser;
/** amount of window given */
int64_t outgoing_window;
/** number of bytes received - reset at end of parse thread execution */
int64_t received_bytes;
/** stats gathered during the parse */
grpc_transport_stream_stats stats;
/** incoming metadata */
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
};
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
grpc_chttp2_stream_global global;
grpc_chttp2_stream_writing writing;
grpc_chttp2_stream_parsing parsing;
grpc_closure init_stream;
grpc_closure destroy_stream;
void *destroy_stream_arg;
bool sent_initial_metadata;
bool sent_trailing_metadata;
/** how much window should we announce? */
uint32_t announce_window;
gpr_slice_buffer flow_controlled_buffer;
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
};
/** Transport writing call flow:
@@ -557,162 +428,79 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason);
/** Someone is unlocking the transport mutex: check to see if writes
are required, and schedule them if so */
int grpc_chttp2_unlocking_check_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint);
void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
void *transport_writing, grpc_error *error);
void grpc_chttp2_cleanup_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_prepare_to_read(grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
are required, and frame them if so */
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
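
The three declarations above replace the old unlocking_check_writes/perform_writes/terminate_writing pipeline: grpc_chttp2_initiate_write requests a write, grpc_chttp2_begin_write frames pending data and reports whether anything needs sending, and grpc_chttp2_end_write runs once the endpoint write completes. A hedged sketch of how the grpc_chttp2_write_state enum near the top of this header could sequence them; the transition logic is an inference from the enum's names, not a copy of the implementation:

#include <stdio.h>

typedef enum {
  WRITE_STATE_IDLE,
  WRITE_STATE_WRITING,
  WRITE_STATE_WRITING_WITH_MORE_TO_COME,
} write_state;

static write_state state = WRITE_STATE_IDLE;

/* A write is kicked off only from IDLE; if one is already in flight,
   remember that another pass is needed when it finishes. */
static void initiate_write(const char *reason) {
  switch (state) {
    case WRITE_STATE_IDLE:
      state = WRITE_STATE_WRITING;
      printf("scheduling write: %s\n", reason);
      break;
    case WRITE_STATE_WRITING:
      state = WRITE_STATE_WRITING_WITH_MORE_TO_COME;
      break;
    case WRITE_STATE_WRITING_WITH_MORE_TO_COME:
      break; /* already queued for another pass */
  }
}

/* Called when the endpoint write completes: either go idle or start the
   deferred pass immediately. */
static void end_write(void) {
  if (state == WRITE_STATE_WRITING_WITH_MORE_TO_COME) {
    state = WRITE_STATE_WRITING;
    printf("starting deferred write pass\n");
  } else {
    state = WRITE_STATE_IDLE;
  }
}

int main(void) {
  initiate_write("ping response");
  initiate_write("new_global_flow_control"); /* coalesced */
  end_write(); /* deferred pass begins */
  end_write(); /* now idle */
  return 0;
}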
/** Process one slice of incoming data; return 1 if the connection is still
viable after reading, or 0 if the connection should be torn down */
grpc_error *grpc_chttp2_perform_read(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice);
void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, gpr_slice slice);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
/** Get a writable stream
returns non-zero if there was a stream available */
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) GRPC_MUST_USE_RESULT;
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing);
int grpc_chttp2_list_pop_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_add_written_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing);
int grpc_chttp2_list_pop_parsing_seen_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_add_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_check_read_ops(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
bool grpc_chttp2_list_remove_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
bool grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing);
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_remove_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
grpc_chttp2_transport_parsing *transport_parsing, uint32_t id);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
uint32_t id);
void grpc_chttp2_add_incoming_goaway(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
uint32_t goaway_error, gpr_slice goaway_text);
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
/* returns 1 if this is the last stream, 0 otherwise */
int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
int grpc_chttp2_has_streams(grpc_chttp2_transport *t);
void grpc_chttp2_for_all_streams(
grpc_chttp2_transport_global *transport_global, void *user_data,
void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
grpc_chttp2_stream_global *stream_global));
void grpc_chttp2_parsing_become_skip_parser(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
void grpc_chttp2_complete_closure_step(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_closure **pclosure,
grpc_error *error);
grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t);
int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_check_read_ops(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
bool grpc_chttp2_list_remove_check_read_ops(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_check_read_ops(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
uint32_t id);
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t id);
void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t goaway_error,
gpr_slice goaway_text);
void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_closure **pclosure,
grpc_error *error);
#define GRPC_CHTTP2_CLIENT_CONNECT_STRING "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
@@ -803,57 +591,73 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
const char *var2, int is_client,
uint32_t stream_id, int64_t val1, int64_t val2);
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream,
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *stream,
grpc_status_code status, gpr_slice *details);
void grpc_chttp2_mark_stream_closed(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, int close_reads, int close_writes,
grpc_error *error);
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int close_reads,
int close_writes, grpc_error *error);
void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global);
grpc_chttp2_transport *t);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global, reason)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
const char *reason);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global,
#define GRPC_CHTTP2_STREAM_REF(stream, reason) \
grpc_chttp2_stream_ref(stream, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream, reason)
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
const char *reason);
#else
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global);
#define GRPC_CHTTP2_STREAM_REF(stream, reason) grpc_chttp2_stream_ref(stream)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream)
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
#endif
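/* Illustrative usage (a sketch, not part of this change): reason strings
   are free-form tags recorded only when GRPC_STREAM_REFCOUNT_DEBUG is
   defined; in release builds they compile away entirely.
     GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing");
     ...
     GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing");
 */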
//#define GRPC_CHTTP2_REFCOUNTING_DEBUG 1
#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) \
grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
grpc_chttp2_unref_transport(cl, t, r, __FILE__, __LINE__)
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, const char *reason,
const char *file, int line);
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
const char *file, int line);
#else
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) grpc_chttp2_unref_transport(cl, t)
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t);
#endif
grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
uint32_t frame_size, uint32_t flags);
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
gpr_slice slice);
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
grpc_error *error, int from_parsing_thread);
grpc_error *error);
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *parsing,
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const uint8_t *opaque_8bytes);
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
bool covered_by_poller, const char *reason);
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, bool covered_by_poller,
const char *reason);
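/* Hypothetical call site, for illustration only (the reason string is an
   invented tag), e.g. after queueing new outgoing data under the combiner:
     grpc_chttp2_become_writable(exec_ctx, t, s, true, "new_outgoing_data");
   The ref taken here is balanced by the GRPC_CHTTP2_STREAM_UNREF(...,
   "chttp2_writing") calls in writing.c once the write completes. */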
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_error *due_to_error);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */

File diff suppressed because it is too large

@@ -35,27 +35,6 @@
#include <grpc/support/log.h>
#define TRANSPORT_FROM_GLOBAL(tg) \
((grpc_chttp2_transport *)((char *)(tg)-offsetof(grpc_chttp2_transport, \
global)))
#define STREAM_FROM_GLOBAL(sg) \
((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
#define TRANSPORT_FROM_WRITING(tw) \
((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \
writing)))
#define STREAM_FROM_WRITING(sw) \
((grpc_chttp2_stream *)((char *)(sw)-offsetof(grpc_chttp2_stream, writing)))
#define TRANSPORT_FROM_PARSING(tp) \
((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
parsing)))
#define STREAM_FROM_PARSING(sp) \
((grpc_chttp2_stream *)((char *)(sp)-offsetof(grpc_chttp2_stream, parsing)))
/* core list management */
static int stream_list_empty(grpc_chttp2_transport *t,
@@ -139,321 +118,79 @@ static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
/* wrappers for specializations */
bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
return stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE);
}
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WRITABLE);
if (r != 0) {
*stream_global = &stream->global;
*stream_writing = &stream->writing;
}
return r;
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
GPR_ASSERT(s->id != 0);
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE);
int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
GPR_ASSERT(stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITING));
bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing) {
return !stream_list_empty(TRANSPORT_FROM_WRITING(transport_writing),
GRPC_CHTTP2_LIST_WRITING);
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITING);
}
int grpc_chttp2_list_pop_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITING);
if (r != 0) {
*stream_writing = &stream->writing;
}
return r;
int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) {
return !stream_list_empty(t, GRPC_CHTTP2_LIST_WRITING);
}
void grpc_chttp2_list_add_written_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITTEN);
int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITING);
}
int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITTEN);
if (r != 0) {
*stream_global = &stream->global;
*stream_writing = &stream->writing;
}
return r;
void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(
TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r =
stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
void grpc_chttp2_list_add_check_read_ops(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
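/* Schedule at most one read-ops flush per combiner drain; the flag is
   presumably reset by read_action_flush_locked itself (its definition
   lives in chttp2_transport.c, outside this hunk). */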
if (!t->check_read_ops_scheduled) {
GRPC_CHTTP2_REF_TRANSPORT(t, "initiate_read_flush_locked");
grpc_combiner_execute_finally(
exec_ctx, t->combiner, &t->read_action_flush_locked, GRPC_ERROR_NONE);
t->check_read_ops_scheduled = true;
}
return r;
stream_list_add(t, s, GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing) {
stream_list_add(TRANSPORT_FROM_PARSING(transport_parsing),
STREAM_FROM_PARSING(stream_parsing),
GRPC_CHTTP2_LIST_PARSING_SEEN);
bool grpc_chttp2_list_remove_check_read_ops(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
int grpc_chttp2_list_pop_parsing_seen_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_PARSING(transport_parsing), &stream,
GRPC_CHTTP2_LIST_PARSING_SEEN);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
}
return r;
int grpc_chttp2_list_pop_check_read_ops(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
void grpc_chttp2_list_add_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_check_read_ops(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
if (!t->executor.check_read_ops_scheduled) {
grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
&t->initiate_read_flush_locked,
GRPC_ERROR_NONE, false);
t->executor.check_read_ops_scheduled = true;
}
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
bool grpc_chttp2_list_remove_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
grpc_chttp2_stream *stream = STREAM_FROM_WRITING(stream_writing);
gpr_log(GPR_DEBUG, "writing stalled %d", stream->global.id);
if (!stream->included[GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT]) {
GRPC_CHTTP2_STREAM_REF(&stream->global, "chttp2_writing_stalled");
}
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing), stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT);
}
bool grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream *stream;
bool out = false;
grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing);
while (stream_list_pop(transport, &stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) {
gpr_log(GPR_DEBUG, "move %d from writing stalled to just stalled",
stream->global.id);
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
&stream->writing);
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &stream->global,
"chttp2_writing_stalled");
out = true;
}
return out;
}
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
gpr_log(GPR_DEBUG, "stalled %d", stream_writing->id);
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_remove_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
}
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
}
int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add_tail(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
return stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
int grpc_chttp2_has_streams(grpc_chttp2_transport *t) {
return !stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
void grpc_chttp2_for_all_streams(
grpc_chttp2_transport_global *transport_global, void *user_data,
void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
grpc_chttp2_stream_global *stream_global)) {
grpc_chttp2_stream *s;
grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
for (s = t->lists[GRPC_CHTTP2_LIST_ALL_STREAMS].head; s != NULL;
s = s->links[GRPC_CHTTP2_LIST_ALL_STREAMS].next) {
cb(transport_global, user_data, &s->global);
}
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}

@@ -40,343 +40,198 @@
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing);
static void add_to_write_list(grpc_chttp2_write_cb **list,
grpc_chttp2_write_cb *cb) {
cb->next = *list;
*list = cb;
}
static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb,
grpc_error *error) {
grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error);
cb->next = t->write_cb_pool;
t->write_cb_pool = cb;
}
int grpc_chttp2_unlocking_check_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_writing *stream_writing;
static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, size_t send_bytes,
grpc_chttp2_write_cb **list, grpc_error *error) {
grpc_chttp2_write_cb *cb = *list;
*list = NULL;
while (cb) {
grpc_chttp2_write_cb *next = cb->next;
if (cb->call_at_byte <= send_bytes) {
finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
} else {
cb->call_at_byte -= send_bytes;
add_to_write_list(list, cb);
}
cb = next;
}
}
GPR_TIMER_BEGIN("grpc_chttp2_unlocking_check_writes", 0);
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
if (transport_global->dirtied_local_settings &&
!transport_global->sent_local_settings) {
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
if (t->dirtied_local_settings && !t->sent_local_settings) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
&t->outbuf,
grpc_chttp2_settings_create(
transport_global->settings[GRPC_SENT_SETTINGS],
transport_global->settings[GRPC_LOCAL_SETTINGS],
transport_global->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
transport_global->force_send_settings = 0;
transport_global->dirtied_local_settings = 0;
transport_global->sent_local_settings = 1;
t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS],
t->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
t->force_send_settings = 0;
t->dirtied_local_settings = 0;
t->sent_local_settings = 1;
}
/* simple writes are queued to qbuf, and flushed here */
gpr_slice_buffer_move_into(&transport_global->qbuf,
&transport_writing->outbuf);
GPR_ASSERT(transport_global->qbuf.count == 0);
gpr_slice_buffer_move_into(&t->qbuf, &t->outbuf);
GPR_ASSERT(t->qbuf.count == 0);
grpc_chttp2_hpack_compressor_set_max_table_size(
&transport_writing->hpack_compressor,
transport_global->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
&t->hpack_compressor,
t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
transport_global, outgoing_window);
if (transport_writing->outgoing_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
&stream_global)) {
grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
false, "transport.read_flow_control");
if (t->outgoing_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"transport.read_flow_control");
}
}
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (grpc_chttp2_list_pop_writable_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
bool sent_initial_metadata = stream_writing->sent_initial_metadata;
bool become_writable = false;
stream_writing->id = stream_global->id;
stream_writing->read_closed = stream_global->read_closed;
while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
bool sent_initial_metadata = s->sent_initial_metadata;
bool now_writing = false;
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_writing, stream_writing,
outgoing_window, stream_global,
outgoing_window);
gpr_log(GPR_DEBUG, "W:%d:%s: sim=%d ann=%d fcb_len=%d (t,s)-win=%d,%d",
(int)s->id, t->is_client ? "client" : "server",
sent_initial_metadata, (int)s->announce_window,
(int)s->flow_controlled_buffer.length, (int)t->outgoing_window,
(int)s->outgoing_window);
if (!sent_initial_metadata && stream_global->send_initial_metadata) {
stream_writing->send_initial_metadata =
stream_global->send_initial_metadata;
stream_global->send_initial_metadata = NULL;
become_writable = true;
/* send initial metadata if it's available */
if (!sent_initial_metadata && s->send_initial_metadata) {
grpc_chttp2_encode_header(&t->hpack_compressor, s->id,
s->send_initial_metadata, 0, &s->stats.outgoing,
&t->outbuf);
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
now_writing = true;
}
/* send any window updates */
if (s->announce_window > 0 && sent_initial_metadata) {
uint32_t announce = s->announce_window;
gpr_slice_buffer_add(&t->outbuf,
grpc_chttp2_window_update_create(
s->id, s->announce_window, &s->stats.outgoing));
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, announce_window, announce);
}
if (sent_initial_metadata) {
if (stream_global->send_message != NULL) {
gpr_slice hdr = gpr_slice_malloc(5);
uint8_t *p = GPR_SLICE_START_PTR(hdr);
uint32_t len = stream_global->send_message->length;
GPR_ASSERT(stream_writing->send_message == NULL);
p[0] = (stream_global->send_message->flags &
GRPC_WRITE_INTERNAL_COMPRESS) != 0;
p[1] = (uint8_t)(len >> 24);
p[2] = (uint8_t)(len >> 16);
p[3] = (uint8_t)(len >> 8);
p[4] = (uint8_t)(len);
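/* The five bytes built above form the standard gRPC length-prefixed
   message header: one compressed-flag byte followed by a 4-byte
   big-endian payload length. For example, an uncompressed 257-byte
   message yields the header 00 00 00 01 01. */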
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer, hdr);
if (stream_global->send_message->length > 0) {
stream_writing->send_message = stream_global->send_message;
} else {
stream_writing->send_message = NULL;
/* send any body bytes, if allowed by flow control */
if (s->flow_controlled_buffer.length > 0) {
uint32_t max_outgoing =
(uint32_t)GPR_MIN(GRPC_CHTTP2_MAX_PAYLOAD_LENGTH,
GPR_MIN(s->outgoing_window, t->outgoing_window));
if (max_outgoing > 0) {
uint32_t send_bytes =
(uint32_t)GPR_MIN(max_outgoing, s->flow_controlled_buffer.length);
bool is_last_data_frame =
s->fetching_send_message == NULL &&
send_bytes == s->flow_controlled_buffer.length;
bool is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, send_bytes,
is_last_frame, &s->stats.outgoing,
&t->outbuf);
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, outgoing_window,
send_bytes);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, outgoing_window,
send_bytes);
if (is_last_frame) {
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
}
s->sending_bytes += send_bytes;
now_writing = true;
if (s->flow_controlled_buffer.length > 0) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing");
grpc_chttp2_list_add_writable_stream(t, s);
}
} else if (t->outgoing_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
}
stream_writing->stream_fetched = 0;
stream_global->send_message = NULL;
}
if ((stream_writing->send_message != NULL ||
stream_writing->flow_controlled_buffer.length > 0) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
become_writable = true;
} else {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
}
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0) {
grpc_chttp2_encode_header(&t->hpack_compressor, s->id,
s->send_trailing_metadata, true,
&s->stats.outgoing, &t->outbuf);
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
now_writing = true;
}
if (stream_global->send_trailing_metadata) {
stream_writing->send_trailing_metadata =
stream_global->send_trailing_metadata;
stream_global->send_trailing_metadata = NULL;
become_writable = true;
}
}
if (!stream_global->read_closed &&
stream_global->unannounced_incoming_window_for_writing > 1024) {
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_global, stream_writing,
announce_window, stream_global,
unannounced_incoming_window_for_writing);
become_writable = true;
}
if (become_writable) {
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
if (now_writing) {
grpc_chttp2_list_add_writing_stream(t, s);
} else {
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing");
}
}
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
if (transport_global->announce_incoming_window > 0) {
uint32_t announced = (uint32_t)GPR_MIN(
transport_global->announce_incoming_window, UINT32_MAX);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_global,
announce_incoming_window, announced);
if (t->announce_incoming_window > 0) {
uint32_t announced =
(uint32_t)GPR_MIN(t->announce_incoming_window, UINT32_MAX);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, announce_incoming_window,
announced);
grpc_transport_one_way_stats throwaway_stats;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(0, announced, &throwaway_stats));
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_window_update_create(
0, announced, &throwaway_stats));
}
GPR_TIMER_END("grpc_chttp2_unlocking_check_writes", 0);
return transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing);
}
void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint) {
GPR_ASSERT(transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing));
finalize_outbuf(exec_ctx, transport_writing);
GPR_ASSERT(endpoint);
if (transport_writing->outbuf.count > 0) {
grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
&transport_writing->done_cb);
} else {
grpc_exec_ctx_sched(exec_ctx, &transport_writing->done_cb, GRPC_ERROR_NONE,
NULL);
}
}
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
GPR_TIMER_BEGIN("finalize_outbuf", 0);
bool is_first_data_frame = true;
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
uint32_t max_outgoing =
(uint32_t)GPR_MIN(GRPC_CHTTP2_MAX_PAYLOAD_LENGTH,
GPR_MIN(stream_writing->outgoing_window,
transport_writing->outgoing_window));
/* send initial metadata if it's available */
if (stream_writing->send_initial_metadata != NULL) {
grpc_chttp2_encode_header(
&transport_writing->hpack_compressor, stream_writing->id,
stream_writing->send_initial_metadata, 0, &stream_writing->stats,
&transport_writing->outbuf);
stream_writing->send_initial_metadata = NULL;
stream_writing->sent_initial_metadata = 1;
}
/* send any window updates */
if (stream_writing->announce_window > 0 &&
stream_writing->send_initial_metadata == NULL) {
uint32_t announce = stream_writing->announce_window;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(stream_writing->id,
stream_writing->announce_window,
&stream_writing->stats));
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing, stream_writing,
announce_window, announce);
stream_writing->announce_window = 0;
}
/* fetch any body bytes */
while (!stream_writing->fetching && stream_writing->send_message &&
stream_writing->flow_controlled_buffer.length < max_outgoing &&
stream_writing->stream_fetched <
stream_writing->send_message->length) {
if (grpc_byte_stream_next(exec_ctx, stream_writing->send_message,
&stream_writing->fetching_slice, max_outgoing,
&stream_writing->finished_fetch)) {
stream_writing->stream_fetched +=
GPR_SLICE_LENGTH(stream_writing->fetching_slice);
if (stream_writing->stream_fetched ==
stream_writing->send_message->length) {
stream_writing->send_message = NULL;
}
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer,
stream_writing->fetching_slice);
} else {
stream_writing->fetching = 1;
}
}
/* send any body bytes */
if (stream_writing->flow_controlled_buffer.length > 0) {
if (max_outgoing > 0) {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, stream_writing->flow_controlled_buffer.length);
int is_last_data_frame =
stream_writing->send_message == NULL &&
send_bytes == stream_writing->flow_controlled_buffer.length;
int is_last_frame = is_last_data_frame &&
stream_writing->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata);
grpc_chttp2_encode_data(
stream_writing->id, &stream_writing->flow_controlled_buffer,
send_bytes, is_last_frame, &stream_writing->stats,
&transport_writing->outbuf);
if (is_first_data_frame) {
/* TODO(dgq): this is a hack. It'll be fixed in a future refactoring */
stream_writing->stats.data_bytes -= 5; /* discount grpc framing */
is_first_data_frame = false;
}
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing,
stream_writing, outgoing_window,
send_bytes);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_writing,
outgoing_window, send_bytes);
if (is_last_frame) {
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
if (is_last_data_frame) {
GPR_ASSERT(stream_writing->send_message == NULL);
stream_writing->sent_message = 1;
}
} else if (transport_writing->outgoing_window == 0) {
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
/* send trailing metadata if it's available and we're ready for it */
if (stream_writing->send_message == NULL &&
stream_writing->flow_controlled_buffer.length == 0 &&
stream_writing->send_trailing_metadata != NULL) {
if (grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata)) {
grpc_chttp2_encode_data(
stream_writing->id, &stream_writing->flow_controlled_buffer, 0, 1,
&stream_writing->stats, &transport_writing->outbuf);
} else {
grpc_chttp2_encode_header(
&transport_writing->hpack_compressor, stream_writing->id,
stream_writing->send_trailing_metadata, 1, &stream_writing->stats,
&transport_writing->outbuf);
}
if (!transport_writing->is_client && !stream_writing->read_closed) {
gpr_slice_buffer_add(&transport_writing->outbuf,
grpc_chttp2_rst_stream_create(
stream_writing->id, GRPC_CHTTP2_NO_ERROR,
&stream_writing->stats));
}
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
/* if there's more to write, then loop, otherwise prepare to finish the
* write */
if ((stream_writing->flow_controlled_buffer.length > 0 ||
(stream_writing->send_message && !stream_writing->fetching)) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
} else {
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
} else {
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
GPR_TIMER_END("finalize_outbuf", 0);
return t->outbuf.count > 0;
}
void grpc_chttp2_cleanup_writing(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_chttp2_cleanup_writing", 0);
grpc_chttp2_stream_writing *stream_writing;
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream *s;
if (grpc_chttp2_list_flush_writing_stalled_by_transport(exec_ctx,
transport_writing)) {
grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
"resume_stalled_stream");
}
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
if (stream_writing->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_initial_metadata_finished, GRPC_ERROR_NONE);
}
grpc_transport_move_one_way_stats(&stream_writing->stats,
&stream_global->stats.outgoing);
if (stream_writing->sent_message) {
GPR_ASSERT(stream_writing->send_message == NULL);
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_message_finished, GRPC_ERROR_NONE);
stream_writing->sent_message = 0;
while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
if (s->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(exec_ctx, t, s,
&s->send_initial_metadata_finished,
GRPC_ERROR_REF(error));
}
if (stream_writing->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_trailing_metadata_finished, GRPC_ERROR_NONE);
if (s->sending_bytes != 0) {
update_list(exec_ctx, t, s, s->sending_bytes, &s->on_write_finished_cbs,
GRPC_ERROR_REF(error));
s->sending_bytes = 0;
}
if (stream_writing->sent_trailing_metadata) {
grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
!transport_global->is_client, 1,
GRPC_ERROR_NONE);
if (s->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(exec_ctx, t, s,
&s->send_trailing_metadata_finished,
GRPC_ERROR_REF(error));
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_REF(error));
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing");
}
gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
gpr_slice_buffer_reset_and_unref(&t->outbuf);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_chttp2_cleanup_writing", 0);
}

@@ -51,7 +51,7 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
GRPC_ERROR_UNREF(error);
return;
}
closure->error = error;
closure->error_data.error = error;
closure->next_data.next = NULL;
if (closure_list->head == NULL) {
closure_list->head = closure;
@@ -64,8 +64,8 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
void grpc_closure_list_fail_all(grpc_closure_list *list,
grpc_error *forced_failure) {
for (grpc_closure *c = list->head; c != NULL; c = c->next_data.next) {
if (c->error == GRPC_ERROR_NONE) {
c->error = GRPC_ERROR_REF(forced_failure);
if (c->error_data.error == GRPC_ERROR_NONE) {
c->error_data.error = GRPC_ERROR_REF(forced_failure);
}
}
GRPC_ERROR_UNREF(forced_failure);

@@ -76,7 +76,10 @@ struct grpc_closure {
void *cb_arg;
/** Once queued, the result of the closure. Before then: scratch space */
grpc_error *error;
union {
grpc_error *error;
uintptr_t scratch;
} error_data;
};
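/* Note (editorial): only one union member is meaningful at a time.
   error_data.error holds the queued result for ordinary closure lists,
   while error_data.scratch is reused by combiner.c to pack a grpc_error*
   together with a covered_by_poller bit (see pack_error_data there). */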
/** Initializes \a closure with \a cb and \a cb_arg. */

@@ -51,24 +51,46 @@ int grpc_combiner_trace = 0;
} while (0)
struct grpc_combiner {
grpc_combiner *next_combiner_on_this_exec_ctx;
grpc_workqueue *optional_workqueue;
gpr_mpscq queue;
// state is:
// lower bit - zero if orphaned
// other bits - number of items queued on the lock
gpr_atm state;
bool take_async_break_before_final_list;
// number of elements in the list that are covered by a poller: if >0, we can
// offload safely
gpr_atm covered_by_poller;
bool time_to_execute_final_list;
grpc_closure_list final_list;
grpc_closure continue_finishing;
grpc_closure offload;
};
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
typedef struct {
grpc_error *error;
bool covered_by_poller;
} error_data;
static uintptr_t pack_error_data(error_data d) {
return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
}
static error_data unpack_error_data(uintptr_t p) {
return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
}
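// Illustration of the packing above (a sketch; it relies on the assumption
// that real grpc_error allocations are at least 2-byte aligned, leaving the
// low bit free to carry the covered_by_poller flag):
//   error_data d = {err, true};
//   uintptr_t p = pack_error_data(d);    // == ((uintptr_t)err) | 1
//   error_data u = unpack_error_data(p); // u.error == err;
//                                        // u.covered_by_poller == true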
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
lock->next_combiner_on_this_exec_ctx = NULL;
lock->time_to_execute_final_list = false;
lock->optional_workqueue = optional_workqueue;
gpr_atm_no_barrier_store(&lock->state, 1);
gpr_atm_no_barrier_store(&lock->covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
lock->take_async_break_before_final_list = false;
grpc_closure_list_init(&lock->final_list);
grpc_closure_init(&lock->offload, offload, lock);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -90,199 +112,177 @@ void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void queue_on_exec_ctx(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
lock->next_combiner_on_this_exec_ctx = NULL;
if (exec_ctx->active_combiner == NULL) {
exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
} else {
exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
exec_ctx->last_combiner = lock;
}
}
static void continue_finishing_mainline(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
GPR_TIMER_BEGIN("combiner.continue_executing_mainline", 0);
grpc_combiner *lock = arg;
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *cl, grpc_error *error,
bool covered_by_poller) {
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p continue_finishing_mainline", lock));
GPR_ASSERT(exec_ctx->active_combiner == NULL);
exec_ctx->active_combiner = lock;
if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
GPR_TIMER_END("combiner.continue_executing_mainline", 0);
gpr_log(GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d", lock, cl, covered_by_poller));
GPR_TIMER_BEGIN("combiner.execute", 0);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, 2);
GPR_ASSERT(last & 1); // ensure lock has not been destroyed
cl->error_data.scratch =
pack_error_data((error_data){error, covered_by_poller});
if (covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->covered_by_poller, 1);
}
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
if (last == 1) {
// queued closures will be executed when the exec_ctx calls
// grpc_combiner_continue_exec_ctx
queue_on_exec_ctx(exec_ctx, lock);
}
GPR_TIMER_END("combiner.execute", 0);
}
static void execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.execute_final", 0);
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
lock->take_async_break_before_final_list = false;
int loops = 0;
while (c != NULL) {
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
loops++;
static void move_next(grpc_exec_ctx *exec_ctx) {
exec_ctx->active_combiner =
exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
if (exec_ctx->active_combiner == NULL) {
exec_ctx->last_combiner = NULL;
}
GPR_TIMER_END("combiner.execute_final", 0);
}
static void continue_executing_final(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
GPR_TIMER_BEGIN("combiner.continue_executing_final", 0);
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_combiner *lock = arg;
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p continue_executing_final", lock));
GPR_ASSERT(exec_ctx->active_combiner == NULL);
exec_ctx->active_combiner = lock;
// quick peek to see if new things have turned up on the queue: if so, go back
// to executing them before the final list
if ((gpr_atm_acq_load(&lock->state) >> 1) > 1) {
if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
} else {
execute_final(exec_ctx, lock);
finish(exec_ctx, lock);
}
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
GPR_TIMER_END("combiner.continue_executing_final", 0);
queue_on_exec_ctx(exec_ctx, lock);
}
static bool start_execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.start_execute_final", 0);
GPR_ASSERT(exec_ctx->active_combiner == lock);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG,
"C:%p start_execute_final take_async_break_before_final_list=%d",
lock, lock->take_async_break_before_final_list));
if (lock->take_async_break_before_final_list) {
grpc_closure_init(&lock->continue_finishing, continue_executing_final,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
GPR_TIMER_END("combiner.start_execute_final", 0);
return false;
} else {
execute_final(exec_ctx, lock);
GPR_TIMER_END("combiner.start_execute_final", 0);
return true;
}
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
move_next(exec_ctx);
grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload,
GRPC_ERROR_NONE);
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.maybe_finish_one", 0);
gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
GPR_ASSERT(exec_ctx->active_combiner == lock);
if (n == NULL) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
grpc_closure_init(&lock->continue_finishing, continue_finishing_mainline,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
GPR_TIMER_END("combiner.maybe_finish_one", 0);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
grpc_combiner *lock = exec_ctx->active_combiner;
if (lock == NULL) {
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return false;
}
grpc_closure *cl = (grpc_closure *)n;
grpc_error *error = cl->error;
cl->cb(exec_ctx, cl->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("combiner.maybe_finish_one", 0);
return true;
}
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
bool (*executor)(grpc_exec_ctx * exec_ctx, grpc_combiner * lock);
GPR_TIMER_BEGIN("combiner.finish", 0);
int loops = 0;
do {
executor = maybe_finish_one;
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -2);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p finish[%d] old_state=%" PRIdPTR, lock,
loops, old_state));
switch (old_state) {
case 5: // we're down to one queued item: if it's the final list we
case 4: // should do that
if (!grpc_closure_list_empty(lock->final_list)) {
executor = start_execute_final;
}
break;
case 3: // had one count, one unorphaned --> unlocked unorphaned
GPR_TIMER_END("combiner.finish", 0);
return;
case 2: // and one count, one orphaned --> unlocked and orphaned
really_destroy(exec_ctx, lock);
GPR_TIMER_END("combiner.finish", 0);
return;
case 1:
case 0:
// these values are illegal - representing an already unlocked or
// deleted lock
GPR_UNREACHABLE_CODE(return );
if (lock->optional_workqueue != NULL &&
grpc_exec_ctx_ready_to_finish(exec_ctx) &&
gpr_atm_acq_load(&lock->covered_by_poller) > 0) {
GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
// this execution context wants to move on, and we have a workqueue (and
// so can help the execution context out): schedule remaining work to be
// picked up on the workqueue
queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
if (!lock->time_to_execute_final_list ||
// peek to see if something new has shown up, and execute that with
// priority
(gpr_atm_acq_load(&lock->state) >> 1) > 1) {
gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
if (n == NULL) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
GPR_TIMER_MARK("delay_busy", 0);
if (lock->optional_workqueue != NULL && gpr_atm_acq_load(&lock->covered_by_poller) > 0) {
queue_offload(exec_ctx, lock);
}
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
loops++;
} while (executor(exec_ctx, lock));
GPR_TIMER_END("combiner.finish", 0);
}
GPR_TIMER_BEGIN("combiner.exec1", 0);
grpc_closure *cl = (grpc_closure *)n;
error_data err = unpack_error_data(cl->error_data.scratch);
cl->cb(exec_ctx, cl->cb_arg, err.error);
if (err.covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->covered_by_poller, -1);
}
GRPC_ERROR_UNREF(err.error);
GPR_TIMER_END("combiner.exec1", 0);
} else {
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
int loops = 0;
while (c != NULL) {
GPR_TIMER_BEGIN("combiner.exec_1final", 0);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
GPR_TIMER_END("combiner.exec_1final", 0);
}
}
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *cl, grpc_error *error) {
GPR_TIMER_MARK("unref", 0);
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -2);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p grpc_combiner_execute c=%p", lock, cl));
GPR_TIMER_BEGIN("combiner.execute", 0);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, 2);
GPR_ASSERT(last & 1); // ensure lock has not been destroyed
if (last == 1) {
exec_ctx->active_combiner = lock;
GPR_TIMER_BEGIN("combiner.execute_first_cb", 0);
cl->cb(exec_ctx, cl->cb_arg, error);
GPR_TIMER_END("combiner.execute_first_cb", 0);
GRPC_ERROR_UNREF(error);
finish(exec_ctx, lock);
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
} else {
cl->error = error;
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
lock->time_to_execute_final_list = false;
switch (old_state) {
case 5: // we're down to one queued item: if it's the final list we
case 4: // should do that
if (!grpc_closure_list_empty(lock->final_list)) {
lock->time_to_execute_final_list = true;
}
break;
case 3: // had one count, one unorphaned --> unlocked unorphaned
move_next(exec_ctx);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
case 2: // and one count, one orphaned --> unlocked and orphaned
move_next(exec_ctx);
really_destroy(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
case 1:
case 0:
// these values are illegal - representing an already unlocked or
// deleted lock
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
GPR_UNREACHABLE_CODE(return true);
}
GPR_TIMER_END("combiner.execute", 0);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error) {
grpc_combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
GRPC_ERROR_REF(error), false);
GRPC_ERROR_REF(error));
}
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool force_async_break) {
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG,
"C:%p grpc_combiner_execute_finally c=%p force_async_break=%d; ac=%p",
lock, closure, force_async_break, exec_ctx->active_combiner));
grpc_closure *closure, grpc_error *error) {
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_execute_finally c=%p; ac=%p",
lock, closure, exec_ctx->active_combiner));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
grpc_combiner_execute(exec_ctx, lock,
grpc_closure_create(enqueue_finally, closure), error);
grpc_closure_create(enqueue_finally, closure), error,
false);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
if (force_async_break) {
lock->take_async_break_before_final_list = true;
}
if (grpc_closure_list_empty(lock->final_list)) {
gpr_atm_full_fetch_add(&lock->state, 2);
}
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
void grpc_combiner_force_async_finally(grpc_combiner *lock) {
lock->take_async_break_before_final_list = true;
}

@@ -52,7 +52,8 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
// Execute \a action within the lock.
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error);
grpc_closure *closure, grpc_error *error,
bool covered_by_poller);
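// A sketch of the contract implied here: pass covered_by_poller=true when
// some poller is guaranteed to be polling on behalf of this closure. The
// combiner counts such closures (the covered_by_poller atomic above) and
// only offloads remaining work to the optional workqueue while that count
// is positive.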
// Execute \a action within the lock just prior to unlocking.
// if \a hint_async_break is additionally set, the combiner tries to trip
// through the workqueue between finishing the primary queue of combined
@@ -60,9 +61,9 @@ void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
// Takes a very slow and round-about path if not called from a
// grpc_combiner_execute closure
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool hint_async_break);
void grpc_combiner_force_async_finally(grpc_combiner *lock);
grpc_closure *closure, grpc_error *error);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
extern int grpc_combiner_trace;

@@ -265,7 +265,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
} else {
out = gpr_malloc(sizeof(*out));
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
gpr_log(GPR_DEBUG, "%p create copying", out);
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
#endif
out->ints = gpr_avl_ref(in->ints);
out->strs = gpr_avl_ref(in->strs);

@@ -37,6 +37,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
@@ -60,18 +61,51 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error;
did_something = true;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
for (;;) {
if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
did_something = true;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
}
continue;
}
if (grpc_combiner_continue_exec_ctx(exec_ctx)) {
continue;
}
break;
}
GPR_ASSERT(exec_ctx->active_combiner == NULL);
if (exec_ctx->stealing_from_workqueue != NULL) {
if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue,
exec_ctx->stolen_closure,
exec_ctx->stolen_closure->error_data.error);
GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
"exec_ctx_sched");
exec_ctx->stealing_from_workqueue = NULL;
exec_ctx->stolen_closure = NULL;
} else {
grpc_closure *c = exec_ctx->stolen_closure;
GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
"exec_ctx_sched");
exec_ctx->stealing_from_workqueue = NULL;
exec_ctx->stolen_closure = NULL;
grpc_error *error = c->error_data.error;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0);
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0);
grpc_exec_ctx_flush(exec_ctx);
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
return true;
}
}
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
@@ -86,12 +120,25 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error,
grpc_workqueue *offload_target_or_null) {
GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0);
if (offload_target_or_null == NULL) {
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
} else {
} else if (exec_ctx->stealing_from_workqueue == NULL) {
exec_ctx->stealing_from_workqueue = offload_target_or_null;
closure->error_data.error = error;
exec_ctx->stolen_closure = closure;
} else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) {
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
} else { /* stealing_from_workqueue == offload_target_or_null */
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null,
exec_ctx->stolen_closure,
exec_ctx->stolen_closure->error_data.error);
closure->error_data.error = error;
exec_ctx->stolen_closure = closure;
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
}
GPR_TIMER_END("grpc_exec_ctx_sched", 0);
}
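/* Taken together, the branches above implement single-slot workqueue
   "stealing": the first closure offloaded to a workqueue is parked on the
   exec_ctx in the hope of running it locally; a closure aimed at a
   different workqueue is enqueued immediately; and a second closure for
   the same workqueue evicts the parked one back to that workqueue and
   takes its slot. grpc_exec_ctx_flush() then either runs the stolen
   closure inline or returns it to its workqueue, depending on whether the
   exec_ctx is ready to finish. */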
void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,

@@ -66,15 +66,23 @@ typedef struct grpc_combiner grpc_combiner;
*/
struct grpc_exec_ctx {
grpc_closure_list closure_list;
grpc_workqueue *stealing_from_workqueue;
grpc_closure *stolen_closure;
/** currently active combiner: updated only via combiner.c */
grpc_combiner *active_combiner;
grpc_combiner *last_combiner;
bool cached_ready_to_finish;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
};
/* initializer for grpc_exec_ctx:
prefer to use GRPC_EXEC_CTX_INIT whenever possible */
#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
{ GRPC_CLOSURE_LIST_INIT, NULL, false, finish_check_arg, finish_check }
{ \
GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
finish_check \
}
#else
struct grpc_exec_ctx {
bool cached_ready_to_finish;
@@ -85,8 +93,10 @@ struct grpc_exec_ctx {
{ false, finish_check_arg, finish_check }
#endif
/* initialize an execution context at the top level of an API call into grpc
(this is safe to use elsewhere, though possibly not as efficient) */
#define GRPC_EXEC_CTX_INIT \
GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_never_ready_to_finish, NULL)
GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
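/* Note (editorial): switching the default from grpc_never_ready_to_finish
   to grpc_always_ready_to_finish presumably biases top-level exec_ctxs
   toward handing queued combiner/workqueue work back to pollers rather
   than draining it inline (see grpc_combiner_continue_exec_ctx and
   grpc_exec_ctx_flush above). */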
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.

@@ -209,11 +209,11 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
GPR_TIMER_BEGIN("recvmsg", 1);
GPR_TIMER_BEGIN("recvmsg", 0);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
GPR_TIMER_END("recvmsg", 0);
GPR_TIMER_END("recvmsg", read_bytes >= 0);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may

@@ -171,7 +171,7 @@ static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
wakeup(exec_ctx, workqueue);
}
grpc_closure *cl = (grpc_closure *)n;
grpc_error *clerr = cl->error;
grpc_error *clerr = cl->error_data.error;
cl->cb(exec_ctx, cl->cb_arg, clerr);
GRPC_ERROR_UNREF(clerr);
}
@@ -185,7 +185,7 @@ void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
gpr_atm last = gpr_atm_full_fetch_add(&workqueue->state, 2);
GPR_ASSERT(last & 1);
closure->error = error;
closure->error_data.error = error;
gpr_mpscq_push(&workqueue->queue, &closure->next_data.atm_next);
if (last == 1) {
wakeup(exec_ctx, workqueue);

@@ -83,6 +83,7 @@ static int g_shutdown;
static gpr_thd_id g_writing_thread;
static __thread int g_thread_id;
static int g_next_thread_id;
static int g_writing_enabled = 1;
static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
if (list->head == NULL) {
@@ -177,7 +178,7 @@ static void flush_logs(gpr_timer_log_list *list) {
}
}
static void finish_writing() {
static void finish_writing(void) {
pthread_mutex_lock(&g_mu);
g_shutdown = 1;
pthread_cond_signal(&g_cv);
@@ -230,6 +231,10 @@ static void gpr_timers_log_add(const char *tagstr, marker_type type,
int important, const char *file, int line) {
gpr_timer_entry *entry;
if (!g_writing_enabled) {
return;
}
if (g_thread_log == NULL || g_thread_log->num_entries == MAX_COUNT) {
rotate_log();
}
@@ -261,6 +266,8 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
gpr_timers_log_add(tagstr, END, important, file, line);
}
void gpr_timer_set_enabled(int enabled) { g_writing_enabled = enabled; }
/* Basic profiler specific API functions. */
void gpr_timers_global_init(void) {}
@@ -272,4 +279,6 @@ void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
void gpr_timers_set_log_filename(const char *filename) {}
void gpr_timer_set_enabled(int enabled) {}
#endif /* GRPC_BASIC_PROFILER */

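The new g_writing_enabled flag gives the basic profiler a runtime kill switch: entries logged while disabled are simply dropped in gpr_timers_log_add. One plausible use, with do_setup and do_hot_path as placeholders:

    #include "src/core/lib/profiling/timers.h"

    extern void do_setup(void);    /* illustrative */
    extern void do_hot_path(void); /* illustrative */

    static void profile_hot_path_only(void) {
      gpr_timer_set_enabled(0); /* marks recorded while disabled are dropped */
      do_setup();
      gpr_timer_set_enabled(1);
      GPR_TIMER_BEGIN("hot_path", 0);
      do_hot_path();
      GPR_TIMER_END("hot_path", 0);
    }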
@@ -34,6 +34,8 @@
#ifndef GRPC_CORE_LIB_PROFILING_TIMERS_H
#define GRPC_CORE_LIB_PROFILING_TIMERS_H
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif
@@ -50,18 +52,23 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
void gpr_timers_set_log_filename(const char *filename);
void gpr_timer_set_enabled(int enabled);
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \
do { \
/*printf("- %s\n", tag);*/ \
} while (0)
#define GPR_TIMER_BEGIN(tag, important) \
do { \
/*printf("%s {\n", tag);*/ \
} while (0)
#define GPR_TIMER_END(tag, important) \
do { \
/*printf("} // %s\n", tag);*/ \
} while (0)
#else /* at least one profiler requested... */

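Because these expand to empty statements when no profiler is configured, instrumentation such as the recvmsg timing earlier in this diff is free in default builds. The one discipline they still require: each GPR_TIMER_BEGIN should see a matching GPR_TIMER_END on every exit path, or the reconstructed timeline nests incorrectly. A sketch, where input_t and parse are illustrative:

    #include "src/core/lib/iomgr/error.h"
    #include "src/core/lib/profiling/timers.h"

    typedef struct input input_t;          /* illustrative */
    extern grpc_error *parse(input_t *in); /* illustrative */

    static grpc_error *timed_parse(input_t *in) {
      GPR_TIMER_BEGIN("parse", 0);
      grpc_error *err = parse(in);
      GPR_TIMER_END("parse", 0); /* end before returning keeps pairs balanced */
      return err;
    }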
@@ -99,6 +99,9 @@ void grpc_client_security_context_destroy(void *ctx) {
grpc_client_security_context *c = (grpc_client_security_context *)ctx;
grpc_call_credentials_unref(c->creds);
GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context");
if (c->extension.instance != NULL && c->extension.destroy != NULL) {
c->extension.destroy(c->extension.instance);
}
gpr_free(ctx);
}
@@ -114,6 +117,9 @@ grpc_server_security_context *grpc_server_security_context_create(void) {
void grpc_server_security_context_destroy(void *ctx) {
grpc_server_security_context *c = (grpc_server_security_context *)ctx;
GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "server_security_context");
if (c->extension.instance != NULL && c->extension.destroy != NULL) {
c->extension.destroy(c->extension.instance);
}
gpr_free(ctx);
}

@@ -84,6 +84,16 @@ void grpc_auth_context_unref(grpc_auth_context *policy);
void grpc_auth_property_reset(grpc_auth_property *property);
/* --- grpc_security_context_extension ---
Extension to the security context that may be set in a filter and accessed
later by a higher level method on a grpc_call object. */
typedef struct {
void *instance;
void (*destroy)(void *);
} grpc_security_context_extension;
/* --- grpc_client_security_context ---
Internal client-side security context. */
@@ -91,6 +101,7 @@ void grpc_auth_property_reset(grpc_auth_property *property);
typedef struct {
grpc_call_credentials *creds;
grpc_auth_context *auth_context;
grpc_security_context_extension extension;
} grpc_client_security_context;
grpc_client_security_context *grpc_client_security_context_create(void);
@@ -102,6 +113,7 @@ void grpc_client_security_context_destroy(void *ctx);
typedef struct {
grpc_auth_context *auth_context;
grpc_security_context_extension extension;
} grpc_server_security_context;
grpc_server_security_context *grpc_server_security_context_create(void);

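A sketch of how a filter could populate the new slot; the payload type and destructor are hypothetical, but the teardown path is the one added to security_context.c above:

    #include <grpc/support/alloc.h>

    #include "src/core/lib/security/context/security_context.h"

    typedef struct {
      int request_count; /* hypothetical per-call security state */
    } my_extension;

    static void my_extension_destroy(void *p) { gpr_free(p); }

    static void attach_extension(grpc_client_security_context *ctx) {
      my_extension *ext = gpr_malloc(sizeof(*ext));
      ext->request_count = 0;
      ctx->extension.instance = ext;
      ctx->extension.destroy = my_extension_destroy;
      /* grpc_client_security_context_destroy() now invokes
         my_extension_destroy(ext) exactly once at context teardown. */
    }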
@@ -230,33 +230,33 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error);
grpc_call *grpc_call_create(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative,
const void *server_transport_data, grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count, gpr_timespec send_deadline) {
grpc_error *grpc_call_create(const grpc_call_create_args *args,
grpc_call **out_call) {
size_t i, j;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_channel_stack *channel_stack =
grpc_channel_get_channel_stack(args->channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
*out_call = call =
gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
call->channel = channel;
call->cq = cq;
call->parent = parent_call;
call->channel = args->channel;
call->cq = args->cq;
call->parent = args->parent_call;
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
call->is_client = server_transport_data == NULL;
call->is_client = args->server_transport_data == NULL;
if (call->is_client) {
GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
for (i = 0; i < add_initial_metadata_count; i++) {
call->send_extra_metadata[i].md = add_initial_metadata[i];
GPR_ASSERT(args->add_initial_metadata_count <
MAX_SEND_EXTRA_METADATA_COUNT);
for (i = 0; i < args->add_initial_metadata_count; i++) {
call->send_extra_metadata[i].md = args->add_initial_metadata[i];
}
call->send_extra_metadata_count = (int)add_initial_metadata_count;
call->send_extra_metadata_count = (int)args->add_initial_metadata_count;
} else {
GPR_ASSERT(add_initial_metadata_count == 0);
GPR_ASSERT(args->add_initial_metadata_count == 0);
call->send_extra_metadata_count = 0;
}
for (i = 0; i < 2; i++) {
@@ -265,78 +265,79 @@ grpc_call *grpc_call_create(
}
}
call->send_deadline =
gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC);
GRPC_CHANNEL_INTERNAL_REF(channel, "call");
gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC);
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
grpc_error *error = grpc_call_stack_init(
&exec_ctx, channel_stack, 1, destroy_call, call, call->context,
server_transport_data, CALL_STACK_FROM_CALL(call));
args->server_transport_data, CALL_STACK_FROM_CALL(call));
if (error != GRPC_ERROR_NONE) {
intptr_t status;
if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status))
if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status)) {
status = GRPC_STATUS_UNKNOWN;
}
const char *error_str =
grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION);
close_with_status(&exec_ctx, call, (grpc_status_code)status,
error_str == NULL ? "unknown error" : error_str);
GRPC_ERROR_UNREF(error);
}
if (cq != NULL) {
if (args->cq != NULL) {
GPR_ASSERT(
pollset_set_alternative == NULL &&
args->pollset_set_alternative == NULL &&
"Only one of 'cq' and 'pollset_set_alternative' should be non-NULL.");
GRPC_CQ_INTERNAL_REF(cq, "bind");
GRPC_CQ_INTERNAL_REF(args->cq, "bind");
call->pollent =
grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq));
grpc_polling_entity_create_from_pollset(grpc_cq_pollset(args->cq));
}
if (pollset_set_alternative != NULL) {
call->pollent =
grpc_polling_entity_create_from_pollset_set(pollset_set_alternative);
if (args->pollset_set_alternative != NULL) {
call->pollent = grpc_polling_entity_create_from_pollset_set(
args->pollset_set_alternative);
}
if (!grpc_polling_entity_is_empty(&call->pollent)) {
grpc_call_stack_set_pollset_or_pollset_set(
&exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
if (parent_call != NULL) {
GRPC_CALL_INTERNAL_REF(parent_call, "child");
gpr_timespec send_deadline = args->send_deadline;
if (args->parent_call != NULL) {
GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
GPR_ASSERT(call->is_client);
GPR_ASSERT(!parent_call->is_client);
GPR_ASSERT(!args->parent_call->is_client);
gpr_mu_lock(&parent_call->mu);
gpr_mu_lock(&args->parent_call->mu);
if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
gpr_convert_clock_type(send_deadline,
parent_call->send_deadline.clock_type),
parent_call->send_deadline);
args->parent_call->send_deadline.clock_type),
args->parent_call->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
/* TODO(ctiller): This should change to use the appropriate census start_op
* call. */
if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
parent_call->context[GRPC_CONTEXT_TRACING].value,
NULL);
if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
grpc_call_context_set(
call, GRPC_CONTEXT_TRACING,
args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
} else {
GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
}
if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
}
if (parent_call->first_child == NULL) {
parent_call->first_child = call;
if (args->parent_call->first_child == NULL) {
args->parent_call->first_child = call;
call->sibling_next = call->sibling_prev = call;
} else {
call->sibling_next = parent_call->first_child;
call->sibling_prev = parent_call->first_child->sibling_prev;
call->sibling_next = args->parent_call->first_child;
call->sibling_prev = args->parent_call->first_child->sibling_prev;
call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
call;
}
gpr_mu_unlock(&parent_call->mu);
gpr_mu_unlock(&args->parent_call->mu);
}
if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) !=
0) {
@@ -344,7 +345,7 @@ grpc_call *grpc_call_create(
}
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_create", 0);
return call;
return error;
}
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
@@ -1196,6 +1197,10 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
char *msg = grpc_transport_stream_op_string(&bctl->op);
gpr_log(GPR_DEBUG, "receiving_stream_ready: %s", msg);
gpr_free(msg);
gpr_mu_lock(&bctl->call->mu);
if (bctl->call->has_initial_md_been_received || error != GRPC_ERROR_NONE ||
call->receiving_stream == NULL) {
@@ -1306,6 +1311,10 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_call *child_call;
grpc_call *next_child_call;
char *msg = grpc_transport_stream_op_string(&bctl->op);
gpr_log(GPR_DEBUG, "finish_batch: %s", msg);
gpr_free(msg);
GRPC_ERROR_REF(error);
gpr_mu_lock(&call->mu);
@@ -1396,6 +1405,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *stream_op = &bctl->op;
memset(stream_op, 0, sizeof(*stream_op));
stream_op->covered_by_poller = true;
if (nops == 0) {
GRPC_CALL_INTERNAL_REF(call, "completion");

@@ -49,15 +49,29 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *user_data);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask,
grpc_completion_queue *cq,
/* if not NULL, it'll be used in lieu of \a cq */
grpc_pollset_set *pollset_set_alternative,
const void *server_transport_data,
grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count,
gpr_timespec send_deadline);
typedef struct grpc_call_create_args {
grpc_channel *channel;
grpc_call *parent_call;
uint32_t propagation_mask;
grpc_completion_queue *cq;
/* if not NULL, it'll be used in lieu of cq */
grpc_pollset_set *pollset_set_alternative;
const void *server_transport_data;
grpc_mdelem **add_initial_metadata;
size_t add_initial_metadata_count;
gpr_timespec send_deadline;
} grpc_call_create_args;
/* Create a new call based on \a args.
   Regardless of success or failure, always returns a valid new call into
   *call. */
grpc_error *grpc_call_create(const grpc_call_create_args *args,
grpc_call **call);
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);

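The contract above means failure never hands back a dangling pointer: the call is created, closed with a status derived from the error, and still owned by the caller. A sketch of the resulting pattern, mirroring the channel.c and server.c call sites later in this diff:

    #include "src/core/lib/surface/call.h"

    static grpc_call *create_call_checked(const grpc_call_create_args *args) {
      grpc_call *call = NULL;
      grpc_error *error = grpc_call_create(args, &call);
      if (error != GRPC_ERROR_NONE) {
        /* 'call' is still valid; it was already closed with a status derived
           from 'error', so the usual grpc_call_destroy() teardown applies. */
        GRPC_ERROR_UNREF(error);
      }
      return call;
    }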
@@ -208,9 +208,21 @@ static grpc_call *grpc_channel_create_call_internal(
send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority);
}
return grpc_call_create(channel, parent_call, propagation_mask, cq,
pollset_set_alternative, NULL, send_metadata,
num_metadata, deadline);
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = channel;
args.parent_call = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
args.pollset_set_alternative = pollset_set_alternative;
args.server_transport_data = NULL;
args.add_initial_metadata = send_metadata;
args.add_initial_metadata_count = num_metadata;
args.send_deadline = deadline;
grpc_call *call;
GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call));
return call;
}
grpc_call *grpc_channel_create_call(grpc_channel *channel,

@@ -39,6 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/pollset.h"
@@ -50,6 +51,9 @@
#include "src/core/lib/surface/event_string.h"
int grpc_trace_operation_failures;
#ifndef NDEBUG
int grpc_trace_pending_tags;
#endif
typedef struct {
grpc_pollset_worker **worker;
@@ -313,13 +317,58 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GRPC_ERROR_UNREF(error);
}
typedef struct {
grpc_completion_queue *cq;
gpr_timespec deadline;
grpc_cq_completion *stolen_completion;
void *tag; /* for pluck */
} cq_is_finished_arg;
static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = arg;
grpc_completion_queue *cq = a->cq;
GPR_ASSERT(a->stolen_completion == NULL);
gpr_mu_lock(cq->mu);
if (cq->completed_tail != &cq->completed_head) {
a->stolen_completion = (grpc_cq_completion *)cq->completed_head.next;
cq->completed_head.next = a->stolen_completion->next & ~(uintptr_t)1;
if (a->stolen_completion == cq->completed_tail) {
cq->completed_tail = &cq->completed_head;
}
gpr_mu_unlock(cq->mu);
return true;
}
gpr_mu_unlock(cq->mu);
return gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
}
#ifndef NDEBUG
static void dump_pending_tags(grpc_completion_queue *cc) {
if (!grpc_trace_pending_tags) return;
gpr_strvec v;
gpr_strvec_init(&v);
gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:"));
for (size_t i = 0; i < cc->outstanding_tag_count; i++) {
char *s;
gpr_asprintf(&s, " %p", cc->outstanding_tags[i]);
gpr_strvec_add(&v, s);
}
char *out = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
gpr_log(GPR_DEBUG, "%s", out);
gpr_free(out);
}
#else
static void dump_pending_tags(grpc_completion_queue *cc) {}
#endif
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
grpc_pollset_worker *worker = NULL;
int first_loop = 1;
gpr_timespec now;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -333,11 +382,27 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
reserved));
GPR_ASSERT(!reserved);
dump_pending_tags(cc);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
cq_is_finished_arg is_finished_arg = {cc, deadline, NULL, NULL};
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
cq_is_next_finished, &is_finished_arg);
GRPC_CQ_INTERNAL_REF(cc, "next");
gpr_mu_lock(cc->mu);
for (;;) {
if (is_finished_arg.stolen_completion != NULL) {
gpr_mu_unlock(cc->mu);
grpc_cq_completion *c = is_finished_arg.stolen_completion;
is_finished_arg.stolen_completion = NULL;
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
c->done(&exec_ctx, c->done_arg, c);
break;
}
if (cc->completed_tail != &cc->completed_head) {
grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
cc->completed_head.next = c->next & ~(uintptr_t)1;
@@ -362,6 +427,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
first_loop = 0;
@@ -387,6 +453,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
}
@@ -394,6 +461,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_next", 0);
@@ -424,6 +492,30 @@ static void del_plucker(grpc_completion_queue *cc, void *tag,
GPR_UNREACHABLE_CODE(return );
}
static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = arg;
grpc_completion_queue *cq = a->cq;
GPR_ASSERT(a->stolen_completion == NULL);
gpr_mu_lock(cq->mu);
grpc_cq_completion *c;
grpc_cq_completion *prev = &cq->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
&cq->completed_head) {
if (c->tag == a->tag) {
prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
if (c == cq->completed_tail) {
cq->completed_tail = prev;
}
gpr_mu_unlock(cq->mu);
a->stolen_completion = c;
return true;
}
prev = c;
}
gpr_mu_unlock(cq->mu);
return gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
}
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
@@ -432,7 +524,6 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
int first_loop = 1;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -448,11 +539,27 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
}
GPR_ASSERT(!reserved);
dump_pending_tags(cc);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
cq_is_finished_arg is_finished_arg = {cc, deadline, NULL, tag};
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
cq_is_pluck_finished, &is_finished_arg);
GRPC_CQ_INTERNAL_REF(cc, "pluck");
gpr_mu_lock(cc->mu);
for (;;) {
if (is_finished_arg.stolen_completion != NULL) {
gpr_mu_unlock(cc->mu);
c = is_finished_arg.stolen_completion;
is_finished_arg.stolen_completion = NULL;
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
c->done(&exec_ctx, c->done_arg, c);
break;
}
prev = &cc->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
&cc->completed_head) {
@@ -485,6 +592,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
memset(&ret, 0, sizeof(ret));
/* TODO(ctiller): should we use a different result here */
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
@@ -493,6 +601,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
first_loop = 0;
@@ -518,6 +627,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
}
@@ -527,6 +637,7 @@ done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_pluck", 0);

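Taken together, the stolen_completion plumbing is what the new exec_ctx finish checks exist for: while queued closures are being flushed, cq_is_next_finished / cq_is_pluck_finished run under the cq lock and may pop a matching completion directly, letting the blocked next/pluck call return it without another trip through grpc_pollset_work. Nothing changes at the API surface; a plain consumer loop still sees completions the usual way:

    #include <grpc/grpc.h>
    #include <grpc/support/time.h>

    static void drain_one(grpc_completion_queue *cq) {
      gpr_timespec deadline = gpr_time_add(
          gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(1, GPR_TIMESPAN));
      grpc_event ev = grpc_completion_queue_next(cq, deadline, NULL);
      if (ev.type == GRPC_OP_COMPLETE) {
        /* possibly handed back via stolen_completion rather than the regular
           completed-list pop; the caller cannot tell the difference. */
      }
    }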
@@ -44,6 +44,9 @@
extern int grpc_cq_pluck_trace;
extern int grpc_cq_event_timeout_trace;
extern int grpc_trace_operation_failures;
#ifndef NDEBUG
extern int grpc_trace_pending_tags;
#endif
typedef struct grpc_cq_completion {
/** user supplied tag */

@@ -173,6 +173,9 @@ void grpc_init(void) {
// Default timeout trace to 1
grpc_cq_event_timeout_trace = 1;
grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
#ifndef NDEBUG
grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
#endif
grpc_security_pre_init();
grpc_iomgr_init();
grpc_executor_init();

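Assuming the usual tracer convention holds, GRPC_TRACE=pending_tags should turn this on in !NDEBUG builds; since the flag is registered as a plain int, it can also be flipped directly after grpc_init(), e.g.:

    #include <grpc/grpc.h>

    #include "src/core/lib/surface/completion_queue.h"

    static void enable_pending_tag_dumps(void) {
      grpc_init();
    #ifndef NDEBUG
      grpc_trace_pending_tags = 1; /* dump outstanding tags on cq timeouts */
    #endif
    }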
@@ -279,6 +279,7 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
grpc_channel_element *elem;
op->send_goaway = send_goaway;
op->set_accept_stream = true;
sc->slice = gpr_slice_from_copied_string("Server shutdown");
op->goaway_message = &sc->slice;
op->goaway_status = GRPC_STATUS_OK;
@@ -438,14 +439,6 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
chand->finish_destroy_channel_closure.cb_arg = chand;
grpc_transport_op *op =
grpc_make_transport_op(&chand->finish_destroy_channel_closure);
op->set_accept_stream = true;
grpc_channel_next_op(exec_ctx,
grpc_channel_stack_element(
grpc_channel_get_channel_stack(chand->channel), 0),
op);
if (error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Disconnected client: %s", msg);
@@ -824,11 +817,20 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
const void *transport_server_data) {
channel_data *chand = cd;
/* create a call */
grpc_call *call = grpc_call_create(chand->channel, NULL, 0, NULL, NULL,
transport_server_data, NULL, 0,
gpr_inf_future(GPR_CLOCK_MONOTONIC));
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = chand->channel;
args.server_transport_data = transport_server_data;
args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
grpc_call *call;
grpc_error *error = grpc_call_create(&args, &call);
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
if (error != GRPC_ERROR_NONE) {
got_initial_metadata(exec_ctx, elem, error);
GRPC_ERROR_UNREF(error);
return;
}
call_data *calld = elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));

@@ -180,7 +180,8 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
*w->current = tracker->current_state;
tracker->watchers = w->next;
if (grpc_connectivity_state_trace) {
gpr_log(GPR_DEBUG, "NOTIFY: %p", w->notify);
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
w->notify);
}
grpc_exec_ctx_sched(exec_ctx, w->notify,
GRPC_ERROR_REF(tracker->current_error), NULL);

@@ -46,8 +46,9 @@
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "%s %p:%p REF %d->%d %s", refcount->object_type,
refcount, refcount->destroy.cb_arg, val, val + 1, reason);
gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
refcount->object_type, refcount, refcount->destroy.cb_arg, val,
val + 1, reason);
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
@@ -58,8 +59,9 @@ void grpc_stream_ref(grpc_stream_refcount *refcount) {
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "%s %p:%p UNREF %d->%d %s", refcount->object_type,
refcount, refcount->destroy.cb_arg, val, val - 1, reason);
gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
refcount->object_type, refcount, refcount->destroy.cb_arg, val,
val - 1, reason);
#else
void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
@@ -274,3 +276,28 @@ grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
op->op.on_consumed = &op->outer_on_complete;
return &op->op;
}
typedef struct {
grpc_closure outer_on_complete;
grpc_closure *inner_on_complete;
grpc_transport_stream_op op;
} made_transport_stream_op;
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
made_transport_stream_op *op = arg;
grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error),
NULL);
gpr_free(op);
}
grpc_transport_stream_op *grpc_make_transport_stream_op(
grpc_closure *on_complete) {
made_transport_stream_op *op = gpr_malloc(sizeof(*op));
grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
op);
op->inner_on_complete = on_complete;
memset(&op->op, 0, sizeof(op->op));
op->op.on_complete = &op->outer_on_complete;
return &op->op;
}

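Usage mirrors grpc_make_transport_op above: the wrapper frees itself after forwarding to the inner closure, so the caller only manages its own state. A sketch using the existing add_cancellation helper; the cancel_state wrapper and function names are illustrative:

    #include <grpc/support/alloc.h>

    #include "src/core/lib/channel/channel_stack.h"
    #include "src/core/lib/transport/transport.h"

    typedef struct {
      grpc_closure done;
    } cancel_state;

    static void on_cancel_done(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
      gpr_free(arg); /* the made_transport_stream_op wrapper is already gone */
    }

    static void start_cancel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
      cancel_state *cs = gpr_malloc(sizeof(*cs));
      grpc_closure_init(&cs->done, on_cancel_done, cs);
      grpc_transport_stream_op *op = grpc_make_transport_stream_op(&cs->done);
      grpc_transport_stream_op_add_cancellation(op, GRPC_STATUS_CANCELLED);
      grpc_call_next_op(exec_ctx, elem, op);
    }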
@@ -113,6 +113,10 @@ typedef struct grpc_transport_stream_op {
have been completed. */
grpc_closure *on_complete;
/** Is the completion of this op covered by a poller (if false: the op should
complete independently of some pollset being polled) */
bool covered_by_poller;
/** Send initial metadata to the peer, from the provided metadata batch.
idempotent_request MUST be set if this is non-null */
grpc_metadata_batch *send_initial_metadata;
@@ -251,6 +255,7 @@ void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
gpr_slice *optional_message);
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
char *grpc_transport_op_string(grpc_transport_op *op);
/* Send a batch of operations on a transport
@@ -292,6 +297,10 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
/* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to
\a on_consumed and then delete the returned transport op */
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed);
/* Allocate a grpc_transport_stream_op, and preconfigure the on_consumed closure
to \a on_consumed and then delete the returned transport op */
grpc_transport_stream_op *grpc_make_transport_stream_op(
grpc_closure *on_consumed);
#ifdef __cplusplus
}

@@ -41,6 +41,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/connectivity_state.h"
/* These routines are here to facilitate debugging - they produce string
representations of various transport data structures */
@@ -143,6 +144,82 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
return out;
}
char *grpc_transport_op_string(grpc_transport_op *op) {
char *tmp;
char *out;
int first = 1;
gpr_strvec b;
gpr_strvec_init(&b);
if (op->on_connectivity_state_change != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
if (op->connectivity_state != NULL) {
gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:from=%s",
op->on_connectivity_state_change,
grpc_connectivity_state_name(*op->connectivity_state));
gpr_strvec_add(&b, tmp);
} else {
gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:unsubscribe",
op->on_connectivity_state_change);
gpr_strvec_add(&b, tmp);
}
}
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
const char *err = grpc_error_string(op->disconnect_with_error);
gpr_asprintf(&tmp, "DISCONNECT:%s", err);
gpr_strvec_add(&b, tmp);
grpc_error_free_string(err);
}
if (op->send_goaway) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
char *msg = op->goaway_message == NULL
? "null"
: gpr_dump_slice(*op->goaway_message,
GPR_DUMP_ASCII | GPR_DUMP_HEX);
gpr_asprintf(&tmp, "SEND_GOAWAY:status=%d:msg=%s", op->goaway_status, msg);
if (op->goaway_message != NULL) gpr_free(msg);
gpr_strvec_add(&b, tmp);
}
if (op->set_accept_stream) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_asprintf(&tmp, "SET_ACCEPT_STREAM:%p(%p,...)", op->set_accept_stream_fn,
op->set_accept_stream_user_data);
gpr_strvec_add(&b, tmp);
}
if (op->bind_pollset != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET"));
}
if (op->bind_pollset_set != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET_SET"));
}
if (op->send_ping != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("SEND_PING"));
}
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
return out;
}
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_transport_stream_op *op) {
char *str = grpc_transport_stream_op_string(op);

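The new printer slots into the same debug pattern the call.c hunks above use for stream ops:

    static void log_transport_op(const char *where, grpc_transport_op *op) {
      char *msg = grpc_transport_op_string(op);
      gpr_log(GPR_DEBUG, "%s: %s", where, msg);
      gpr_free(msg); /* gpr_strvec_flatten heap-allocates the returned string */
    }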
@@ -50,6 +50,7 @@
"\x00\x00\x00\x04\x00\x00\x00\x00\x00" /* headers: generated from \
large_metadata.headers in this \
directory */ \
"\x00\x00\x00\x04\x01\x00\x00\x00\x00" \
"\x00" \
"5{\x01\x05\x00\x00\x00\x01" \
"\x10\x05:path\x08/foo/bar" \
@@ -92,6 +93,7 @@
in this \
directory \
*/ \
"\x00\x00\x00\x04\x01\x00\x00\x00\x00" \
"\x00\x00\xc9\x01\x04\x00\x00\x00\x01" \
"\x10\x05:path\x08/foo/bar" \
"\x10\x07:scheme\x04http" \
