Merge branch 'direct-calls' into buffer_pools_for_realsies

reviewable/pr8239/r2
Craig Tiller 8 years ago
commit 0d19c67ea9
  1. BUILD (12 lines)
  2. CMakeLists.txt (5 lines)
  3. Makefile (6 lines)
  4. binding.gyp (1 line)
  5. build.yaml (2 lines)
  6. config.m4 (1 line)
  7. doc/environment_variables.md (64 lines)
  8. gRPC-Core.podspec (3 lines)
  9. grpc.gemspec (2 lines)
  10. package.xml (2 lines)
  11. src/core/ext/client_config/subchannel.c (4 lines)
  12. src/core/ext/transport/chttp2/transport/chttp2_plugin.c (3 lines)
  13. src/core/ext/transport/chttp2/transport/chttp2_transport.c (2151 lines)
  14. src/core/ext/transport/chttp2/transport/frame.h (4 lines)
  15. src/core/ext/transport/chttp2/transport/frame_data.c (68 lines)
  16. src/core/ext/transport/chttp2/transport/frame_data.h (9 lines)
  17. src/core/ext/transport/chttp2/transport/frame_goaway.c (18 lines)
  18. src/core/ext/transport/chttp2/transport/frame_goaway.h (9 lines)
  19. src/core/ext/transport/chttp2/transport/frame_ping.c (13 lines)
  20. src/core/ext/transport/chttp2/transport/frame_ping.h (8 lines)
  21. src/core/ext/transport/chttp2/transport/frame_rst_stream.c (38 lines)
  22. src/core/ext/transport/chttp2/transport/frame_rst_stream.h (9 lines)
  23. src/core/ext/transport/chttp2/transport/frame_settings.c (23 lines)
  24. src/core/ext/transport/chttp2/transport/frame_settings.h (9 lines)
  25. src/core/ext/transport/chttp2/transport/frame_window_update.c (34 lines)
  26. src/core/ext/transport/chttp2/transport/frame_window_update.h (5 lines)
  27. src/core/ext/transport/chttp2/transport/hpack_parser.c (492 lines)
  28. src/core/ext/transport/chttp2/transport/hpack_parser.h (17 lines)
  29. src/core/ext/transport/chttp2/transport/internal.h (649 lines)
  30. src/core/ext/transport/chttp2/transport/parsing.c (888 lines)
  31. src/core/ext/transport/chttp2/transport/stream_lists.c (351 lines)
  32. src/core/ext/transport/chttp2/transport/stream_map.c (36 lines)
  33. src/core/ext/transport/chttp2/transport/stream_map.h (4 lines)
  34. src/core/ext/transport/chttp2/transport/writing.c (462 lines)
  35. src/core/lib/iomgr/closure.c (16 lines)
  36. src/core/lib/iomgr/closure.h (11 lines)
  37. src/core/lib/iomgr/combiner.c (349 lines)
  38. src/core/lib/iomgr/combiner.h (15 lines)
  39. src/core/lib/iomgr/error.c (2 lines)
  40. src/core/lib/iomgr/error.h (8 lines)
  41. src/core/lib/iomgr/ev_epoll_linux.c (207 lines)
  42. src/core/lib/iomgr/ev_poll_and_epoll_posix.c (30 lines)
  43. src/core/lib/iomgr/ev_poll_posix.c (30 lines)
  44. src/core/lib/iomgr/ev_posix.c (23 lines)
  45. src/core/lib/iomgr/ev_posix.h (13 lines)
  46. src/core/lib/iomgr/exec_ctx.c (65 lines)
  47. src/core/lib/iomgr/exec_ctx.h (14 lines)
  48. src/core/lib/iomgr/iomgr.c (11 lines)
  49. src/core/lib/iomgr/tcp_posix.c (11 lines)
  50. src/core/lib/iomgr/workqueue.h (16 lines)
  51. src/core/lib/iomgr/workqueue_posix.c (196 lines)
  52. src/core/lib/iomgr/workqueue_posix.h (61 lines)
  53. src/core/lib/iomgr/workqueue_windows.c (10 lines)
  54. src/core/lib/profiling/basic_timers.c (11 lines)
  55. src/core/lib/profiling/timers.h (2 lines)
  56. src/core/lib/support/log.c (9 lines)
  57. src/core/lib/support/string.c (11 lines)
  58. src/core/lib/support/string.h (4 lines)
  59. src/core/lib/surface/call.c (121 lines)
  60. src/core/lib/surface/call.h (32 lines)
  61. src/core/lib/surface/channel.c (18 lines)
  62. src/core/lib/surface/completion_queue.c (167 lines)
  63. src/core/lib/surface/completion_queue.h (3 lines)
  64. src/core/lib/surface/init.c (4 lines)
  65. src/core/lib/surface/server.c (35 lines)
  66. src/core/lib/surface/server.h (3 lines)
  67. src/core/lib/transport/connectivity_state.c (3 lines)
  68. src/core/lib/transport/transport.c (35 lines)
  69. src/core/lib/transport/transport.h (9 lines)
  70. src/core/lib/transport/transport_op_string.c (95 lines)
  71. src/python/grpcio/grpc_core_dependencies.py (1 line)
  72. test/core/bad_client/tests/large_metadata.c (2 lines)
  73. test/core/end2end/cq_verifier.c (25 lines)
  74. test/core/end2end/cq_verifier.h (1 line)
  75. test/core/end2end/fuzzers/client_fuzzer_corpus/0083d5addbeca55271ed7ef93c8016bf7ca76903 (binary)
  76. test/core/end2end/fuzzers/client_fuzzer_corpus/07b0bed3226eefac4a84000ec584e4ce06ebf1bf (binary)
  77. test/core/end2end/fuzzers/client_fuzzer_corpus/07cec5c8d9c856a910c6fb57da2ae954f44beed0 (binary)
  78. test/core/end2end/fuzzers/client_fuzzer_corpus/0c27c9999302b39bf2256a90b0cdb767fb2b6fe3 (binary)
  79. test/core/end2end/fuzzers/client_fuzzer_corpus/0d407f099f8418de3dd94bd2146c858a8c6575ad (binary)
  80. test/core/end2end/fuzzers/client_fuzzer_corpus/0d4d486aa9fd6e9c10cc9ca8967e922cadddb2fe (binary)
  81. test/core/end2end/fuzzers/client_fuzzer_corpus/0d9ba07b57eb0e076b187c4455f662db085e730b (binary)
  82. test/core/end2end/fuzzers/client_fuzzer_corpus/0f6b989cec08ef9da603dc83704d85900bd22f1f (binary)
  83. test/core/end2end/fuzzers/client_fuzzer_corpus/10b25b0726cb6d820165699e5a453691c7a9c343 (binary)
  84. test/core/end2end/fuzzers/client_fuzzer_corpus/1231c6d007d9e43d169122348363e20d9f25ee93 (binary)
  85. test/core/end2end/fuzzers/client_fuzzer_corpus/13a9b61e431c20734c19bb36d85883b6a501284e (binary)
  86. test/core/end2end/fuzzers/client_fuzzer_corpus/1698ec182fad9d973b84615da3a683ecdf2d0b3b (binary)
  87. test/core/end2end/fuzzers/client_fuzzer_corpus/1859e2ee759e20fe195f67615a1576ce2b7d5bbd (binary)
  88. test/core/end2end/fuzzers/client_fuzzer_corpus/1a9017db5ad8a9dc6cfe72305da1683a87a73452 (binary)
  89. test/core/end2end/fuzzers/client_fuzzer_corpus/1bd90335afc9e0a1e6a9296e3cc27c03c1201886 (binary)
  90. test/core/end2end/fuzzers/client_fuzzer_corpus/1be157b0fc79f0e7e1e05dfa3cbbe1ad71528bc2 (binary)
  91. test/core/end2end/fuzzers/client_fuzzer_corpus/2185f411bdb1edc610f16ffc86836ae366193e03 (binary)
  92. test/core/end2end/fuzzers/client_fuzzer_corpus/22661803bd1c7198df4be6e08924ef6a48af9cd4 (binary)
  93. test/core/end2end/fuzzers/client_fuzzer_corpus/2717067bbc0e9bfc1d90d15cddf6154800a25ec6 (binary)
  94. test/core/end2end/fuzzers/client_fuzzer_corpus/2825cfc19c9371f4fe70851283c68d49470d4d55 (binary)
  95. test/core/end2end/fuzzers/client_fuzzer_corpus/29303c16f3afa18c2c0b84e77e587535a705a74c (binary)
  96. test/core/end2end/fuzzers/client_fuzzer_corpus/2b5eb5aac77af905877bd98ec2c4d746b247abb6 (binary)
  97. test/core/end2end/fuzzers/client_fuzzer_corpus/2cc43573f271ecd332551c1fb34ebc8645eaefe8 (binary)
  98. test/core/end2end/fuzzers/client_fuzzer_corpus/2feb41037f5dd34e9f3465a2fbf1a6d355c8ce9d (binary)
  99. test/core/end2end/fuzzers/client_fuzzer_corpus/300998021c7f743ff49d9cc192343ffd43eb47f2 (binary)
  100. test/core/end2end/fuzzers/client_fuzzer_corpus/310b2aff5e2ec78b6004630bed39d49f8d13bb21 (binary)
Some files were not shown because too many files have changed in this diff.

BUILD (12 lines)

@ -220,7 +220,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -382,7 +381,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -619,7 +617,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -766,7 +763,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -973,7 +969,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -1112,7 +1107,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -1324,7 +1318,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -1442,7 +1435,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -1734,7 +1726,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -1847,7 +1838,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -2236,7 +2226,6 @@ objc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -2452,7 +2441,6 @@ objc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",

@ -350,7 +350,6 @@ add_library(grpc
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -608,7 +607,6 @@ add_library(grpc_cronet
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -838,7 +836,6 @@ add_library(grpc_unsecure
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -1095,7 +1092,6 @@ add_library(grpc++
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -1449,7 +1445,6 @@ add_library(grpc++_unsecure
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c

@ -2587,7 +2587,6 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -2863,7 +2862,6 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -3127,7 +3125,6 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -3319,7 +3316,6 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -3659,7 +3655,6 @@ LIBGRPC++_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -4288,7 +4283,6 @@ LIBGRPC++_UNSECURE_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \

@ -625,7 +625,6 @@
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_posix.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',

@ -224,7 +224,6 @@ filegroups:
- src/core/lib/iomgr/wakeup_fd_pipe.h
- src/core/lib/iomgr/wakeup_fd_posix.h
- src/core/lib/iomgr/workqueue.h
- src/core/lib/iomgr/workqueue_posix.h
- src/core/lib/iomgr/workqueue_windows.h
- src/core/lib/json/json.h
- src/core/lib/json/json_common.h
@ -310,7 +309,6 @@ filegroups:
- src/core/lib/iomgr/wakeup_fd_nospecial.c
- src/core/lib/iomgr/wakeup_fd_pipe.c
- src/core/lib/iomgr/wakeup_fd_posix.c
- src/core/lib/iomgr/workqueue_posix.c
- src/core/lib/iomgr/workqueue_windows.c
- src/core/lib/json/json.c
- src/core/lib/json/json_reader.c

@ -144,7 +144,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \

@ -0,0 +1,64 @@
gRPC environment variables
--------------------------
gRPC C-core-based implementations (those contained in this repository) expose
some configuration through environment variables.
* GRPC_ABORT_ON_LEAKS
A debugging aid that causes a call to abort() when gRPC objects are leaked past
grpc_shutdown(). Set to 1 to enable the abort; if unset or set to 0, the
process is not aborted.
* GRPC_GOOGLE_CREDENTIALS_ENV_VAR
The path to the credentials file to use when Google credentials are created.
* GRPC_SSL_CIPHER_SUITES
A colon-separated list of cipher suites to use with OpenSSL.
Defaults to:
ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-GCM-SHA384
* GRPC_POLL_STRATEGY [posix-style environments only]
Declares which polling engines to try when starting gRPC.
This is a comma-separated list of engines, which are tried in priority order
first -> last.
Available polling engines include:
- epoll (linux-only) - a polling engine based around the epoll family of
system calls
- poll - a portable polling engine based around poll(), intended to be a
fallback engine when nothing better exists
- legacy - the (deprecated) original polling engine for gRPC
* GRPC_TRACE
A comma-separated list of tracers that provide additional insight into how
gRPC C core is processing requests via debug logs. Available tracers include:
- api - traces API calls to the C core
- channel - traces operations on the C core channel stack
- combiner - traces combiner lock state
- compression - traces compression operations
- connectivity_state - traces connectivity state changes to channels
- channel_stack_builder - traces information about channel stacks being built
- http - traces state in the http2 transport engine
- http1 - traces HTTP/1.x operations performed by gRPC
- flowctl - traces http2 flow control
- op_failure - traces error information when failure is pushed onto a
completion queue
- pending_tags - [debug builds only] traces still-in-progress tags on
completion queues
- round_robin - traces the round_robin load balancing policy
- glb - traces the grpclb load balancer
- queue_pluck
- queue_timeout
- server_channel - lightweight trace of significant server channel events
- secure_endpoint - traces bytes flowing through encrypted channels
- transport_security - traces metadata about secure channel establishment
- tcp - traces bytes in and out of a channel
'all' can additionally be used to turn all traces on.
Individual traces can be disabled by prefixing them with '-'.
Example:
export GRPC_TRACE=all,-pending_tags
* GRPC_VERBOSITY
Default gRPC logging verbosity - one of:
- DEBUG - log all gRPC messages
- INFO - log INFO and ERROR messages
- ERROR - log only errors
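These variables are read by the C core itself, so they can also be set from
application code before the library is initialized. A minimal sketch, not part
of this commit, assuming a POSIX environment for setenv():

  #include <stdlib.h>
  #include <grpc/grpc.h>

  int main(void) {
    /* Set before grpc_init() so the settings are seen at startup. */
    setenv("GRPC_VERBOSITY", "DEBUG", 1 /* overwrite */);
    setenv("GRPC_TRACE", "all,-pending_tags", 1);
    grpc_init();
    /* ... application code ... */
    grpc_shutdown();
    return 0;
  }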

@ -307,7 +307,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/iomgr/workqueue.h',
'src/core/lib/iomgr/workqueue_posix.h',
'src/core/lib/iomgr/workqueue_windows.h',
'src/core/lib/json/json.h',
'src/core/lib/json/json_common.h',
@ -473,7 +472,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_posix.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
@ -678,7 +676,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/iomgr/workqueue.h',
'src/core/lib/iomgr/workqueue_posix.h',
'src/core/lib/iomgr/workqueue_windows.h',
'src/core/lib/json/json.h',
'src/core/lib/json/json_common.h',

@ -227,7 +227,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_posix.h )
s.files += %w( src/core/lib/iomgr/workqueue.h )
s.files += %w( src/core/lib/iomgr/workqueue_posix.h )
s.files += %w( src/core/lib/iomgr/workqueue_windows.h )
s.files += %w( src/core/lib/json/json.h )
s.files += %w( src/core/lib/json/json_common.h )
@ -393,7 +392,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/wakeup_fd_nospecial.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_posix.c )
s.files += %w( src/core/lib/iomgr/workqueue_posix.c )
s.files += %w( src/core/lib/iomgr/workqueue_windows.c )
s.files += %w( src/core/lib/json/json.c )
s.files += %w( src/core/lib/json/json_reader.c )

@ -234,7 +234,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_windows.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_common.h" role="src" />
@ -400,7 +399,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_nospecial.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_reader.c" role="src" />

@ -220,8 +220,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"SUBCHANNEL: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
old_val + delta, reason);
"SUBCHANNEL: %p %s 0x%08" PRIxPTR " -> 0x%08" PRIxPTR " [%s]", c,
purpose, old_val, old_val + delta, reason);
#endif
return old_val;
}
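The format-string fix above matters because gpr_atm values are pointer-sized;
printing them with "%x" (which expects unsigned int) is undefined behavior on
64-bit platforms. A standalone sketch of the inttypes.h idiom, independent of
gRPC:

  #include <inttypes.h>
  #include <stdio.h>

  int main(void) {
    intptr_t old_val = 0x1234;
    intptr_t delta = 1;
    /* PRIxPTR expands to the correct length modifier for uintptr_t */
    printf("0x%08" PRIxPTR " -> 0x%08" PRIxPTR "\n", (uintptr_t)old_val,
           (uintptr_t)(old_val + delta));
    return 0;
  }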

@ -36,14 +36,11 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/metadata.h"
extern int grpc_http_write_state_trace;
void grpc_chttp2_plugin_init(void) {
grpc_chttp2_base64_encode_and_huffman_compress =
grpc_chttp2_base64_encode_and_huffman_compress_impl;
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
grpc_register_tracer("http_write_state", &grpc_http_write_state_trace);
}
void grpc_chttp2_plugin_shutdown(void) {}
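grpc_register_tracer (declared in src/core/lib/debug/trace.h, included above)
binds a tracer name, as used in the GRPC_TRACE environment variable, to an int
flag that the core flips on at startup. A hypothetical sketch of the same
pattern; the "my_subsystem" tracer and plugin function are invented for
illustration:

  /* set to 1 when GRPC_TRACE includes "my_subsystem" */
  static int grpc_my_subsystem_trace = 0;

  void my_plugin_init(void) {
    grpc_register_tracer("my_subsystem", &grpc_my_subsystem_trace);
  }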

File diff suppressed because it is too large.

@ -40,8 +40,8 @@
#include "src/core/lib/iomgr/error.h"
/* defined in internal.h */
typedef struct grpc_chttp2_stream_parsing grpc_chttp2_stream_parsing;
typedef struct grpc_chttp2_transport_parsing grpc_chttp2_transport_parsing;
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
#define GRPC_CHTTP2_FRAME_DATA 0
#define GRPC_CHTTP2_FRAME_HEADER 1

@ -51,16 +51,11 @@ grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser) {
void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser) {
grpc_byte_stream *bs;
if (parser->parsing_frame) {
if (parser->parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"),
1);
}
while (
(bs = grpc_chttp2_incoming_frame_queue_pop(&parser->incoming_frames))) {
grpc_byte_stream_destroy(exec_ctx, bs);
exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"));
}
GRPC_ERROR_UNREF(parser->error);
}
grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
@ -145,22 +140,17 @@ void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
stats->data_bytes += write_bytes;
}
grpc_error *grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *p,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_data_parser *p = parser;
uint32_t message_flags;
grpc_chttp2_incoming_byte_stream *incoming_byte_stream;
char *msg;
if (is_last && p->is_last_frame) {
stream_parsing->received_close = 1;
}
if (cur == end) {
return GRPC_ERROR_NONE;
}
@ -171,7 +161,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
return GRPC_ERROR_REF(p->error);
fh_0:
case GRPC_CHTTP2_DATA_FH_0:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_type = *cur;
switch (p->frame_type) {
case 0:
@ -184,7 +174,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
gpr_asprintf(&msg, "Bad GRPC frame type 0x%02x", p->frame_type);
p->error = GRPC_ERROR_CREATE(msg);
p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID,
(intptr_t)stream_parsing->id);
(intptr_t)s->id);
gpr_free(msg);
msg = gpr_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
p->error =
@ -201,7 +191,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_1:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size = ((uint32_t)*cur) << 24;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_2;
@ -209,7 +199,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_2:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 16;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_3;
@ -217,7 +207,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_3:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_4;
@ -225,7 +215,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_4:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur);
p->state = GRPC_CHTTP2_DATA_FRAME;
++cur;
@ -234,35 +224,32 @@ grpc_error *grpc_chttp2_data_parser_parse(
message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
p->parsing_frame = incoming_byte_stream =
grpc_chttp2_incoming_byte_stream_create(
exec_ctx, transport_parsing, stream_parsing, p->frame_size,
message_flags, &p->incoming_frames);
grpc_chttp2_incoming_byte_stream_create(exec_ctx, t, s, p->frame_size,
message_flags);
/* fallthrough */
case GRPC_CHTTP2_DATA_FRAME:
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
if (cur == end) {
return GRPC_ERROR_NONE;
}
uint32_t remaining = (uint32_t)(end - cur);
if (remaining == p->frame_size) {
stream_parsing->stats.incoming.data_bytes += p->frame_size;
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE, 1);
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
p->state = GRPC_CHTTP2_DATA_FH_0;
return GRPC_ERROR_NONE;
} else if (remaining > p->frame_size) {
stream_parsing->stats.incoming.data_bytes += p->frame_size;
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg),
(size_t)(cur + p->frame_size - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE, 1);
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
cur += p->frame_size;
goto fh_0; /* loop */
@ -272,10 +259,25 @@ grpc_error *grpc_chttp2_data_parser_parse(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
p->frame_size -= remaining;
stream_parsing->stats.incoming.data_bytes += remaining;
s->stats.incoming.data_bytes += remaining;
return GRPC_ERROR_NONE;
}
}
GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
}
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
grpc_chttp2_data_parser *p = parser;
grpc_error *error = parse_inner(exec_ctx, p, t, s, slice);
if (is_last && p->is_last_frame) {
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
GRPC_ERROR_NONE);
}
return error;
}
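The GRPC_CHTTP2_DATA_FH_0 through FH_4 states above accumulate the 5-byte gRPC
message prefix one byte at a time: a flags byte (bit 0 set for a compressed
message) followed by a big-endian 32-bit length, matching the shifts in the
parser. A standalone sketch of the same wire format, assuming the whole prefix
is available in a single buffer (names are illustrative):

  #include <stddef.h>
  #include <stdint.h>

  typedef struct {
    uint8_t flags;   /* 0x1 = compressed message */
    uint32_t length; /* message length, big-endian on the wire */
  } message_prefix;

  static int parse_prefix(const uint8_t *buf, size_t len,
                          message_prefix *out) {
    if (len < 5) return 0; /* incomplete: the real parser stays in FH_n */
    out->flags = buf[0];
    out->length = ((uint32_t)buf[1] << 24) | ((uint32_t)buf[2] << 16) |
                  ((uint32_t)buf[3] << 8) | (uint32_t)buf[4];
    return 1;
  }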

@ -69,7 +69,6 @@ typedef struct {
grpc_error *error;
int is_frame_compressed;
grpc_chttp2_incoming_frame_queue incoming_frames;
grpc_chttp2_incoming_byte_stream *parsing_frame;
} grpc_chttp2_data_parser;
@ -92,10 +91,10 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
/* handle a slice of a data frame - is_last indicates the last slice of a
frame */
grpc_error *grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
uint32_t write_bytes, int is_eof,

@ -67,10 +67,11 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_goaway_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -148,12 +149,9 @@ grpc_error *grpc_chttp2_goaway_parser_parse(
p->debug_pos += (uint32_t)(end - cur);
p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
if (is_last) {
transport_parsing->goaway_received = 1;
transport_parsing->goaway_last_stream_index = p->last_stream_id;
gpr_slice_unref(transport_parsing->goaway_text);
transport_parsing->goaway_error = (grpc_status_code)p->error_code;
transport_parsing->goaway_text =
gpr_slice_new(p->debug_data, p->debug_length, gpr_free);
grpc_chttp2_add_incoming_goaway(
exec_ctx, t, (uint32_t)p->error_code,
gpr_slice_new(p->debug_data, p->debug_length, gpr_free));
p->debug_data = NULL;
}
return GRPC_ERROR_NONE;

@ -65,10 +65,11 @@ void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p);
void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
grpc_error *grpc_chttp2_goaway_parser_begin_frame(
grpc_chttp2_goaway_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_goaway_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
gpr_slice debug_data,

@ -73,10 +73,10 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_ping_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -91,10 +91,11 @@ grpc_error *grpc_chttp2_ping_parser_parse(
if (p->byte == 8) {
GPR_ASSERT(is_last);
if (p->is_ack) {
grpc_chttp2_ack_ping(exec_ctx, transport_parsing, p->opaque_8bytes);
grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
gpr_slice_buffer_add(&transport_parsing->qbuf,
gpr_slice_buffer_add(&t->qbuf,
grpc_chttp2_ping_create(1, p->opaque_8bytes));
grpc_chttp2_initiate_write(exec_ctx, t, false, "ping response");
}
}

@ -48,9 +48,9 @@ gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes);
grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_ping_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */

@ -39,6 +39,8 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
gpr_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
grpc_transport_one_way_stats *stats) {
@ -83,10 +85,11 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_rst_stream_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -97,19 +100,28 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(
cur++;
p->byte++;
}
stream_parsing->stats.incoming.framing_bytes += (uint64_t)(end - cur);
s->stats.incoming.framing_bytes += (uint64_t)(end - cur);
if (p->byte == 4) {
GPR_ASSERT(is_last);
stream_parsing->received_close = 1;
if (stream_parsing->forced_close_error == GRPC_ERROR_NONE) {
stream_parsing->forced_close_error = grpc_error_set_int(
GRPC_ERROR_CREATE("RST_STREAM"), GRPC_ERROR_INT_HTTP2_ERROR,
(intptr_t)((((uint32_t)p->reason_bytes[0]) << 24) |
(((uint32_t)p->reason_bytes[1]) << 16) |
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]))));
uint32_t reason = (((uint32_t)p->reason_bytes[0]) << 24) |
(((uint32_t)p->reason_bytes[1]) << 16) |
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]));
grpc_error *error = GRPC_ERROR_NONE;
if (reason != GRPC_CHTTP2_NO_ERROR) {
error = grpc_error_set_int(GRPC_ERROR_CREATE("RST_STREAM"),
GRPC_ERROR_INT_HTTP2_ERROR, reason);
grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
(grpc_chttp2_error_code)reason, s->deadline);
char *status_details;
gpr_asprintf(&status_details, "Received RST_STREAM with error code %d",
reason);
gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
gpr_free(status_details);
grpc_chttp2_fake_status(exec_ctx, t, s, status_code, &slice_details);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, error);
}
return GRPC_ERROR_NONE;

@ -49,9 +49,10 @@ gpr_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_rst_stream_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */

@ -143,10 +143,10 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
}
}
grpc_error *grpc_chttp2_settings_parser_parse(
grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
const uint8_t *cur = GPR_SLICE_START_PTR(slice);
const uint8_t *end = GPR_SLICE_END_PTR(slice);
@ -162,11 +162,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
if (cur == end) {
parser->state = GRPC_CHTTP2_SPS_ID0;
if (is_last) {
transport_parsing->settings_updated = 1;
memcpy(parser->target_settings, parser->incoming_settings,
GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
gpr_slice_buffer_add(&transport_parsing->qbuf,
grpc_chttp2_settings_ack_create());
gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
}
return GRPC_ERROR_NONE;
}
@ -226,9 +224,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
break;
case GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE:
grpc_chttp2_goaway_append(
transport_parsing->last_incoming_stream_id, sp->error_value,
t->last_new_stream_id, sp->error_value,
gpr_slice_from_static_string("HTTP2 settings error"),
&transport_parsing->qbuf);
&t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
grpc_error *err = GRPC_ERROR_CREATE(msg);
@ -238,18 +236,17 @@ grpc_error *grpc_chttp2_settings_parser_parse(
}
if (parser->id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE &&
parser->incoming_settings[parser->id] != parser->value) {
transport_parsing->initial_window_update =
t->initial_window_update =
(int64_t)parser->value - parser->incoming_settings[parser->id];
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "adding %d for initial_window change",
(int)transport_parsing->initial_window_update);
(int)t->initial_window_update);
}
}
parser->incoming_settings[parser->id] = parser->value;
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "CHTTP2:%s: got setting %d = %d",
transport_parsing->is_client ? "CLI" : "SVR", parser->id,
parser->value);
t->is_client ? "CLI" : "SVR", parser->id, parser->value);
}
} else if (grpc_http_trace) {
gpr_log(GPR_ERROR, "CHTTP2: Ignoring unknown setting %d (value %d)",

@ -95,9 +95,10 @@ gpr_slice grpc_chttp2_settings_ack_create(void);
grpc_error *grpc_chttp2_settings_parser_begin_frame(
grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
uint32_t *settings);
grpc_error *grpc_chttp2_settings_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */

@ -80,9 +80,8 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
}
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -94,8 +93,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
p->byte++;
}
if (stream_parsing != NULL) {
stream_parsing->stats.incoming.framing_bytes += (uint32_t)(end - cur);
if (s != NULL) {
s->stats.incoming.framing_bytes += (uint32_t)(end - cur);
}
if (p->byte == 4) {
@ -109,17 +108,26 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
}
GPR_ASSERT(is_last);
if (transport_parsing->incoming_stream_id != 0) {
if (stream_parsing != NULL) {
GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", transport_parsing,
stream_parsing, outgoing_window,
if (t->incoming_stream_id != 0) {
if (s != NULL) {
bool was_zero = s->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", t, s, outgoing_window,
received_update);
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
bool is_zero = s->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"stream.read_flow_control");
}
}
} else {
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", transport_parsing,
outgoing_window, received_update);
bool was_zero = t->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", t, outgoing_window,
received_update);
bool is_zero = t->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_initiate_write(exec_ctx, t, false,
"new_global_flow_control");
}
}
}
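Both branches above use the same edge-triggered pattern: a write is initiated
only when a flow-control window crosses from empty back to non-empty, so a
burst of WINDOW_UPDATE frames does not schedule redundant writes. A standalone
sketch of that pattern (names are illustrative, not the transport's own):

  #include <stdbool.h>
  #include <stdint.h>

  typedef struct { int64_t outgoing_window; } window;

  /* returns true exactly when this credit un-stalls the window */
  static bool credit(window *w, uint32_t received_update) {
    bool was_zero = w->outgoing_window <= 0;
    w->outgoing_window += received_update;
    bool is_zero = w->outgoing_window <= 0;
    return was_zero && !is_zero;
  }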

@ -51,8 +51,7 @@ gpr_slice grpc_chttp2_window_update_create(uint32_t id, uint32_t window_delta,
grpc_error *grpc_chttp2_window_update_parser_begin_frame(
grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */

File diff suppressed because it is too large.

@ -45,7 +45,8 @@
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
typedef grpc_error *(*grpc_chttp2_hpack_parser_state)(
grpc_chttp2_hpack_parser *p, const uint8_t *beg, const uint8_t *end);
grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *beg,
const uint8_t *end);
typedef struct {
char *str;
@ -55,7 +56,7 @@ typedef struct {
struct grpc_chttp2_hpack_parser {
/* user specified callback for each header output */
void (*on_header)(void *user_data, grpc_mdelem *md);
void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem *md);
void *on_header_user_data;
grpc_error *last_error;
@ -104,15 +105,17 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
/* returns 1 on success, 0 on error */
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *beg,
const uint8_t *end);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
grpc_error *grpc_chttp2_header_parser_parse(
grpc_exec_ctx *exec_ctx, void *hpack_parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */
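The signature changes above thread a grpc_exec_ctx explicitly through the
parser and its on_header callback instead of relying on ambient state, so any
work the callback schedules lands on the caller's execution context. A minimal
sketch of that callback shape; the types are reduced to opaque structs, and
the header_sink/emit_header names are invented for illustration:

  typedef struct grpc_exec_ctx grpc_exec_ctx;
  typedef struct grpc_mdelem grpc_mdelem;

  typedef struct {
    void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data,
                      grpc_mdelem *md);
    void *on_header_user_data;
  } header_sink;

  static void emit_header(grpc_exec_ctx *exec_ctx, header_sink *sink,
                          grpc_mdelem *md) {
    /* downstream work can now be scheduled on the same exec_ctx */
    sink->on_header(exec_ctx, sink->on_header_user_data, md);
  }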

@ -53,31 +53,25 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/transport_impl.h"
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
/* streams are kept in various linked lists depending on what things need to
happen to them... this enum labels each list */
typedef enum {
GRPC_CHTTP2_LIST_ALL_STREAMS,
GRPC_CHTTP2_LIST_CHECK_READ_OPS,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE,
GRPC_CHTTP2_LIST_WRITABLE,
GRPC_CHTTP2_LIST_WRITING,
GRPC_CHTTP2_LIST_WRITTEN,
GRPC_CHTTP2_LIST_PARSING_SEEN,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
/* streams waiting for the outgoing window in the writing path, they will be
* merged to the stalled list or writable list under transport lock. */
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
STREAM_LIST_COUNT /* must be last */
} grpc_chttp2_stream_list_id;
typedef enum {
GRPC_CHTTP2_WRITE_STATE_IDLE,
GRPC_CHTTP2_WRITE_STATE_WRITING,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
} grpc_chttp2_write_state;
/* deframer state for the overall http2 stream of bytes */
typedef enum {
/* prefix: one entry per http2 connection prefix byte */
@ -152,6 +146,12 @@ typedef struct grpc_chttp2_outstanding_ping {
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
typedef struct grpc_chttp2_write_cb {
size_t call_at_byte;
grpc_closure *closure;
struct grpc_chttp2_write_cb *next;
} grpc_chttp2_write_cb;
/* forward declared in frame_data.h */
struct grpc_chttp2_incoming_byte_stream {
grpc_byte_stream base;
@ -161,12 +161,13 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_chttp2_transport *transport;
grpc_chttp2_stream *stream;
int is_tail;
bool is_tail;
gpr_mu slice_mu; // protects slices, on_next
gpr_slice_buffer slices;
grpc_closure *on_next;
gpr_slice *next;
uint32_t remaining_bytes;
struct {
grpc_closure closure;
@ -178,12 +179,68 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_closure finished_action;
};
typedef struct {
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
grpc_endpoint *ep;
char *peer_string;
grpc_combiner *combiner;
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
uint8_t closed;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
/** maps stream id to grpc_chttp2_stream objects */
grpc_chttp2_stream_map stream_map;
grpc_closure write_action_begin_locked;
grpc_closure write_action;
grpc_closure write_action_end;
grpc_closure write_action_end_locked;
grpc_closure read_action_begin;
grpc_closure read_action_locked;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
grpc_chttp2_stream **accepting_stream;
struct {
/* accept stream callback */
void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
/** connectivity tracking */
grpc_connectivity_state_tracker state_tracker;
} channel_callback;
/** data to write now */
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
int64_t outgoing_window;
/** is this a client? */
uint8_t is_client;
/** data to write next write */
gpr_slice_buffer qbuf;
/** window available for us to send to peer */
int64_t outgoing_window;
/** window available to announce to peer */
int64_t announce_incoming_window;
/** how much window would we like to have for incoming_window */
@ -194,8 +251,6 @@ typedef struct {
/** have we sent a goaway */
uint8_t sent_goaway;
/** is this transport a client? */
uint8_t is_client;
/** are the local settings dirty and need to be sent? */
uint8_t dirtied_local_settings;
/** have local settings been sent? */
@ -212,49 +267,14 @@ typedef struct {
/** how far to lookahead in a stream? */
uint32_t stream_lookahead;
/** last received stream id */
uint32_t last_incoming_stream_id;
/** last new stream id */
uint32_t last_new_stream_id;
/** pings awaiting responses */
grpc_chttp2_outstanding_ping pings;
/** next payload for an outgoing ping */
uint64_t ping_counter;
/** concurrent stream count: updated when not parsing,
so this is a strict over-estimation on the client */
uint32_t concurrent_stream_count;
} grpc_chttp2_transport_global;
typedef struct {
/** data to write now */
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
int64_t outgoing_window;
/** is this a client? */
uint8_t is_client;
/** callback for when writing is done */
grpc_closure done_cb;
/** maximum frame size */
uint32_t max_frame_size;
} grpc_chttp2_transport_writing;
struct grpc_chttp2_transport_parsing {
/** is this transport a client? (boolean) */
uint8_t is_client;
/** were settings updated? */
uint8_t settings_updated;
/** was a settings ack received? */
uint8_t settings_ack_received;
/** was a goaway frame received? */
uint8_t goaway_received;
/** initial window change */
int64_t initial_window_update;
/** data to write later - after parsing */
gpr_slice_buffer qbuf;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
/** simple one shot parsers */
@ -267,13 +287,12 @@ struct grpc_chttp2_transport_parsing {
/** parser for goaway frames */
grpc_chttp2_goaway_parser goaway_parser;
/** initial window change */
int64_t initial_window_update;
/** window available for peer to send to us */
int64_t incoming_window;
/** next stream id available at the time of beginning parsing */
uint32_t next_stream_id;
uint32_t last_incoming_stream_id;
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
uint8_t incoming_frame_type;
@ -284,135 +303,38 @@ struct grpc_chttp2_transport_parsing {
uint32_t incoming_frame_size;
uint32_t incoming_stream_id;
/* current max frame size */
uint32_t max_frame_size;
/* active parser */
void *parser_data;
grpc_chttp2_stream_parsing *incoming_stream;
grpc_chttp2_stream *incoming_stream;
grpc_error *(*parser)(grpc_exec_ctx *exec_ctx, void *parser_user_data,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
/* received settings */
uint32_t settings[GRPC_CHTTP2_NUM_SETTINGS];
/* last settings that were sent */
uint32_t last_sent_settings[GRPC_CHTTP2_NUM_SETTINGS];
/* goaway data */
grpc_status_code goaway_error;
uint32_t goaway_last_stream_index;
gpr_slice goaway_text;
int64_t outgoing_window;
grpc_chttp2_write_cb *write_cb_pool;
};
typedef enum {
/** no writing activity allowed */
GRPC_CHTTP2_WRITES_CORKED,
/** no writing activity */
GRPC_CHTTP2_WRITING_INACTIVE,
/** write has been requested and scheduled against the workqueue */
GRPC_CHTTP2_WRITE_SCHEDULED,
/** write has been initiated after being reaped from the workqueue */
GRPC_CHTTP2_WRITING,
/** write has been initiated, AND another write needs to be started once it's
done */
GRPC_CHTTP2_WRITING_STALE_WITH_POLLER,
GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
} grpc_chttp2_write_state;
GRPC_METADATA_NOT_PUBLISHED,
GRPC_METADATA_SYNTHESIZED_FROM_FAKE,
GRPC_METADATA_PUBLISHED_FROM_WIRE,
GPRC_METADATA_PUBLISHED_AT_CLOSE
} grpc_published_metadata_method;
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
grpc_endpoint *ep;
char *peer_string;
/** when this drops to zero it's safe to shutdown the endpoint */
gpr_refcount shutdown_ep_refs;
struct {
grpc_combiner *combiner;
/** is a thread currently in the global lock */
bool global_active;
/** is a thread currently parsing */
bool parsing_active;
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** has a check_read_ops been scheduled */
bool check_read_ops_scheduled;
} executor;
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
uint8_t closed;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
/** global state for reading/writing */
grpc_chttp2_transport_global global;
/** state only accessible by the chain of execution that
set writing_state >= GRPC_WRITING, and only by the writing closure
chain. */
grpc_chttp2_transport_writing writing;
/** state only accessible by the chain of execution that
set parsing_active=1 */
grpc_chttp2_transport_parsing parsing;
/** maps stream id to grpc_chttp2_stream objects;
owned by the parsing thread when parsing */
grpc_chttp2_stream_map parsing_stream_map;
/** streams created by the client (possibly during parsing);
merged with parsing_stream_map during unlock when no
parsing is occurring */
grpc_chttp2_stream_map new_stream_map;
/** closure to execute writing */
grpc_closure writing_action;
/** closure to start reading from the endpoint */
grpc_closure reading_action;
grpc_closure reading_action_locked;
grpc_closure post_parse_locked;
/** closure to actually do parsing */
grpc_closure parsing_action;
/** closure to initiate writing */
grpc_closure initiate_writing;
/** closure to finish writing */
grpc_closure terminate_writing;
/** closure to flush read state up the stack */
grpc_closure initiate_read_flush_locked;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
grpc_chttp2_stream **accepting_stream;
struct {
/* accept stream callback */
void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
/** connectivity tracking */
grpc_connectivity_state_tracker state_tracker;
} channel_callback;
grpc_closure destroy_stream;
void *destroy_stream_arg;
/** Transport op to be applied post-parsing */
grpc_transport_op *post_parsing_op;
};
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
@ -422,20 +344,21 @@ typedef struct {
As the upper layer offers more bytes, this value increases.
As bytes are read, this value decreases. */
uint32_t max_recv_bytes;
/** The number of bytes the upper layer has offered to read but we have
not yet announced to HTTP2 flow control.
As the upper layers offer to read more bytes, this value increases.
As we advertise incoming flow control window, this value decreases. */
uint32_t unannounced_incoming_window_for_parse;
uint32_t unannounced_incoming_window_for_writing;
/** things the upper layers would like to send */
grpc_metadata_batch *send_initial_metadata;
grpc_closure *send_initial_metadata_finished;
grpc_byte_stream *send_message;
grpc_closure *send_message_finished;
grpc_metadata_batch *send_trailing_metadata;
grpc_closure *send_trailing_metadata_finished;
grpc_byte_stream *fetching_send_message;
uint32_t fetched_send_message_length;
gpr_slice fetching_slice;
int64_t fetching_slice_end_offset;
bool complete_fetch_covered_by_poller;
grpc_closure complete_fetch;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *recv_initial_metadata_ready;
grpc_byte_stream **recv_message;
@ -455,95 +378,44 @@ typedef struct {
bool read_closed;
/** Are all published incoming byte streams closed. */
bool all_incoming_byte_streams_finished;
/** Is this stream in the stream map. */
bool in_stream_map;
/** Has this stream seen an error.
If true, then pending incoming frames can be thrown away. */
bool seen_error;
bool exceeded_metadata_size;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
/** the error that resulted in this stream being write-closed */
grpc_error *write_closed_error;
bool published_initial_metadata;
bool published_trailing_metadata;
grpc_published_metadata_method published_metadata[2];
bool final_metadata_requested;
grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
grpc_chttp2_incoming_frame_queue incoming_frames;
gpr_timespec deadline;
} grpc_chttp2_stream_global;
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
uint8_t fetching;
bool sent_initial_metadata;
uint8_t sent_message;
uint8_t sent_trailing_metadata;
uint8_t read_closed;
/** send this initial metadata */
grpc_metadata_batch *send_initial_metadata;
grpc_byte_stream *send_message;
grpc_metadata_batch *send_trailing_metadata;
int64_t outgoing_window;
/** how much window should we announce? */
uint32_t announce_window;
gpr_slice_buffer flow_controlled_buffer;
gpr_slice fetching_slice;
size_t stream_fetched;
grpc_closure finished_fetch;
/** stats gathered during the write */
grpc_transport_one_way_stats stats;
} grpc_chttp2_stream_writing;
struct grpc_chttp2_stream_parsing {
/** saw some stream level error */
grpc_error *forced_close_error;
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
/** has this stream received a close */
uint8_t received_close;
/** how many header frames have we received? */
uint8_t header_frames_received;
/** which metadata did we get (on this parse) */
uint8_t got_metadata_on_parse[2];
/** should we raise the seen_error flag in transport_global */
bool seen_error;
bool exceeded_metadata_size;
/** window available for peer to send to us */
int64_t incoming_window;
/** parsing state for data frames */
grpc_chttp2_data_parser data_parser;
/** amount of window given */
int64_t outgoing_window;
/** number of bytes received - reset at end of parse thread execution */
int64_t received_bytes;
/** stats gathered during the parse */
grpc_transport_stream_stats stats;
/** incoming metadata */
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
};
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
grpc_chttp2_stream_global global;
grpc_chttp2_stream_writing writing;
grpc_chttp2_stream_parsing parsing;
grpc_closure init_stream;
grpc_closure destroy_stream;
void *destroy_stream_arg;
bool sent_initial_metadata;
bool sent_trailing_metadata;
/** how much window should we announce? */
uint32_t announce_window;
gpr_slice_buffer flow_controlled_buffer;
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
};
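/* Editorial note: the net effect of this hunk is that the former
   grpc_chttp2_stream_global / _writing / _parsing sub-structs are collapsed
   into the single grpc_chttp2_stream above; the serialization that used to
   come from shuttling state between those copies is now provided by the
   transport's combiner (see combiner.c later in this diff). */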
/** Transport writing call flow:
@@ -559,162 +431,71 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason);
/** Someone is unlocking the transport mutex: check to see if writes
are required, and schedule them if so */
int grpc_chttp2_unlocking_check_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint);
void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
void *transport_writing, grpc_error *error);
void grpc_chttp2_cleanup_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_prepare_to_read(grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
are required, and frame them if so */
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
/** Process one slice of incoming data; returns GRPC_ERROR_NONE if the
    connection is still viable after reading, or an error describing why the
    connection should be torn down */
grpc_error *grpc_chttp2_perform_read(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice);
void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, gpr_slice slice);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
/** Get a writable stream
returns non-zero if there was a stream available */
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) GRPC_MUST_USE_RESULT;
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing);
int grpc_chttp2_list_pop_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_add_written_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing);
int grpc_chttp2_list_pop_parsing_seen_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_add_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_check_read_ops(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
bool grpc_chttp2_list_remove_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
bool grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing);
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_remove_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
grpc_chttp2_transport_parsing *transport_parsing, uint32_t id);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
uint32_t id);
void grpc_chttp2_add_incoming_goaway(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
uint32_t goaway_error, gpr_slice goaway_text);
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
/* returns 1 if this is the last stream, 0 otherwise */
int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
int grpc_chttp2_has_streams(grpc_chttp2_transport *t);
void grpc_chttp2_for_all_streams(
grpc_chttp2_transport_global *transport_global, void *user_data,
void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
grpc_chttp2_stream_global *stream_global));
void grpc_chttp2_parsing_become_skip_parser(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
void grpc_chttp2_complete_closure_step(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_closure **pclosure,
grpc_error *error);
grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t);
int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
uint32_t id);
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t id);
void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t goaway_error,
gpr_slice goaway_text);
void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_closure **pclosure,
grpc_error *error, const char *desc);
#define GRPC_CHTTP2_CLIENT_CONNECT_STRING "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
@@ -805,57 +586,83 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
const char *var2, int is_client,
uint32_t stream_id, int64_t val1, int64_t val2);
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream,
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *stream,
grpc_status_code status, gpr_slice *details);
void grpc_chttp2_mark_stream_closed(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, int close_reads, int close_writes,
grpc_error *error);
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int close_reads,
int close_writes, grpc_error *error);
void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global);
grpc_chttp2_transport *t);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global, reason)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
const char *reason);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global,
#define GRPC_CHTTP2_STREAM_REF(stream, reason) \
grpc_chttp2_stream_ref(stream, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream, reason)
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
const char *reason);
#else
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global);
#define GRPC_CHTTP2_STREAM_REF(stream, reason) grpc_chttp2_stream_ref(stream)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream)
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
#endif
//#define GRPC_CHTTP2_REFCOUNTING_DEBUG 1
#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) \
grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
grpc_chttp2_unref_transport(cl, t, r, __FILE__, __LINE__)
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, const char *reason,
const char *file, int line);
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
const char *file, int line);
#else
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) grpc_chttp2_unref_transport(cl, t)
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t);
#endif
grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
uint32_t frame_size, uint32_t flags);
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
gpr_slice slice);
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
grpc_error *error, int from_parsing_thread);
grpc_error *error);
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *parsing,
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const uint8_t *opaque_8bytes);
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
bool covered_by_poller, const char *reason);
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, bool covered_by_poller,
const char *reason);
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_error *due_to_error);
void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */

File diff suppressed because it is too large

@@ -35,27 +35,6 @@
#include <grpc/support/log.h>
#define TRANSPORT_FROM_GLOBAL(tg) \
((grpc_chttp2_transport *)((char *)(tg)-offsetof(grpc_chttp2_transport, \
global)))
#define STREAM_FROM_GLOBAL(sg) \
((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
#define TRANSPORT_FROM_WRITING(tw) \
((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \
writing)))
#define STREAM_FROM_WRITING(sw) \
((grpc_chttp2_stream *)((char *)(sw)-offsetof(grpc_chttp2_stream, writing)))
#define TRANSPORT_FROM_PARSING(tp) \
((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
parsing)))
#define STREAM_FROM_PARSING(sp) \
((grpc_chttp2_stream *)((char *)(sp)-offsetof(grpc_chttp2_stream, parsing)))
/* core list management */
static int stream_list_empty(grpc_chttp2_transport *t,
@@ -139,321 +118,57 @@ static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
/* wrappers for specializations */
bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
return stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE);
}
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WRITABLE);
if (r != 0) {
*stream_global = &stream->global;
*stream_writing = &stream->writing;
}
return r;
}
bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE);
}
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
GPR_ASSERT(stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITING));
}
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing) {
return !stream_list_empty(TRANSPORT_FROM_WRITING(transport_writing),
GRPC_CHTTP2_LIST_WRITING);
}
int grpc_chttp2_list_pop_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITING);
if (r != 0) {
*stream_writing = &stream->writing;
}
return r;
}
void grpc_chttp2_list_add_written_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITTEN);
}
int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITTEN);
if (r != 0) {
*stream_global = &stream->global;
*stream_writing = &stream->writing;
}
return r;
}
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(
TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r =
stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
}
return r;
}
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing) {
stream_list_add(TRANSPORT_FROM_PARSING(transport_parsing),
STREAM_FROM_PARSING(stream_parsing),
GRPC_CHTTP2_LIST_PARSING_SEEN);
}
int grpc_chttp2_list_pop_parsing_seen_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_PARSING(transport_parsing), &stream,
GRPC_CHTTP2_LIST_PARSING_SEEN);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
}
return r;
}
void grpc_chttp2_list_add_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_check_read_ops(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
if (!t->executor.check_read_ops_scheduled) {
grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
&t->initiate_read_flush_locked,
GRPC_ERROR_NONE, false);
t->executor.check_read_ops_scheduled = true;
}
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
bool grpc_chttp2_list_remove_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
GPR_ASSERT(s->id != 0);
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
grpc_chttp2_stream *stream = STREAM_FROM_WRITING(stream_writing);
gpr_log(GPR_DEBUG, "writing stalled %d", stream->global.id);
if (!stream->included[GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT]) {
GRPC_CHTTP2_STREAM_REF(&stream->global, "chttp2_writing_stalled");
}
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing), stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT);
int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
bool grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream *stream;
bool out = false;
grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing);
while (stream_list_pop(transport, &stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) {
gpr_log(GPR_DEBUG, "move %d from writing stalled to just stalled",
stream->global.id);
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
&stream->writing);
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &stream->global,
"chttp2_writing_stalled");
out = true;
}
return out;
bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
gpr_log(GPR_DEBUG, "stalled %d", stream_writing->id);
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITING);
}
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) {
return !stream_list_empty(t, GRPC_CHTTP2_LIST_WRITING);
}
void grpc_chttp2_list_remove_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITING);
}
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
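/* Editorial note: streams sit on the waiting-for-concurrency list while the
   peer's advertised MAX_CONCURRENT_STREAMS budget is exhausted; they are
   popped and assigned stream ids as earlier streams complete. */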
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add_tail(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
return stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
int grpc_chttp2_has_streams(grpc_chttp2_transport *t) {
return !stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
void grpc_chttp2_for_all_streams(
grpc_chttp2_transport_global *transport_global, void *user_data,
void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
grpc_chttp2_stream_global *stream_global)) {
grpc_chttp2_stream *s;
grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
for (s = t->lists[GRPC_CHTTP2_LIST_ALL_STREAMS].head; s != NULL;
s = s->links[GRPC_CHTTP2_LIST_ALL_STREAMS].next) {
cb(transport_global, user_data, &s->global);
}
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}

@@ -77,6 +77,7 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
GPR_ASSERT(count == 0 || keys[count - 1] < key);
GPR_ASSERT(value);
GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
if (count == capacity) {
if (map->free > capacity / 4) {
@@ -96,40 +97,6 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
map->count = count + 1;
}
void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
grpc_chttp2_stream_map *dst) {
/* if src is empty we don't need to do anything */
if (src->count == src->free) {
return;
}
/* if dst is empty we simply need to swap */
if (dst->count == dst->free) {
GPR_SWAP(grpc_chttp2_stream_map, *src, *dst);
return;
}
/* the first element of src must be greater than the last of dst...
* however the maps may need compacting for this property to hold */
if (src->keys[0] <= dst->keys[dst->count - 1]) {
src->count = compact(src->keys, src->values, src->count);
src->free = 0;
dst->count = compact(dst->keys, dst->values, dst->count);
dst->free = 0;
}
GPR_ASSERT(src->keys[0] > dst->keys[dst->count - 1]);
/* if dst doesn't have capacity, resize */
if (dst->count + src->count > dst->capacity) {
dst->capacity = GPR_MAX(dst->capacity * 3 / 2, dst->count + src->count);
dst->keys = gpr_realloc(dst->keys, dst->capacity * sizeof(uint32_t));
dst->values = gpr_realloc(dst->values, dst->capacity * sizeof(void *));
}
memcpy(dst->keys + dst->count, src->keys, src->count * sizeof(uint32_t));
memcpy(dst->values + dst->count, src->values, src->count * sizeof(void *));
dst->count += src->count;
dst->free += src->free;
src->count = 0;
src->free = 0;
}
static void **find(grpc_chttp2_stream_map *map, uint32_t key) {
size_t min_idx = 0;
size_t max_idx = map->count;
@@ -170,6 +137,7 @@ void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key) {
if (map->free == map->count) {
map->free = map->count = 0;
}
GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
}
return out;
}

@@ -65,10 +65,6 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
or NULL otherwise */
void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key);
/* Move all elements of src into dst */
void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
grpc_chttp2_stream_map *dst);
/* Return an existing key, or NULL if it does not exist */
void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key);

@@ -40,349 +40,215 @@
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing);
static void add_to_write_list(grpc_chttp2_write_cb **list,
grpc_chttp2_write_cb *cb) {
cb->next = *list;
*list = cb;
}
static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb,
grpc_error *error) {
grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error,
"finish_write_cb");
cb->next = t->write_cb_pool;
t->write_cb_pool = cb;
}
int grpc_chttp2_unlocking_check_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_writing *stream_writing;
static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, size_t send_bytes,
grpc_chttp2_write_cb **list, grpc_error *error) {
grpc_chttp2_write_cb *cb = *list;
*list = NULL;
while (cb) {
grpc_chttp2_write_cb *next = cb->next;
if (cb->call_at_byte <= send_bytes) {
finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
} else {
cb->call_at_byte -= send_bytes;
add_to_write_list(list, cb);
}
cb = next;
}
}
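/* Editorial aside, a hypothetical trace of update_list: suppose *list holds
   callbacks with call_at_byte 100 and 5000, and send_bytes == 1024. The first
   callback fires via finish_write_cb with a ref on 'error'; the second is
   re-queued with call_at_byte reduced to 3976, to fire after a later write. */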
GPR_TIMER_BEGIN("grpc_chttp2_unlocking_check_writes", 0);
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
transport_writing->max_frame_size =
transport_global->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
if (transport_global->dirtied_local_settings &&
!transport_global->sent_local_settings) {
if (t->dirtied_local_settings && !t->sent_local_settings) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
&t->outbuf,
grpc_chttp2_settings_create(
transport_global->settings[GRPC_SENT_SETTINGS],
transport_global->settings[GRPC_LOCAL_SETTINGS],
transport_global->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
transport_global->force_send_settings = 0;
transport_global->dirtied_local_settings = 0;
transport_global->sent_local_settings = 1;
t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS],
t->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
t->force_send_settings = 0;
t->dirtied_local_settings = 0;
t->sent_local_settings = 1;
}
/* simple writes are queued to qbuf, and flushed here */
gpr_slice_buffer_move_into(&transport_global->qbuf,
&transport_writing->outbuf);
GPR_ASSERT(transport_global->qbuf.count == 0);
gpr_slice_buffer_move_into(&t->qbuf, &t->outbuf);
GPR_ASSERT(t->qbuf.count == 0);
grpc_chttp2_hpack_compressor_set_max_table_size(
&transport_writing->hpack_compressor,
transport_global->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
&t->hpack_compressor,
t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
transport_global, outgoing_window);
if (transport_writing->outgoing_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
&stream_global)) {
grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
false, "transport.read_flow_control");
if (t->outgoing_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"transport.read_flow_control");
}
}
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (grpc_chttp2_list_pop_writable_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
bool sent_initial_metadata = stream_writing->sent_initial_metadata;
bool become_writable = false;
while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
bool sent_initial_metadata = s->sent_initial_metadata;
bool now_writing = false;
stream_writing->id = stream_global->id;
stream_writing->read_closed = stream_global->read_closed;
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t,
t->is_client ? "CLIENT" : "SERVER", s->id, sent_initial_metadata,
s->send_initial_metadata != NULL, s->announce_window));
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_writing, stream_writing,
outgoing_window, stream_global,
outgoing_window);
if (!sent_initial_metadata && stream_global->send_initial_metadata) {
stream_writing->send_initial_metadata =
stream_global->send_initial_metadata;
stream_global->send_initial_metadata = NULL;
become_writable = true;
/* send initial metadata if it's available */
if (!sent_initial_metadata && s->send_initial_metadata) {
grpc_chttp2_encode_header(
&t->hpack_compressor, s->id, s->send_initial_metadata, 0,
t->settings[GRPC_ACKED_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
&s->stats.outgoing, &t->outbuf);
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
now_writing = true;
}
/* send any window updates */
if (s->announce_window > 0) {
uint32_t announce = s->announce_window;
gpr_slice_buffer_add(&t->outbuf,
grpc_chttp2_window_update_create(
s->id, s->announce_window, &s->stats.outgoing));
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, announce_window, announce);
}
if (sent_initial_metadata) {
if (stream_global->send_message != NULL) {
gpr_slice hdr = gpr_slice_malloc(5);
uint8_t *p = GPR_SLICE_START_PTR(hdr);
uint32_t len = stream_global->send_message->length;
GPR_ASSERT(stream_writing->send_message == NULL);
p[0] = (stream_global->send_message->flags &
GRPC_WRITE_INTERNAL_COMPRESS) != 0;
p[1] = (uint8_t)(len >> 24);
p[2] = (uint8_t)(len >> 16);
p[3] = (uint8_t)(len >> 8);
p[4] = (uint8_t)(len);
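/* Editorial aside: the 5 bytes built above are the gRPC length-prefixed
   message framing -- one compressed-flag byte followed by the message length
   as a 4-byte big-endian integer. For example, an uncompressed 300-byte
   message yields the header 00 00 00 01 2c. */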
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer, hdr);
if (stream_global->send_message->length > 0) {
stream_writing->send_message = stream_global->send_message;
} else {
stream_writing->send_message = NULL;
/* send any body bytes, if allowed by flow control */
if (s->flow_controlled_buffer.length > 0) {
uint32_t max_outgoing =
(uint32_t)GPR_MIN(t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
GPR_MIN(s->outgoing_window, t->outgoing_window));
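/* Editorial aside: the data framed for this stream in one write pass is
   bounded by the smallest of the acked SETTINGS_MAX_FRAME_SIZE, the stream's
   flow-control window, and the transport's window; e.g. 16384 vs 5000 vs
   70000 gives max_outgoing == 5000. */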
if (max_outgoing > 0) {
uint32_t send_bytes =
(uint32_t)GPR_MIN(max_outgoing, s->flow_controlled_buffer.length);
bool is_last_data_frame =
s->fetching_send_message == NULL &&
send_bytes == s->flow_controlled_buffer.length;
bool is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, send_bytes,
is_last_frame, &s->stats.outgoing,
&t->outbuf);
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, outgoing_window,
send_bytes);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, outgoing_window,
send_bytes);
if (is_last_frame) {
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_CHTTP2_NO_ERROR,
&s->stats.outgoing));
}
}
s->sending_bytes += send_bytes;
now_writing = true;
if (s->flow_controlled_buffer.length > 0) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
} else if (t->outgoing_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
}
stream_writing->stream_fetched = 0;
stream_global->send_message = NULL;
}
if ((stream_writing->send_message != NULL ||
stream_writing->flow_controlled_buffer.length > 0) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
become_writable = true;
} else {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0) {
grpc_chttp2_encode_header(
&t->hpack_compressor, s->id, s->send_trailing_metadata, true,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
&s->stats.outgoing, &t->outbuf);
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
gpr_slice_buffer_add(
&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_CHTTP2_NO_ERROR, &s->stats.outgoing));
}
}
if (stream_global->send_trailing_metadata) {
stream_writing->send_trailing_metadata =
stream_global->send_trailing_metadata;
stream_global->send_trailing_metadata = NULL;
become_writable = true;
now_writing = true;
}
}
if (!stream_global->read_closed &&
stream_global->unannounced_incoming_window_for_writing > 1024) {
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_global, stream_writing,
announce_window, stream_global,
unannounced_incoming_window_for_writing);
become_writable = true;
}
if (become_writable) {
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
if (now_writing) {
if (!grpc_chttp2_list_add_writing_stream(t, s)) {
/* already in writing list: drop ref */
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
}
} else {
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write");
}
}
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
if (transport_global->announce_incoming_window > 0) {
uint32_t announced = (uint32_t)GPR_MIN(
transport_global->announce_incoming_window, UINT32_MAX);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_global,
announce_incoming_window, announced);
if (t->announce_incoming_window > 0) {
uint32_t announced =
(uint32_t)GPR_MIN(t->announce_incoming_window, UINT32_MAX);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, announce_incoming_window,
announced);
grpc_transport_one_way_stats throwaway_stats;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(0, announced, &throwaway_stats));
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_window_update_create(
0, announced, &throwaway_stats));
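/* Editorial note: stream id 0 addresses the HTTP/2 connection-level flow
   control window, so this WINDOW_UPDATE restores transport-wide receive
   credit rather than any individual stream's. */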
}
GPR_TIMER_END("grpc_chttp2_unlocking_check_writes", 0);
return transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing);
}
void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint) {
GPR_ASSERT(transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing));
finalize_outbuf(exec_ctx, transport_writing);
GPR_ASSERT(endpoint);
if (transport_writing->outbuf.count > 0) {
grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
&transport_writing->done_cb);
} else {
grpc_exec_ctx_sched(exec_ctx, &transport_writing->done_cb, GRPC_ERROR_NONE,
NULL);
}
}
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
GPR_TIMER_BEGIN("finalize_outbuf", 0);
bool is_first_data_frame = true;
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
uint32_t max_outgoing =
(uint32_t)GPR_MIN(transport_writing->max_frame_size,
GPR_MIN(stream_writing->outgoing_window,
transport_writing->outgoing_window));
/* send initial metadata if it's available */
if (stream_writing->send_initial_metadata != NULL) {
grpc_chttp2_encode_header(
&transport_writing->hpack_compressor, stream_writing->id,
stream_writing->send_initial_metadata, 0,
transport_writing->max_frame_size, &stream_writing->stats,
&transport_writing->outbuf);
stream_writing->send_initial_metadata = NULL;
stream_writing->sent_initial_metadata = 1;
}
/* send any window updates */
if (stream_writing->announce_window > 0 &&
stream_writing->send_initial_metadata == NULL) {
uint32_t announce = stream_writing->announce_window;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(stream_writing->id,
stream_writing->announce_window,
&stream_writing->stats));
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing, stream_writing,
announce_window, announce);
stream_writing->announce_window = 0;
}
/* fetch any body bytes */
while (!stream_writing->fetching && stream_writing->send_message &&
stream_writing->flow_controlled_buffer.length < max_outgoing &&
stream_writing->stream_fetched <
stream_writing->send_message->length) {
if (grpc_byte_stream_next(exec_ctx, stream_writing->send_message,
&stream_writing->fetching_slice, max_outgoing,
&stream_writing->finished_fetch)) {
stream_writing->stream_fetched +=
GPR_SLICE_LENGTH(stream_writing->fetching_slice);
if (stream_writing->stream_fetched ==
stream_writing->send_message->length) {
stream_writing->send_message = NULL;
}
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer,
stream_writing->fetching_slice);
} else {
stream_writing->fetching = 1;
}
}
/* send any body bytes */
if (stream_writing->flow_controlled_buffer.length > 0) {
if (max_outgoing > 0) {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, stream_writing->flow_controlled_buffer.length);
int is_last_data_frame =
stream_writing->send_message == NULL &&
send_bytes == stream_writing->flow_controlled_buffer.length;
int is_last_frame = is_last_data_frame &&
stream_writing->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata);
grpc_chttp2_encode_data(
stream_writing->id, &stream_writing->flow_controlled_buffer,
send_bytes, is_last_frame, &stream_writing->stats,
&transport_writing->outbuf);
if (is_first_data_frame) {
/* TODO(dgq): this is a hack. It'll be fixed in a future refactoring */
stream_writing->stats.data_bytes -= 5; /* discount grpc framing */
is_first_data_frame = false;
}
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing,
stream_writing, outgoing_window,
send_bytes);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_writing,
outgoing_window, send_bytes);
if (is_last_frame) {
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
if (is_last_data_frame) {
GPR_ASSERT(stream_writing->send_message == NULL);
stream_writing->sent_message = 1;
}
} else if (transport_writing->outgoing_window == 0) {
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
/* send trailing metadata if it's available and we're ready for it */
if (stream_writing->send_message == NULL &&
stream_writing->flow_controlled_buffer.length == 0 &&
stream_writing->send_trailing_metadata != NULL) {
if (grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata)) {
grpc_chttp2_encode_data(
stream_writing->id, &stream_writing->flow_controlled_buffer, 0, 1,
&stream_writing->stats, &transport_writing->outbuf);
} else {
grpc_chttp2_encode_header(
&transport_writing->hpack_compressor, stream_writing->id,
stream_writing->send_trailing_metadata, 1,
transport_writing->max_frame_size, &stream_writing->stats,
&transport_writing->outbuf);
}
if (!transport_writing->is_client && !stream_writing->read_closed) {
gpr_slice_buffer_add(&transport_writing->outbuf,
grpc_chttp2_rst_stream_create(
stream_writing->id, GRPC_CHTTP2_NO_ERROR,
&stream_writing->stats));
}
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
/* if there's more to write, then loop, otherwise prepare to finish the
* write */
if ((stream_writing->flow_controlled_buffer.length > 0 ||
(stream_writing->send_message && !stream_writing->fetching)) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
} else {
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
} else {
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
GPR_TIMER_END("finalize_outbuf", 0);
return t->outbuf.count > 0;
}
void grpc_chttp2_cleanup_writing(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
GPR_TIMER_BEGIN("grpc_chttp2_cleanup_writing", 0);
grpc_chttp2_stream_writing *stream_writing;
grpc_chttp2_stream_global *stream_global;
if (grpc_chttp2_list_flush_writing_stalled_by_transport(exec_ctx,
transport_writing)) {
grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
"resume_stalled_stream");
}
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0);
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
if (stream_writing->sent_initial_metadata) {
while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
if (s->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_initial_metadata_finished, GRPC_ERROR_NONE);
exec_ctx, t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_REF(error), "send_initial_metadata_finished");
}
grpc_transport_move_one_way_stats(&stream_writing->stats,
&stream_global->stats.outgoing);
if (stream_writing->sent_message) {
GPR_ASSERT(stream_writing->send_message == NULL);
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_message_finished, GRPC_ERROR_NONE);
stream_writing->sent_message = 0;
if (s->sending_bytes != 0) {
update_list(exec_ctx, t, s, s->sending_bytes, &s->on_write_finished_cbs,
GRPC_ERROR_REF(error));
s->sending_bytes = 0;
}
if (stream_writing->sent_trailing_metadata) {
if (s->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_trailing_metadata_finished, GRPC_ERROR_NONE);
}
if (stream_writing->sent_trailing_metadata) {
grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
!transport_global->is_client, 1,
GRPC_ERROR_NONE);
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_REF(error));
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
}
gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
GPR_TIMER_END("grpc_chttp2_cleanup_writing", 0);
gpr_slice_buffer_reset_and_unref(&t->outbuf);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_chttp2_end_write", 0);
}

@@ -35,6 +35,8 @@
#include <grpc/support/alloc.h>
#include "src/core/lib/profiling/timers.h"
void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
@@ -51,7 +53,7 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
GRPC_ERROR_UNREF(error);
return;
}
closure->error = error;
closure->error_data.error = error;
closure->next_data.next = NULL;
if (closure_list->head == NULL) {
closure_list->head = closure;
@@ -64,8 +66,8 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
void grpc_closure_list_fail_all(grpc_closure_list *list,
grpc_error *forced_failure) {
for (grpc_closure *c = list->head; c != NULL; c = c->next_data.next) {
if (c->error == GRPC_ERROR_NONE) {
c->error = GRPC_ERROR_REF(forced_failure);
if (c->error_data.error == GRPC_ERROR_NONE) {
c->error_data.error = GRPC_ERROR_REF(forced_failure);
}
}
GRPC_ERROR_UNREF(forced_failure);
@@ -110,3 +112,11 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
return &wc->wrapper;
}
void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_closure_run", 0);
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_closure_run", 0);
}

@@ -76,7 +76,10 @@ struct grpc_closure {
void *cb_arg;
/** Once queued, the result of the closure. Before then: scratch space */
grpc_error *error;
union {
grpc_error *error;
uintptr_t scratch;
} error_data;
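  /* Editorial note: 'scratch' lets a scheduler reuse this word for its own
     per-closure bookkeeping while the closure is queued; the combiner below
     packs a grpc_error* plus a covered-by-poller bit into it. */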
};
/** Initializes \a closure with \a cb and \a cb_arg. */
@@ -106,4 +109,10 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
/** return whether \a list is empty. */
bool grpc_closure_list_empty(grpc_closure_list list);
/** Run a closure directly. Caller ensures that no locks are being held above.
* Note that calling this at the end of a closure callback function itself is
* by definition safe. */
void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
#endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */

@@ -51,24 +51,53 @@ int grpc_combiner_trace = 0;
} while (0)
struct grpc_combiner {
grpc_combiner *next_combiner_on_this_exec_ctx;
grpc_workqueue *optional_workqueue;
gpr_mpscq queue;
// state is:
// lower bit - zero if orphaned
// other bits - number of items queued on the lock
gpr_atm state;
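  /* Editorial worked example of the encoding above: state == 1 is an alive
     lock with an empty queue; each grpc_combiner_execute does a
     fetch_add(&state, 2), so state == 5 means alive with two items queued.
     A fetch_add that returns 1 therefore marks the first item landing on an
     idle lock, which is what triggers scheduling the combiner on the
     exec_ctx. */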
bool take_async_break_before_final_list;
// number of elements in the list that are covered by a poller: if >0, we can
// offload safely
gpr_atm covered_by_poller;
bool time_to_execute_final_list;
bool final_list_covered_by_poller;
grpc_closure_list final_list;
grpc_closure continue_finishing;
grpc_closure offload;
};
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
typedef struct {
grpc_error *error;
bool covered_by_poller;
} error_data;
static uintptr_t pack_error_data(error_data d) {
return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
}
static error_data unpack_error_data(uintptr_t p) {
return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
}
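/* Editorial note: this is classic pointer tagging; it assumes grpc_error*
   values are at least 2-byte aligned so bit 0 is free. E.g. packing an error
   at address 0x7f00 with covered_by_poller == true yields 0x7f01, and
   unpacking masks the flag back out of the pointer. */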
static bool is_covered_by_poller(grpc_combiner *lock) {
return lock->final_list_covered_by_poller ||
gpr_atm_acq_load(&lock->covered_by_poller) > 0;
}
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
lock->next_combiner_on_this_exec_ctx = NULL;
lock->time_to_execute_final_list = false;
lock->optional_workqueue = optional_workqueue;
lock->final_list_covered_by_poller = false;
gpr_atm_no_barrier_store(&lock->state, 1);
gpr_atm_no_barrier_store(&lock->covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
lock->take_async_break_before_final_list = false;
grpc_closure_list_init(&lock->final_list);
grpc_closure_init(&lock->offload, offload, lock);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -90,170 +119,176 @@ void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock) {
lock->next_combiner_on_this_exec_ctx = NULL;
if (exec_ctx->active_combiner == NULL) {
exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
} else {
exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
exec_ctx->last_combiner = lock;
}
}
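/* Editorial note: each exec_ctx threads a FIFO of combiners pending execution
   through next_combiner_on_this_exec_ctx, delimited by active_combiner and
   last_combiner; push_last appends to that queue and push_first (below)
   prepends. */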
static void continue_finishing_mainline(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
GPR_TIMER_BEGIN("combiner.continue_executing_mainline", 0);
grpc_combiner *lock = arg;
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p continue_finishing_mainline", lock));
GPR_ASSERT(exec_ctx->active_combiner == NULL);
static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock) {
lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner;
exec_ctx->active_combiner = lock;
if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
GPR_TIMER_END("combiner.continue_executing_mainline", 0);
if (lock->next_combiner_on_this_exec_ctx == NULL) {
exec_ctx->last_combiner = lock;
}
}
static void execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.execute_final", 0);
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
lock->take_async_break_before_final_list = false;
int loops = 0;
while (c != NULL) {
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
loops++;
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *cl, grpc_error *error,
bool covered_by_poller) {
GPR_TIMER_BEGIN("combiner.execute", 0);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, 2);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
cl, covered_by_poller, last));
GPR_ASSERT(last & 1); // ensure lock has not been destroyed
cl->error_data.scratch =
pack_error_data((error_data){error, covered_by_poller});
if (covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->covered_by_poller, 1);
}
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
if (last == 1) {
// queued closures will be executed when the exec_ctx calls
// grpc_combiner_continue_exec_ctx
push_last_on_exec_ctx(exec_ctx, lock);
}
GPR_TIMER_END("combiner.execute_final", 0);
GPR_TIMER_END("combiner.execute", 0);
}
static void continue_executing_final(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
GPR_TIMER_BEGIN("combiner.continue_executing_final", 0);
grpc_combiner *lock = arg;
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p continue_executing_final", lock));
GPR_ASSERT(exec_ctx->active_combiner == NULL);
exec_ctx->active_combiner = lock;
// quick peek to see if new things have turned up on the queue: if so, go back
// to executing them before the final list
if ((gpr_atm_acq_load(&lock->state) >> 1) > 1) {
if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
} else {
execute_final(exec_ctx, lock);
finish(exec_ctx, lock);
static void move_next(grpc_exec_ctx *exec_ctx) {
exec_ctx->active_combiner =
exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
if (exec_ctx->active_combiner == NULL) {
exec_ctx->last_combiner = NULL;
}
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
GPR_TIMER_END("combiner.continue_executing_final", 0);
}
static bool start_execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.start_execute_final", 0);
GPR_ASSERT(exec_ctx->active_combiner == lock);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG,
"C:%p start_execute_final take_async_break_before_final_list=%d",
lock, lock->take_async_break_before_final_list));
if (lock->take_async_break_before_final_list) {
grpc_closure_init(&lock->continue_finishing, continue_executing_final,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
GPR_TIMER_END("combiner.start_execute_final", 0);
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_combiner *lock = arg;
push_last_on_exec_ctx(exec_ctx, lock);
}
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
move_next(exec_ctx);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
lock->optional_workqueue));
grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload,
GRPC_ERROR_NONE);
}
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
grpc_combiner *lock = exec_ctx->active_combiner;
if (lock == NULL) {
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return false;
} else {
execute_final(exec_ctx, lock);
GPR_TIMER_END("combiner.start_execute_final", 0);
return true;
}
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.maybe_finish_one", 0);
gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
GPR_ASSERT(exec_ctx->active_combiner == lock);
if (n == NULL) {
// Queue is in a transiently inconsistent state: a new item is being queued
// but is not visible to this thread yet.
// Use this as a cue that we should go off and do something else for a while
// (and come back later)
grpc_closure_init(&lock->continue_finishing, continue_finishing_mainline,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
GPR_TIMER_END("combiner.maybe_finish_one", 0);
return false;
gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
"is_covered_by_poller=%d exec_ctx_ready_to_finish=%d "
"time_to_execute_final_list=%d",
lock, lock->optional_workqueue, is_covered_by_poller(lock),
grpc_exec_ctx_ready_to_finish(exec_ctx),
lock->time_to_execute_final_list));
if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
grpc_exec_ctx_ready_to_finish(exec_ctx)) {
GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
// this execution context wants to move on, and we have a workqueue (and
// so can help the execution context out): schedule remaining work to be
// picked up on the workqueue
queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
grpc_closure *cl = (grpc_closure *)n;
grpc_error *error = cl->error;
cl->cb(exec_ctx, cl->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("combiner.maybe_finish_one", 0);
return true;
}
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
bool (*executor)(grpc_exec_ctx * exec_ctx, grpc_combiner * lock);
GPR_TIMER_BEGIN("combiner.finish", 0);
int loops = 0;
do {
executor = maybe_finish_one;
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -2);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p finish[%d] old_state=%" PRIdPTR, lock,
loops, old_state));
switch (old_state) {
default:
// we have multiple queued work items: just continue executing them
break;
case 5: // we're down to one queued item: if it's the final list we
case 4: // should do that
if (!grpc_closure_list_empty(lock->final_list)) {
executor = start_execute_final;
}
break;
case 3: // had one count, one unorphaned --> unlocked unorphaned
GPR_TIMER_END("combiner.finish", 0);
return;
case 2: // and one count, one orphaned --> unlocked and orphaned
really_destroy(exec_ctx, lock);
GPR_TIMER_END("combiner.finish", 0);
return;
case 1:
case 0:
// these values are illegal - representing an already unlocked or
// deleted lock
GPR_UNREACHABLE_CODE(return );
if (!lock->time_to_execute_final_list ||
// peek to see if something new has shown up, and execute that with
// priority
(gpr_atm_acq_load(&lock->state) >> 1) > 1) {
gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
if (n == NULL) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
GPR_TIMER_MARK("delay_busy", 0);
if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
queue_offload(exec_ctx, lock);
}
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
loops++;
} while (executor(exec_ctx, lock));
GPR_TIMER_END("combiner.finish", 0);
}
GPR_TIMER_BEGIN("combiner.exec1", 0);
grpc_closure *cl = (grpc_closure *)n;
error_data err = unpack_error_data(cl->error_data.scratch);
cl->cb(exec_ctx, cl->cb_arg, err.error);
if (err.covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->covered_by_poller, -1);
}
GRPC_ERROR_UNREF(err.error);
GPR_TIMER_END("combiner.exec1", 0);
} else {
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
lock->final_list_covered_by_poller = false;
int loops = 0;
while (c != NULL) {
GPR_TIMER_BEGIN("combiner.exec_1final", 0);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
GPR_TIMER_END("combiner.exec_1final", 0);
}
}
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *cl, grpc_error *error) {
GPR_TIMER_MARK("unref", 0);
move_next(exec_ctx);
lock->time_to_execute_final_list = false;
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -2);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p grpc_combiner_execute c=%p", lock, cl));
GPR_TIMER_BEGIN("combiner.execute", 0);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, 2);
GPR_ASSERT(last & 1); // ensure lock has not been destroyed
if (last == 1) {
exec_ctx->active_combiner = lock;
GPR_TIMER_BEGIN("combiner.execute_first_cb", 0);
cl->cb(exec_ctx, cl->cb_arg, error);
GPR_TIMER_END("combiner.execute_first_cb", 0);
GRPC_ERROR_UNREF(error);
finish(exec_ctx, lock);
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
} else {
cl->error = error;
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
switch (old_state) {
default:
// we have multiple queued work items: just continue executing them
break;
case 5: // we're down to one queued item: if it's the final list we
case 4: // should do that
if (!grpc_closure_list_empty(lock->final_list)) {
lock->time_to_execute_final_list = true;
}
break;
case 3: // had one count, one unorphaned --> unlocked unorphaned
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
case 2: // and one count, one orphaned --> unlocked and orphaned
really_destroy(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
case 1:
case 0:
// these values are illegal - representing an already unlocked or
// deleted lock
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
GPR_UNREACHABLE_CODE(return true);
}
GPR_TIMER_END("combiner.execute", 0);
push_first_on_exec_ctx(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
@ -264,30 +299,26 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool force_async_break) {
bool covered_by_poller) {
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG,
"C:%p grpc_combiner_execute_finally c=%p force_async_break=%d; ac=%p",
lock, closure, force_async_break, exec_ctx->active_combiner));
GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
closure, exec_ctx->active_combiner, covered_by_poller));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
grpc_combiner_execute(exec_ctx, lock,
grpc_closure_create(enqueue_finally, closure), error);
grpc_closure_create(enqueue_finally, closure), error,
false);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
if (force_async_break) {
lock->take_async_break_before_final_list = true;
}
if (grpc_closure_list_empty(lock->final_list)) {
gpr_atm_full_fetch_add(&lock->state, 2);
}
if (covered_by_poller) {
lock->final_list_covered_by_poller = true;
}
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
void grpc_combiner_force_async_finally(grpc_combiner *lock) {
lock->take_async_break_before_final_list = true;
}

@ -52,19 +52,14 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
// Execute \a action within the lock.
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error);
grpc_closure *closure, grpc_error *error,
bool covered_by_poller);
// Execute \a action within the lock just prior to unlocking.
// if \a hint_async_break is true, the combiner tries to hand execution to
// another thread before finishing the primary queue of combined closures and
// executing the finally list.
// Deprecation warning: \a hint_async_break will be removed in a future version
// Takes a very slow and round-about path if not called from a
// grpc_combiner_execute closure.
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool hint_async_break);
// Deprecated: force the finally list execution onto another thread
void grpc_combiner_force_async_finally(grpc_combiner *lock);
bool covered_by_poller);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
extern int grpc_combiner_trace;
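A minimal sketch of driving this revised API, assuming a caller-owned closure and exec_ctx; do_work and run_under_combiner are illustrative names, not part of the patch:

  static void do_work(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
    /* runs serialized with every other closure on this combiner */
  }

  static void run_under_combiner(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
                                 grpc_closure *cl) {
    grpc_closure_init(cl, do_work, NULL);
    /* covered_by_poller=true: a poller is watching this work, so the
       combiner may hand it to its workqueue instead of running it inline */
    grpc_combiner_execute(exec_ctx, lock, cl, GRPC_ERROR_NONE, true);
    /* the queued work drains when grpc_exec_ctx_flush repeatedly calls
       grpc_combiner_continue_exec_ctx until it returns false */
  }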

@ -265,7 +265,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
} else {
out = gpr_malloc(sizeof(*out));
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
gpr_log(GPR_DEBUG, "%p create copying", out);
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
#endif
out->ints = gpr_avl_ref(in->ints);
out->strs = gpr_avl_ref(in->strs);

@ -123,9 +123,13 @@ typedef enum {
GRPC_ERROR_TIME_CREATED,
} grpc_error_times;
/// The following "special" errors can be propagated without allocating memory.
/// They are always even so that other code (particularly combiner locks) can
/// safely use the lower bit for themselves.
#define GRPC_ERROR_NONE ((grpc_error *)NULL)
#define GRPC_ERROR_OOM ((grpc_error *)1)
#define GRPC_ERROR_CANCELLED ((grpc_error *)2)
#define GRPC_ERROR_OOM ((grpc_error *)2)
#define GRPC_ERROR_CANCELLED ((grpc_error *)4)
const char *grpc_error_string(grpc_error *error);
void grpc_error_free_string(const char *str);
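Keeping the special errors even is what lets combiner.c above fold covered_by_poller into the same word as the error pointer. A sketch of the pack/unpack pair those call sites imply (layout assumed from the calls, not quoted from the patch):

  typedef struct {
    grpc_error *error;
    bool covered_by_poller;
  } error_data;

  static uintptr_t pack_error_data(error_data d) {
    /* special errors are even and heap pointers are aligned: bit 0 is free */
    return (uintptr_t)d.error | (d.covered_by_poller ? (uintptr_t)1 : 0);
  }

  static error_data unpack_error_data(uintptr_t p) {
    return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
  }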

@ -152,14 +152,13 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
//#define GRPC_PI_REF_COUNT_DEBUG
#ifdef GRPC_PI_REF_COUNT_DEBUG
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
#else /* defined(GRPC_PI_REF_COUNT_DEBUG) */
#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
@ -185,8 +184,11 @@ typedef struct polling_island {
* (except mu and ref_count) are invalid and must be ignored. */
gpr_atm merged_to;
/* The workqueue associated with this polling island */
grpc_workqueue *workqueue;
gpr_atm poller_count;
gpr_mu workqueue_read_mu;
gpr_mpscq workqueue_items;
gpr_atm workqueue_item_count;
grpc_wakeup_fd workqueue_wakeup_fd;
/* The fd of the underlying epoll set */
int epoll_fd;
@ -275,6 +277,8 @@ static bool append_error(grpc_error **composite, grpc_error *error,
threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
static grpc_wakeup_fd polling_island_wakeup_fd;
static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
@ -289,12 +293,12 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
#ifdef GRPC_PI_REF_COUNT_DEBUG
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
int line) {
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_add_ref(pi);
gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
@ -302,12 +306,42 @@ static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
char *reason, char *file, int line) {
const char *reason, const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_unref(exec_ctx, pi);
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
(void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
if (workqueue != NULL) {
pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
if (workqueue != NULL) {
pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
}
}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_add_ref((polling_island *)workqueue);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_unref(exec_ctx, (polling_island *)workqueue);
}
}
#endif
static void pi_add_ref(polling_island *pi) {
@ -315,10 +349,7 @@ static void pi_add_ref(polling_island *pi) {
}
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
/* If ref count went to one, we're back to just the workqueue owning a ref.
Unref the workqueue to break the loop.
If ref count went to zero, delete the polling island.
/* If ref count went to zero, delete the polling island.
Note that this deletion need not be done under a lock. Once the ref count goes
to zero, we are guaranteed that no one else holds a reference to the
polling island (and that there is no racing pi_add_ref() call either).
@ -326,20 +357,12 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
Also, if we are deleting the polling island and the merged_to field is
non-empty, we should remove a ref to the merged_to polling island
*/
switch (gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
case 2: /* last external ref: the only one now owned is by the workqueue */
GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
break;
case 1: {
polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
polling_island_delete(exec_ctx, pi);
if (next != NULL) {
PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
}
break;
if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
polling_island_delete(exec_ctx, pi);
if (next != NULL) {
PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
}
case 0:
GPR_UNREACHABLE_CODE(return );
}
}
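gpr_atm_full_fetch_add returns the pre-add value, so observing 1 above means this call released the last reference; the same idiom in isolation:

  /* Returns true iff the calling thread dropped the final reference. */
  static bool drop_ref(gpr_atm *ref_count) {
    return gpr_atm_full_fetch_add(ref_count, -1) == 1; /* was 1 -> now 0 */
  }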
@ -488,11 +511,20 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
pi->workqueue = NULL;
gpr_mu_init(&pi->workqueue_read_mu);
gpr_mpscq_init(&pi->workqueue_items);
gpr_atm_rel_store(&pi->workqueue_item_count, 0);
gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
err_desc)) {
goto done;
}
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@ -501,26 +533,14 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
}
polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
if (append_error(error, grpc_workqueue_create(exec_ctx, &pi->workqueue),
err_desc) &&
*error == GRPC_ERROR_NONE) {
polling_island_add_fds_locked(pi, &pi->workqueue->wakeup_read_fd, 1, true,
error);
GPR_ASSERT(pi->workqueue->wakeup_read_fd->polling_island == NULL);
pi->workqueue->wakeup_read_fd->polling_island = pi;
PI_ADD_REF(pi, "fd");
}
done:
if (*error != GRPC_ERROR_NONE) {
if (pi->workqueue != NULL) {
GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
}
polling_island_delete(exec_ctx, pi);
pi = NULL;
}
@ -533,7 +553,11 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
gpr_mu_destroy(&pi->workqueue_read_mu);
gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_free(pi->fds);
gpr_free(pi);
}
@ -678,6 +702,40 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
static void workqueue_maybe_wakeup(polling_island *pi) {
bool force_wakeup = false;
bool is_current_poller = (g_current_thread_polling_island == pi);
gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
if (force_wakeup || current_pollers > min_current_pollers_for_wakeup) {
GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
}
}
static void workqueue_move_items_to_parent(polling_island *q) {
polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
if (p == NULL) {
return;
}
gpr_mu_lock(&q->workqueue_read_mu);
int num_added = 0;
while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
if (n != NULL) {
gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
gpr_mpscq_push(&p->workqueue_items, n);
num_added++;
}
}
gpr_mu_unlock(&q->workqueue_read_mu);
if (num_added > 0) {
workqueue_maybe_wakeup(p);
}
workqueue_move_items_to_parent(p);
}
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@ -702,6 +760,8 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
workqueue_move_items_to_parent(q);
}
/* else if p == q, nothing needs to be done */
@ -712,6 +772,26 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue, grpc_closure *closure,
grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
/* take a ref to the workqueue: otherwise it can happen that whatever events
* this kicks off ends up destroying the workqueue before this function
* completes */
GRPC_WORKQUEUE_REF(workqueue, "enqueue");
polling_island *pi = (polling_island *)workqueue;
gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
closure->error_data.error = error;
gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
if (last == 0) {
workqueue_maybe_wakeup(pi);
}
workqueue_move_items_to_parent(pi);
GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
GPR_TIMER_END("workqueue.enqueue", 0);
}
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@ -1042,11 +1122,8 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
gpr_mu_lock(&fd->mu);
grpc_workqueue *workqueue = NULL;
if (fd->polling_island != NULL) {
workqueue =
GRPC_WORKQUEUE_REF(fd->polling_island->workqueue, "get_workqueue");
}
grpc_workqueue *workqueue = GRPC_WORKQUEUE_REF(
(grpc_workqueue *)fd->polling_island, "fd_get_workqueue");
gpr_mu_unlock(&fd->mu);
return workqueue;
}
@ -1299,7 +1376,26 @@ static void pollset_reset(grpc_pollset *pollset) {
GPR_ASSERT(pollset->polling_island == NULL);
}
#define GRPC_EPOLL_MAX_EVENTS 1000
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
polling_island *pi) {
if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
gpr_mu_unlock(&pi->workqueue_read_mu);
if (n != NULL) {
if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) {
workqueue_maybe_wakeup(pi);
}
grpc_closure *c = (grpc_closure *)n;
grpc_closure_run(exec_ctx, c, c->error_data.error);
return true;
} else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
workqueue_maybe_wakeup(pi);
}
}
return false;
}
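gpr_mpscq is multi-producer/single-consumer: pushes may race, but pops must be serialized, which is the role workqueue_read_mu plays above. A self-contained sketch of that discipline (work_item is a made-up payload type):

  #include <grpc/support/sync.h>
  #include "src/core/lib/support/mpscq.h"

  typedef struct {
    gpr_mpscq_node node; /* first member, so nodes cast back to work_item */
    int payload;
  } work_item;

  /* any thread may push */
  static void produce(gpr_mpscq *q, work_item *it) {
    gpr_mpscq_push(q, &it->node);
  }

  /* only one thread at a time may pop */
  static work_item *consume(gpr_mu *read_mu, gpr_mpscq *q) {
    work_item *out = NULL;
    if (gpr_mu_trylock(read_mu)) {
      out = (work_item *)gpr_mpscq_pop(q);
      gpr_mu_unlock(read_mu);
    }
    return out; /* NULL: empty, transiently inconsistent, or contended */
  }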
#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
@ -1354,7 +1450,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->mu);
do {
if (!maybe_do_workqueue_work(exec_ctx, pi)) {
gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
g_current_thread_polling_island = pi;
GRPC_SCHEDULING_START_BLOCKING_REGION;
ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
sig_mask);
@ -1386,6 +1485,11 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
append_error(error,
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
err_desc);
} else if (data_ptr == &pi->workqueue_wakeup_fd) {
append_error(error,
grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
err_desc);
maybe_do_workqueue_work(exec_ctx, pi);
} else if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
@ -1408,7 +1512,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
}
}
}
} while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
g_current_thread_polling_island = NULL;
gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
}
GPR_ASSERT(pi != NULL);
@ -1868,6 +1975,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_enqueue = workqueue_enqueue,
.shutdown_engine = shutdown_engine,
};

@ -1988,6 +1988,32 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
/*******************************************************************************
* workqueue stubs
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue, grpc_closure *closure,
grpc_error *error) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
}
/*******************************************************************************
* event engine binding
*/
@ -2029,6 +2055,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_enqueue = workqueue_enqueue,
.shutdown_engine = shutdown_engine,
};

@ -1235,6 +1235,32 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
/*******************************************************************************
* workqueue stubs
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue, grpc_closure *closure,
grpc_error *error) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
}
/*******************************************************************************
* event engine binding
*/
@ -1273,6 +1299,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_enqueue = workqueue_enqueue,
.shutdown_engine = shutdown_engine,
};

@ -258,4 +258,27 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
grpc_error *grpc_kick_poller(void) { return g_event_engine->kick_poller(); }
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason) {
return g_event_engine->workqueue_ref(workqueue, file, line, reason);
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
g_event_engine->workqueue_unref(exec_ctx, workqueue, file, line, reason);
}
#else
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
return g_event_engine->workqueue_ref(workqueue);
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
g_event_engine->workqueue_unref(exec_ctx, workqueue);
}
#endif
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
grpc_closure *closure, grpc_error *error) {
g_event_engine->workqueue_enqueue(exec_ctx, workqueue, closure, error);
}
#endif // GPR_POSIX_SOCKET

@ -40,6 +40,7 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
typedef struct grpc_fd grpc_fd;
@ -95,6 +96,18 @@ typedef struct grpc_event_engine_vtable {
grpc_error *(*kick_poller)(void);
void (*shutdown_engine)(void);
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue, const char *file,
int line, const char *reason);
void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
void (*workqueue_enqueue)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
grpc_closure *closure, grpc_error *error);
} grpc_event_engine_vtable;
void grpc_event_engine_init(void);

@ -37,6 +37,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
@ -60,18 +61,47 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error;
did_something = true;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
for (;;) {
if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
did_something = true;
grpc_closure_run(exec_ctx, c, c->error_data.error);
c = next;
}
continue;
}
if (grpc_combiner_continue_exec_ctx(exec_ctx)) {
continue;
}
break;
}
GPR_ASSERT(exec_ctx->active_combiner == NULL);
if (exec_ctx->stealing_from_workqueue != NULL) {
if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue,
exec_ctx->stolen_closure,
exec_ctx->stolen_closure->error_data.error);
GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
"exec_ctx_sched");
exec_ctx->stealing_from_workqueue = NULL;
exec_ctx->stolen_closure = NULL;
} else {
grpc_closure *c = exec_ctx->stolen_closure;
GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
"exec_ctx_sched");
exec_ctx->stealing_from_workqueue = NULL;
exec_ctx->stolen_closure = NULL;
grpc_error *error = c->error_data.error;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0);
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0);
grpc_exec_ctx_flush(exec_ctx);
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
return true;
}
}
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
@ -86,12 +116,25 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error,
grpc_workqueue *offload_target_or_null) {
GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0);
if (offload_target_or_null == NULL) {
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
} else {
} else if (exec_ctx->stealing_from_workqueue == NULL) {
exec_ctx->stealing_from_workqueue = offload_target_or_null;
closure->error_data.error = error;
exec_ctx->stolen_closure = closure;
} else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) {
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
} else { /* stealing_from_workqueue == offload_target_or_null */
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null,
exec_ctx->stolen_closure,
exec_ctx->stolen_closure->error_data.error);
closure->error_data.error = error;
exec_ctx->stolen_closure = closure;
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
}
GPR_TIMER_END("grpc_exec_ctx_sched", 0);
}
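The net effect of the stealing path: the first closure scheduled at a workqueue is parked on the exec_ctx and, if the context is not ready to finish, run locally by flush, saving a wakeup; a displaced or second-workqueue closure goes through grpc_workqueue_enqueue. A hedged sketch of a call site (wq, c1, c2 are assumed to be an owned workqueue and initialized closures):

  grpc_exec_ctx_sched(&exec_ctx, &c1, GRPC_ERROR_NONE,
                      GRPC_WORKQUEUE_REF(wq, "sched")); /* c1 stolen */
  grpc_exec_ctx_sched(&exec_ctx, &c2, GRPC_ERROR_NONE,
                      GRPC_WORKQUEUE_REF(wq, "sched")); /* c1 -> wq, c2 stolen */
  grpc_exec_ctx_flush(&exec_ctx); /* runs c2 here unless ready to finish */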
void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,

@ -66,15 +66,23 @@ typedef struct grpc_combiner grpc_combiner;
*/
struct grpc_exec_ctx {
grpc_closure_list closure_list;
grpc_workqueue *stealing_from_workqueue;
grpc_closure *stolen_closure;
/** currently active combiner: updated only via combiner.c */
grpc_combiner *active_combiner;
grpc_combiner *last_combiner;
bool cached_ready_to_finish;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
};
/* initializer for grpc_exec_ctx:
prefer to use GRPC_EXEC_CTX_INIT whenever possible */
#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
{ GRPC_CLOSURE_LIST_INIT, NULL, false, finish_check_arg, finish_check }
{ \
GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
finish_check \
}
#else
struct grpc_exec_ctx {
bool cached_ready_to_finish;
@ -85,8 +93,10 @@ struct grpc_exec_ctx {
{ false, finish_check_arg, finish_check }
#endif
/* initialize an execution context at the top level of an API call into grpc
(this is safe to use elsewhere, though possibly not as efficient) */
#define GRPC_EXEC_CTX_INIT \
GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_never_ready_to_finish, NULL)
GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
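The standard lifecycle, as used at the call.c sites later in this diff; with the INIT change above, top-level contexts now report ready-to-finish immediately, biasing pending work toward offload:

  void api_entry_point(void) {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    /* ... create closures, schedule them on combiners/workqueues ... */
    grpc_exec_ctx_finish(&exec_ctx); /* flushes everything still queued */
  }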
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.

@ -112,6 +112,14 @@ void grpc_iomgr_shutdown(void) {
continue;
}
if (g_root_object.next != &g_root_object) {
if (grpc_iomgr_abort_on_leaks()) {
gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
" iomgr objects before shutdown deadline: "
"memory leaks are likely",
count_objects());
dump_objects("LEAKED");
abort();
}
gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
@ -122,9 +130,6 @@ void grpc_iomgr_shutdown(void) {
"memory leaks are likely",
count_objects());
dump_objects("LEAKED");
if (grpc_iomgr_abort_on_leaks()) {
abort();
}
}
break;
}

@ -177,7 +177,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
tcp->read_cb = NULL;
tcp->incoming_buffer = NULL;
grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
grpc_closure_run(exec_ctx, cb, error);
}
#define MAX_READ_IOVEC 4
@ -209,11 +209,11 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
GPR_TIMER_BEGIN("recvmsg", 1);
GPR_TIMER_BEGIN("recvmsg", 0);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
GPR_TIMER_END("recvmsg", 0);
GPR_TIMER_END("recvmsg", read_bytes >= 0);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
@ -392,11 +392,8 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error_free_string(str);
}
GPR_TIMER_BEGIN("tcp_handle_write.cb", 0);
cb->cb(exec_ctx, cb->cb_arg, error);
GPR_TIMER_END("tcp_handle_write.cb", 0);
grpc_closure_run(exec_ctx, cb, error);
TCP_UNREF(exec_ctx, tcp, "write");
GRPC_ERROR_UNREF(error);
}
}
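Several hunks here swap grpc_exec_ctx_sched for grpc_closure_run; the distinction is when the callback fires, sketched below with the signatures as used in this diff:

  /* deferred: appended to the exec_ctx list, runs at the next flush */
  grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);

  /* immediate: invokes cb inline on this thread before returning */
  grpc_closure_run(exec_ctx, cb, error);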

@ -40,10 +40,6 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#ifdef GPR_POSIX_SOCKET
#include "src/core/lib/iomgr/workqueue_posix.h"
#endif
#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/workqueue_windows.h"
#endif
@ -58,20 +54,20 @@
string will be printed alongside the refcount. When it is not defined, the
string will be discarded at compilation time. */
//#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
/*#define GRPC_WORKQUEUE_REFCOUNT_DEBUG*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define GRPC_WORKQUEUE_REF(p, r) \
(grpc_workqueue_ref((p), __FILE__, __LINE__, (r)), (p))
grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
const char *reason);
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
#define GRPC_WORKQUEUE_REF(p, r) (grpc_workqueue_ref((p)), (p))
#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
void grpc_workqueue_ref(grpc_workqueue *workqueue);
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif

@ -1,196 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_SOCKET
#include "src/core/lib/iomgr/workqueue.h"
#include <stdio.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/profiling/timers.h"
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
grpc_workqueue **workqueue) {
char name[32];
*workqueue = gpr_malloc(sizeof(grpc_workqueue));
gpr_ref_init(&(*workqueue)->refs, 1);
gpr_atm_no_barrier_store(&(*workqueue)->state, 1);
grpc_error *err = grpc_wakeup_fd_init(&(*workqueue)->wakeup_fd);
if (err != GRPC_ERROR_NONE) {
gpr_free(*workqueue);
return err;
}
sprintf(name, "workqueue:%p", (void *)(*workqueue));
(*workqueue)->wakeup_read_fd = grpc_fd_create(
GRPC_WAKEUP_FD_GET_READ_FD(&(*workqueue)->wakeup_fd), name);
gpr_mpscq_init(&(*workqueue)->queue);
grpc_closure_init(&(*workqueue)->read_closure, on_readable, *workqueue);
grpc_fd_notify_on_read(exec_ctx, (*workqueue)->wakeup_read_fd,
&(*workqueue)->read_closure);
return GRPC_ERROR_NONE;
}
static void workqueue_destroy(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
grpc_fd_shutdown(exec_ctx, workqueue->wakeup_read_fd);
}
static void workqueue_orphan(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
if (gpr_atm_full_fetch_add(&workqueue->state, -1) == 1) {
workqueue_destroy(exec_ctx, workqueue);
}
}
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
const char *reason) {
if (workqueue == NULL) return;
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p ref %d -> %d %s",
workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count + 1,
reason);
gpr_ref(&workqueue->refs);
}
#else
void grpc_workqueue_ref(grpc_workqueue *workqueue) {
if (workqueue == NULL) return;
gpr_ref(&workqueue->refs);
}
#endif
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
if (workqueue == NULL) return;
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p unref %d -> %d %s",
workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count - 1,
reason);
if (gpr_unref(&workqueue->refs)) {
workqueue_orphan(exec_ctx, workqueue);
}
}
#else
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
if (workqueue == NULL) return;
if (gpr_unref(&workqueue->refs)) {
workqueue_orphan(exec_ctx, workqueue);
}
}
#endif
static void drain(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
abort();
}
static void wakeup(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
GPR_TIMER_MARK("workqueue.wakeup", 0);
grpc_error *err = grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
if (!GRPC_LOG_IF_ERROR("wakeupfd_wakeup", err)) {
drain(exec_ctx, workqueue);
}
}
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.on_readable", 0);
grpc_workqueue *workqueue = arg;
if (error != GRPC_ERROR_NONE) {
/* HACK: let wakeup_fd code know that we stole the fd */
workqueue->wakeup_fd.read_fd = 0;
grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
GPR_ASSERT(gpr_atm_no_barrier_load(&workqueue->state) == 0);
gpr_free(workqueue);
} else {
error = grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
gpr_mpscq_node *n = gpr_mpscq_pop(&workqueue->queue);
if (error == GRPC_ERROR_NONE) {
grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
&workqueue->read_closure);
} else {
/* recurse to get error handling */
on_readable(exec_ctx, arg, error);
}
if (n == NULL) {
/* try again - queue in an inconsistent state */
wakeup(exec_ctx, workqueue);
} else {
switch (gpr_atm_full_fetch_add(&workqueue->state, -2)) {
case 3: // had one count, one unorphaned --> done, unorphaned
break;
case 2: // had one count, one orphaned --> done, orphaned
workqueue_destroy(exec_ctx, workqueue);
break;
case 1:
case 0:
// these values are illegal - representing an already done or
// deleted workqueue
GPR_UNREACHABLE_CODE(break);
default:
// schedule a wakeup since there's more to do
wakeup(exec_ctx, workqueue);
}
grpc_closure *cl = (grpc_closure *)n;
grpc_error *clerr = cl->error;
cl->cb(exec_ctx, cl->cb_arg, clerr);
GRPC_ERROR_UNREF(clerr);
}
}
GPR_TIMER_END("workqueue.on_readable", 0);
}
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
grpc_closure *closure, grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
gpr_atm last = gpr_atm_full_fetch_add(&workqueue->state, 2);
GPR_ASSERT(last & 1);
closure->error = error;
gpr_mpscq_push(&workqueue->queue, &closure->next_data.atm_next);
if (last == 1) {
wakeup(exec_ctx, workqueue);
}
GPR_TIMER_END("workqueue.enqueue", 0);
}
#endif /* GPR_POSIX_SOCKET */

@ -1,61 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H
#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/support/mpscq.h"
struct grpc_fd;
struct grpc_workqueue {
gpr_refcount refs;
gpr_mpscq queue;
// state is:
// lower bit - zero if orphaned
// other bits - number of items enqueued
gpr_atm state;
grpc_wakeup_fd wakeup_fd;
struct grpc_fd *wakeup_read_fd;
grpc_closure read_closure;
};
/** Create a work queue. Returns an error if creation fails. If creation
succeeds, sets *workqueue to point to it. */
grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
grpc_workqueue **workqueue);
#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H */
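The retired state word above uses the same encoding the combiner now carries; decoding it per the comment in this deleted header:

  /* state = 2 * (items enqueued) + (1 if unorphaned, 0 if orphaned) */
  gpr_atm s = gpr_atm_no_barrier_load(&workqueue->state);
  bool orphaned = (s & 1) == 0;
  intptr_t items = (intptr_t)(s >> 1);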

@ -43,12 +43,16 @@
// workqueues.
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
const char *reason) {}
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason) {
return workqueue;
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
void grpc_workqueue_ref(grpc_workqueue *workqueue) {}
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif

@ -83,6 +83,7 @@ static int g_shutdown;
static gpr_thd_id g_writing_thread;
static __thread int g_thread_id;
static int g_next_thread_id;
static int g_writing_enabled = 1;
static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
if (list->head == NULL) {
@ -177,7 +178,7 @@ static void flush_logs(gpr_timer_log_list *list) {
}
}
static void finish_writing() {
static void finish_writing(void) {
pthread_mutex_lock(&g_mu);
g_shutdown = 1;
pthread_cond_signal(&g_cv);
@ -230,6 +231,10 @@ static void gpr_timers_log_add(const char *tagstr, marker_type type,
int important, const char *file, int line) {
gpr_timer_entry *entry;
if (!g_writing_enabled) {
return;
}
if (g_thread_log == NULL || g_thread_log->num_entries == MAX_COUNT) {
rotate_log();
}
@ -261,6 +266,8 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
gpr_timers_log_add(tagstr, END, important, file, line);
}
void gpr_timer_set_enabled(int enabled) { g_writing_enabled = enabled; }
/* Basic profiler specific API functions. */
void gpr_timers_global_init(void) {}
@ -272,4 +279,6 @@ void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
void gpr_timers_set_log_filename(const char *filename) {}
void gpr_timer_set_enabled(int enabled) {}
#endif /* GRPC_BASIC_PROFILER */

@ -50,6 +50,8 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
void gpr_timers_set_log_filename(const char *filename);
void gpr_timer_set_enabled(int enabled);
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \

@ -60,8 +60,9 @@ const char *gpr_log_severity_string(gpr_log_severity severity) {
void gpr_log_message(const char *file, int line, gpr_log_severity severity,
const char *message) {
if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print))
if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print)) {
return;
}
gpr_log_func_args lfargs;
memset(&lfargs, 0, sizeof(lfargs));
@ -82,11 +83,11 @@ void gpr_log_verbosity_init() {
gpr_atm min_severity_to_print = GPR_LOG_SEVERITY_ERROR;
if (verbosity != NULL) {
if (strcmp(verbosity, "DEBUG") == 0) {
if (gpr_stricmp(verbosity, "DEBUG") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_DEBUG;
} else if (strcmp(verbosity, "INFO") == 0) {
} else if (gpr_stricmp(verbosity, "INFO") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_INFO;
} else if (strcmp(verbosity, "ERROR") == 0) {
} else if (gpr_stricmp(verbosity, "ERROR") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_ERROR;
}
gpr_free(verbosity);

@ -304,3 +304,14 @@ void gpr_strvec_add(gpr_strvec *sv, char *str) {
char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) {
return gpr_strjoin((const char **)sv->strs, sv->count, final_length);
}
int gpr_stricmp(const char *a, const char *b) {
int ca, cb;
do {
ca = tolower((unsigned char)*a);
cb = tolower((unsigned char)*b);
++a;
++b;
} while (ca == cb && ca && cb);
return ca - cb;
}
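With gpr_stricmp in place, the GRPC_VERBOSITY parsing in log.c above becomes case-insensitive; for example:

  GPR_ASSERT(gpr_stricmp("DeBuG", "debug") == 0);
  GPR_ASSERT(gpr_stricmp("INFO", "info") == 0);
  GPR_ASSERT(gpr_stricmp("error", "WARN") < 0); /* 'e' < 'w' after lowering */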

@ -118,6 +118,10 @@ void gpr_strvec_add(gpr_strvec *strs, char *add);
total_length as per gpr_strjoin */
char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length);
/** Case insensitive string comparison... return <0 if lower(a)<lower(b), ==0 if
lower(a)==lower(b), >0 if lower(a)>lower(b) */
int gpr_stricmp(const char *a, const char *b);
#ifdef __cplusplus
}
#endif

@ -230,33 +230,33 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error);
grpc_call *grpc_call_create(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative,
const void *server_transport_data, grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count, gpr_timespec send_deadline) {
grpc_error *grpc_call_create(const grpc_call_create_args *args,
grpc_call **out_call) {
size_t i, j;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_channel_stack *channel_stack =
grpc_channel_get_channel_stack(args->channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
*out_call = call;
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
call->channel = channel;
call->cq = cq;
call->parent = parent_call;
call->channel = args->channel;
call->cq = args->cq;
call->parent = args->parent_call;
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
call->is_client = server_transport_data == NULL;
call->is_client = args->server_transport_data == NULL;
if (call->is_client) {
GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
for (i = 0; i < add_initial_metadata_count; i++) {
call->send_extra_metadata[i].md = add_initial_metadata[i];
GPR_ASSERT(args->add_initial_metadata_count <
MAX_SEND_EXTRA_METADATA_COUNT);
for (i = 0; i < args->add_initial_metadata_count; i++) {
call->send_extra_metadata[i].md = args->add_initial_metadata[i];
}
call->send_extra_metadata_count = (int)add_initial_metadata_count;
call->send_extra_metadata_count = (int)args->add_initial_metadata_count;
} else {
GPR_ASSERT(add_initial_metadata_count == 0);
GPR_ASSERT(args->add_initial_metadata_count == 0);
call->send_extra_metadata_count = 0;
}
for (i = 0; i < 2; i++) {
@ -265,78 +265,79 @@ grpc_call *grpc_call_create(
}
}
call->send_deadline =
gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC);
GRPC_CHANNEL_INTERNAL_REF(channel, "call");
gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC);
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
grpc_error *error = grpc_call_stack_init(
&exec_ctx, channel_stack, 1, destroy_call, call, call->context,
server_transport_data, CALL_STACK_FROM_CALL(call));
args->server_transport_data, CALL_STACK_FROM_CALL(call));
if (error != GRPC_ERROR_NONE) {
intptr_t status;
if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status))
if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status)) {
status = GRPC_STATUS_UNKNOWN;
}
const char *error_str =
grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION);
close_with_status(&exec_ctx, call, (grpc_status_code)status,
error_str == NULL ? "unknown error" : error_str);
GRPC_ERROR_UNREF(error);
}
if (cq != NULL) {
if (args->cq != NULL) {
GPR_ASSERT(
pollset_set_alternative == NULL &&
args->pollset_set_alternative == NULL &&
"Only one of 'cq' and 'pollset_set_alternative' should be non-NULL.");
GRPC_CQ_INTERNAL_REF(cq, "bind");
GRPC_CQ_INTERNAL_REF(args->cq, "bind");
call->pollent =
grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq));
grpc_polling_entity_create_from_pollset(grpc_cq_pollset(args->cq));
}
if (pollset_set_alternative != NULL) {
call->pollent =
grpc_polling_entity_create_from_pollset_set(pollset_set_alternative);
if (args->pollset_set_alternative != NULL) {
call->pollent = grpc_polling_entity_create_from_pollset_set(
args->pollset_set_alternative);
}
if (!grpc_polling_entity_is_empty(&call->pollent)) {
grpc_call_stack_set_pollset_or_pollset_set(
&exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
if (parent_call != NULL) {
GRPC_CALL_INTERNAL_REF(parent_call, "child");
gpr_timespec send_deadline = args->send_deadline;
if (args->parent_call != NULL) {
GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
GPR_ASSERT(call->is_client);
GPR_ASSERT(!parent_call->is_client);
GPR_ASSERT(!args->parent_call->is_client);
gpr_mu_lock(&parent_call->mu);
gpr_mu_lock(&args->parent_call->mu);
if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
gpr_convert_clock_type(send_deadline,
parent_call->send_deadline.clock_type),
parent_call->send_deadline);
args->parent_call->send_deadline.clock_type),
args->parent_call->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
/* TODO(ctiller): This should change to use the appropriate census start_op
* call. */
if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
parent_call->context[GRPC_CONTEXT_TRACING].value,
NULL);
if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
grpc_call_context_set(
call, GRPC_CONTEXT_TRACING,
args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
} else {
GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
}
if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
}
if (parent_call->first_child == NULL) {
parent_call->first_child = call;
if (args->parent_call->first_child == NULL) {
args->parent_call->first_child = call;
call->sibling_next = call->sibling_prev = call;
} else {
call->sibling_next = parent_call->first_child;
call->sibling_prev = parent_call->first_child->sibling_prev;
call->sibling_next = args->parent_call->first_child;
call->sibling_prev = args->parent_call->first_child->sibling_prev;
call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
call;
}
gpr_mu_unlock(&parent_call->mu);
gpr_mu_unlock(&args->parent_call->mu);
}
if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) !=
0) {
@ -344,7 +345,7 @@ grpc_call *grpc_call_create(
}
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_create", 0);
return call;
return error;
}
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
@ -1089,9 +1090,14 @@ static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
static void post_batch_completion(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
grpc_error *error = bctl->error;
if (bctl->recv_final_op) {
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
grpc_exec_ctx_sched(exec_ctx, bctl->notify_tag, bctl->error, NULL);
grpc_closure_run(exec_ctx, bctl->notify_tag, error);
gpr_mu_lock(&call->mu);
bctl->call->used_batches =
(uint8_t)(bctl->call->used_batches &
@ -1100,7 +1106,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, bctl->error,
grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, error,
finish_batch_completion, bctl, &bctl->cq_completion);
}
}
@ -1246,6 +1252,14 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
}
}
static void add_batch_error(batch_control *bctl, grpc_error *error) {
if (error == GRPC_ERROR_NONE) return;
if (bctl->error == GRPC_ERROR_NONE) {
bctl->error = GRPC_ERROR_CREATE("Call batch operation failed");
}
bctl->error = grpc_error_add_child(bctl->error, error);
}
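Note: add_batch_error lazily creates the "Call batch operation failed" parent on the first failure and attaches every failure as a child, so several failed ops surface as one error tree. A minimal illustration (hypothetical error messages, assuming the batch_control layout used above):
/* illustration only: two ops of the same batch fail */
add_batch_error(bctl, GRPC_ERROR_CREATE("send_message failed"));
add_batch_error(bctl, GRPC_ERROR_CREATE("recv_message failed"));
/* bctl->error is now "Call batch operation failed" with both failures
   attached as children; grpc_error_string(bctl->error) renders the tree */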
static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *bctlp, grpc_error *error) {
batch_control *bctl = bctlp;
@@ -1253,9 +1267,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&call->mu);
add_batch_error(bctl, GRPC_ERROR_REF(error));
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
grpc_metadata_batch_filter(md, recv_initial_filter, call);
@@ -1348,8 +1361,7 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
add_batch_error(bctl, GRPC_ERROR_REF(error));
gpr_mu_unlock(&call->mu);
if (gpr_unref(&bctl->steps_to_complete)) {
post_batch_completion(exec_ctx, bctl);
@@ -1385,6 +1397,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *stream_op = &bctl->op;
memset(stream_op, 0, sizeof(*stream_op));
stream_op->covered_by_poller = true;
if (nops == 0) {
GRPC_CALL_INTERNAL_REF(call, "completion");

@@ -49,15 +49,29 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *user_data);
typedef struct grpc_call_create_args {
grpc_channel *channel;
grpc_call *parent_call;
uint32_t propagation_mask;
grpc_completion_queue *cq;
/* if not NULL, it'll be used in lieu of cq */
grpc_pollset_set *pollset_set_alternative;
const void *server_transport_data;
grpc_mdelem **add_initial_metadata;
size_t add_initial_metadata_count;
gpr_timespec send_deadline;
} grpc_call_create_args;
/* Create a new call based on \a args.
Regardless of success or failure, always returns a valid new call into *call
*/
grpc_error *grpc_call_create(const grpc_call_create_args *args,
grpc_call **call);
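Note: since call sites zero the args struct first, optional fields (parent_call, cq, server_transport_data, and so on) default to NULL, and callers set only what they need. A sketch of the client-side contract, with channel and cq standing in for the caller's existing objects:
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = channel; /* hypothetical: an existing grpc_channel */
args.cq = cq;           /* hypothetical: the caller's completion queue */
args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
grpc_call *call;
grpc_error *error = grpc_call_create(&args, &call);
if (error != GRPC_ERROR_NONE) {
  /* per the comment above, call is still valid and must be released */
}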
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);

@@ -193,9 +193,21 @@ static grpc_call *grpc_channel_create_call_internal(
send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority);
}
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = channel;
args.parent_call = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
args.pollset_set_alternative = pollset_set_alternative;
args.server_transport_data = NULL;
args.add_initial_metadata = send_metadata;
args.add_initial_metadata_count = num_metadata;
args.send_deadline = deadline;
grpc_call *call;
GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call));
return call;
}
grpc_call *grpc_channel_create_call(grpc_channel *channel,

@@ -39,6 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/pollset.h"
@@ -50,6 +51,9 @@
#include "src/core/lib/surface/event_string.h"
int grpc_trace_operation_failures;
#ifndef NDEBUG
int grpc_trace_pending_tags;
#endif
typedef struct {
grpc_pollset_worker **worker;
@@ -67,6 +71,9 @@ struct grpc_completion_queue {
gpr_refcount pending_events;
/** Once owning_refs drops to zero, we will destroy the cq */
gpr_refcount owning_refs;
/** counter of how many things have ever been queued on this completion queue
useful for avoiding locks to check the queue */
gpr_atm things_queued_ever;
/** 0 initially, 1 once we've begun shutting down */
int shutdown;
int shutdown_called;
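Note: things_queued_ever only increases, so a poller can cache the last value it observed and skip the queue mutex entirely while the counter is unchanged. A sketch of that fast path (last_seen is the caller's cached value):
gpr_atm seen = gpr_atm_no_barrier_load(&cc->things_queued_ever);
if (seen != last_seen) {
  last_seen = seen;
  gpr_mu_lock(cc->mu); /* something completed: worth scanning the list */
  /* ... inspect cc->completed_head / cc->completed_tail ... */
  gpr_mu_unlock(cc->mu);
}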
@@ -121,15 +128,6 @@ void grpc_cq_global_shutdown(void) {
}
}
grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
grpc_completion_queue *cc;
GPR_ASSERT(!reserved);
@@ -166,6 +164,7 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
cc->is_server_cq = 0;
cc->is_non_listening_server_cq = 0;
cc->num_pluckers = 0;
gpr_atm_no_barrier_store(&cc->things_queued_ever, 0);
#ifndef NDEBUG
cc->outstanding_tag_count = 0;
#endif
@@ -276,6 +275,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GPR_ASSERT(found);
#endif
shutdown = gpr_unref(&cc->pending_events);
gpr_atm_no_barrier_fetch_add(&cc->things_queued_ever, 1);
if (!shutdown) {
cc->completed_tail->next =
((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
@@ -313,13 +313,66 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GRPC_ERROR_UNREF(error);
}
typedef struct {
gpr_atm last_seen_things_queued_ever;
grpc_completion_queue *cq;
gpr_timespec deadline;
grpc_cq_completion *stolen_completion;
void *tag; /* for pluck */
bool first_loop;
} cq_is_finished_arg;
static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = arg;
grpc_completion_queue *cq = a->cq;
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cq->things_queued_ever);
if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
gpr_mu_lock(cq->mu);
a->last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cq->things_queued_ever);
if (cq->completed_tail != &cq->completed_head) {
a->stolen_completion = (grpc_cq_completion *)cq->completed_head.next;
cq->completed_head.next = a->stolen_completion->next & ~(uintptr_t)1;
if (a->stolen_completion == cq->completed_tail) {
cq->completed_tail = &cq->completed_head;
}
gpr_mu_unlock(cq->mu);
return true;
}
gpr_mu_unlock(cq->mu);
}
return !a->first_loop &&
gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
}
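Note: the & ~(uintptr_t)1 masking works because completions form an intrusive list whose next field (a uintptr_t) carries the op's success flag in its low bit; node pointers are at least 2-byte aligned, so the bit is free. Illustrative decoding helpers (not part of this patch):
static int completion_success(grpc_cq_completion *c) {
  return (int)(c->next & 1u); /* low bit tags success */
}
static grpc_cq_completion *completion_next(grpc_cq_completion *c) {
  return (grpc_cq_completion *)(c->next & ~(uintptr_t)1); /* strip the tag */
}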
#ifndef NDEBUG
static void dump_pending_tags(grpc_completion_queue *cc) {
if (!grpc_trace_pending_tags) return;
gpr_strvec v;
gpr_strvec_init(&v);
gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:"));
for (size_t i = 0; i < cc->outstanding_tag_count; i++) {
char *s;
gpr_asprintf(&s, " %p", cc->outstanding_tags[i]);
gpr_strvec_add(&v, s);
}
char *out = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
gpr_log(GPR_DEBUG, "%s", out);
gpr_free(out);
}
#else
static void dump_pending_tags(grpc_completion_queue *cc) {}
#endif
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -333,11 +386,33 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
reserved));
GPR_ASSERT(!reserved);
dump_pending_tags(cc);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "next");
gpr_mu_lock(cc->mu);
cq_is_finished_arg is_finished_arg = {
.last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cc->things_queued_ever),
.cq = cc,
.deadline = deadline,
.stolen_completion = NULL,
.tag = NULL,
.first_loop = true};
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
cq_is_next_finished, &is_finished_arg);
for (;;) {
if (is_finished_arg.stolen_completion != NULL) {
gpr_mu_unlock(cc->mu);
grpc_cq_completion *c = is_finished_arg.stolen_completion;
is_finished_arg.stolen_completion = NULL;
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
c->done(&exec_ctx, c->done_arg, c);
break;
}
if (cc->completed_tail != &cc->completed_head) {
grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
cc->completed_head.next = c->next & ~(uintptr_t)1;
@@ -358,13 +433,13 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
/* Check alarms - these are a global resource so we just ping
each time through on every pollset.
May update deadline to ensure timely wakeups.
@@ -387,13 +462,16 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
}
is_finished_arg.first_loop = false;
}
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_next", 0);
@@ -424,6 +502,37 @@ static void del_plucker(grpc_completion_queue *cc, void *tag,
GPR_UNREACHABLE_CODE(return );
}
static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = arg;
grpc_completion_queue *cq = a->cq;
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cq->things_queued_ever);
if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
gpr_mu_lock(cq->mu);
a->last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cq->things_queued_ever);
grpc_cq_completion *c;
grpc_cq_completion *prev = &cq->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
&cq->completed_head) {
if (c->tag == a->tag) {
prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
if (c == cq->completed_tail) {
cq->completed_tail = prev;
}
gpr_mu_unlock(cq->mu);
a->stolen_completion = c;
return true;
}
prev = c;
}
gpr_mu_unlock(cq->mu);
}
return !a->first_loop &&
gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
}
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
@@ -431,8 +540,6 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -448,11 +555,33 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
}
GPR_ASSERT(!reserved);
dump_pending_tags(cc);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "pluck");
gpr_mu_lock(cc->mu);
cq_is_finished_arg is_finished_arg = {
.last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cc->things_queued_ever),
.cq = cc,
.deadline = deadline,
.stolen_completion = NULL,
.tag = tag,
.first_loop = true};
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
cq_is_pluck_finished, &is_finished_arg);
for (;;) {
if (is_finished_arg.stolen_completion != NULL) {
gpr_mu_unlock(cc->mu);
c = is_finished_arg.stolen_completion;
is_finished_arg.stolen_completion = NULL;
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
c->done(&exec_ctx, c->done_arg, c);
break;
}
prev = &cc->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
&cc->completed_head) {
@@ -485,17 +614,18 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
memset(&ret, 0, sizeof(ret));
/* TODO(ctiller): should we use a different result here */
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
del_plucker(cc, tag, &worker);
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
/* Check alarms - these are a global resource so we just ping
each time through on every pollset.
May update deadline to ensure timely wakeups.
@@ -518,15 +648,18 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
}
is_finished_arg.first_loop = false;
del_plucker(cc, tag, &worker);
}
done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_pluck", 0);
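Note: both loops above rely on the finish-check variant of exec_ctx. Judging by its use here, the context periodically invokes the supplied predicate, and once it returns true, pollers stop waiting so the stolen completion can be returned without an extra wakeup. A minimal sketch of that assumed contract:
static bool ready_immediately(grpc_exec_ctx *exec_ctx, void *arg) {
  return true; /* tell pollers to stop waiting right away */
}

grpc_exec_ctx exec_ctx =
    GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(ready_immediately, NULL);
/* ... do work ... */
grpc_exec_ctx_finish(&exec_ctx);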

@@ -44,6 +44,9 @@
extern int grpc_cq_pluck_trace;
extern int grpc_cq_event_timeout_trace;
extern int grpc_trace_operation_failures;
#ifndef NDEBUG
extern int grpc_trace_pending_tags;
#endif
typedef struct grpc_cq_completion {
/** user supplied tag */

@@ -177,12 +177,16 @@ void grpc_init(void) {
grpc_register_tracer("compression", &grpc_compression_trace);
grpc_register_tracer("queue_pluck", &grpc_cq_pluck_trace);
grpc_register_tracer("combiner", &grpc_combiner_trace);
grpc_register_tracer("server_channel", &grpc_server_channel_trace);
// Default pluck trace to 1
grpc_cq_pluck_trace = 1;
grpc_register_tracer("queue_timeout", &grpc_cq_event_timeout_trace);
// Default timeout trace to 1
grpc_cq_event_timeout_trace = 1;
grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
#ifndef NDEBUG
grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
#endif
grpc_security_pre_init();
grpc_iomgr_init();
grpc_executor_init();
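Note: tracers registered here are normally switched on at process start via the GRPC_TRACE environment variable (comma-separated tracer names, e.g. GRPC_TRACE=pending_tags,server_channel; see doc/environment_variables.md); pending_tags is additionally compiled out of NDEBUG builds.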

@@ -71,6 +71,8 @@ typedef struct registered_method registered_method;
typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
int grpc_server_channel_trace = 0;
typedef struct requested_call {
requested_call_type type;
size_t cq_idx;
@@ -280,6 +282,7 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
grpc_channel_element *elem;
op->send_goaway = send_goaway;
op->set_accept_stream = true;
sc->slice = gpr_slice_from_copied_string("Server shutdown");
op->goaway_message = &sc->slice;
op->goaway_status = GRPC_STATUS_OK;
@@ -439,6 +442,13 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
chand->finish_destroy_channel_closure.cb_arg = chand;
if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Disconnected client: %s", msg);
grpc_error_free_string(msg);
}
GRPC_ERROR_UNREF(error);
grpc_transport_op *op =
grpc_make_transport_op(&chand->finish_destroy_channel_closure);
op->set_accept_stream = true;
@@ -446,13 +456,6 @@
grpc_channel_stack_element(
grpc_channel_get_channel_stack(chand->channel), 0),
op);
}
static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
@@ -773,8 +776,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_ERROR_CREATE_REFERENCING("Missing :authority or :path", &error, 1);
}
grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error);
}
static void server_mutate_op(grpc_call_element *elem,
@@ -829,11 +831,20 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
const void *transport_server_data) {
channel_data *chand = cd;
/* create a call */
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = chand->channel;
args.server_transport_data = transport_server_data;
args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
grpc_call *call;
grpc_error *error = grpc_call_create(&args, &call);
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
if (error != GRPC_ERROR_NONE) {
got_initial_metadata(exec_ctx, elem, error);
GRPC_ERROR_UNREF(error);
return;
}
call_data *calld = elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));

@@ -40,6 +40,9 @@
extern const grpc_channel_filter grpc_server_top_filter;
/** Lightweight tracing of server channel state */
extern int grpc_server_channel_trace;
/* Add a listener to the server: when the server starts, it will call start,
and when it shuts down, it will call destroy */
void grpc_server_add_listener(

@@ -180,7 +180,8 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
*w->current = tracker->current_state;
tracker->watchers = w->next;
if (grpc_connectivity_state_trace) {
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
w->notify);
}
grpc_exec_ctx_sched(exec_ctx, w->notify,
GRPC_ERROR_REF(tracker->current_error), NULL);

@@ -46,8 +46,9 @@
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
refcount->object_type, refcount, refcount->destroy.cb_arg, val,
val + 1, reason);
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
@@ -58,8 +59,9 @@ void grpc_stream_ref(grpc_stream_refcount *refcount) {
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
refcount->object_type, refcount, refcount->destroy.cb_arg, val,
val - 1, reason);
#else
void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
@@ -274,3 +276,28 @@ grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
op->op.on_consumed = &op->outer_on_complete;
return &op->op;
}
typedef struct {
grpc_closure outer_on_complete;
grpc_closure *inner_on_complete;
grpc_transport_stream_op op;
} made_transport_stream_op;
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
made_transport_stream_op *op = arg;
grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error),
NULL);
gpr_free(op);
}
grpc_transport_stream_op *grpc_make_transport_stream_op(
grpc_closure *on_complete) {
made_transport_stream_op *op = gpr_malloc(sizeof(*op));
grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
op);
op->inner_on_complete = on_complete;
memset(&op->op, 0, sizeof(op->op));
op->op.on_complete = &op->outer_on_complete;
return &op->op;
}
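Note: grpc_make_transport_stream_op mirrors grpc_make_transport_op for per-stream ops: when the transport runs op->on_complete, the wrapper schedules the wrapped closure and then gpr_free()s itself. A hypothetical fire-and-forget cancellation, where elem and notify_done stand in for the caller's call element and closure:
grpc_transport_stream_op *op = grpc_make_transport_stream_op(notify_done);
op->cancel_error = GRPC_ERROR_CANCELLED;
elem->filter->start_transport_stream_op(exec_ctx, elem, op);
/* destroy_made_transport_stream_op later runs notify_done and frees op */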

@@ -113,6 +113,10 @@ typedef struct grpc_transport_stream_op {
have been completed. */
grpc_closure *on_complete;
/** Is the completion of this op covered by a poller (if false: the op should
complete independently of some pollset being polled) */
bool covered_by_poller;
/** Send initial metadata to the peer, from the provided metadata batch.
idempotent_request MUST be set if this is non-null */
grpc_metadata_batch *send_initial_metadata;
@@ -252,6 +256,7 @@ void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
gpr_slice *optional_message);
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
char *grpc_transport_op_string(grpc_transport_op *op);
/* Send a batch of operations on a transport
@@ -293,6 +298,10 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
/* Allocate a grpc_transport_op, preconfiguring its on_consumed closure so
that \a on_consumed is scheduled and the returned op is freed once it has
been consumed */
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed);
/* Allocate a grpc_transport_stream_op, preconfiguring its on_complete closure
so that \a on_consumed is scheduled and the returned op is freed once the
op completes */
grpc_transport_stream_op *grpc_make_transport_stream_op(
grpc_closure *on_consumed);
#ifdef __cplusplus
}

@@ -41,6 +41,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/connectivity_state.h"
/* These routines are here to facilitate debugging - they produce string
representations of various transport data structures */
@@ -72,14 +73,14 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
char *tmp;
char *out;
bool first = true;
gpr_strvec b;
gpr_strvec_init(&b);
if (op->send_initial_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));
put_metadata_list(&b, *op->send_initial_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
@@ -87,7 +88,7 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
if (op->send_message != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d",
op->send_message->flags, op->send_message->length);
gpr_strvec_add(&b, tmp);
@@ -95,7 +96,7 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
if (op->send_trailing_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("SEND_TRAILING_METADATA{"));
put_metadata_list(&b, *op->send_trailing_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
@@ -103,25 +104,25 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
if (op->recv_initial_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("RECV_INITIAL_METADATA"));
}
if (op->recv_message != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
}
if (op->recv_trailing_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("RECV_TRAILING_METADATA"));
}
if (op->cancel_error != GRPC_ERROR_NONE) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
const char *msg = grpc_error_string(op->cancel_error);
gpr_asprintf(&tmp, "CANCEL:%s", msg);
grpc_error_free_string(msg);
@@ -130,7 +131,7 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
if (op->close_error != GRPC_ERROR_NONE) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
const char *msg = grpc_error_string(op->close_error);
gpr_asprintf(&tmp, "CLOSE:%s", msg);
grpc_error_free_string(msg);
@@ -143,6 +144,82 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
return out;
}
char *grpc_transport_op_string(grpc_transport_op *op) {
char *tmp;
char *out;
bool first = true;
gpr_strvec b;
gpr_strvec_init(&b);
if (op->on_connectivity_state_change != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
if (op->connectivity_state != NULL) {
gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:from=%s",
op->on_connectivity_state_change,
grpc_connectivity_state_name(*op->connectivity_state));
gpr_strvec_add(&b, tmp);
} else {
gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:unsubscribe",
op->on_connectivity_state_change);
gpr_strvec_add(&b, tmp);
}
}
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
const char *err = grpc_error_string(op->disconnect_with_error);
gpr_asprintf(&tmp, "DISCONNECT:%s", err);
gpr_strvec_add(&b, tmp);
grpc_error_free_string(err);
}
if (op->send_goaway) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
char *msg = op->goaway_message == NULL
? "null"
: gpr_dump_slice(*op->goaway_message,
GPR_DUMP_ASCII | GPR_DUMP_HEX);
gpr_asprintf(&tmp, "SEND_GOAWAY:status=%d:msg=%s", op->goaway_status, msg);
if (op->goaway_message != NULL) gpr_free(msg);
gpr_strvec_add(&b, tmp);
}
if (op->set_accept_stream) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_asprintf(&tmp, "SET_ACCEPT_STREAM:%p(%p,...)", op->set_accept_stream_fn,
op->set_accept_stream_user_data);
gpr_strvec_add(&b, tmp);
}
if (op->bind_pollset != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET"));
}
if (op->bind_pollset_set != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET_SET"));
}
if (op->send_ping != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("SEND_PING"));
}
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
return out;
}
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_transport_stream_op *op) {
char *str = grpc_transport_stream_op_string(op);

@@ -138,7 +138,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',

@@ -50,6 +50,7 @@
"\x00\x00\x00\x04\x00\x00\x00\x00\x00" /* headers: generated from \
large_metadata.headers in this \
directory */ \
"\x00\x00\x00\x04\x01\x00\x00\x00\x00" \
"\x00" \
"5{\x01\x05\x00\x00\x00\x01" \
"\x10\x05:path\x08/foo/bar" \
@@ -92,6 +93,7 @@
in this \
directory \
*/ \
"\x00\x00\x00\x04\x01\x00\x00\x00\x00" \
"\x00\x00\xc9\x01\x04\x00\x00\x00\x01" \
"\x10\x05:path\x08/foo/bar" \
"\x10\x07:scheme\x04http" \

@@ -126,14 +126,14 @@ static gpr_slice merge_slices(gpr_slice *slices, size_t nslices) {
return out;
}
int raw_byte_buffer_eq_slice(grpc_byte_buffer *rbb, gpr_slice b) {
gpr_slice a;
int ok;
if (!rbb) return 0;
a = merge_slices(rbb->data.raw.slice_buffer.slices,
rbb->data.raw.slice_buffer.count);
ok = GPR_SLICE_LENGTH(a) == GPR_SLICE_LENGTH(b) &&
0 == memcmp(GPR_SLICE_START_PTR(a), GPR_SLICE_START_PTR(b),
GPR_SLICE_LENGTH(a));
@@ -142,6 +142,21 @@ static int byte_buffer_eq_slice(grpc_byte_buffer *bb, gpr_slice b) {
return ok;
}
int byte_buffer_eq_slice(grpc_byte_buffer *bb, gpr_slice b) {
grpc_byte_buffer_reader reader;
grpc_byte_buffer *rbb;
int res;
GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, bb) &&
"Couldn't init byte buffer reader");
rbb = grpc_raw_byte_buffer_from_reader(&reader);
res = raw_byte_buffer_eq_slice(rbb, b);
grpc_byte_buffer_reader_destroy(&reader);
grpc_byte_buffer_destroy(rbb);
return res;
}
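Note: routing byte_buffer_eq_slice through a reader means it now handles any byte buffer that a reader accepts, not only raw ones. A hypothetical test-side usage (byte_buffer_eq_slice consumes the slice it is given):
gpr_slice expect = gpr_slice_from_copied_string("hello world");
grpc_byte_buffer *bb = grpc_raw_byte_buffer_create(&expect, 1);
GPR_ASSERT(byte_buffer_eq_slice(bb, gpr_slice_from_copied_string("hello world")));
grpc_byte_buffer_destroy(bb);
gpr_slice_unref(expect);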
int byte_buffer_eq_string(grpc_byte_buffer *bb, const char *str) {
grpc_byte_buffer_reader reader;
grpc_byte_buffer *rbb;
@@ -150,7 +165,7 @@ int byte_buffer_eq_string(grpc_byte_buffer *bb, const char *str) {
GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, bb) &&
"Couldn't init byte buffer reader");
rbb = grpc_raw_byte_buffer_from_reader(&reader);
res = raw_byte_buffer_eq_slice(rbb, gpr_slice_from_copied_string(str));
grpc_byte_buffer_reader_destroy(&reader);
grpc_byte_buffer_destroy(rbb);

@@ -67,6 +67,7 @@ void cq_expect_completion(cq_verifier *v, const char *file, int line, void *tag,
#define CQ_EXPECT_COMPLETION(v, tag, success) \
cq_expect_completion(v, __FILE__, __LINE__, tag, success)
int byte_buffer_eq_slice(grpc_byte_buffer *bb, gpr_slice b);
int byte_buffer_eq_string(grpc_byte_buffer *byte_buffer, const char *string);
int contains_metadata(grpc_metadata_array *array, const char *key,
const char *value);
