Merge remote-tracking branch 'upstream/master' into rename_client_config

pull/8095/head
Mark D. Roth 8 years ago
commit eb35a1d16b
  1. BUILD (32)
  2. CMakeLists.txt (12)
  3. Makefile (205)
  4. README.md (2)
  5. binding.gyp (4)
  6. build.yaml (44)
  7. composer.json (1)
  8. config.m4 (4)
  9. doc/core/pending_api_cleanups.md (2)
  10. doc/cpp/pending_api_cleanups.md (15)
  11. doc/environment_variables.md (2)
  12. doc/epoll-polling-engine.md (121)
  13. doc/images/new_epoll_impl.png (BIN)
  14. doc/images/old_epoll_impl.png (BIN)
  15. doc/interop-test-descriptions.md (66)
  16. examples/php/composer.json (1)
  17. examples/python/helloworld/run_codegen.py (42)
  18. gRPC-Core.podspec (12)
  19. grpc.gemspec (8)
  20. include/grpc++/impl/codegen/client_context.h (20)
  21. include/grpc++/impl/codegen/server_context.h (2)
  22. include/grpc++/test/server_context_test_spouse.h (67)
  23. include/grpc/impl/codegen/grpc_types.h (20)
  24. include/grpc/support/alloc.h (6)
  25. package.xml (8)
  26. src/compiler/README.md (4)
  27. src/compiler/csharp_generator.cc (3)
  28. src/core/README.md (6)
  29. src/core/ext/census/grpc_filter.c (4)
  30. src/core/ext/client_channel/client_channel.c (219)
  31. src/core/ext/client_channel/http_connect_handshaker.c (4)
  32. src/core/ext/client_channel/lb_policy.h (24)
  33. src/core/ext/client_channel/method_config.c (296)
  34. src/core/ext/client_channel/method_config.h (116)
  35. src/core/ext/client_channel/resolver_result.h (9)
  36. src/core/ext/client_channel/subchannel.c (8)
  37. src/core/ext/client_channel/subchannel.h (3)
  38. src/core/ext/lb_policy/grpclb/grpclb.c (211)
  39. src/core/ext/lb_policy/pick_first/pick_first.c (13)
  40. src/core/ext/lb_policy/round_robin/round_robin.c (15)
  41. src/core/ext/load_reporting/load_reporting.h (20)
  42. src/core/ext/load_reporting/load_reporting_filter.c (4)
  43. src/core/ext/resolver/sockaddr/sockaddr_resolver.c (5)
  44. src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c (196)
  45. src/core/ext/transport/chttp2/transport/chttp2_plugin.c (3)
  46. src/core/ext/transport/chttp2/transport/chttp2_transport.c (2214)
  47. src/core/ext/transport/chttp2/transport/frame.h (4)
  48. src/core/ext/transport/chttp2/transport/frame_data.c (68)
  49. src/core/ext/transport/chttp2/transport/frame_data.h (9)
  50. src/core/ext/transport/chttp2/transport/frame_goaway.c (18)
  51. src/core/ext/transport/chttp2/transport/frame_goaway.h (9)
  52. src/core/ext/transport/chttp2/transport/frame_ping.c (13)
  53. src/core/ext/transport/chttp2/transport/frame_ping.h (8)
  54. src/core/ext/transport/chttp2/transport/frame_rst_stream.c (38)
  55. src/core/ext/transport/chttp2/transport/frame_rst_stream.h (9)
  56. src/core/ext/transport/chttp2/transport/frame_settings.c (23)
  57. src/core/ext/transport/chttp2/transport/frame_settings.h (9)
  58. src/core/ext/transport/chttp2/transport/frame_window_update.c (34)
  59. src/core/ext/transport/chttp2/transport/frame_window_update.h (5)
  60. src/core/ext/transport/chttp2/transport/hpack_parser.c (492)
  61. src/core/ext/transport/chttp2/transport/hpack_parser.h (17)
  62. src/core/ext/transport/chttp2/transport/internal.h (654)
  63. src/core/ext/transport/chttp2/transport/parsing.c (910)
  64. src/core/ext/transport/chttp2/transport/stream_lists.c (351)
  65. src/core/ext/transport/chttp2/transport/stream_map.c (36)
  66. src/core/ext/transport/chttp2/transport/stream_map.h (4)
  67. src/core/ext/transport/chttp2/transport/writing.c (466)
  68. src/core/ext/transport/cronet/transport/cronet_transport.c (2)
  69. src/core/lib/channel/channel_args.c (12)
  70. src/core/lib/channel/channel_args.h (4)
  71. src/core/lib/channel/channel_stack.c (4)
  72. src/core/lib/channel/channel_stack.h (4)
  73. src/core/lib/channel/deadline_filter.c (76)
  74. src/core/lib/channel/deadline_filter.h (33)
  75. src/core/lib/channel/handshaker.c (2)
  76. src/core/lib/channel/message_size_filter.c (60)
  77. src/core/lib/iomgr/closure.c (18)
  78. src/core/lib/iomgr/closure.h (11)
  79. src/core/lib/iomgr/combiner.c (372)
  80. src/core/lib/iomgr/combiner.h (15)
  81. src/core/lib/iomgr/error.c (18)
  82. src/core/lib/iomgr/error.h (12)
  83. src/core/lib/iomgr/ev_epoll_linux.c (231)
  84. src/core/lib/iomgr/ev_poll_and_epoll_posix.c (30)
  85. src/core/lib/iomgr/ev_poll_posix.c (280)
  86. src/core/lib/iomgr/ev_poll_posix.h (1)
  87. src/core/lib/iomgr/ev_posix.c (24)
  88. src/core/lib/iomgr/ev_posix.h (13)
  89. src/core/lib/iomgr/exec_ctx.c (61)
  90. src/core/lib/iomgr/exec_ctx.h (24)
  91. src/core/lib/iomgr/iomgr.c (11)
  92. src/core/lib/iomgr/tcp_posix.c (11)
  93. src/core/lib/iomgr/tcp_server.h (4)
  94. src/core/lib/iomgr/tcp_server_posix.c (29)
  95. src/core/lib/iomgr/tcp_server_windows.c (16)
  96. src/core/lib/iomgr/timer.h (17)
  97. src/core/lib/iomgr/wakeup_fd_cv.c (118)
  98. src/core/lib/iomgr/wakeup_fd_cv.h (80)
  99. src/core/lib/iomgr/wakeup_fd_pipe.c (12)
  100. src/core/lib/iomgr/wakeup_fd_posix.c (33)
  101. Some files were not shown because too many files have changed in this diff.

32
BUILD

@ -217,10 +217,10 @@ cc_library(
"src/core/lib/iomgr/timer_heap.h",
"src/core/lib/iomgr/udp_server.h",
"src/core/lib/iomgr/unix_sockets_posix.h",
"src/core/lib/iomgr/wakeup_fd_cv.h",
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -239,6 +239,7 @@ cc_library(
"src/core/lib/surface/server.h",
"src/core/lib/transport/byte_stream.h",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/mdstr_hash_table.h",
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/static_metadata.h",
@ -298,6 +299,7 @@ cc_library(
"src/core/ext/client_channel/lb_policy.h",
"src/core/ext/client_channel/lb_policy_factory.h",
"src/core/ext/client_channel/lb_policy_registry.h",
"src/core/ext/client_channel/method_config.h",
"src/core/ext/client_channel/parse_address.h",
"src/core/ext/client_channel/resolver.h",
"src/core/ext/client_channel/resolver_factory.h",
@ -379,11 +381,11 @@ cc_library(
"src/core/lib/iomgr/udp_server.c",
"src/core/lib/iomgr/unix_sockets_posix.c",
"src/core/lib/iomgr/unix_sockets_posix_noop.c",
"src/core/lib/iomgr/wakeup_fd_cv.c",
"src/core/lib/iomgr/wakeup_fd_eventfd.c",
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -409,6 +411,7 @@ cc_library(
"src/core/lib/surface/version.c",
"src/core/lib/transport/byte_stream.c",
"src/core/lib/transport/connectivity_state.c",
"src/core/lib/transport/mdstr_hash_table.c",
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/static_metadata.c",
@ -478,6 +481,7 @@ cc_library(
"src/core/ext/client_channel/lb_policy.c",
"src/core/ext/client_channel/lb_policy_factory.c",
"src/core/ext/client_channel/lb_policy_registry.c",
"src/core/ext/client_channel/method_config.c",
"src/core/ext/client_channel/parse_address.c",
"src/core/ext/client_channel/resolver.c",
"src/core/ext/client_channel/resolver_factory.c",
@ -618,10 +622,10 @@ cc_library(
"src/core/lib/iomgr/timer_heap.h",
"src/core/lib/iomgr/udp_server.h",
"src/core/lib/iomgr/unix_sockets_posix.h",
"src/core/lib/iomgr/wakeup_fd_cv.h",
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -640,6 +644,7 @@ cc_library(
"src/core/lib/surface/server.h",
"src/core/lib/transport/byte_stream.h",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/mdstr_hash_table.h",
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/static_metadata.h",
@ -676,6 +681,7 @@ cc_library(
"src/core/ext/client_channel/lb_policy.h",
"src/core/ext/client_channel/lb_policy_factory.h",
"src/core/ext/client_channel/lb_policy_registry.h",
"src/core/ext/client_channel/method_config.h",
"src/core/ext/client_channel/parse_address.h",
"src/core/ext/client_channel/resolver.h",
"src/core/ext/client_channel/resolver_factory.h",
@ -765,11 +771,11 @@ cc_library(
"src/core/lib/iomgr/udp_server.c",
"src/core/lib/iomgr/unix_sockets_posix.c",
"src/core/lib/iomgr/unix_sockets_posix_noop.c",
"src/core/lib/iomgr/wakeup_fd_cv.c",
"src/core/lib/iomgr/wakeup_fd_eventfd.c",
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -795,6 +801,7 @@ cc_library(
"src/core/lib/surface/version.c",
"src/core/lib/transport/byte_stream.c",
"src/core/lib/transport/connectivity_state.c",
"src/core/lib/transport/mdstr_hash_table.c",
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/static_metadata.c",
@ -838,6 +845,7 @@ cc_library(
"src/core/ext/client_channel/lb_policy.c",
"src/core/ext/client_channel/lb_policy_factory.c",
"src/core/ext/client_channel/lb_policy_registry.c",
"src/core/ext/client_channel/method_config.c",
"src/core/ext/client_channel/parse_address.c",
"src/core/ext/client_channel/resolver.c",
"src/core/ext/client_channel/resolver_factory.c",
@ -974,10 +982,10 @@ cc_library(
"src/core/lib/iomgr/timer_heap.h",
"src/core/lib/iomgr/udp_server.h",
"src/core/lib/iomgr/unix_sockets_posix.h",
"src/core/lib/iomgr/wakeup_fd_cv.h",
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -996,6 +1004,7 @@ cc_library(
"src/core/lib/surface/server.h",
"src/core/lib/transport/byte_stream.h",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/mdstr_hash_table.h",
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/static_metadata.h",
@ -1031,6 +1040,7 @@ cc_library(
"src/core/ext/client_channel/lb_policy.h",
"src/core/ext/client_channel/lb_policy_factory.h",
"src/core/ext/client_channel/lb_policy_registry.h",
"src/core/ext/client_channel/method_config.h",
"src/core/ext/client_channel/parse_address.h",
"src/core/ext/client_channel/resolver.h",
"src/core/ext/client_channel/resolver_factory.h",
@ -1113,11 +1123,11 @@ cc_library(
"src/core/lib/iomgr/udp_server.c",
"src/core/lib/iomgr/unix_sockets_posix.c",
"src/core/lib/iomgr/unix_sockets_posix_noop.c",
"src/core/lib/iomgr/wakeup_fd_cv.c",
"src/core/lib/iomgr/wakeup_fd_eventfd.c",
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -1143,6 +1153,7 @@ cc_library(
"src/core/lib/surface/version.c",
"src/core/lib/transport/byte_stream.c",
"src/core/lib/transport/connectivity_state.c",
"src/core/lib/transport/mdstr_hash_table.c",
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/static_metadata.c",
@ -1186,6 +1197,7 @@ cc_library(
"src/core/ext/client_channel/lb_policy.c",
"src/core/ext/client_channel/lb_policy_factory.c",
"src/core/ext/client_channel/lb_policy_registry.c",
"src/core/ext/client_channel/method_config.c",
"src/core/ext/client_channel/parse_address.c",
"src/core/ext/client_channel/resolver.c",
"src/core/ext/client_channel/resolver_factory.c",
@ -1876,11 +1888,11 @@ objc_library(
"src/core/lib/iomgr/udp_server.c",
"src/core/lib/iomgr/unix_sockets_posix.c",
"src/core/lib/iomgr/unix_sockets_posix_noop.c",
"src/core/lib/iomgr/wakeup_fd_cv.c",
"src/core/lib/iomgr/wakeup_fd_eventfd.c",
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -1906,6 +1918,7 @@ objc_library(
"src/core/lib/surface/version.c",
"src/core/lib/transport/byte_stream.c",
"src/core/lib/transport/connectivity_state.c",
"src/core/lib/transport/mdstr_hash_table.c",
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/static_metadata.c",
@ -1975,6 +1988,7 @@ objc_library(
"src/core/ext/client_channel/lb_policy.c",
"src/core/ext/client_channel/lb_policy_factory.c",
"src/core/ext/client_channel/lb_policy_registry.c",
"src/core/ext/client_channel/method_config.c",
"src/core/ext/client_channel/parse_address.c",
"src/core/ext/client_channel/resolver.c",
"src/core/ext/client_channel/resolver_factory.c",
@ -2094,10 +2108,10 @@ objc_library(
"src/core/lib/iomgr/timer_heap.h",
"src/core/lib/iomgr/udp_server.h",
"src/core/lib/iomgr/unix_sockets_posix.h",
"src/core/lib/iomgr/wakeup_fd_cv.h",
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -2116,6 +2130,7 @@ objc_library(
"src/core/lib/surface/server.h",
"src/core/lib/transport/byte_stream.h",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/mdstr_hash_table.h",
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/static_metadata.h",
@ -2175,6 +2190,7 @@ objc_library(
"src/core/ext/client_channel/lb_policy.h",
"src/core/ext/client_channel/lb_policy_factory.h",
"src/core/ext/client_channel/lb_policy_registry.h",
"src/core/ext/client_channel/method_config.h",
"src/core/ext/client_channel/parse_address.h",
"src/core/ext/client_channel/resolver.h",
"src/core/ext/client_channel/resolver_factory.h",

@ -346,11 +346,11 @@ add_library(grpc
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
src/core/lib/iomgr/unix_sockets_posix_noop.c
src/core/lib/iomgr/wakeup_fd_cv.c
src/core/lib/iomgr/wakeup_fd_eventfd.c
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -376,6 +376,7 @@ add_library(grpc
src/core/lib/surface/version.c
src/core/lib/transport/byte_stream.c
src/core/lib/transport/connectivity_state.c
src/core/lib/transport/mdstr_hash_table.c
src/core/lib/transport/metadata.c
src/core/lib/transport/metadata_batch.c
src/core/lib/transport/static_metadata.c
@ -445,6 +446,7 @@ add_library(grpc
src/core/ext/client_channel/lb_policy.c
src/core/ext/client_channel/lb_policy_factory.c
src/core/ext/client_channel/lb_policy_registry.c
src/core/ext/client_channel/method_config.c
src/core/ext/client_channel/parse_address.c
src/core/ext/client_channel/resolver.c
src/core/ext/client_channel/resolver_factory.c
@ -605,11 +607,11 @@ add_library(grpc_cronet
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
src/core/lib/iomgr/unix_sockets_posix_noop.c
src/core/lib/iomgr/wakeup_fd_cv.c
src/core/lib/iomgr/wakeup_fd_eventfd.c
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -635,6 +637,7 @@ add_library(grpc_cronet
src/core/lib/surface/version.c
src/core/lib/transport/byte_stream.c
src/core/lib/transport/connectivity_state.c
src/core/lib/transport/mdstr_hash_table.c
src/core/lib/transport/metadata.c
src/core/lib/transport/metadata_batch.c
src/core/lib/transport/static_metadata.c
@ -678,6 +681,7 @@ add_library(grpc_cronet
src/core/ext/client_channel/lb_policy.c
src/core/ext/client_channel/lb_policy_factory.c
src/core/ext/client_channel/lb_policy_registry.c
src/core/ext/client_channel/method_config.c
src/core/ext/client_channel/parse_address.c
src/core/ext/client_channel/resolver.c
src/core/ext/client_channel/resolver_factory.c
@ -836,11 +840,11 @@ add_library(grpc_unsecure
src/core/lib/iomgr/udp_server.c
src/core/lib/iomgr/unix_sockets_posix.c
src/core/lib/iomgr/unix_sockets_posix_noop.c
src/core/lib/iomgr/wakeup_fd_cv.c
src/core/lib/iomgr/wakeup_fd_eventfd.c
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -866,6 +870,7 @@ add_library(grpc_unsecure
src/core/lib/surface/version.c
src/core/lib/transport/byte_stream.c
src/core/lib/transport/connectivity_state.c
src/core/lib/transport/mdstr_hash_table.c
src/core/lib/transport/metadata.c
src/core/lib/transport/metadata_batch.c
src/core/lib/transport/static_metadata.c
@ -909,6 +914,7 @@ add_library(grpc_unsecure
src/core/ext/client_channel/lb_policy.c
src/core/ext/client_channel/lb_policy_factory.c
src/core/ext/client_channel/lb_policy_registry.c
src/core/ext/client_channel/method_config.c
src/core/ext/client_channel/parse_address.c
src/core/ext/client_channel/resolver.c
src/core/ext/client_channel/resolver_factory.c

@ -1025,6 +1025,7 @@ transport_security_test: $(BINDIR)/$(CONFIG)/transport_security_test
udp_server_test: $(BINDIR)/$(CONFIG)/udp_server_test
uri_fuzzer_test: $(BINDIR)/$(CONFIG)/uri_fuzzer_test
uri_parser_test: $(BINDIR)/$(CONFIG)/uri_parser_test
wakeup_fd_cv_test: $(BINDIR)/$(CONFIG)/wakeup_fd_cv_test
alarm_cpp_test: $(BINDIR)/$(CONFIG)/alarm_cpp_test
async_end2end_test: $(BINDIR)/$(CONFIG)/async_end2end_test
auth_property_iterator_test: $(BINDIR)/$(CONFIG)/auth_property_iterator_test
@ -1071,6 +1072,7 @@ reconnect_interop_server: $(BINDIR)/$(CONFIG)/reconnect_interop_server
secure_auth_context_test: $(BINDIR)/$(CONFIG)/secure_auth_context_test
secure_sync_unary_ping_pong_test: $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test
server_builder_plugin_test: $(BINDIR)/$(CONFIG)/server_builder_plugin_test
server_context_test_spouse_test: $(BINDIR)/$(CONFIG)/server_context_test_spouse_test
server_crash_test: $(BINDIR)/$(CONFIG)/server_crash_test
server_crash_test_client: $(BINDIR)/$(CONFIG)/server_crash_test_client
shutdown_test: $(BINDIR)/$(CONFIG)/shutdown_test
@ -1131,6 +1133,7 @@ bad_ssl_cert_server: $(BINDIR)/$(CONFIG)/bad_ssl_cert_server
bad_ssl_cert_test: $(BINDIR)/$(CONFIG)/bad_ssl_cert_test
h2_census_test: $(BINDIR)/$(CONFIG)/h2_census_test
h2_compress_test: $(BINDIR)/$(CONFIG)/h2_compress_test
h2_fake_resolver_test: $(BINDIR)/$(CONFIG)/h2_fake_resolver_test
h2_fakesec_test: $(BINDIR)/$(CONFIG)/h2_fakesec_test
h2_fd_test: $(BINDIR)/$(CONFIG)/h2_fd_test
h2_full_test: $(BINDIR)/$(CONFIG)/h2_full_test
@ -1149,6 +1152,7 @@ h2_ssl_proxy_test: $(BINDIR)/$(CONFIG)/h2_ssl_proxy_test
h2_uds_test: $(BINDIR)/$(CONFIG)/h2_uds_test
h2_census_nosec_test: $(BINDIR)/$(CONFIG)/h2_census_nosec_test
h2_compress_nosec_test: $(BINDIR)/$(CONFIG)/h2_compress_nosec_test
h2_fake_resolver_nosec_test: $(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test
h2_fd_nosec_test: $(BINDIR)/$(CONFIG)/h2_fd_nosec_test
h2_full_nosec_test: $(BINDIR)/$(CONFIG)/h2_full_nosec_test
h2_full+pipe_nosec_test: $(BINDIR)/$(CONFIG)/h2_full+pipe_nosec_test
@ -1340,6 +1344,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/transport_security_test \
$(BINDIR)/$(CONFIG)/udp_server_test \
$(BINDIR)/$(CONFIG)/uri_parser_test \
$(BINDIR)/$(CONFIG)/wakeup_fd_cv_test \
$(BINDIR)/$(CONFIG)/public_headers_must_be_c89 \
$(BINDIR)/$(CONFIG)/badreq_bad_client_test \
$(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test \
@ -1355,6 +1360,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/bad_ssl_cert_test \
$(BINDIR)/$(CONFIG)/h2_census_test \
$(BINDIR)/$(CONFIG)/h2_compress_test \
$(BINDIR)/$(CONFIG)/h2_fake_resolver_test \
$(BINDIR)/$(CONFIG)/h2_fakesec_test \
$(BINDIR)/$(CONFIG)/h2_fd_test \
$(BINDIR)/$(CONFIG)/h2_full_test \
@ -1373,6 +1379,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/h2_uds_test \
$(BINDIR)/$(CONFIG)/h2_census_nosec_test \
$(BINDIR)/$(CONFIG)/h2_compress_nosec_test \
$(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test \
$(BINDIR)/$(CONFIG)/h2_fd_nosec_test \
$(BINDIR)/$(CONFIG)/h2_full_nosec_test \
$(BINDIR)/$(CONFIG)/h2_full+pipe_nosec_test \
@ -1439,6 +1446,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/secure_auth_context_test \
$(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test \
$(BINDIR)/$(CONFIG)/server_builder_plugin_test \
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test \
$(BINDIR)/$(CONFIG)/server_crash_test \
$(BINDIR)/$(CONFIG)/server_crash_test_client \
$(BINDIR)/$(CONFIG)/shutdown_test \
@ -1526,6 +1534,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/secure_auth_context_test \
$(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test \
$(BINDIR)/$(CONFIG)/server_builder_plugin_test \
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test \
$(BINDIR)/$(CONFIG)/server_crash_test \
$(BINDIR)/$(CONFIG)/server_crash_test_client \
$(BINDIR)/$(CONFIG)/shutdown_test \
@ -1738,6 +1747,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/udp_server_test || ( echo test udp_server_test failed ; exit 1 )
$(E) "[RUN] Testing uri_parser_test"
$(Q) $(BINDIR)/$(CONFIG)/uri_parser_test || ( echo test uri_parser_test failed ; exit 1 )
$(E) "[RUN] Testing wakeup_fd_cv_test"
$(Q) $(BINDIR)/$(CONFIG)/wakeup_fd_cv_test || ( echo test wakeup_fd_cv_test failed ; exit 1 )
$(E) "[RUN] Testing public_headers_must_be_c89"
$(Q) $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 || ( echo test public_headers_must_be_c89 failed ; exit 1 )
$(E) "[RUN] Testing badreq_bad_client_test"
@ -1828,6 +1839,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test || ( echo test secure_sync_unary_ping_pong_test failed ; exit 1 )
$(E) "[RUN] Testing server_builder_plugin_test"
$(Q) $(BINDIR)/$(CONFIG)/server_builder_plugin_test || ( echo test server_builder_plugin_test failed ; exit 1 )
$(E) "[RUN] Testing server_context_test_spouse_test"
$(Q) $(BINDIR)/$(CONFIG)/server_context_test_spouse_test || ( echo test server_context_test_spouse_test failed ; exit 1 )
$(E) "[RUN] Testing server_crash_test"
$(Q) $(BINDIR)/$(CONFIG)/server_crash_test || ( echo test server_crash_test failed ; exit 1 )
$(E) "[RUN] Testing shutdown_test"
@ -2590,11 +2603,11 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
src/core/lib/iomgr/unix_sockets_posix_noop.c \
src/core/lib/iomgr/wakeup_fd_cv.c \
src/core/lib/iomgr/wakeup_fd_eventfd.c \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -2620,6 +2633,7 @@ LIBGRPC_SRC = \
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -2689,6 +2703,7 @@ LIBGRPC_SRC = \
src/core/ext/client_channel/lb_policy.c \
src/core/ext/client_channel/lb_policy_factory.c \
src/core/ext/client_channel/lb_policy_registry.c \
src/core/ext/client_channel/method_config.c \
src/core/ext/client_channel/parse_address.c \
src/core/ext/client_channel/resolver.c \
src/core/ext/client_channel/resolver_factory.c \
@ -2867,11 +2882,11 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
src/core/lib/iomgr/unix_sockets_posix_noop.c \
src/core/lib/iomgr/wakeup_fd_cv.c \
src/core/lib/iomgr/wakeup_fd_eventfd.c \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -2897,6 +2912,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -2940,6 +2956,7 @@ LIBGRPC_CRONET_SRC = \
src/core/ext/client_channel/lb_policy.c \
src/core/ext/client_channel/lb_policy_factory.c \
src/core/ext/client_channel/lb_policy_registry.c \
src/core/ext/client_channel/method_config.c \
src/core/ext/client_channel/parse_address.c \
src/core/ext/client_channel/resolver.c \
src/core/ext/client_channel/resolver_factory.c \
@ -3134,11 +3151,11 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
src/core/lib/iomgr/unix_sockets_posix_noop.c \
src/core/lib/iomgr/wakeup_fd_cv.c \
src/core/lib/iomgr/wakeup_fd_eventfd.c \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -3164,6 +3181,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -3328,11 +3346,11 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
src/core/lib/iomgr/unix_sockets_posix_noop.c \
src/core/lib/iomgr/wakeup_fd_cv.c \
src/core/lib/iomgr/wakeup_fd_eventfd.c \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -3358,6 +3376,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -3401,6 +3420,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/client_channel/lb_policy.c \
src/core/ext/client_channel/lb_policy_factory.c \
src/core/ext/client_channel/lb_policy_registry.c \
src/core/ext/client_channel/method_config.c \
src/core/ext/client_channel/parse_address.c \
src/core/ext/client_channel/resolver.c \
src/core/ext/client_channel/resolver_factory.c \
@ -3948,6 +3968,55 @@ endif
endif
LIBGRPC++_TEST_SRC = \
src/cpp/test/server_context_test_spouse.cc \
PUBLIC_HEADERS_CXX += \
LIBGRPC++_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure libraries if you don't have OpenSSL.
$(LIBDIR)/$(CONFIG)/libgrpc++_test.a: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build a C++ library if you don't have protobuf - a bit overreached, but still okay.
$(LIBDIR)/$(CONFIG)/libgrpc++_test.a: protobuf_dep_error
else
$(LIBDIR)/$(CONFIG)/libgrpc++_test.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTOBUF_DEP) $(LIBGRPC++_TEST_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
$(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc++_test.a
$(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBGRPC++_TEST_OBJS)
ifeq ($(SYSTEM),Darwin)
$(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc++_test.a
endif
endif
endif
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(LIBGRPC++_TEST_OBJS:.o=.dep)
endif
endif
LIBGRPC++_TEST_CONFIG_SRC = \
test/cpp/util/test_config_cc.cc \
@ -10757,6 +10826,38 @@ endif
endif
WAKEUP_FD_CV_TEST_SRC = \
test/core/iomgr/wakeup_fd_cv_test.c \
WAKEUP_FD_CV_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(WAKEUP_FD_CV_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/wakeup_fd_cv_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/wakeup_fd_cv_test: $(WAKEUP_FD_CV_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(WAKEUP_FD_CV_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/wakeup_fd_cv_test
endif
$(OBJDIR)/$(CONFIG)/test/core/iomgr/wakeup_fd_cv_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_wakeup_fd_cv_test: $(WAKEUP_FD_CV_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(WAKEUP_FD_CV_TEST_OBJS:.o=.dep)
endif
endif
ALARM_CPP_TEST_SRC = \
test/cpp/common/alarm_cpp_test.cc \
@ -12713,6 +12814,49 @@ endif
endif
SERVER_CONTEXT_TEST_SPOUSE_TEST_SRC = \
test/cpp/test/server_context_test_spouse_test.cc \
SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(SERVER_CONTEXT_TEST_SPOUSE_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test: $(PROTOBUF_DEP) $(SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/server_context_test_spouse_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/test/server_context_test_spouse_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_server_context_test_spouse_test: $(SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS:.o=.dep)
endif
endif
SERVER_CRASH_TEST_SRC = \
test/cpp/end2end/server_crash_test.cc \
@ -14429,6 +14573,38 @@ endif
endif
H2_FAKE_RESOLVER_TEST_SRC = \
test/core/end2end/fixtures/h2_fake_resolver.c \
H2_FAKE_RESOLVER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(H2_FAKE_RESOLVER_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/h2_fake_resolver_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/h2_fake_resolver_test: $(H2_FAKE_RESOLVER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(H2_FAKE_RESOLVER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/h2_fake_resolver_test
endif
$(OBJDIR)/$(CONFIG)/test/core/end2end/fixtures/h2_fake_resolver.o: $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_h2_fake_resolver_test: $(H2_FAKE_RESOLVER_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(H2_FAKE_RESOLVER_TEST_OBJS:.o=.dep)
endif
endif
H2_FAKESEC_TEST_SRC = \
test/core/end2end/fixtures/h2_fakesec.c \
@ -14981,6 +15157,26 @@ ifneq ($(NO_DEPS),true)
endif
H2_FAKE_RESOLVER_NOSEC_TEST_SRC = \
test/core/end2end/fixtures/h2_fake_resolver.c \
H2_FAKE_RESOLVER_NOSEC_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(H2_FAKE_RESOLVER_NOSEC_TEST_SRC))))
$(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test: $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o $(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test
$(OBJDIR)/$(CONFIG)/test/core/end2end/fixtures/h2_fake_resolver.o: $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_h2_fake_resolver_nosec_test: $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS:.o=.dep)
ifneq ($(NO_DEPS),true)
-include $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS:.o=.dep)
endif
H2_FD_NOSEC_TEST_SRC = \
test/core/end2end/fixtures/h2_fd.c \
@ -15674,6 +15870,7 @@ src/cpp/ext/proto_server_reflection_plugin.cc: $(OPENSSL_DEP)
src/cpp/ext/reflection.grpc.pb.cc: $(OPENSSL_DEP)
src/cpp/ext/reflection.pb.cc: $(OPENSSL_DEP)
src/cpp/server/secure_server_credentials.cc: $(OPENSSL_DEP)
src/cpp/test/server_context_test_spouse.cc: $(OPENSSL_DEP)
src/csharp/ext/grpc_csharp_ext.c: $(OPENSSL_DEP)
test/core/bad_client/bad_client.c: $(OPENSSL_DEP)
test/core/bad_ssl/server_common.c: $(OPENSSL_DEP)

@ -21,7 +21,7 @@ See [tools/run_tests](tools/run_tests) for more guidance on how to run various t
This repository contains source code for gRPC libraries for multiple languages written on top of shared C core library [src/core] (src/core).
Libraries in different languages are in different states of development. We are seeking contributions for all of these libraries.
Libraries in different languages may be in different states of development. We are seeking contributions for all of these libraries.
| Language | Source | Status |
|-------------------------|-------------------------------------|---------|

@ -621,11 +621,11 @@
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
'src/core/lib/iomgr/unix_sockets_posix_noop.c',
'src/core/lib/iomgr/wakeup_fd_cv.c',
'src/core/lib/iomgr/wakeup_fd_eventfd.c',
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_posix.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
@ -651,6 +651,7 @@
'src/core/lib/surface/version.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
'src/core/lib/transport/mdstr_hash_table.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/static_metadata.c',
@ -720,6 +721,7 @@
'src/core/ext/client_channel/lb_policy.c',
'src/core/ext/client_channel/lb_policy_factory.c',
'src/core/ext/client_channel/lb_policy_registry.c',
'src/core/ext/client_channel/method_config.c',
'src/core/ext/client_channel/parse_address.c',
'src/core/ext/client_channel/resolver.c',
'src/core/ext/client_channel/resolver_factory.c',

@ -221,10 +221,10 @@ filegroups:
- src/core/lib/iomgr/timer_heap.h
- src/core/lib/iomgr/udp_server.h
- src/core/lib/iomgr/unix_sockets_posix.h
- src/core/lib/iomgr/wakeup_fd_cv.h
- src/core/lib/iomgr/wakeup_fd_pipe.h
- src/core/lib/iomgr/wakeup_fd_posix.h
- src/core/lib/iomgr/workqueue.h
- src/core/lib/iomgr/workqueue_posix.h
- src/core/lib/iomgr/workqueue_windows.h
- src/core/lib/json/json.h
- src/core/lib/json/json_common.h
@ -243,6 +243,7 @@ filegroups:
- src/core/lib/surface/server.h
- src/core/lib/transport/byte_stream.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/mdstr_hash_table.h
- src/core/lib/transport/metadata.h
- src/core/lib/transport/metadata_batch.h
- src/core/lib/transport/static_metadata.h
@ -306,11 +307,11 @@ filegroups:
- src/core/lib/iomgr/udp_server.c
- src/core/lib/iomgr/unix_sockets_posix.c
- src/core/lib/iomgr/unix_sockets_posix_noop.c
- src/core/lib/iomgr/wakeup_fd_cv.c
- src/core/lib/iomgr/wakeup_fd_eventfd.c
- src/core/lib/iomgr/wakeup_fd_nospecial.c
- src/core/lib/iomgr/wakeup_fd_pipe.c
- src/core/lib/iomgr/wakeup_fd_posix.c
- src/core/lib/iomgr/workqueue_posix.c
- src/core/lib/iomgr/workqueue_windows.c
- src/core/lib/json/json.c
- src/core/lib/json/json_reader.c
@ -336,6 +337,7 @@ filegroups:
- src/core/lib/surface/version.c
- src/core/lib/transport/byte_stream.c
- src/core/lib/transport/connectivity_state.c
- src/core/lib/transport/mdstr_hash_table.c
- src/core/lib/transport/metadata.c
- src/core/lib/transport/metadata_batch.c
- src/core/lib/transport/static_metadata.c
@ -356,6 +358,7 @@ filegroups:
- src/core/ext/client_channel/lb_policy.h
- src/core/ext/client_channel/lb_policy_factory.h
- src/core/ext/client_channel/lb_policy_registry.h
- src/core/ext/client_channel/method_config.h
- src/core/ext/client_channel/parse_address.h
- src/core/ext/client_channel/resolver.h
- src/core/ext/client_channel/resolver_factory.h
@ -376,6 +379,7 @@ filegroups:
- src/core/ext/client_channel/lb_policy.c
- src/core/ext/client_channel/lb_policy_factory.c
- src/core/ext/client_channel/lb_policy_registry.c
- src/core/ext/client_channel/method_config.c
- src/core/ext/client_channel/parse_address.c
- src/core/ext/client_channel/resolver.c
- src/core/ext/client_channel/resolver_factory.c
@ -1022,6 +1026,15 @@ libs:
language: c++
src:
- src/proto/grpc/reflection/v1alpha/reflection.proto
- name: grpc++_test
build: test
language: c++
headers:
- include/grpc++/test/server_context_test_spouse.h
src:
- src/cpp/test/server_context_test_spouse.cc
deps:
- grpc++
- name: grpc++_test_config
build: private
language: c++
@ -2592,6 +2605,20 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: wakeup_fd_cv_test
build: test
language: c
src:
- test/core/iomgr/wakeup_fd_cv_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
platforms:
- mac
- linux
- posix
- name: alarm_cpp_test
gtest: true
build: test
@ -3225,6 +3252,19 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: server_context_test_spouse_test
gtest: true
build: test
language: c++
src:
- test/cpp/test/server_context_test_spouse_test.cc
deps:
- grpc_test_util
- grpc++_test
- grpc++
- grpc
- gpr_test_util
- gpr
- name: server_crash_test
gtest: true
cpu_cost: 0.1

@ -7,6 +7,7 @@
"license": "BSD-3-Clause",
"require": {
"php": ">=5.5.0",
"ext-grpc": "*",
"google/protobuf": "v3.1.0-alpha-1"
},
"require-dev": {

@ -140,11 +140,11 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/udp_server.c \
src/core/lib/iomgr/unix_sockets_posix.c \
src/core/lib/iomgr/unix_sockets_posix_noop.c \
src/core/lib/iomgr/wakeup_fd_cv.c \
src/core/lib/iomgr/wakeup_fd_eventfd.c \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -170,6 +170,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -239,6 +240,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/client_channel/lb_policy.c \
src/core/ext/client_channel/lb_policy_factory.c \
src/core/ext/client_channel/lb_policy_registry.c \
src/core/ext/client_channel/method_config.c \
src/core/ext/client_channel/parse_address.c \
src/core/ext/client_channel/resolver.c \
src/core/ext/client_channel/resolver_factory.c \

@ -15,3 +15,5 @@ number:
`include/grpc/impl/codegen/grpc_types.h` (commit `af00d8b`)
- remove `ServerBuilder::SetMaxMessageSize()` method from
`include/grpc++/server_builder.h` (commit `6980362`)
- remove `GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY` macro from
`include/grpc/impl/codegen/grpc_types.h` (commit `59c9f90`)

@ -0,0 +1,15 @@
There are times when we make changes that include a temporary shim for
backward-compatibility (e.g., a macro or some other function to preserve
the original API) to avoid having to bump the major version number in
the next release. However, when we do eventually want to release a
feature that does change the API in a non-backward-compatible way, we
will wind up bumping the major version number anyway, at which point we
can take the opportunity to clean up any pending backward-compatibility
shims.
This file lists all pending backward-compatibility changes that should
be cleaned up the next time we are going to bump the major version
number:
- remove `ClientContext::set_fail_fast()` method from
`include/grpc++/impl/codegen/client_context.h` (commit `9477724`)
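For illustration, a minimal sketch of the kind of shim this file tracks, using made-up identifiers (the real case above lives in `ClientContext`): the old entry point is kept as a thin forwarder to its replacement and is deleted at the next major-version bump.

```c
/* Hypothetical illustration only: these identifiers are made up and are not
 * part of the real gRPC API. The old setter is kept as a thin wrapper
 * around its renamed replacement until the next major-version bump. */
#include <stdbool.h>

void grpc_thing_set_wait_for_ready(bool wait_for_ready); /* new API */

/* Deprecated shim listed for cleanup; forwards to the new API. */
static inline void grpc_thing_set_fail_fast(bool fail_fast) {
  grpc_thing_set_wait_for_ready(!fail_fast);
}
```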

@ -51,6 +51,7 @@ some configuration as environment variables that can be set.
- glb - traces the grpclb load balancer
- queue_pluck
- queue_timeout
- server_channel - lightweight trace of significant server channel events
- secure_endpoint - traces bytes flowing through encrypted channels
- transport_security - traces metadata about secure channel establishment
- tcp - traces bytes in and out of a channel
@ -64,4 +65,3 @@ some configuration as environment variables that can be set.
- DEBUG - log all gRPC messages
- INFO - log INFO and ERROR message
- ERROR - log only errors
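As a usage sketch (not part of this doc), the tracers and verbosity above are read from the environment when the library initializes; the snippet below sets them programmatically before `grpc_init()`. The tracer names come from the list above; everything else is illustrative.

```c
/* Sketch: enable two tracers and raise verbosity before gRPC starts.
 * GRPC_TRACE and GRPC_VERBOSITY are the variables documented above. */
#include <stdlib.h>
#include <grpc/grpc.h>

int main(void) {
  setenv("GRPC_TRACE", "server_channel,secure_endpoint", 1 /* overwrite */);
  setenv("GRPC_VERBOSITY", "DEBUG", 1);
  grpc_init();
  /* ... create channels/servers; trace output goes to the gRPC log ... */
  grpc_shutdown();
  return 0;
}
```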

@ -0,0 +1,121 @@
# `epoll`-based pollset implementation in gRPC
Sree Kuchibhotla (sreek@) [May - 2016]
(Design input from Craig Tiller and David Klempner)
> Status: As of June 2016, this change is implemented and merged.
> * The bulk of the functionality is in: [ev_epoll_linux.c](https://github.com/grpc/grpc/blob/master/src/core/lib/iomgr/ev_epoll_linux.c)
> * Pull request: https://github.com/grpc/grpc/pull/6803
## 1. Introduction
This document describes the proposed changes to the `epoll`-based implementation of pollsets in gRPC. Section 2 gives an overview of the current implementation, Section 3 discusses the problems with the current implementation, and Section 4 describes the proposed changes.
## 2. Current `epoll`-based implementation in gRPC
![image](images/old_epoll_impl.png)
**Figure 1: Current implementation**
A gRPC client or a server can have more than one completion queue. Each completion queue creates a pollset.
The gRPC core library does not create any threads[^1] on its own and relies on the application using the gRPC core library to provide the threads. A thread starts to poll for events by calling the gRPC core surface APIs `grpc_completion_queue_next()` or `grpc_completion_queue_pluck()`. More than one thread can call `grpc_completion_queue_next()` on the same completion queue[^2].
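For reference, a minimal sketch (not part of this design doc) of such an application-supplied polling thread:

```c
/* Sketch: one application thread driving a completion queue.
 * grpc_completion_queue_next() blocks until an event arrives or the
 * deadline expires; GRPC_QUEUE_SHUTDOWN ends the loop. */
#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void poller_thread(grpc_completion_queue *cq) {
  for (;;) {
    grpc_event ev = grpc_completion_queue_next(
        cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) break;
    /* GRPC_OP_COMPLETE: ev.tag identifies the operation that finished. */
  }
}
```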
A file descriptor can be in more than one completion queue. There are examples in the next section that show how this can happen.
When an event of interest happens in a pollset, multiple threads are woken up and there are no guarantees on which thread actually ends up performing the work, i.e. executing the callbacks associated with that event. The thread that performs the work finally queues a completion event `grpc_cq_completion` on the appropriate completion queue and "kicks" (i.e. wakes up) the thread that is actually interested in that event (which can be itself, in which case there is no thread hop).
For example, in **Figure 1**, if `fd1` becomes readable, any one of the threads, i.e. *Thread 1* to *Thread K* or *Thread P*, might be woken up. Let's say *Thread P* was calling `grpc_completion_queue_pluck()` and was actually interested in the event on `fd1`, but *Thread 1* woke up. In this case, *Thread 1* executes the callbacks and finally kicks *Thread P* by signalling `event_fd_P`. *Thread P* wakes up, realizes that there is a new completion event for it and returns from `grpc_completion_queue_pluck()` to its caller.
## 3. Issues in the current architecture
### _Thundering Herds_
If multiple threads concurrently call `epoll_wait()`, we are guaranteed that only one thread is woken up if one of the `fds` in the set becomes readable/writable. However, in our current implementation, the threads do not directly call a blocking `epoll_wait()`[^3]. Instead, they call `poll()` on the set containing `[event_fd, epoll_fd]`[^4] **(see Figure 1)**.
Considering the fact that an `fd` can be in multiple `pollsets` and that each `pollset` might have multiple poller threads, it means that whenever an `fd` becomes readable/writable, all the threads in all the `pollsets` (in which that `fd` is present) are woken up.
The performance impact of this would be more conspicuous on the server side. Here are two examples of thundering herds on the server side.
Example 1: Listening fds on server
* A gRPC server can have multiple server completion queues (i.e. completion queues which are used to listen for incoming channels).
* A gRPC server can also listen on more than one TCP-port.
* A listening socket is created for each port the gRPC server would be listening on.
* Every listening socket's fd is added to all the server completion queues' pollsets. (Currently we do not do any sharding of the listening fds across these pollsets).
This means that for every incoming new channel, all the threads waiting on all the pollsets are woken up.
Example 2: New Incoming-channel fds on server
* Currently, every new incoming channel's `fd` (i.e. the socket `fd` that is returned by doing an `accept()` on the new incoming channel) is added to all the server completion queues' pollsets[^5].
* Clearly, this would also cause a thundering herd problem for every read on that fd.
There are other scenarios especially on the client side where an fd can end up being on multiple pollsets which would cause thundering herds on the clients.
## 4. Proposed changes to the current `epoll`-based polling implementation:
The main idea in this proposal is to group 'related' `fds` into a single epoll-based set. This would ensure that only one thread wakes up in case of an event on one of the `fds` in the epoll set.
To accomplish this, we introduce a new abstraction called `polling_island` which will have an epoll set underneath (See **Figure 2** below). A `polling_island` contains the following:
* `epoll_fd`: The file descriptor of the underlying epoll set
* `fd_set`: The set of `fds` in the polling island, i.e. in the epoll set (the polling island merging operation described later requires the list of fds in the polling island, and currently there is no API available to enumerate all the fds in an epoll set)
* `event_fd`: A level-triggered _event fd_ that is used to wake up all the threads waiting on this epoll set (note: this `event_fd` is added to the underlying epoll set during polling island creation; this is useful in the polling island merging operation described later)
* `merged_to`: The polling island into which this one was merged. See section 4.2 (case 2) for more details on this. Also note that if `merged_to` is set, all the other fields in this polling island are not used anymore.
In this new model, only one thread wakes up whenever an event of interest happens in an epoll set.
![drawing](images/new_epoll_impl.png)
**Figure 2: Proposed changes**
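A rough sketch of the data structure just described (an assumption for illustration; the actual definition lives in `ev_epoll_linux.c` and differs in detail):

```c
#include <stddef.h>

struct grpc_fd; /* opaque here */

/* Simplified polling_island holding the pieces of state listed above. */
typedef struct polling_island {
  int epoll_fd;                     /* fd of the underlying epoll set */
  struct grpc_fd **fds;             /* fds in this island (epoll cannot enumerate members) */
  size_t fd_count;
  int event_fd;                     /* level-triggered wakeup fd, also inside the epoll set */
  struct polling_island *merged_to; /* non-NULL once this island has been merged away */
} polling_island;
```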
### 4.1 Relation between `fd`, `pollset` and `polling_island`:
* An `fd` may belong to multiple `pollsets` but belongs to exactly one `polling_island`
* A `pollset` belongs to exactly one `polling_island`
* An `fd` and the `pollset(s)` it belongs to have the same `polling_island`
### 4.2 Algorithm to add an `fd` to a `pollset`
There are two cases to check here:
* **Case 1:** Both `fd` and `pollset` already belong to the same `polling_island`
* This is straightforward and nothing really needs to be done here
* **Case 2:** The `fd` and `pollset` point to different `polling_islands`: In this case we _merge_ both the polling islands (see the sketch after this list), i.e.:
* Add all the `fds` from the smaller `polling_island` to the larger `polling_island` and update the `merged_to` pointer on the smaller island to point to the larger island.
* Wake up all the threads waiting on the smaller `polling_island`'s `epoll_fd` (by signalling the `event_fd` on that island) and make them now wait on the larger `polling_island`'s `epoll_fd`
* Update `fd` and `pollset` to now point to the larger `polling_island`
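The sketch below (pseudocode with hypothetical names, not the actual implementation) spells out the two cases:

```c
#include <stddef.h>

typedef struct polling_island polling_island;
struct polling_island {
  size_t fd_count;
  polling_island *merged_to;
};
typedef struct { polling_island *pi; } pollset_t; /* simplified */
typedef struct { polling_island *pi; } fd_t;      /* simplified */

/* Follow the merged_to chain to the island currently in use. */
static polling_island *resolve(polling_island *pi) {
  while (pi->merged_to != NULL) pi = pi->merged_to;
  return pi;
}

static void pollset_add_fd(pollset_t *ps, fd_t *fd) {
  polling_island *a = resolve(ps->pi), *b = resolve(fd->pi);
  if (a == b) return; /* Case 1: already on the same island */

  /* Case 2: merge the smaller island into the larger one. The real code
   * would also re-register the smaller island's fds with the larger
   * island's epoll_fd and signal the smaller island's event_fd so its
   * pollers migrate over. */
  polling_island *small = (a->fd_count < b->fd_count) ? a : b;
  polling_island *large = (small == a) ? b : a;
  large->fd_count += small->fd_count;
  small->fd_count = 0;
  small->merged_to = large;
  ps->pi = large;
  fd->pi = large;
}
```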
### 4.3 Directed wakeups:
The new implementation, just like the current implementation, does not provide any guarantee that the thread that is woken up is the thread that is actually interested in the event. So the thread that woke up executes the callbacks and finally has to 'kick' the appropriate polling thread interested in the event.
In the current implementation, every polling thread also had an `event_fd` on which it was listening, and hence waking it up was as simple as signalling that `event_fd`. However, using an `event_fd` also meant that every thread had to use a `poll()` (on `event_fd` and `epoll_fd`) instead of doing an `epoll_wait()`, and this resulted in the thundering herd problems described above.
The proposal here is to use signals: kicking a thread would just be sending a signal to that thread. Unfortunately, there are only a few signals available on POSIX systems and most of them have pre-determined behavior, leaving only a few signals (`SIGUSR1`, `SIGUSR2` and `SIGRTx`, i.e. `SIGRTMIN` to `SIGRTMAX`) for custom use.
The calling application might have registered other signal handlers for these signals. We will provide a new API where the application can give a signal number to the gRPC library to use for this purpose:
```
void grpc_use_signal(int signal_num)
```
If the calling application does not provide a signal number, then the gRPC library will fall back to using a model similar to the current implementation (where every thread does a blocking `poll()` on its `wakeup_fd` and the `epoll_fd`). The function `psi_wait()` in Figure 2 implements this logic.
>> (**NOTE:** Alternatively, we can implement turnstile polling, i.e. have only one thread calling `epoll_wait()` on the epoll set at any time while all other threads call `poll()` on their `wakeup_fds`, in case we do not get a signal number from the application.)
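A short usage sketch of the proposed API, assuming the application picks an otherwise-unused real-time signal (the particular signal below is an arbitrary choice for illustration):

```c
#include <signal.h>
#include <grpc/grpc.h>

/* Proposed API from above; prototype repeated so this sketch is self-contained. */
void grpc_use_signal(int signal_num);

int main(void) {
  grpc_use_signal(SIGRTMIN + 6); /* hand gRPC a real-time signal for poller kicks */
  grpc_init();
  /* ... application work ... */
  grpc_shutdown();
  return 0;
}
```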
## Notes
[^1]: The only exception is in the case of name resolution
[^2]: However, a `grpc_completion_queue_next()` and `grpc_completion_queue_pluck()` must not be called in parallel on the same completion queue
[^3]: The threads first do a blocking `poll()` with `[wakeup_fd, epoll_fd]`. If the `poll()` returns due to an event of interest in the epoll set, they then call a non-blocking, i.e. zero-timeout, `epoll_wait()` on the `epoll_fd`
[^4]: `event_fd` is the linux platform specific implementation of `grpc_wakeup_fd`. A `wakeup_fd` is used to wake up polling threads typically when the event for which the polling thread is waiting is already completed by some other thread. It is also used to wake up the polling threads in case of shutdowns or to re-evaluate the poller's interest in the fds to poll (the last scenario is only in case of `poll`-based (not `epoll`-based) implementation of `pollsets`).
[^5]: See more details about the issue here https://github.com/grpc/grpc/issues/5470 and for a proposed fix here: https://github.com/grpc/grpc/pull/6149

doc/images/new_epoll_impl.png: binary image file added (52 KiB, not shown).

doc/images/old_epoll_impl.png: binary image file added (44 KiB, not shown).

@ -60,6 +60,35 @@ Client asserts:
*It may be possible to use UnaryCall instead of EmptyCall, but it is harder to
ensure that the proto serialized to zero bytes.*
### cacheable_unary
This test verifies that gRPC requests marked as cacheable use the GET verb instead
of POST, and that the server sets appropriate cache-control headers for the response
to be cached by a proxy. This test requires that the server is behind
a caching proxy. Use of the current timestamp in the request prevents accidental
cache matches left over from previous tests.
Server features:
* [CacheableUnaryCall][]
Procedure:
1. Client calls CacheableUnaryCall with `SimpleRequest` request with payload
set to current timestamp. Timestamp format is irrelevant, and resolution is
in nanoseconds.
Client adds an `x-user-ip` header with value `1.2.3.4` to the request.
This is done since some proxies such as GFE will not cache requests from
localhost.
Client marks the request as cacheable by setting the cacheable flag in the
request context. Longer term this should be driven by the method option
specified in the proto file itself.
2. Client calls CacheableUnaryCall with `SimpleRequest` request again
immediately with the same payload as the previous request. Cacheable flag is
also set for this request's context.
Client asserts:
* Both calls were successful
* The payload body of both responses is the same.
### large_unary
This test verifies unary calls succeed in sending messages, and touches on flow
@ -750,25 +779,38 @@ Client asserts:
### unimplemented_method
Status: Ready for implementation. Blocking beta.
This test verifies calling unimplemented RPC method returns the UNIMPLEMENTED status code.
This test verifies that calling an unimplemented RPC method returns the
UNIMPLEMENTED status code.
Server features:
N/A
Procedure:
* Client calls `grpc.testing.UnimplementedService/UnimplementedCall` with an
empty request (defined as `grpc.testing.Empty`):
* Client calls `grpc.testing.TestService/UnimplementedMethod` with an empty
request (defined as `grpc.testing.Empty`):
```
{
}
```
Client asserts:
* received status code is 12 (UNIMPLEMENTED)
### unimplemented_service
This test verifies that calling an unimplemented server returns the UNIMPLEMENTED
status code.
Server features:
N/A
Procedure:
* Client calls `grpc.testing.UnimplementedService/UnimplementedCall` with an
empty request (defined as `grpc.testing.Empty`)
Client asserts:
* received status code is 12 (UNIMPLEMENTED)
* received status message is empty or null/unset
### cancel_after_begin
@ -941,6 +983,18 @@ payload body of size `SimpleRequest.response_size` bytes and type as appropriate
for the `SimpleRequest.response_type`. If the server does not support the
`response_type`, then it should fail the RPC with `INVALID_ARGUMENT`.
### CacheableUnaryCall
Server gets the default SimpleRequest proto as the request. The content of the
request is ignored. It returns the SimpleResponse proto with the payload set
to the current timestamp. The timestamp is an integer representing current time
with nanosecond resolution. This integer is formatted as ASCII decimal in the
response. The exact format is not important as long as the response payload
differs for each request. In addition, the server adds cache-control headers to
the response such that the response can be cached by proxies in the response
path. The server should be behind a caching proxy for this test to pass.
Currently we set the max-age to 60 seconds.
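As a rough, non-normative sketch (assuming the string-based `grpc_metadata` layout of this release and a hypothetical handler structure), a C-core server could produce the two ingredients this call needs, the timestamp payload and the cache-control header, like so:

```c
#include <string.h>
#include <grpc/grpc.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>

/* Illustrative: build the cache-control metadata element mentioned above. */
static grpc_metadata make_cache_control_md(void) {
  grpc_metadata md;
  memset(&md, 0, sizeof(md));
  md.key = "cache-control";
  md.value = "max-age=60"; /* the 60-second max-age mentioned above */
  md.value_length = strlen(md.value);
  return md;
}

/* Illustrative: ASCII-decimal nanoseconds since the epoch; the exact format is
 * unimportant as long as it differs per request. Caller frees with gpr_free(). */
static char *make_timestamp_payload(void) {
  gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
  char *payload;
  gpr_asprintf(&payload, "%lld%09d", (long long)now.tv_sec, (int)now.tv_nsec);
  return payload;
}
```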
### CompressedResponse
[CompressedResponse]: #compressedresponse

@ -2,6 +2,7 @@
"name": "grpc/grpc-demo",
"description": "gRPC example for PHP",
"require": {
"ext-grpc": "*",
"grpc/grpc": "v1.0.0"
}
}

@ -1,42 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Runs protoc with the gRPC plugin to generate messages and gRPC stubs."""
from grpc.tools import protoc
protoc.main(
(
'',
'-I../../protos',
'--python_out=.',
'--grpc_python_out=.',
'../../protos/helloworld.proto',
)
)

@ -304,10 +304,10 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/timer_heap.h',
'src/core/lib/iomgr/udp_server.h',
'src/core/lib/iomgr/unix_sockets_posix.h',
'src/core/lib/iomgr/wakeup_fd_cv.h',
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/iomgr/workqueue.h',
'src/core/lib/iomgr/workqueue_posix.h',
'src/core/lib/iomgr/workqueue_windows.h',
'src/core/lib/json/json.h',
'src/core/lib/json/json_common.h',
@ -326,6 +326,7 @@ Pod::Spec.new do |s|
'src/core/lib/surface/server.h',
'src/core/lib/transport/byte_stream.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/mdstr_hash_table.h',
'src/core/lib/transport/metadata.h',
'src/core/lib/transport/metadata_batch.h',
'src/core/lib/transport/static_metadata.h',
@ -385,6 +386,7 @@ Pod::Spec.new do |s|
'src/core/ext/client_channel/lb_policy.h',
'src/core/ext/client_channel/lb_policy_factory.h',
'src/core/ext/client_channel/lb_policy_registry.h',
'src/core/ext/client_channel/method_config.h',
'src/core/ext/client_channel/parse_address.h',
'src/core/ext/client_channel/resolver.h',
'src/core/ext/client_channel/resolver_factory.h',
@ -470,11 +472,11 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
'src/core/lib/iomgr/unix_sockets_posix_noop.c',
'src/core/lib/iomgr/wakeup_fd_cv.c',
'src/core/lib/iomgr/wakeup_fd_eventfd.c',
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_posix.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
@ -500,6 +502,7 @@ Pod::Spec.new do |s|
'src/core/lib/surface/version.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
'src/core/lib/transport/mdstr_hash_table.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/static_metadata.c',
@ -569,6 +572,7 @@ Pod::Spec.new do |s|
'src/core/ext/client_channel/lb_policy.c',
'src/core/ext/client_channel/lb_policy_factory.c',
'src/core/ext/client_channel/lb_policy_registry.c',
'src/core/ext/client_channel/method_config.c',
'src/core/ext/client_channel/parse_address.c',
'src/core/ext/client_channel/resolver.c',
'src/core/ext/client_channel/resolver_factory.c',
@ -677,10 +681,10 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/timer_heap.h',
'src/core/lib/iomgr/udp_server.h',
'src/core/lib/iomgr/unix_sockets_posix.h',
'src/core/lib/iomgr/wakeup_fd_cv.h',
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/iomgr/workqueue.h',
'src/core/lib/iomgr/workqueue_posix.h',
'src/core/lib/iomgr/workqueue_windows.h',
'src/core/lib/json/json.h',
'src/core/lib/json/json_common.h',
@ -699,6 +703,7 @@ Pod::Spec.new do |s|
'src/core/lib/surface/server.h',
'src/core/lib/transport/byte_stream.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/mdstr_hash_table.h',
'src/core/lib/transport/metadata.h',
'src/core/lib/transport/metadata_batch.h',
'src/core/lib/transport/static_metadata.h',
@ -758,6 +763,7 @@ Pod::Spec.new do |s|
'src/core/ext/client_channel/lb_policy.h',
'src/core/ext/client_channel/lb_policy_factory.h',
'src/core/ext/client_channel/lb_policy_registry.h',
'src/core/ext/client_channel/method_config.h',
'src/core/ext/client_channel/parse_address.h',
'src/core/ext/client_channel/resolver.h',
'src/core/ext/client_channel/resolver_factory.h',

@ -224,10 +224,10 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/timer_heap.h )
s.files += %w( src/core/lib/iomgr/udp_server.h )
s.files += %w( src/core/lib/iomgr/unix_sockets_posix.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_cv.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_posix.h )
s.files += %w( src/core/lib/iomgr/workqueue.h )
s.files += %w( src/core/lib/iomgr/workqueue_posix.h )
s.files += %w( src/core/lib/iomgr/workqueue_windows.h )
s.files += %w( src/core/lib/json/json.h )
s.files += %w( src/core/lib/json/json_common.h )
@ -246,6 +246,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/surface/server.h )
s.files += %w( src/core/lib/transport/byte_stream.h )
s.files += %w( src/core/lib/transport/connectivity_state.h )
s.files += %w( src/core/lib/transport/mdstr_hash_table.h )
s.files += %w( src/core/lib/transport/metadata.h )
s.files += %w( src/core/lib/transport/metadata_batch.h )
s.files += %w( src/core/lib/transport/static_metadata.h )
@ -305,6 +306,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/client_channel/lb_policy.h )
s.files += %w( src/core/ext/client_channel/lb_policy_factory.h )
s.files += %w( src/core/ext/client_channel/lb_policy_registry.h )
s.files += %w( src/core/ext/client_channel/method_config.h )
s.files += %w( src/core/ext/client_channel/parse_address.h )
s.files += %w( src/core/ext/client_channel/resolver.h )
s.files += %w( src/core/ext/client_channel/resolver_factory.h )
@ -390,11 +392,11 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/udp_server.c )
s.files += %w( src/core/lib/iomgr/unix_sockets_posix.c )
s.files += %w( src/core/lib/iomgr/unix_sockets_posix_noop.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_cv.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_eventfd.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_nospecial.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_posix.c )
s.files += %w( src/core/lib/iomgr/workqueue_posix.c )
s.files += %w( src/core/lib/iomgr/workqueue_windows.c )
s.files += %w( src/core/lib/json/json.c )
s.files += %w( src/core/lib/json/json_reader.c )
@ -420,6 +422,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/surface/version.c )
s.files += %w( src/core/lib/transport/byte_stream.c )
s.files += %w( src/core/lib/transport/connectivity_state.c )
s.files += %w( src/core/lib/transport/mdstr_hash_table.c )
s.files += %w( src/core/lib/transport/metadata.c )
s.files += %w( src/core/lib/transport/metadata_batch.c )
s.files += %w( src/core/lib/transport/static_metadata.c )
@ -489,6 +492,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/client_channel/lb_policy.c )
s.files += %w( src/core/ext/client_channel/lb_policy_factory.c )
s.files += %w( src/core/ext/client_channel/lb_policy_registry.c )
s.files += %w( src/core/ext/client_channel/method_config.c )
s.files += %w( src/core/ext/client_channel/parse_address.c )
s.files += %w( src/core/ext/client_channel/resolver.c )
s.files += %w( src/core/ext/client_channel/resolver_factory.c )

@ -226,8 +226,14 @@ class ClientContext {
/// EXPERIMENTAL: Set this request to be cacheable
void set_cacheable(bool cacheable) { cacheable_ = cacheable; }
/// EXPERIMENTAL: Trigger fail-fast or not on this request
void set_fail_fast(bool fail_fast) { fail_fast_ = fail_fast; }
/// EXPERIMENTAL: Trigger wait-for-ready or not on this request
void set_wait_for_ready(bool wait_for_ready) {
wait_for_ready_ = wait_for_ready;
wait_for_ready_explicitly_set_ = true;
}
/// DEPRECATED: Use set_wait_for_ready() instead.
void set_fail_fast(bool fail_fast) { set_wait_for_ready(!fail_fast); }
#ifndef GRPC_CXX0X_NO_CHRONO
/// Return the deadline for the client call.
@ -347,14 +353,18 @@ class ClientContext {
uint32_t initial_metadata_flags() const {
return (idempotent_ ? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST : 0) |
(fail_fast_ ? 0 : GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY) |
(cacheable_ ? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST : 0);
(wait_for_ready_ ? GRPC_INITIAL_METADATA_WAIT_FOR_READY : 0) |
(cacheable_ ? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST : 0) |
(wait_for_ready_explicitly_set_
? GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
: 0);
}
grpc::string authority() { return authority_; }
bool initial_metadata_received_;
bool fail_fast_;
bool wait_for_ready_;
bool wait_for_ready_explicitly_set_;
bool idempotent_;
bool cacheable_;
std::shared_ptr<Channel> channel_;

@ -86,6 +86,7 @@ class ServerInterface;
namespace testing {
class InteropServerContextInspector;
class ServerContextTestSpouse;
} // namespace testing
// Interface of server side rpc context.
@ -173,6 +174,7 @@ class ServerContext {
private:
friend class ::grpc::testing::InteropServerContextInspector;
friend class ::grpc::testing::ServerContextTestSpouse;
friend class ::grpc::ServerInterface;
friend class ::grpc::Server;
template <class W, class R>

@ -0,0 +1,67 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_TEST_SERVER_CONTEXT_TEST_SPOUSE_H
#define GRPCXX_TEST_SERVER_CONTEXT_TEST_SPOUSE_H
#include <map>
#include <grpc++/server_context.h>
namespace grpc {
namespace testing {
// A test-only class to access private members and methods of ServerContext.
class ServerContextTestSpouse {
public:
explicit ServerContextTestSpouse(ServerContext* ctx) : ctx_(ctx) {}
// Inject client metadata to the ServerContext for the test. The test spouse
// must be alive when ServerContext::client_metadata is called.
void AddClientMetadata(const grpc::string& key, const grpc::string& value);
std::multimap<grpc::string, grpc::string> GetInitialMetadata() const {
return ctx_->initial_metadata_;
}
std::multimap<grpc::string, grpc::string> GetTrailingMetadata() const {
return ctx_->trailing_metadata_;
}
private:
ServerContext* ctx_; // not owned
std::multimap<grpc::string, grpc::string> client_metadata_storage_;
};
} // namespace testing
} // namespace grpc
#endif // GRPCXX_TEST_SERVER_CONTEXT_TEST_SPOUSE_H

@ -201,6 +201,9 @@ typedef struct {
#define GRPC_ARG_MAX_METADATA_SIZE "grpc.max_metadata_size"
/** If non-zero, allow the use of SO_REUSEPORT if it's available (default 1) */
#define GRPC_ARG_ALLOW_REUSEPORT "grpc.so_reuseport"
/** Service config data, to be passed to subchannels.
Not intended for external use. */
#define GRPC_ARG_SERVICE_CONFIG "grpc.service_config"
/** \} */
/** Result of a grpc call. If the caller satisfies the prerequisites of a
@ -257,15 +260,22 @@ typedef enum grpc_call_error {
/** Signal that the call is idempotent */
#define GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST (0x00000010u)
/** Signal that the call should not return UNAVAILABLE before it has started */
#define GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY (0x00000020u)
#define GRPC_INITIAL_METADATA_WAIT_FOR_READY (0x00000020u)
/** DEPRECATED: for backward compatibility */
#define GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY \
GRPC_INITIAL_METADATA_WAIT_FOR_READY
/** Signal that the call is cacheable. GRPC is free to use GET verb */
#define GRPC_INITIAL_METADATA_CACHEABLE_REQUEST (0x00000040u)
/** Signal that GRPC_INITIAL_METADATA_WAIT_FOR_READY was explicitly set
by the calling application. */
#define GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET (0x00000080u)
/** Mask of all valid flags */
#define GRPC_INITIAL_METADATA_USED_MASK \
(GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST | \
GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY | \
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST)
#define GRPC_INITIAL_METADATA_USED_MASK \
(GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST | \
GRPC_INITIAL_METADATA_WAIT_FOR_READY | \
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST | \
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET)
/** A single metadata element */
typedef struct grpc_metadata {
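To make the flag relationships above concrete, here is a small illustrative helper (not part of the public API) that composes the initial-metadata flags the way a wrapping layer might when the application explicitly opts into wait-for-ready and marks the call cacheable:

```c
#include <stdbool.h>
#include <stdint.h>
#include <grpc/grpc.h>

/* Illustrative only: build initial-metadata flags for a call where the
 * application explicitly chose a wait-for-ready value and may also have
 * marked the request cacheable. */
static uint32_t make_initial_metadata_flags(bool wait_for_ready,
                                            bool cacheable) {
  uint32_t flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
  if (wait_for_ready) flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
  if (cacheable) flags |= GRPC_INITIAL_METADATA_CACHEABLE_REQUEST;
  /* Anything outside the documented mask would be rejected by the core. */
  return flags & GRPC_INITIAL_METADATA_USED_MASK;
}
```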

@ -48,7 +48,11 @@ typedef struct gpr_allocation_functions {
void (*free_fn)(void *ptr);
} gpr_allocation_functions;
/* malloc, never returns NULL */
/* malloc.
* If size==0, always returns NULL. Otherwise this function never returns NULL.
* The pointer returned is suitably aligned for any kind of variable it could
* contain.
*/
GPRAPI void *gpr_malloc(size_t size);
/* free */
GPRAPI void gpr_free(void *ptr);
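A minimal program exercising the contract documented above (the zero-size behavior is exactly as stated in the new comment):

```c
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

int main(void) {
  void *p = gpr_malloc(64); /* never NULL for a non-zero size */
  GPR_ASSERT(p != NULL);
  gpr_free(p);
  GPR_ASSERT(gpr_malloc(0) == NULL); /* size == 0 always yields NULL */
  return 0;
}
```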

@ -231,10 +231,10 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_heap.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/udp_server.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/unix_sockets_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_cv.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_windows.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_common.h" role="src" />
@ -253,6 +253,7 @@
<file baseinstalldir="/" name="src/core/lib/surface/server.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/byte_stream.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/mdstr_hash_table.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata_batch.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/static_metadata.h" role="src" />
@ -312,6 +313,7 @@
<file baseinstalldir="/" name="src/core/ext/client_channel/lb_policy.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/lb_policy_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/lb_policy_registry.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/method_config.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/parse_address.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/resolver.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/resolver_factory.h" role="src" />
@ -397,11 +399,11 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/udp_server.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/unix_sockets_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/unix_sockets_posix_noop.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_cv.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_eventfd.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_nospecial.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_reader.c" role="src" />
@ -427,6 +429,7 @@
<file baseinstalldir="/" name="src/core/lib/surface/version.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/byte_stream.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/mdstr_hash_table.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata_batch.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/static_metadata.c" role="src" />
@ -496,6 +499,7 @@
<file baseinstalldir="/" name="src/core/ext/client_channel/lb_policy.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/lb_policy_factory.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/lb_policy_registry.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/method_config.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/parse_address.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/resolver.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_channel/resolver_factory.c" role="src" />

@ -0,0 +1,4 @@
# Overview
This directory contains source code for the gRPC protocol buffer compiler (*protoc*) plugins. Along with `protoc`,
these plugins are used to generate gRPC client and server stubs from `.proto` files.

@ -494,6 +494,9 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
}
// override NewInstance method
out->Print(
"/// <summary>Creates a new instance of client from given "
"<c>ClientBaseConfiguration</c>.</summary>\n");
out->Print(
"protected override $name$ NewInstance(ClientBaseConfiguration "
"configuration)\n",

@ -1,8 +1,4 @@
# Overview
This directory contains source code for shared C library. Libraries in other languages in this repository (C++, Ruby,
This directory contains source code for the C library (a.k.a. the *gRPC C core*) that provides all of gRPC's core functionality through a low-level API. Libraries in other languages in this repository (C++, Ruby,
Python, PHP, NodeJS, Objective-C) are layered on top of this library.
# Status
Beta

@ -133,7 +133,7 @@ static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
d->start_ts = args->start_time;
return GRPC_ERROR_NONE;
}
@ -152,7 +152,7 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
d->start_ts = args->start_time;
/* TODO(hongyu): call census_tracing_start_op here. */
grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
return GRPC_ERROR_NONE;

@ -43,6 +43,7 @@
#include <grpc/support/useful.h>
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/method_config.h"
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
@ -53,6 +54,9 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/static_metadata.h"
/* Client channel implementation */
@ -68,12 +72,13 @@ typedef struct client_channel_channel_data {
/** client channel factory */
grpc_client_channel_factory *client_channel_factory;
/** mutex protecting client configuration, including all
variables below in this data structure */
/** mutex protecting all variables below in this data structure */
gpr_mu mu;
/** currently active load balancer - guarded by mu */
/** currently active load balancer */
grpc_lb_policy *lb_policy;
/** incoming resolver result - set by resolver.next(), guarded by mu */
/** method config table */
grpc_method_config_table *method_config_table;
/** incoming resolver result - set by resolver.next() */
grpc_resolver_result *resolver_result;
/** a list of closures that are all waiting for config to come in */
grpc_closure_list waiting_for_config_closures;
@ -111,10 +116,10 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
if ((state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
state == GRPC_CHANNEL_SHUTDOWN) &&
chand->lb_policy != NULL) {
/* cancel fail-fast picks */
/* cancel picks with wait_for_ready=false */
grpc_lb_policy_cancel_picks(
exec_ctx, chand->lb_policy,
/* mask= */ GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY,
/* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
/* check= */ 0, GRPC_ERROR_REF(error));
}
grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error,
@ -172,6 +177,7 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
channel_data *chand = arg;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
grpc_method_config_table *method_config_table = NULL;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
bool exit_idle = false;
grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
@ -220,6 +226,13 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
state =
grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
}
const grpc_arg *channel_arg = grpc_channel_args_find(
lb_policy_args.additional_args, GRPC_ARG_SERVICE_CONFIG);
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
method_config_table = grpc_method_config_table_ref(
(grpc_method_config_table *)channel_arg->value.pointer.p);
}
grpc_resolver_result_unref(exec_ctx, chand->resolver_result);
chand->resolver_result = NULL;
}
@ -232,6 +245,10 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&chand->mu);
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
if (chand->method_config_table != NULL) {
grpc_method_config_table_unref(chand->method_config_table);
}
chand->method_config_table = method_config_table;
if (lb_policy != NULL) {
grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
NULL);
@ -392,6 +409,9 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
if (chand->method_config_table != NULL) {
grpc_method_config_table_unref(chand->method_config_table);
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(chand->interested_parties);
gpr_mu_destroy(&chand->mu);
@ -424,7 +444,16 @@ typedef struct client_channel_call_data {
// stack and each has its own mutex. If/when we have time, find a way
// to avoid this without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
grpc_mdstr *path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
enum {
WAIT_FOR_READY_UNSET,
WAIT_FOR_READY_FALSE,
WAIT_FOR_READY_TRUE
} wait_for_ready_from_service_config;
grpc_closure read_service_config;
grpc_error *cancel_error;
@ -513,10 +542,14 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
call_data *calld = arg;
grpc_call_element *elem = arg;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
gpr_mu_lock(&calld->mu);
GPR_ASSERT(calld->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (calld->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
@ -528,10 +561,11 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_CREATE_REFERENCING(
"Cancelled before creating subchannel", &error, 1));
} else {
/* Create call on subchannel. */
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent, calld->deadline,
&subchannel_call);
exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
calld->deadline, &subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
@ -564,6 +598,9 @@ typedef struct {
grpc_closure closure;
} continue_picking_args;
/** Return true if subchannel is available immediately (in which case on_ready
should not be called), or false otherwise (in which case on_ready should be
called when the subchannel is available). */
static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
@ -577,11 +614,15 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
} else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags,
cpa->connected_subchannel, cpa->on_ready,
GRPC_ERROR_NONE)) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
} else {
call_data *calld = cpa->elem->call_data;
gpr_mu_lock(&calld->mu);
if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags, cpa->connected_subchannel,
cpa->on_ready, GRPC_ERROR_NONE)) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
}
gpr_mu_unlock(&calld->mu);
}
gpr_free(cpa);
}
@ -624,18 +665,33 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GPR_ASSERT(error == GRPC_ERROR_NONE);
if (chand->lb_policy != NULL) {
grpc_lb_policy *lb_policy = chand->lb_policy;
int r;
GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
gpr_mu_unlock(&chand->mu);
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
const bool wait_for_ready_set_from_api =
initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
const bool wait_for_ready_set_from_service_config =
calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET;
if (!wait_for_ready_set_from_api &&
wait_for_ready_set_from_service_config) {
if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
// TODO(dgq): make this deadline configurable somehow.
const grpc_lb_policy_pick_args inputs = {
calld->pollent, initial_metadata, initial_metadata_flags,
&calld->lb_token_mdelem, gpr_inf_future(GPR_CLOCK_MONOTONIC)};
r = grpc_lb_policy_pick(exec_ctx, lb_policy, &inputs, connected_subchannel,
NULL, on_ready);
initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
gpr_inf_future(GPR_CLOCK_MONOTONIC)};
const bool result = grpc_lb_policy_pick(
exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
GPR_TIMER_END("pick_subchannel", 0);
return r;
return result;
}
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = true;
@ -672,6 +728,7 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
/* try to (atomically) get the call */
@ -739,14 +796,20 @@ retry:
calld->connected_subchannel == NULL &&
op->send_initial_metadata != NULL) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
grpc_closure_init(&calld->next_step, subchannel_ready, calld);
grpc_closure_init(&calld->next_step, subchannel_ready, elem);
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
/* If a subchannel is not available immediately, the polling entity from
call_data should be provided to channel_data's interested_parties, so
that IO of the lb_policy and resolver could be done under it. */
if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
op->send_initial_metadata_flags,
&calld->connected_subchannel, &calld->next_step,
GRPC_ERROR_NONE)) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
} else {
grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
}
}
/* if we've got a subchannel, then let's ask it to create a call */
@ -754,8 +817,8 @@ retry:
calld->connected_subchannel != NULL) {
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent, calld->deadline,
&subchannel_call);
exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
calld->deadline, &subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
@ -772,13 +835,69 @@ retry:
GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
// Gets data from the service config. Invoked when the resolver returns
// its initial result.
static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
// If this is an error, there's no point in looking at the service config.
if (error == GRPC_ERROR_NONE) {
// Get the method config table from channel data.
gpr_mu_lock(&chand->mu);
grpc_method_config_table *method_config_table = NULL;
if (chand->method_config_table != NULL) {
method_config_table =
grpc_method_config_table_ref(chand->method_config_table);
}
gpr_mu_unlock(&chand->mu);
// If the method config table was present, use it.
if (method_config_table != NULL) {
const grpc_method_config *method_config =
grpc_method_config_table_get_method_config(method_config_table,
calld->path);
if (method_config != NULL) {
const gpr_timespec *per_method_timeout =
grpc_method_config_get_timeout(method_config);
const bool *wait_for_ready =
grpc_method_config_get_wait_for_ready(method_config);
if (per_method_timeout != NULL || wait_for_ready != NULL) {
gpr_mu_lock(&calld->mu);
if (per_method_timeout != NULL) {
gpr_timespec per_method_deadline =
gpr_time_add(calld->call_start_time, *per_method_timeout);
if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
calld->deadline = per_method_deadline;
// Reset deadline timer.
grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
}
}
if (wait_for_ready != NULL) {
calld->wait_for_ready_from_service_config =
*wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE;
}
gpr_mu_unlock(&calld->mu);
}
}
grpc_method_config_table_unref(method_config_table);
}
}
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
}
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_deadline_state_init(exec_ctx, elem, args);
calld->deadline = args->deadline;
// Initialize data members.
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
calld->path = GRPC_MDSTR_REF(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
calld->cancel_error = GRPC_ERROR_NONE;
gpr_atm_rel_store(&calld->subchannel_call, 0);
gpr_mu_init(&calld->mu);
@ -789,6 +908,55 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
calld->owning_call = args->call_stack;
calld->pollent = NULL;
// If the resolver has already returned results, then we can access
// the service config parameters immediately. Otherwise, we need to
// defer that work until the resolver returns an initial result.
// TODO(roth): This code is almost but not quite identical to the code
// in read_service_config() above. It would be nice to find a way to
// combine them, to avoid having to maintain it twice.
gpr_mu_lock(&chand->mu);
if (chand->lb_policy != NULL) {
// We already have a resolver result, so check for service config.
if (chand->method_config_table != NULL) {
grpc_method_config_table *method_config_table =
grpc_method_config_table_ref(chand->method_config_table);
gpr_mu_unlock(&chand->mu);
grpc_method_config *method_config =
grpc_method_config_table_get_method_config(method_config_table,
args->path);
if (method_config != NULL) {
const gpr_timespec *per_method_timeout =
grpc_method_config_get_timeout(method_config);
if (per_method_timeout != NULL) {
gpr_timespec per_method_deadline =
gpr_time_add(calld->call_start_time, *per_method_timeout);
calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
}
const bool *wait_for_ready =
grpc_method_config_get_wait_for_ready(method_config);
if (wait_for_ready != NULL) {
calld->wait_for_ready_from_service_config =
*wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE;
}
}
grpc_method_config_table_unref(method_config_table);
} else {
gpr_mu_unlock(&chand->mu);
}
} else {
// We don't yet have a resolver result, so register a callback to
// get the service config data once the resolver returns.
// Take a reference to the call stack to be owned by the callback.
GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
grpc_closure_init(&calld->read_service_config, read_service_config, elem);
grpc_closure_list_append(&chand->waiting_for_config_closures,
&calld->read_service_config, GRPC_ERROR_NONE);
gpr_mu_unlock(&chand->mu);
}
// Start the deadline timer with the current deadline value. If we
// do not yet have service config data, then the timer may be reset
// later.
grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
return GRPC_ERROR_NONE;
}
@ -799,6 +967,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
void *and_free_memory) {
call_data *calld = elem->call_data;
grpc_deadline_state_destroy(exec_ctx, elem);
GRPC_MDSTR_UNREF(calld->path);
GRPC_ERROR_UNREF(calld->cancel_error);
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {

@ -31,7 +31,7 @@
*
*/
#include "src/core/ext/client_config/http_connect_handshaker.h"
#include "src/core/ext/client_channel/http_connect_handshaker.h"
#include <string.h>
@ -40,7 +40,7 @@
#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/client_config/uri_parser.h"
#include "src/core/ext/client_channel/uri_parser.h"
#include "src/core/lib/http/format_request.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/timer.h"

@ -55,8 +55,6 @@ struct grpc_lb_policy {
/** Extra arguments for an LB pick */
typedef struct grpc_lb_policy_pick_args {
/** Parties interested in the pick's progress */
grpc_polling_entity *pollent;
/** Initial metadata associated with the picking call. */
grpc_metadata_batch *initial_metadata;
/** Bitmask used for selective cancelling. See \a
@ -142,15 +140,19 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable);
/** Find an appropriate target for this call, based on \a pick_args.
Picking can be synchronous or asynchronous. In the synchronous case, when a
pick is readily available, it'll be returned in \a target and a non-zero
value will be returned.
In the asynchronous case, zero is returned and \a on_complete will be called
once \a target and \a user_data have been set. Any IO should be done under
\a pick_args->pollent. The opaque \a user_data output argument corresponds
to information that may need be propagated from the LB policy. It may be
NULL. Errors are signaled by receiving a NULL \a *target. */
/** Finds an appropriate subchannel for a call, based on \a pick_args.
\a target will be set to the selected subchannel, or NULL on failure.
Upon success, \a user_data will be set to whatever opaque information
may need to be propagated from the LB policy, or NULL if not needed.
If the pick succeeds and a result is known immediately, a non-zero
value will be returned. Otherwise, \a on_complete will be invoked
once the pick is complete with its error argument set to indicate
success or failure.
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
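A short caller-side sketch of the synchronous/asynchronous contract described above (the wrapper and its decision to schedule `on_complete` on the synchronous path are illustrative, not part of the API; the same pattern appears in the client channel's `continue_picking`):

```c
#include <stdbool.h>

#include "src/core/ext/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/exec_ctx.h"

/* Illustrative wrapper: always complete via on_complete, whether the pick
 * finished synchronously or asynchronously. */
static void start_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                       const grpc_lb_policy_pick_args *pick_args,
                       grpc_connected_subchannel **target,
                       grpc_closure *on_complete) {
  if (grpc_lb_policy_pick(exec_ctx, policy, pick_args, target,
                          NULL /* user_data */, on_complete)) {
    /* Non-zero return: *target is already set and the policy will NOT run
     * on_complete, so schedule it ourselves. */
    grpc_exec_ctx_sched(exec_ctx, on_complete, GRPC_ERROR_NONE, NULL);
  }
  /* Zero return: on_complete fires later with the pick's error status. */
}
```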

@ -0,0 +1,296 @@
//
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "src/core/ext/client_channel/method_config.h"
#include <string.h>
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/transport/mdstr_hash_table.h"
#include "src/core/lib/transport/metadata.h"
//
// grpc_method_config
//
// bool vtable
static void* bool_copy(void* valuep) {
bool value = *(bool*)valuep;
bool* new_value = gpr_malloc(sizeof(bool));
*new_value = value;
return new_value;
}
static int bool_cmp(void* v1, void* v2) {
bool b1 = *(bool*)v1;
bool b2 = *(bool*)v2;
if (!b1 && b2) return -1;
if (b1 && !b2) return 1;
return 0;
}
static grpc_mdstr_hash_table_vtable bool_vtable = {gpr_free, bool_copy,
bool_cmp};
// timespec vtable
static void* timespec_copy(void* valuep) {
gpr_timespec value = *(gpr_timespec*)valuep;
gpr_timespec* new_value = gpr_malloc(sizeof(gpr_timespec));
*new_value = value;
return new_value;
}
static int timespec_cmp(void* v1, void* v2) {
return gpr_time_cmp(*(gpr_timespec*)v1, *(gpr_timespec*)v2);
}
static grpc_mdstr_hash_table_vtable timespec_vtable = {gpr_free, timespec_copy,
timespec_cmp};
// int32 vtable
static void* int32_copy(void* valuep) {
int32_t value = *(int32_t*)valuep;
int32_t* new_value = gpr_malloc(sizeof(int32_t));
*new_value = value;
return new_value;
}
static int int32_cmp(void* v1, void* v2) {
int32_t i1 = *(int32_t*)v1;
int32_t i2 = *(int32_t*)v2;
if (i1 < i2) return -1;
if (i1 > i2) return 1;
return 0;
}
static grpc_mdstr_hash_table_vtable int32_vtable = {gpr_free, int32_copy,
int32_cmp};
// Hash table keys.
#define GRPC_METHOD_CONFIG_WAIT_FOR_READY "grpc.wait_for_ready" // bool
#define GRPC_METHOD_CONFIG_TIMEOUT "grpc.timeout" // gpr_timespec
#define GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES \
"grpc.max_request_message_bytes" // int32
#define GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES \
"grpc.max_response_message_bytes" // int32
struct grpc_method_config {
grpc_mdstr_hash_table* table;
grpc_mdstr* wait_for_ready_key;
grpc_mdstr* timeout_key;
grpc_mdstr* max_request_message_bytes_key;
grpc_mdstr* max_response_message_bytes_key;
};
grpc_method_config* grpc_method_config_create(
bool* wait_for_ready, gpr_timespec* timeout,
int32_t* max_request_message_bytes, int32_t* max_response_message_bytes) {
grpc_method_config* method_config = gpr_malloc(sizeof(grpc_method_config));
memset(method_config, 0, sizeof(grpc_method_config));
method_config->wait_for_ready_key =
grpc_mdstr_from_string(GRPC_METHOD_CONFIG_WAIT_FOR_READY);
method_config->timeout_key =
grpc_mdstr_from_string(GRPC_METHOD_CONFIG_TIMEOUT);
method_config->max_request_message_bytes_key =
grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES);
method_config->max_response_message_bytes_key =
grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES);
grpc_mdstr_hash_table_entry entries[4];
size_t num_entries = 0;
if (wait_for_ready != NULL) {
entries[num_entries].key = method_config->wait_for_ready_key;
entries[num_entries].value = wait_for_ready;
entries[num_entries].vtable = &bool_vtable;
++num_entries;
}
if (timeout != NULL) {
entries[num_entries].key = method_config->timeout_key;
entries[num_entries].value = timeout;
entries[num_entries].vtable = &timespec_vtable;
++num_entries;
}
if (max_request_message_bytes != NULL) {
entries[num_entries].key = method_config->max_request_message_bytes_key;
entries[num_entries].value = max_request_message_bytes;
entries[num_entries].vtable = &int32_vtable;
++num_entries;
}
if (max_response_message_bytes != NULL) {
entries[num_entries].key = method_config->max_response_message_bytes_key;
entries[num_entries].value = max_response_message_bytes;
entries[num_entries].vtable = &int32_vtable;
++num_entries;
}
method_config->table = grpc_mdstr_hash_table_create(num_entries, entries);
return method_config;
}
grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config) {
grpc_mdstr_hash_table_ref(method_config->table);
return method_config;
}
void grpc_method_config_unref(grpc_method_config* method_config) {
if (grpc_mdstr_hash_table_unref(method_config->table)) {
GRPC_MDSTR_UNREF(method_config->wait_for_ready_key);
GRPC_MDSTR_UNREF(method_config->timeout_key);
GRPC_MDSTR_UNREF(method_config->max_request_message_bytes_key);
GRPC_MDSTR_UNREF(method_config->max_response_message_bytes_key);
gpr_free(method_config);
}
}
int grpc_method_config_cmp(const grpc_method_config* method_config1,
const grpc_method_config* method_config2) {
return grpc_mdstr_hash_table_cmp(method_config1->table,
method_config2->table);
}
const bool* grpc_method_config_get_wait_for_ready(
const grpc_method_config* method_config) {
return grpc_mdstr_hash_table_get(method_config->table,
method_config->wait_for_ready_key);
}
const gpr_timespec* grpc_method_config_get_timeout(
const grpc_method_config* method_config) {
return grpc_mdstr_hash_table_get(method_config->table,
method_config->timeout_key);
}
const int32_t* grpc_method_config_get_max_request_message_bytes(
const grpc_method_config* method_config) {
return grpc_mdstr_hash_table_get(
method_config->table, method_config->max_request_message_bytes_key);
}
const int32_t* grpc_method_config_get_max_response_message_bytes(
const grpc_method_config* method_config) {
return grpc_mdstr_hash_table_get(
method_config->table, method_config->max_response_message_bytes_key);
}
//
// grpc_method_config_table
//
static void method_config_unref(void* valuep) {
grpc_method_config_unref(valuep);
}
static void* method_config_ref(void* valuep) {
return grpc_method_config_ref(valuep);
}
static int method_config_cmp(void* valuep1, void* valuep2) {
return grpc_method_config_cmp(valuep1, valuep2);
}
static const grpc_mdstr_hash_table_vtable method_config_table_vtable = {
method_config_unref, method_config_ref, method_config_cmp};
grpc_method_config_table* grpc_method_config_table_create(
size_t num_entries, grpc_method_config_table_entry* entries) {
grpc_mdstr_hash_table_entry* hash_table_entries =
gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) * num_entries);
for (size_t i = 0; i < num_entries; ++i) {
hash_table_entries[i].key = entries[i].method_name;
hash_table_entries[i].value = entries[i].method_config;
hash_table_entries[i].vtable = &method_config_table_vtable;
}
grpc_method_config_table* method_config_table =
grpc_mdstr_hash_table_create(num_entries, hash_table_entries);
gpr_free(hash_table_entries);
return method_config_table;
}
grpc_method_config_table* grpc_method_config_table_ref(
grpc_method_config_table* table) {
return grpc_mdstr_hash_table_ref(table);
}
void grpc_method_config_table_unref(grpc_method_config_table* table) {
grpc_mdstr_hash_table_unref(table);
}
int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
const grpc_method_config_table* table2) {
return grpc_mdstr_hash_table_cmp(table1, table2);
}
grpc_method_config* grpc_method_config_table_get_method_config(
const grpc_method_config_table* table, const grpc_mdstr* path) {
grpc_method_config* method_config = grpc_mdstr_hash_table_get(table, path);
// If we didn't find a match for the path, try looking for a wildcard
// entry (i.e., change "/service/method" to "/service/*").
if (method_config == NULL) {
const char* path_str = grpc_mdstr_as_c_string(path);
const char* sep = strrchr(path_str, '/') + 1;
const size_t len = (size_t)(sep - path_str);
char* buf = gpr_malloc(len + 2); // '*' and NUL
memcpy(buf, path_str, len);
buf[len] = '*';
buf[len + 1] = '\0';
grpc_mdstr* wildcard_path = grpc_mdstr_from_string(buf);
gpr_free(buf);
method_config = grpc_mdstr_hash_table_get(table, wildcard_path);
GRPC_MDSTR_UNREF(wildcard_path);
}
return method_config;
}
static void* copy_arg(void* p) { return grpc_method_config_table_ref(p); }
static void destroy_arg(void* p) { grpc_method_config_table_unref(p); }
static int cmp_arg(void* p1, void* p2) {
return grpc_method_config_table_cmp(p1, p2);
}
static grpc_arg_pointer_vtable arg_vtable = {copy_arg, destroy_arg, cmp_arg};
grpc_arg grpc_method_config_table_create_channel_arg(
grpc_method_config_table* table) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
arg.key = GRPC_ARG_SERVICE_CONFIG;
arg.value.pointer.p = table;
arg.value.pointer.vtable = &arg_vtable;
return arg;
}

@ -0,0 +1,116 @@
//
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_METHOD_CONFIG_H
#define GRPC_CORE_EXT_CLIENT_CHANNEL_METHOD_CONFIG_H
#include <stdbool.h>
#include <grpc/impl/codegen/gpr_types.h>
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/transport/mdstr_hash_table.h"
#include "src/core/lib/transport/metadata.h"
/// Per-method configuration.
typedef struct grpc_method_config grpc_method_config;
/// Creates a grpc_method_config with the specified parameters.
/// Any parameter may be NULL to indicate that the value is unset.
///
/// \a wait_for_ready indicates whether the client should wait until the
/// request deadline for the channel to become ready, even if there is a
/// temporary failure before the deadline while attempting to connect.
///
/// \a timeout indicates the timeout for calls.
///
/// \a max_request_message_bytes and \a max_response_message_bytes
/// indicate the maximum sizes of the request (checked when sending) and
/// response (checked when receiving) messages.
grpc_method_config* grpc_method_config_create(
bool* wait_for_ready, gpr_timespec* timeout,
int32_t* max_request_message_bytes, int32_t* max_response_message_bytes);
grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config);
void grpc_method_config_unref(grpc_method_config* method_config);
/// Compares two grpc_method_configs.
/// The sort order is stable but undefined.
int grpc_method_config_cmp(const grpc_method_config* method_config1,
const grpc_method_config* method_config2);
/// These methods return NULL if the requested field is unset.
/// The caller does NOT take ownership of the result.
const bool* grpc_method_config_get_wait_for_ready(
const grpc_method_config* method_config);
const gpr_timespec* grpc_method_config_get_timeout(
const grpc_method_config* method_config);
const int32_t* grpc_method_config_get_max_request_message_bytes(
const grpc_method_config* method_config);
const int32_t* grpc_method_config_get_max_response_message_bytes(
const grpc_method_config* method_config);
/// A table of method configs.
typedef grpc_mdstr_hash_table grpc_method_config_table;
typedef struct grpc_method_config_table_entry {
/// The name is of one of the following forms:
/// service/method -- specifies exact service and method name
/// service/* -- matches all methods for the specified service
grpc_mdstr* method_name;
grpc_method_config* method_config;
} grpc_method_config_table_entry;
/// Takes new references to all keys and values in \a entries.
grpc_method_config_table* grpc_method_config_table_create(
size_t num_entries, grpc_method_config_table_entry* entries);
grpc_method_config_table* grpc_method_config_table_ref(
grpc_method_config_table* table);
void grpc_method_config_table_unref(grpc_method_config_table* table);
/// Compares two grpc_method_config_tables.
/// The sort order is stable but undefined.
int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
const grpc_method_config_table* table2);
/// Gets the method config for the specified \a path, which should be of
/// the form "/service/method".
/// Returns NULL if the method has no config.
/// Caller does NOT own a reference to the result.
grpc_method_config* grpc_method_config_table_get_method_config(
const grpc_method_config_table* table, const grpc_mdstr* path);
/// Returns a channel arg containing \a table.
grpc_arg grpc_method_config_table_create_channel_arg(
grpc_method_config_table* table);
#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_METHOD_CONFIG_H */
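A hedged usage sketch of this header (the method name, the timeout value, and the shape of the helper are illustrative; a real resolver would manage the table's lifetime alongside its other results):

```c
#include <stdbool.h>

#include <grpc/support/time.h>

#include "src/core/ext/client_channel/method_config.h"
#include "src/core/lib/transport/metadata.h"

/* Build a one-entry method-config table and wrap it in the
 * GRPC_ARG_SERVICE_CONFIG channel arg. The caller keeps the initial
 * reference to the table (returned via table_out) and unrefs it once the
 * channel args have been copied. */
static grpc_arg build_service_config_arg(
    grpc_method_config_table **table_out) {
  bool wait_for_ready = true;
  gpr_timespec timeout = gpr_time_from_seconds(5, GPR_TIMESPAN);
  grpc_method_config *config =
      grpc_method_config_create(&wait_for_ready, &timeout, NULL, NULL);
  grpc_method_config_table_entry entry;
  entry.method_name =
      grpc_mdstr_from_string("/grpc.testing.TestService/UnaryCall");
  entry.method_config = config;
  /* The table takes its own references to keys and values... */
  *table_out = grpc_method_config_table_create(1, &entry);
  /* ...so we can drop ours. */
  GRPC_MDSTR_UNREF(entry.method_name);
  grpc_method_config_unref(config);
  return grpc_method_config_table_create_channel_arg(*table_out);
}
```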

@ -51,22 +51,17 @@ typedef struct grpc_resolver_result grpc_resolver_result;
grpc_resolver_result* grpc_resolver_result_create(
const char* server_name, grpc_lb_addresses* addresses,
const char* lb_policy_name, grpc_channel_args* lb_policy_args);
void grpc_resolver_result_ref(grpc_resolver_result* result);
void grpc_resolver_result_unref(grpc_exec_ctx* exec_ctx,
grpc_resolver_result* result);
/// Caller does NOT take ownership of result.
/// Accessors. Caller does NOT take ownership of results.
const char* grpc_resolver_result_get_server_name(grpc_resolver_result* result);
/// Caller does NOT take ownership of result.
grpc_lb_addresses* grpc_resolver_result_get_addresses(
grpc_resolver_result* result);
/// Caller does NOT take ownership of result.
const char* grpc_resolver_result_get_lb_policy_name(
grpc_resolver_result* result);
/// Caller does NOT take ownership of result.
grpc_channel_args* grpc_resolver_result_get_lb_policy_args(
grpc_resolver_result* result);

@ -220,8 +220,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"SUBCHANNEL: %p %12s 0x%08d -> 0x%08d [%s]", c, purpose, (int)old_val,
(int)(old_val + delta), reason);
"SUBCHANNEL: %p %s 0x%08" PRIxPTR " -> 0x%08" PRIxPTR " [%s]", c,
purpose, old_val, old_val + delta, reason);
#endif
return old_val;
}
@ -704,7 +704,7 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
grpc_polling_entity *pollent, gpr_timespec deadline,
grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
*call = gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
@ -712,7 +712,7 @@ grpc_error *grpc_connected_subchannel_create_call(
(*call)->connection = con; // Ref is added below.
grpc_error *error =
grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, *call,
NULL, NULL, deadline, callstk);
NULL, NULL, path, deadline, callstk);
if (error != GRPC_ERROR_NONE) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);

@ -38,6 +38,7 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
/** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */
@ -110,7 +111,7 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
/** construct a subchannel call */
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
grpc_polling_entity *pollent, gpr_timespec deadline,
grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **subchannel_call);
/** process a transport level op */

@ -69,8 +69,8 @@
* possible scenarios:
*
* 1. This is the first server list received. There was no previous instance of
* the Round Robin policy. \a rr_handover() will instantiate the RR policy
* and perform all the pending operations over it.
* the Round Robin policy. \a rr_handover_locked() will instantiate the RR
* policy and perform all the pending operations over it.
* 2. There's already a RR policy instance active. We need to introduce the new
* one build from the new serverlist, but taking care not to disrupt the
* operations in progress over the old RR instance. This is done by
@ -78,7 +78,7 @@
* references are held on the old RR policy, it'll be destroyed and \a
* glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
* state. At this point we can transition to a new RR instance safely, which
* is done once again via \a rr_handover().
* is done once again via \a rr_handover_locked().
*
*
* Once a RR policy instance is in place (and getting updated as described),
@ -86,8 +86,8 @@
* forwarding them to the RR instance. Any time there's no RR policy available
* (ie, right after the creation of the gRPCLB policy, if an empty serverlist
* is received, etc), pick/ping requests are added to a list of pending
* picks/pings to be flushed and serviced as part of \a rr_handover() the moment
* the RR policy instance becomes available.
* picks/pings to be flushed and serviced as part of \a rr_handover_locked() the
* moment the RR policy instance becomes available.
*
* \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
* high level design and details. */
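The two scenarios above reduce to a single branch when a serverlist arrives. The sketch below is illustrative only, a condensed restatement of the handling that appears in res_recv_cb and glb_rr_connectivity_changed further down this diff, with a hypothetical on_serverlist_received wrapper:

static void on_serverlist_received(grpc_exec_ctx *exec_ctx,
                                   glb_lb_policy *glb_policy,
                                   grpc_error *error) {
  if (glb_policy->rr_policy == NULL) {
    /* Scenario 1: no RR instance yet -> create it and flush the pending
     * picks/pings (rr_handover_locked does both). */
    rr_handover_locked(exec_ctx, glb_policy, error);
  } else {
    /* Scenario 2: an RR instance already exists -> drop our ref to it.
     * Once all refs are gone it reports GRPC_CHANNEL_SHUTDOWN and
     * glb_rr_connectivity_changed performs rr_handover_locked() with the
     * stored new serverlist. */
    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
                         "serverlist_received");
  }
}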
@ -113,6 +113,7 @@
#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/support/string.h"
@ -134,6 +135,9 @@ static void initial_metadata_add_lb_token(
}
typedef struct wrapped_rr_closure_arg {
/* the closure instance using this struct as argument */
grpc_closure wrapper_closure;
/* the original closure. Usually an on_complete/notify cb for pick() and ping()
* calls against the internal RR instance, respectively. */
grpc_closure *wrapped_closure;
@ -155,9 +159,8 @@ typedef struct wrapped_rr_closure_arg {
/* The RR instance related to the closure */
grpc_lb_policy *rr_policy;
/* when not NULL, represents a pending_{pick,ping} node to be freed upon
* closure execution */
void *owning_pending_node; /* to be freed if not NULL */
/* heap memory to be freed upon closure execution. */
void *free_when_done;
} wrapped_rr_closure_arg;
/* The \a on_complete closure passed as part of the pick requires keeping a
@ -183,10 +186,10 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
}
}
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
NULL);
gpr_free(wc_arg->owning_pending_node);
GPR_ASSERT(wc_arg->free_when_done != NULL);
gpr_free(wc_arg->free_when_done);
}
/* Linked list of pending pick requests. It stores all information needed to
@ -207,10 +210,6 @@ typedef struct pending_pick {
* upon error. */
grpc_connected_subchannel **target;
/* a closure wrapping the original on_complete one to be invoked once the
* pick() has completed (regardless of success) */
grpc_closure wrapped_on_complete;
/* args for wrapped_on_complete */
wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
@ -230,8 +229,9 @@ static void add_pending_pick(pending_pick **root,
pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
pick_args->lb_token_mdelem_storage;
grpc_closure_init(&pp->wrapped_on_complete, wrapped_rr_closure,
&pp->wrapped_on_complete_arg);
pp->wrapped_on_complete_arg.free_when_done = pp;
grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
wrapped_rr_closure, &pp->wrapped_on_complete_arg);
*root = pp;
}
@ -239,10 +239,6 @@ static void add_pending_pick(pending_pick **root,
typedef struct pending_ping {
struct pending_ping *next;
/* a closure wrapping the original on_complete one to be invoked once the
* ping() has completed (regardless of success) */
grpc_closure wrapped_notify;
/* args for wrapped_notify */
wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
@ -251,10 +247,11 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
pending_ping *pping = gpr_malloc(sizeof(*pping));
memset(pping, 0, sizeof(pending_ping));
memset(&pping->wrapped_notify_arg, 0, sizeof(wrapped_rr_closure_arg));
pping->next = *root;
grpc_closure_init(&pping->wrapped_notify, wrapped_rr_closure,
&pping->wrapped_notify_arg);
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
wrapped_rr_closure, &pping->wrapped_notify_arg);
*root = pping;
}
@ -274,6 +271,7 @@ typedef struct glb_lb_policy {
/** who the client is trying to communicate with */
const char *server_name;
grpc_client_channel_factory *cc_factory;
grpc_channel_args *args;
/** deadline for the LB's call */
gpr_timespec deadline;
@ -307,13 +305,6 @@ typedef struct glb_lb_policy {
/** for tracking of the RR connectivity */
rr_connectivity_data *rr_connectivity;
/* a wrapped (see \a wrapped_rr_closure) on-complete closure for readily
* available RR picks */
grpc_closure wrapped_on_complete;
/* arguments for the wrapped_on_complete closure */
wrapped_rr_closure_arg wc_arg;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
@ -329,8 +320,8 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
if (server->port >> 16 != 0) {
if (log) {
gpr_log(GPR_ERROR,
"Invalid port '%d' at index %zu of serverlist. Ignoring.",
server->port, idx);
"Invalid port '%d' at index %lu of serverlist. Ignoring.",
server->port, (unsigned long)idx);
}
return false;
}
@ -338,9 +329,9 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
if (ip->size != 4 && ip->size != 16) {
if (log) {
gpr_log(GPR_ERROR,
"Expected IP to be 4 or 16 bytes, got %d at index %zu of "
"Expected IP to be 4 or 16 bytes, got %d at index %lu of "
"serverlist. Ignoring",
ip->size, idx);
ip->size, (unsigned long)idx);
}
return false;
}
@ -399,14 +390,14 @@ static grpc_lb_addresses *process_serverlist(
GPR_ARRAY_SIZE(server->load_balance_token) - 1;
grpc_mdstr *lb_token_mdstr = grpc_mdstr_from_buffer(
(uint8_t *)server->load_balance_token, lb_token_size);
user_data = grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_LOAD_REPORTING_INITIAL, lb_token_mdstr);
user_data = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_LB_TOKEN,
lb_token_mdstr);
} else {
gpr_log(GPR_ERROR,
"Missing LB token for backend address '%s'. The empty token will "
"be used instead",
grpc_sockaddr_to_uri((struct sockaddr *)&addr.addr));
user_data = GRPC_MDELEM_LOAD_REPORTING_INITIAL_EMPTY;
user_data = GRPC_MDELEM_LB_TOKEN_EMPTY;
}
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
@ -424,9 +415,43 @@ static void lb_token_destroy(void *token) {
if (token != NULL) GRPC_MDELEM_UNREF(token);
}
static grpc_lb_policy *create_rr(grpc_exec_ctx *exec_ctx,
const grpc_grpclb_serverlist *serverlist,
glb_lb_policy *glb_policy) {
/* perform a pick over \a rr_policy. Given that a pick can return immediately
* (ignoring its completion callback), we need to perform the cleanups this
* callback would otherwise be responsible for */
static bool pick_from_internal_rr_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *rr_policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
GPR_ASSERT(rr_policy != NULL);
const bool pick_done =
grpc_lb_policy_pick(exec_ctx, rr_policy, pick_args, target,
(void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick");
/* add the load reporting initial metadata */
initial_metadata_add_lb_token(pick_args->initial_metadata,
pick_args->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
gpr_free(wc_arg);
}
/* else, the pending pick will be registered and taken care of by the
* pending pick list inside the RR policy (glb_policy->rr_policy).
* Eventually, wrapped_on_complete will be called, which will -among other
* things- add the LB token to the call's initial metadata */
return pick_done;
}
static grpc_lb_policy *create_rr_locked(
grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist,
glb_lb_policy *glb_policy) {
GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
grpc_lb_policy_args args;
@ -434,6 +459,7 @@ static grpc_lb_policy *create_rr(grpc_exec_ctx *exec_ctx,
args.server_name = glb_policy->server_name;
args.client_channel_factory = glb_policy->cc_factory;
args.addresses = process_serverlist(serverlist);
args.additional_args = glb_policy->args;
grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
@ -446,18 +472,21 @@ static grpc_lb_policy *create_rr(grpc_exec_ctx *exec_ctx,
return rr;
}
static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_error *error) {
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy, grpc_error *error) {
GPR_ASSERT(glb_policy->serverlist != NULL &&
glb_policy->serverlist->num_servers > 0);
glb_policy->rr_policy =
create_rr(exec_ctx, glb_policy->serverlist, glb_policy);
create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Created RR policy (0x%" PRIxPTR ")",
(intptr_t)glb_policy->rr_policy);
}
GPR_ASSERT(glb_policy->rr_policy != NULL);
grpc_pollset_set_add_pollset_set(exec_ctx,
glb_policy->rr_policy->interested_parties,
glb_policy->base.interested_parties);
glb_policy->rr_connectivity->state = grpc_lb_policy_check_connectivity(
exec_ctx, glb_policy->rr_policy, &error);
grpc_lb_policy_notify_on_state_change(
@ -478,11 +507,9 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, &pp->pick_args,
pp->target,
(void **)&pp->wrapped_on_complete_arg.lb_token,
&pp->wrapped_on_complete);
pp->wrapped_on_complete_arg.owning_pending_node = pp;
pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
&pp->pick_args, pp->target,
&pp->wrapped_on_complete_arg);
}
pending_ping *pping;
@ -495,8 +522,7 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
(intptr_t)glb_policy->rr_policy);
}
grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy,
&pping->wrapped_notify);
pping->wrapped_notify_arg.owning_pending_node = pping;
&pping->wrapped_notify_arg.wrapper_closure);
}
}
@ -509,13 +535,16 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
if (glb_policy->serverlist != NULL) {
/* a RR policy is shutting down but there's a serverlist available ->
* perform a handover */
rr_handover(exec_ctx, glb_policy, error);
gpr_mu_lock(&glb_policy->mu);
rr_handover_locked(exec_ctx, glb_policy, error);
gpr_mu_unlock(&glb_policy->mu);
} else {
/* shutting down and no new serverlist available. Bail out. */
gpr_free(rr_conn_data);
}
} else {
if (error == GRPC_ERROR_NONE) {
gpr_mu_lock(&glb_policy->mu);
/* RR not shutting down. Mimic the RR's policy state */
grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
rr_conn_data->state, GRPC_ERROR_REF(error),
@ -524,6 +553,7 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
&rr_conn_data->state,
&rr_conn_data->on_change);
gpr_mu_unlock(&glb_policy->mu);
} else { /* error */
gpr_free(rr_conn_data);
}
@ -557,6 +587,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
* Create a client channel over them to communicate with a LB service */
glb_policy->server_name = gpr_strdup(args->server_name);
glb_policy->cc_factory = args->client_channel_factory;
glb_policy->args = grpc_channel_args_copy(args->additional_args);
GPR_ASSERT(glb_policy->cc_factory != NULL);
/* construct a target from the addresses in args, given in the form
@ -623,6 +654,7 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GPR_ASSERT(glb_policy->pending_picks == NULL);
GPR_ASSERT(glb_policy->pending_pings == NULL);
gpr_free((void *)glb_policy->server_name);
grpc_channel_args_destroy(glb_policy->args);
grpc_channel_destroy(glb_policy->lb_channel);
glb_policy->lb_channel = NULL;
grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
@ -648,15 +680,15 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete, GRPC_ERROR_NONE,
NULL);
grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_NONE, NULL);
pp = next;
}
while (pping != NULL) {
pending_ping *next = pping->next;
grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify, GRPC_ERROR_NONE,
NULL);
grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
GRPC_ERROR_NONE, NULL);
pping = next;
}
@ -686,11 +718,9 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
grpc_polling_entity_del_from_pollset_set(
exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
*target = NULL;
grpc_exec_ctx_sched(
exec_ctx, &pp->wrapped_on_complete,
exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
} else {
pp->next = glb_policy->pending_picks;
@ -719,10 +749,8 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
grpc_polling_entity_del_from_pollset_set(
exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
grpc_exec_ctx_sched(
exec_ctx, &pp->wrapped_on_complete,
exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
} else {
pp->next = glb_policy->pending_picks;
@ -761,7 +789,7 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_CREATE("No mdelem storage for the LB token. Load reporting "
"won't work without it. Failing"),
NULL);
return 1;
return 0;
}
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
@ -775,39 +803,20 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
(intptr_t)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
memset(&glb_policy->wc_arg, 0, sizeof(wrapped_rr_closure_arg));
glb_policy->wc_arg.rr_policy = glb_policy->rr_policy;
glb_policy->wc_arg.target = target;
glb_policy->wc_arg.wrapped_closure = on_complete;
glb_policy->wc_arg.lb_token_mdelem_storage =
pick_args->lb_token_mdelem_storage;
glb_policy->wc_arg.initial_metadata = pick_args->initial_metadata;
glb_policy->wc_arg.owning_pending_node = NULL;
grpc_closure_init(&glb_policy->wrapped_on_complete, wrapped_rr_closure,
&glb_policy->wc_arg);
pick_done =
grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, pick_args, target,
(void **)&glb_policy->wc_arg.lb_token,
&glb_policy->wrapped_on_complete);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
(intptr_t)glb_policy->wc_arg.rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->wc_arg.rr_policy, "glb_pick");
/* add the load reporting initial metadata */
initial_metadata_add_lb_token(
pick_args->initial_metadata, pick_args->lb_token_mdelem_storage,
GRPC_MDELEM_REF(glb_policy->wc_arg.lb_token));
}
wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg));
memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg));
grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg);
wc_arg->rr_policy = glb_policy->rr_policy;
wc_arg->target = target;
wc_arg->wrapped_closure = on_complete;
wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
wc_arg->initial_metadata = pick_args->initial_metadata;
wc_arg->free_when_done = wc_arg;
pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
pick_args, target, wc_arg);
} else {
/* else, the pending pick will be registered and taken care of by the
* pending pick list inside the RR policy (glb_policy->rr_policy) */
grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
glb_policy->base.interested_parties);
add_pending_pick(&glb_policy->pending_picks, pick_args, target,
on_complete);
@ -931,7 +940,7 @@ static lb_client_data *lb_client_data_create(glb_lb_policy *glb_policy) {
/* Note the following LB call progresses every time there's activity in \a
* glb_policy->base.interested_parties, which is comprised of the polling
* entities passed to glb_pick(). */
* entities from \a client_channel. */
lb_client->lb_call = grpc_channel_create_pollset_set_call(
glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
@ -1070,12 +1079,13 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
if (serverlist != NULL) {
gpr_slice_unref(response_slice);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Serverlist with %zu servers received",
serverlist->num_servers);
gpr_log(GPR_INFO, "Serverlist with %lu servers received",
(unsigned long)serverlist->num_servers);
}
/* update serverlist */
if (serverlist->num_servers > 0) {
gpr_mu_lock(&lb_client->glb_policy->mu);
if (grpc_grpclb_serverlist_equals(lb_client->glb_policy->serverlist,
serverlist)) {
if (grpc_lb_glb_trace) {
@ -1093,7 +1103,7 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
if (lb_client->glb_policy->rr_policy == NULL) {
/* initial "handover", in this case from a null RR policy, meaning
* it'll just create the first RR policy instance */
rr_handover(exec_ctx, lb_client->glb_policy, error);
rr_handover_locked(exec_ctx, lb_client->glb_policy, error);
} else {
/* unref the RR policy, eventually leading to its substitution with a
* new one constructed from the received serverlist (see
@ -1101,6 +1111,7 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_LB_POLICY_UNREF(exec_ctx, lb_client->glb_policy->rr_policy,
"serverlist_received");
}
gpr_mu_unlock(&lb_client->glb_policy->mu);
} else {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
@ -1155,10 +1166,10 @@ static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
"status from lb server received. Status = %d, Details = '%s', "
"Capaticy "
"= %zu",
"Capacity "
"= %lu",
lb_client->status, lb_client->status_details,
lb_client->status_details_capacity);
(unsigned long)lb_client->status_details_capacity);
}
/* TODO(dgq): deal with stream termination properly (fire up another one?
* fail the original call?) */

@ -39,7 +39,6 @@
typedef struct pending_pick {
struct pending_pick *next;
grpc_polling_entity *pollent;
uint32_t initial_metadata_flags;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
@ -119,8 +118,6 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
p->base.interested_parties);
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
pp = next;
@ -138,8 +135,6 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
p->base.interested_parties);
*target = NULL;
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
@ -168,8 +163,6 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
p->base.interested_parties);
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
@ -229,11 +222,8 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
p->base.interested_parties);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollent = pick_args->pollent;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->on_complete = on_complete;
@ -319,8 +309,6 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = selected;
grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
p->base.interested_parties);
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
@ -478,6 +466,7 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
sc_args.addr =
(struct sockaddr *)(&args->addresses->addresses[i].address.addr);
sc_args.addr_len = args->addresses->addresses[i].address.len;
sc_args.args = args->additional_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);

@ -78,9 +78,6 @@ int grpc_lb_round_robin_trace = 0;
typedef struct pending_pick {
struct pending_pick *next;
/* polling entity for the pick()'s async notification */
grpc_polling_entity *pollent;
/* output argument where to store the pick()ed user_data. It'll be NULL if no
* such data is present or there's an error (the definite test for errors is
* \a target being NULL). */
@ -318,8 +315,6 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
p->base.interested_parties);
*target = NULL;
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
@ -348,8 +343,6 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
p->base.interested_parties);
*pp->target = NULL;
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
@ -403,7 +396,6 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
/* readily available, report right away */
gpr_mu_unlock(&p->mu);
*target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
if (user_data != NULL) {
@ -416,17 +408,15 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
gpr_mu_unlock(&p->mu);
return 1;
} else {
/* no pick currently available. Save for later in list of pending picks */
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
p->base.interested_parties);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollent = pick_args->pollent;
pp->target = target;
pp->on_complete = on_complete;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@ -482,8 +472,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
(void *)selected->subchannel, (void *)selected);
}
grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
p->base.interested_parties);
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
@ -641,6 +629,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
sc_args.addr =
(struct sockaddr *)(&args->addresses->addresses[i].address.addr);
sc_args.addr_len = args->addresses->addresses[i].address.len;
sc_args.args = args->additional_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);

@ -37,13 +37,21 @@
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/channel/channel_stack.h"
/** Metadata key for initial metadata coming from clients */
/* TODO(dgq): change to the final value TBD */
#define GRPC_LOAD_REPORTING_INITIAL_MD_KEY "load-reporting-initial"
/** Metadata key for the gRPC LB load balancer token.
*
* The value corresponding to this key is an opaque token that is given to the
* frontend as part of each pick; the frontend sends this token to the backend
* in each request it sends when using that pick. The token is used by the
* backend to verify the request and to allow the backend to report load to the
* gRPC LB system. */
#define GRPC_LB_TOKEN_MD_KEY "lb-token"
/** Metadata key for trailing metadata from servers */
/* TODO(dgq): change to the final value TBD */
#define GRPC_LOAD_REPORTING_TRAILING_MD_KEY "load-reporting-trailing"
/** Metadata key for gRPC LB cost reporting.
*
* The value corresponding to this key is an opaque binary blob reported by the
* backend as part of its trailing metadata containing cost information for the
* call. */
#define GRPC_LB_COST_MD_KEY "lb-cost"
/** Identifiers for the invocation point of the users LR callback */
typedef enum grpc_load_reporting_source {

@ -75,7 +75,7 @@ static grpc_mdelem *recv_md_filter(void *user_data, grpc_mdelem *md) {
if (md->key == GRPC_MDSTR_PATH) {
calld->service_method = grpc_mdstr_as_c_string(md->value);
} else if (md->key == GRPC_MDSTR_LOAD_REPORTING_INITIAL) {
} else if (md->key == GRPC_MDSTR_LB_TOKEN) {
calld->initial_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
return NULL;
}
@ -193,7 +193,7 @@ static grpc_mdelem *lr_trailing_md_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (md->key == GRPC_MDSTR_LOAD_REPORTING_TRAILING) {
if (md->key == GRPC_MDSTR_LB_COST) {
calld->trailing_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
return NULL;
}

@ -33,6 +33,7 @@
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
@ -119,7 +120,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
*r->target_result = grpc_resolver_result_create(
r->target_name,
grpc_lb_addresses_copy(r->addresses, NULL /* user_data_copy */),
NULL /* lb_policy_name */, NULL);
NULL /* lb_policy_name */, NULL /* lb_policy_args */);
grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
}
@ -128,8 +129,8 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
sockaddr_resolver *r = (sockaddr_resolver *)gr;
gpr_mu_destroy(&r->mu);
grpc_lb_addresses_destroy(r->addresses, NULL /* user_data_destroy */);
gpr_free(r->target_name);
grpc_lb_addresses_destroy(r->addresses, NULL /* user_data_destroy */);
gpr_free(r);
}

@ -61,13 +61,12 @@ typedef struct server_secure_state {
grpc_server_credentials *creds;
bool is_shutdown;
gpr_mu mu;
gpr_refcount refcount;
grpc_closure destroy_closure;
grpc_closure *destroy_callback;
grpc_closure tcp_server_shutdown_complete;
grpc_closure *server_destroy_listener_done;
} server_secure_state;
typedef struct server_secure_connect {
server_secure_state *state;
server_secure_state *server_state;
grpc_pollset *accepting_pollset;
grpc_tcp_server_acceptor *acceptor;
grpc_handshake_manager *handshake_mgr;
@ -77,39 +76,28 @@ typedef struct server_secure_connect {
grpc_channel_args *args;
} server_secure_connect;
static void state_ref(server_secure_state *state) { gpr_ref(&state->refcount); }
static void state_unref(server_secure_state *state) {
if (gpr_unref(&state->refcount)) {
/* ensure all threads have unlocked */
gpr_mu_lock(&state->mu);
gpr_mu_unlock(&state->mu);
/* clean up */
GRPC_SECURITY_CONNECTOR_UNREF(&state->sc->base, "server");
grpc_server_credentials_unref(state->creds);
gpr_free(state);
}
}
static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
grpc_security_status status,
grpc_endpoint *secure_endpoint,
grpc_auth_context *auth_context) {
server_secure_connect *state = statep;
server_secure_connect *connection_state = statep;
if (status == GRPC_SECURITY_OK) {
if (secure_endpoint) {
gpr_mu_lock(&state->state->mu);
if (!state->state->is_shutdown) {
gpr_mu_lock(&connection_state->server_state->mu);
if (!connection_state->server_state->is_shutdown) {
grpc_transport *transport = grpc_create_chttp2_transport(
exec_ctx, grpc_server_get_channel_args(state->state->server),
exec_ctx, grpc_server_get_channel_args(
connection_state->server_state->server),
secure_endpoint, 0);
grpc_arg args_to_add[2];
args_to_add[0] = grpc_server_credentials_to_arg(state->state->creds);
args_to_add[0] = grpc_server_credentials_to_arg(
connection_state->server_state->creds);
args_to_add[1] = grpc_auth_context_to_arg(auth_context);
grpc_channel_args *args_copy = grpc_channel_args_copy_and_add(
state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
grpc_server_setup_transport(exec_ctx, state->state->server, transport,
state->accepting_pollset, args_copy);
connection_state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
grpc_server_setup_transport(
exec_ctx, connection_state->server_state->server, transport,
connection_state->accepting_pollset, args_copy);
grpc_channel_args_destroy(args_copy);
grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL);
} else {
@ -117,21 +105,21 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
* gone away. */
grpc_endpoint_destroy(exec_ctx, secure_endpoint);
}
gpr_mu_unlock(&state->state->mu);
gpr_mu_unlock(&connection_state->server_state->mu);
}
} else {
gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
}
grpc_channel_args_destroy(state->args);
state_unref(state->state);
gpr_free(state);
grpc_channel_args_destroy(connection_state->args);
grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
gpr_free(connection_state);
}
static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
grpc_channel_args *args,
gpr_slice_buffer *read_buffer, void *user_data,
grpc_error *error) {
server_secure_connect *state = user_data;
server_secure_connect *connection_state = user_data;
if (error != GRPC_ERROR_NONE) {
const char *error_str = grpc_error_string(error);
gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str);
@ -139,81 +127,107 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
GRPC_ERROR_UNREF(error);
grpc_channel_args_destroy(args);
gpr_free(read_buffer);
grpc_handshake_manager_shutdown(exec_ctx, state->handshake_mgr);
grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr);
state_unref(state->state);
gpr_free(state);
grpc_handshake_manager_shutdown(exec_ctx, connection_state->handshake_mgr);
grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
gpr_free(connection_state);
return;
}
grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr);
state->handshake_mgr = NULL;
grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
connection_state->handshake_mgr = NULL;
// TODO(roth, jboeuf): Convert security connector handshaking to use new
// handshake API, and then move the code from on_secure_handshake_done()
// into this function.
state->args = args;
connection_state->args = args;
grpc_server_security_connector_do_handshake(
exec_ctx, state->state->sc, state->acceptor, endpoint, read_buffer,
state->deadline, on_secure_handshake_done, state);
exec_ctx, connection_state->server_state->sc, connection_state->acceptor,
endpoint, read_buffer, connection_state->deadline,
on_secure_handshake_done, connection_state);
}
static void on_accept(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *tcp,
grpc_pollset *accepting_pollset,
grpc_tcp_server_acceptor *acceptor) {
server_secure_connect *state = gpr_malloc(sizeof(*state));
state->state = statep;
state_ref(state->state);
state->accepting_pollset = accepting_pollset;
state->acceptor = acceptor;
state->handshake_mgr = grpc_handshake_manager_create();
server_secure_state *server_state = statep;
server_secure_connect *connection_state = NULL;
gpr_mu_lock(&server_state->mu);
if (server_state->is_shutdown) {
gpr_mu_unlock(&server_state->mu);
grpc_endpoint_destroy(exec_ctx, tcp);
return;
}
gpr_mu_unlock(&server_state->mu);
grpc_tcp_server_ref(server_state->tcp);
connection_state = gpr_malloc(sizeof(*connection_state));
connection_state->server_state = server_state;
connection_state->accepting_pollset = accepting_pollset;
connection_state->acceptor = acceptor;
connection_state->handshake_mgr = grpc_handshake_manager_create();
// TODO(roth): We should really get this timeout value from channel
// args instead of hard-coding it.
state->deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_seconds(120, GPR_TIMESPAN));
connection_state->deadline = gpr_time_add(
gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
grpc_handshake_manager_do_handshake(
exec_ctx, state->handshake_mgr, tcp,
grpc_server_get_channel_args(state->state->server), state->deadline,
acceptor, on_handshake_done, state);
exec_ctx, connection_state->handshake_mgr, tcp,
grpc_server_get_channel_args(connection_state->server_state->server),
connection_state->deadline, acceptor, on_handshake_done,
connection_state);
}
/* Server callback: start listening on our ports */
static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
grpc_pollset **pollsets, size_t pollset_count) {
server_secure_state *state = statep;
grpc_tcp_server_start(exec_ctx, state->tcp, pollsets, pollset_count,
on_accept, state);
static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
void *statep, grpc_pollset **pollsets,
size_t pollset_count) {
server_secure_state *server_state = statep;
gpr_mu_lock(&server_state->mu);
server_state->is_shutdown = false;
gpr_mu_unlock(&server_state->mu);
grpc_tcp_server_start(exec_ctx, server_state->tcp, pollsets, pollset_count,
on_accept, server_state);
}
static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep,
grpc_error *error) {
server_secure_state *state = statep;
if (state->destroy_callback != NULL) {
state->destroy_callback->cb(exec_ctx, state->destroy_callback->cb_arg,
GRPC_ERROR_REF(error));
static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *statep,
grpc_error *error) {
server_secure_state *server_state = statep;
/* ensure all threads have unlocked */
gpr_mu_lock(&server_state->mu);
grpc_closure *destroy_done = server_state->server_destroy_listener_done;
GPR_ASSERT(server_state->is_shutdown);
gpr_mu_unlock(&server_state->mu);
/* clean up */
grpc_server_security_connector_shutdown(exec_ctx, server_state->sc);
/* Flush queued work before a synchronous unref. */
grpc_exec_ctx_flush(exec_ctx);
GRPC_SECURITY_CONNECTOR_UNREF(&server_state->sc->base, "server");
grpc_server_credentials_unref(server_state->creds);
if (destroy_done != NULL) {
destroy_done->cb(exec_ctx, destroy_done->cb_arg, GRPC_ERROR_REF(error));
grpc_exec_ctx_flush(exec_ctx);
}
grpc_server_security_connector_shutdown(exec_ctx, state->sc);
state_unref(state);
gpr_free(server_state);
}
/* Server callback: destroy the tcp listener (so we don't generate further
callbacks) */
static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
grpc_closure *callback) {
server_secure_state *state = statep;
static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
grpc_server *server, void *statep,
grpc_closure *callback) {
server_secure_state *server_state = statep;
grpc_tcp_server *tcp;
gpr_mu_lock(&state->mu);
state->is_shutdown = true;
state->destroy_callback = callback;
tcp = state->tcp;
gpr_mu_unlock(&state->mu);
gpr_mu_lock(&server_state->mu);
server_state->is_shutdown = true;
server_state->server_destroy_listener_done = callback;
tcp = server_state->tcp;
gpr_mu_unlock(&server_state->mu);
grpc_tcp_server_shutdown_listeners(exec_ctx, tcp);
grpc_tcp_server_unref(exec_ctx, tcp);
grpc_tcp_server_unref(exec_ctx, server_state->tcp);
}
int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_server_credentials *creds) {
grpc_resolved_addresses *resolved = NULL;
grpc_tcp_server *tcp = NULL;
server_secure_state *state = NULL;
server_secure_state *server_state = NULL;
size_t i;
size_t count = 0;
int port_num = -1;
@ -253,22 +267,22 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
if (err != GRPC_ERROR_NONE) {
goto error;
}
state = gpr_malloc(sizeof(*state));
memset(state, 0, sizeof(*state));
grpc_closure_init(&state->destroy_closure, destroy_done, state);
err = grpc_tcp_server_create(&state->destroy_closure,
server_state = gpr_malloc(sizeof(*server_state));
memset(server_state, 0, sizeof(*server_state));
grpc_closure_init(&server_state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, server_state);
err = grpc_tcp_server_create(&server_state->tcp_server_shutdown_complete,
grpc_server_get_channel_args(server), &tcp);
if (err != GRPC_ERROR_NONE) {
goto error;
}
state->server = server;
state->tcp = tcp;
state->sc = sc;
state->creds = grpc_server_credentials_ref(creds);
state->is_shutdown = false;
gpr_mu_init(&state->mu);
gpr_ref_init(&state->refcount, 1);
server_state->server = server;
server_state->tcp = tcp;
server_state->sc = sc;
server_state->creds = grpc_server_credentials_ref(creds);
server_state->is_shutdown = true;
gpr_mu_init(&server_state->mu);
errors = gpr_malloc(sizeof(*errors) * resolved->naddrs);
for (i = 0; i < resolved->naddrs; i++) {
@ -313,7 +327,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_resolved_addresses_destroy(resolved);
/* Register with the server only upon success */
grpc_server_add_listener(&exec_ctx, server, state, start, destroy);
grpc_server_add_listener(&exec_ctx, server, server_state,
server_start_listener, server_destroy_listener);
grpc_exec_ctx_finish(&exec_ctx);
return port_num;
@ -334,10 +349,11 @@ error:
grpc_tcp_server_unref(&exec_ctx, tcp);
} else {
if (sc) {
grpc_exec_ctx_flush(&exec_ctx);
GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "server");
}
if (state) {
gpr_free(state);
if (server_state) {
gpr_free(server_state);
}
}
grpc_exec_ctx_finish(&exec_ctx);

@ -36,14 +36,11 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/metadata.h"
extern int grpc_http_write_state_trace;
void grpc_chttp2_plugin_init(void) {
grpc_chttp2_base64_encode_and_huffman_compress =
grpc_chttp2_base64_encode_and_huffman_compress_impl;
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
grpc_register_tracer("http_write_state", &grpc_http_write_state_trace);
}
void grpc_chttp2_plugin_shutdown(void) {}

File diff suppressed because it is too large

@ -40,8 +40,8 @@
#include "src/core/lib/iomgr/error.h"
/* defined in internal.h */
typedef struct grpc_chttp2_stream_parsing grpc_chttp2_stream_parsing;
typedef struct grpc_chttp2_transport_parsing grpc_chttp2_transport_parsing;
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
#define GRPC_CHTTP2_FRAME_DATA 0
#define GRPC_CHTTP2_FRAME_HEADER 1

@ -51,16 +51,11 @@ grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser) {
void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser) {
grpc_byte_stream *bs;
if (parser->parsing_frame) {
if (parser->parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"),
1);
}
while (
(bs = grpc_chttp2_incoming_frame_queue_pop(&parser->incoming_frames))) {
grpc_byte_stream_destroy(exec_ctx, bs);
exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"));
}
GRPC_ERROR_UNREF(parser->error);
}
grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
@ -145,22 +140,17 @@ void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
stats->data_bytes += write_bytes;
}
grpc_error *grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *p,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_data_parser *p = parser;
uint32_t message_flags;
grpc_chttp2_incoming_byte_stream *incoming_byte_stream;
char *msg;
if (is_last && p->is_last_frame) {
stream_parsing->received_close = 1;
}
if (cur == end) {
return GRPC_ERROR_NONE;
}
@ -171,7 +161,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
return GRPC_ERROR_REF(p->error);
fh_0:
case GRPC_CHTTP2_DATA_FH_0:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_type = *cur;
switch (p->frame_type) {
case 0:
@ -184,7 +174,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
gpr_asprintf(&msg, "Bad GRPC frame type 0x%02x", p->frame_type);
p->error = GRPC_ERROR_CREATE(msg);
p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID,
(intptr_t)stream_parsing->id);
(intptr_t)s->id);
gpr_free(msg);
msg = gpr_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
p->error =
@ -201,7 +191,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_1:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size = ((uint32_t)*cur) << 24;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_2;
@ -209,7 +199,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_2:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 16;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_3;
@ -217,7 +207,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_3:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_4;
@ -225,7 +215,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_4:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur);
p->state = GRPC_CHTTP2_DATA_FRAME;
++cur;
@ -234,35 +224,32 @@ grpc_error *grpc_chttp2_data_parser_parse(
message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
p->parsing_frame = incoming_byte_stream =
grpc_chttp2_incoming_byte_stream_create(
exec_ctx, transport_parsing, stream_parsing, p->frame_size,
message_flags, &p->incoming_frames);
grpc_chttp2_incoming_byte_stream_create(exec_ctx, t, s, p->frame_size,
message_flags);
/* fallthrough */
case GRPC_CHTTP2_DATA_FRAME:
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
if (cur == end) {
return GRPC_ERROR_NONE;
}
uint32_t remaining = (uint32_t)(end - cur);
if (remaining == p->frame_size) {
stream_parsing->stats.incoming.data_bytes += p->frame_size;
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE, 1);
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
p->state = GRPC_CHTTP2_DATA_FH_0;
return GRPC_ERROR_NONE;
} else if (remaining > p->frame_size) {
stream_parsing->stats.incoming.data_bytes += p->frame_size;
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg),
(size_t)(cur + p->frame_size - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE, 1);
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
cur += p->frame_size;
goto fh_0; /* loop */
@ -272,10 +259,25 @@ grpc_error *grpc_chttp2_data_parser_parse(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
p->frame_size -= remaining;
stream_parsing->stats.incoming.data_bytes += remaining;
s->stats.incoming.data_bytes += remaining;
return GRPC_ERROR_NONE;
}
}
GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
}
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
grpc_chttp2_data_parser *p = parser;
grpc_error *error = parse_inner(exec_ctx, p, t, s, slice);
if (is_last && p->is_last_frame) {
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
GRPC_ERROR_NONE);
}
return error;
}

@ -69,7 +69,6 @@ typedef struct {
grpc_error *error;
int is_frame_compressed;
grpc_chttp2_incoming_frame_queue incoming_frames;
grpc_chttp2_incoming_byte_stream *parsing_frame;
} grpc_chttp2_data_parser;
@ -92,10 +91,10 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
/* handle a slice of a data frame - is_last indicates the last slice of a
frame */
grpc_error *grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
uint32_t write_bytes, int is_eof,

@ -67,10 +67,11 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_goaway_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -148,12 +149,9 @@ grpc_error *grpc_chttp2_goaway_parser_parse(
p->debug_pos += (uint32_t)(end - cur);
p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
if (is_last) {
transport_parsing->goaway_received = 1;
transport_parsing->goaway_last_stream_index = p->last_stream_id;
gpr_slice_unref(transport_parsing->goaway_text);
transport_parsing->goaway_error = (grpc_status_code)p->error_code;
transport_parsing->goaway_text =
gpr_slice_new(p->debug_data, p->debug_length, gpr_free);
grpc_chttp2_add_incoming_goaway(
exec_ctx, t, (uint32_t)p->error_code,
gpr_slice_new(p->debug_data, p->debug_length, gpr_free));
p->debug_data = NULL;
}
return GRPC_ERROR_NONE;

@ -65,10 +65,11 @@ void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p);
void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
grpc_error *grpc_chttp2_goaway_parser_begin_frame(
grpc_chttp2_goaway_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_goaway_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
gpr_slice debug_data,

@ -73,10 +73,10 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_ping_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -91,10 +91,11 @@ grpc_error *grpc_chttp2_ping_parser_parse(
if (p->byte == 8) {
GPR_ASSERT(is_last);
if (p->is_ack) {
grpc_chttp2_ack_ping(exec_ctx, transport_parsing, p->opaque_8bytes);
grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
gpr_slice_buffer_add(&transport_parsing->qbuf,
gpr_slice_buffer_add(&t->qbuf,
grpc_chttp2_ping_create(1, p->opaque_8bytes));
grpc_chttp2_initiate_write(exec_ctx, t, false, "ping response");
}
}

@ -48,9 +48,9 @@ gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes);
grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_ping_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */

@ -39,6 +39,8 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
gpr_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
grpc_transport_one_way_stats *stats) {
@ -83,10 +85,11 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_rst_stream_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -97,19 +100,28 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(
cur++;
p->byte++;
}
stream_parsing->stats.incoming.framing_bytes += (uint64_t)(end - cur);
s->stats.incoming.framing_bytes += (uint64_t)(end - cur);
if (p->byte == 4) {
GPR_ASSERT(is_last);
stream_parsing->received_close = 1;
if (stream_parsing->forced_close_error == GRPC_ERROR_NONE) {
stream_parsing->forced_close_error = grpc_error_set_int(
GRPC_ERROR_CREATE("RST_STREAM"), GRPC_ERROR_INT_HTTP2_ERROR,
(intptr_t)((((uint32_t)p->reason_bytes[0]) << 24) |
(((uint32_t)p->reason_bytes[1]) << 16) |
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]))));
uint32_t reason = (((uint32_t)p->reason_bytes[0]) << 24) |
(((uint32_t)p->reason_bytes[1]) << 16) |
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]));
grpc_error *error = GRPC_ERROR_NONE;
if (reason != GRPC_CHTTP2_NO_ERROR) {
error = grpc_error_set_int(GRPC_ERROR_CREATE("RST_STREAM"),
GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)reason);
grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
(grpc_chttp2_error_code)reason, s->deadline);
char *status_details;
gpr_asprintf(&status_details, "Received RST_STREAM with error code %d",
reason);
gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
gpr_free(status_details);
grpc_chttp2_fake_status(exec_ctx, t, s, status_code, &slice_details);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, error);
}
return GRPC_ERROR_NONE;

@ -49,9 +49,10 @@ gpr_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_rst_stream_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */

@ -143,10 +143,10 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
}
}
grpc_error *grpc_chttp2_settings_parser_parse(
grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
const uint8_t *cur = GPR_SLICE_START_PTR(slice);
const uint8_t *end = GPR_SLICE_END_PTR(slice);
@ -162,11 +162,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
if (cur == end) {
parser->state = GRPC_CHTTP2_SPS_ID0;
if (is_last) {
transport_parsing->settings_updated = 1;
memcpy(parser->target_settings, parser->incoming_settings,
GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
gpr_slice_buffer_add(&transport_parsing->qbuf,
grpc_chttp2_settings_ack_create());
gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
}
return GRPC_ERROR_NONE;
}
@ -226,9 +224,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
break;
case GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE:
grpc_chttp2_goaway_append(
transport_parsing->last_incoming_stream_id, sp->error_value,
t->last_new_stream_id, sp->error_value,
gpr_slice_from_static_string("HTTP2 settings error"),
&transport_parsing->qbuf);
&t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
grpc_error *err = GRPC_ERROR_CREATE(msg);
@ -238,18 +236,17 @@ grpc_error *grpc_chttp2_settings_parser_parse(
}
if (parser->id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE &&
parser->incoming_settings[parser->id] != parser->value) {
transport_parsing->initial_window_update =
t->initial_window_update =
(int64_t)parser->value - parser->incoming_settings[parser->id];
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "adding %d for initial_window change",
(int)transport_parsing->initial_window_update);
(int)t->initial_window_update);
}
}
parser->incoming_settings[parser->id] = parser->value;
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "CHTTP2:%s: got setting %d = %d",
transport_parsing->is_client ? "CLI" : "SVR", parser->id,
parser->value);
t->is_client ? "CLI" : "SVR", parser->id, parser->value);
}
} else if (grpc_http_trace) {
gpr_log(GPR_ERROR, "CHTTP2: Ignoring unknown setting %d (value %d)",

@ -95,9 +95,10 @@ gpr_slice grpc_chttp2_settings_ack_create(void);
grpc_error *grpc_chttp2_settings_parser_begin_frame(
grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
uint32_t *settings);
grpc_error *grpc_chttp2_settings_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */
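The settings parser above does not apply a new SETTINGS_INITIAL_WINDOW_SIZE directly; it records only the signed delta against the previously stored value in t->initial_window_update. Per HTTP/2, that delta must then be applied to the flow-control window of every open stream, which may legitimately drive a window negative. A minimal sketch of that bookkeeping, using plain integers instead of the transport and stream structs (names here are illustrative, not the gRPC API):

#include <stdint.h>
#include <stdio.h>

/* Toy model: one acked setting and a few per-stream send windows. */
static uint32_t acked_initial_window = 65535; /* RFC 7540 default */
static int64_t stream_windows[3] = {65535, 1000, 0};

/* Apply a newly received SETTINGS_INITIAL_WINDOW_SIZE: compute the delta
   against the previously acked value and credit/debit every stream. */
static void apply_initial_window_size(uint32_t new_value) {
  int64_t delta = (int64_t)new_value - (int64_t)acked_initial_window;
  for (int i = 0; i < 3; i++) {
    stream_windows[i] += delta; /* may go negative; writes stall until credited */
  }
  acked_initial_window = new_value;
}

int main(void) {
  apply_initial_window_size(16384); /* peer shrank the initial window */
  for (int i = 0; i < 3; i++) {
    printf("stream %d window: %lld\n", i, (long long)stream_windows[i]);
  }
  return 0;
}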

@@ -80,9 +80,8 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
}
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -94,8 +93,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
p->byte++;
}
if (stream_parsing != NULL) {
stream_parsing->stats.incoming.framing_bytes += (uint32_t)(end - cur);
if (s != NULL) {
s->stats.incoming.framing_bytes += (uint32_t)(end - cur);
}
if (p->byte == 4) {
@@ -109,17 +108,26 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
}
GPR_ASSERT(is_last);
if (transport_parsing->incoming_stream_id != 0) {
if (stream_parsing != NULL) {
GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", transport_parsing,
stream_parsing, outgoing_window,
if (t->incoming_stream_id != 0) {
if (s != NULL) {
bool was_zero = s->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", t, s, outgoing_window,
received_update);
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
bool is_zero = s->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"stream.read_flow_control");
}
}
} else {
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", transport_parsing,
outgoing_window, received_update);
bool was_zero = t->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", t, outgoing_window,
received_update);
bool is_zero = t->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_initiate_write(exec_ctx, t, false,
"new_global_flow_control");
}
}
}

@@ -51,8 +51,7 @@ gpr_slice grpc_chttp2_window_update_create(uint32_t id, uint32_t window_delta,
grpc_error *grpc_chttp2_window_update_parser_begin_frame(
grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */
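Note how the WINDOW_UPDATE parser above only re-initiates writing when the credited window crosses from exhausted (<= 0) back to positive; crediting an already-positive window changes nothing about schedulability. A small sketch of that edge-triggered check, with a bare int64 window standing in for the transport/stream fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Credit a flow-control window and report whether the owner just became
   writable again, i.e. the window crossed from <= 0 to > 0. */
static bool credit_window(int64_t *window, uint32_t received_update) {
  bool was_zero = *window <= 0;
  *window += received_update;
  bool is_zero = *window <= 0;
  return was_zero && !is_zero;
}

int main(void) {
  int64_t w = -100; /* window currently over-committed */
  if (credit_window(&w, 50)) printf("kick writes\n");  /* still <= 0: no kick */
  if (credit_window(&w, 200)) printf("kick writes\n"); /* crosses > 0: kick */
  printf("window now %lld\n", (long long)w);
  return 0;
}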

File diff suppressed because it is too large

@@ -45,7 +45,8 @@
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
typedef grpc_error *(*grpc_chttp2_hpack_parser_state)(
grpc_chttp2_hpack_parser *p, const uint8_t *beg, const uint8_t *end);
grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *beg,
const uint8_t *end);
typedef struct {
char *str;
@@ -55,7 +56,7 @@ typedef struct {
struct grpc_chttp2_hpack_parser {
/* user specified callback for each header output */
void (*on_header)(void *user_data, grpc_mdelem *md);
void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem *md);
void *on_header_user_data;
grpc_error *last_error;
@@ -104,15 +105,17 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
/* returns 1 on success, 0 on error */
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *beg,
const uint8_t *end);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
grpc_error *grpc_chttp2_header_parser_parse(
grpc_exec_ctx *exec_ctx, void *hpack_parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */
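All of the frame parsers above now share a single signature: (exec_ctx, parser_data, transport, stream, slice, is_last). The transport keeps the active parser as a function pointer plus opaque state and feeds it each payload slice of the frame being deframed. A stripped-down sketch of that dispatch, with stand-in types for the exec_ctx and slice machinery (hypothetical names, not the real headers):

#include <stddef.h>
#include <stdio.h>

typedef struct { int dummy; } exec_ctx;
typedef struct { const char *bytes; size_t len; } slice;
typedef struct transport transport;
typedef struct { int id; } stream;

/* Every frame parser is invoked with the same shape; parser_data points at
   its private state (e.g. a settings or window-update parser struct). */
typedef int (*frame_parser)(exec_ctx *ctx, void *parser_data, transport *t,
                            stream *s, slice sl, int is_last);

struct transport {
  frame_parser parser;  /* active parser for the frame being deframed */
  void *parser_data;
  stream *incoming_stream;
};

static int ping_parser(exec_ctx *ctx, void *pd, transport *t, stream *s,
                       slice sl, int is_last) {
  (void)ctx; (void)pd; (void)t; (void)s;
  printf("ping parser got %zu bytes (last=%d)\n", sl.len, is_last);
  return 0;
}

int main(void) {
  exec_ctx ctx = {0};
  transport t = {ping_parser, NULL, NULL};
  slice sl = {"\x01\x02\x03\x04\x05\x06\x07\x08", 8};
  /* The deframer hands each payload slice to whatever parser is active. */
  return t.parser(&ctx, t.parser_data, &t, t.incoming_stream, sl, 1);
}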

@@ -53,31 +53,25 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/transport_impl.h"
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
/* streams are kept in various linked lists depending on what things need to
happen to them... this enum labels each list */
typedef enum {
GRPC_CHTTP2_LIST_ALL_STREAMS,
GRPC_CHTTP2_LIST_CHECK_READ_OPS,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE,
GRPC_CHTTP2_LIST_WRITABLE,
GRPC_CHTTP2_LIST_WRITING,
GRPC_CHTTP2_LIST_WRITTEN,
GRPC_CHTTP2_LIST_PARSING_SEEN,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
/* streams waiting for the outgoing window in the writing path, they will be
* merged to the stalled list or writable list under transport lock. */
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
STREAM_LIST_COUNT /* must be last */
} grpc_chttp2_stream_list_id;
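Because a stream may need to sit on several of these lists at once, each grpc_chttp2_stream carries one link per list (links[STREAM_LIST_COUNT]) and an included[] flag, so membership checks and insertions are O(1) and allocation-free. A minimal sketch of that intrusive multi-list idea with two lists (toy types, not the real ones):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { LIST_WRITABLE, LIST_WRITING, NUM_LISTS };

typedef struct node {
  int id;
  struct node *next[NUM_LISTS]; /* one link per list the node can be on */
  bool included[NUM_LISTS];     /* fast "am I already on this list?" check */
} node;

typedef struct { node *head, *tail; } list;

static bool list_add_tail(list *l, node *n, int which) {
  if (n->included[which]) return false; /* already queued: nothing to do */
  n->included[which] = true;
  n->next[which] = NULL;
  if (l->tail) l->tail->next[which] = n; else l->head = n;
  l->tail = n;
  return true;
}

static node *list_pop(list *l, int which) {
  node *n = l->head;
  if (!n) return NULL;
  l->head = n->next[which];
  if (!l->head) l->tail = NULL;
  n->included[which] = false;
  return n;
}

int main(void) {
  list lists[NUM_LISTS] = {{NULL, NULL}, {NULL, NULL}};
  node a = {.id = 1}, b = {.id = 3};
  /* The same stream can sit on both lists simultaneously. */
  list_add_tail(&lists[LIST_WRITABLE], &a, LIST_WRITABLE);
  list_add_tail(&lists[LIST_WRITING], &a, LIST_WRITING);
  list_add_tail(&lists[LIST_WRITABLE], &b, LIST_WRITABLE);
  node *n;
  while ((n = list_pop(&lists[LIST_WRITABLE], LIST_WRITABLE)) != NULL) {
    printf("writable stream %d\n", n->id);
  }
  return 0;
}
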
typedef enum {
GRPC_CHTTP2_WRITE_STATE_IDLE,
GRPC_CHTTP2_WRITE_STATE_WRITING,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
} grpc_chttp2_write_state;
/* deframer state for the overall http2 stream of bytes */
typedef enum {
/* prefix: one entry per http2 connection prefix byte */
@@ -152,6 +146,12 @@ typedef struct grpc_chttp2_outstanding_ping {
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
typedef struct grpc_chttp2_write_cb {
int64_t call_at_byte;
grpc_closure *closure;
struct grpc_chttp2_write_cb *next;
} grpc_chttp2_write_cb;
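A grpc_chttp2_write_cb queues a closure to run once a given number of flow-controlled bytes for its stream have actually been written: call_at_byte records the target offset, and after each write the list is walked (see the update_list helper later in this diff), completing every callback whose offset has been reached and recycling the node through write_cb_pool. A compact sketch of the same idea, with a plain function pointer standing in for grpc_closure and malloc/free in place of the pool (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct write_cb {
  int64_t call_at_byte; /* complete once this many bytes are written */
  void (*fn)(const char *what);
  struct write_cb *next;
} write_cb;

typedef struct {
  int64_t flow_controlled_bytes_written;
  write_cb *on_write_finished_cbs;
} stream;

static void cb_fn(const char *what) { printf("completed: %s\n", what); }

static void add_cb(stream *s, int64_t at_byte) {
  write_cb *cb = malloc(sizeof(*cb));
  cb->call_at_byte = at_byte;
  cb->fn = cb_fn;
  cb->next = s->on_write_finished_cbs;
  s->on_write_finished_cbs = cb;
}

/* Account for send_bytes just written, firing any callbacks now satisfied
   and re-queuing the rest. */
static void update_list(stream *s, int64_t send_bytes) {
  s->flow_controlled_bytes_written += send_bytes;
  write_cb *cb = s->on_write_finished_cbs;
  s->on_write_finished_cbs = NULL;
  while (cb) {
    write_cb *next = cb->next;
    if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
      cb->fn("write callback");
      free(cb); /* the real code recycles the node via a pool */
    } else {
      cb->next = s->on_write_finished_cbs; /* not yet reached: keep queued */
      s->on_write_finished_cbs = cb;
    }
    cb = next;
  }
}

int main(void) {
  stream s = {0, NULL};
  add_cb(&s, 10);
  add_cb(&s, 100);
  update_list(&s, 16);  /* fires the 10-byte callback only */
  update_list(&s, 200); /* fires the remaining one */
  return 0;
}
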
/* forward declared in frame_data.h */
struct grpc_chttp2_incoming_byte_stream {
grpc_byte_stream base;
@@ -161,12 +161,13 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_chttp2_transport *transport;
grpc_chttp2_stream *stream;
int is_tail;
bool is_tail;
gpr_mu slice_mu; // protects slices, on_next
gpr_slice_buffer slices;
grpc_closure *on_next;
gpr_slice *next;
uint32_t remaining_bytes;
struct {
grpc_closure closure;
@@ -178,12 +179,68 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_closure finished_action;
};
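The incoming byte stream hands data up asynchronously: the reader parks an on_next callback while the parser pushes slices under slice_mu, and whichever side arrives second completes the hand-off. A minimal single-pending-read sketch of that rendezvous, using a pthread mutex and one buffered chunk instead of a slice buffer (simplified model, not the real API):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*on_next_cb)(const char *data, size_t len);

typedef struct {
  pthread_mutex_t mu;  /* protects pending, on_next */
  const char *pending; /* one buffered chunk, or NULL */
  size_t pending_len;
  on_next_cb on_next;  /* one parked reader, or NULL */
} byte_stream;

/* Parser side: deliver a chunk; complete a parked reader or buffer it. */
static void stream_push(byte_stream *bs, const char *data, size_t len) {
  pthread_mutex_lock(&bs->mu);
  if (bs->on_next != NULL) {
    on_next_cb cb = bs->on_next;
    bs->on_next = NULL;
    pthread_mutex_unlock(&bs->mu);
    cb(data, len);
    return;
  }
  bs->pending = data;
  bs->pending_len = len;
  pthread_mutex_unlock(&bs->mu);
}

/* Reader side: take buffered data now, or park the callback for later. */
static void stream_next(byte_stream *bs, on_next_cb cb) {
  pthread_mutex_lock(&bs->mu);
  if (bs->pending != NULL) {
    const char *data = bs->pending;
    size_t len = bs->pending_len;
    bs->pending = NULL;
    pthread_mutex_unlock(&bs->mu);
    cb(data, len);
    return;
  }
  bs->on_next = cb;
  pthread_mutex_unlock(&bs->mu);
}

static void print_cb(const char *data, size_t len) {
  printf("got %zu bytes: %.*s\n", len, (int)len, data);
}

int main(void) {
  byte_stream bs = {PTHREAD_MUTEX_INITIALIZER, NULL, 0, NULL};
  stream_next(&bs, print_cb);   /* reader first: callback is parked */
  stream_push(&bs, "hello", 5); /* parser delivers: callback fires now */
  stream_push(&bs, "world", 5); /* no reader yet: chunk is buffered */
  stream_next(&bs, print_cb);   /* reader drains the buffered chunk */
  return 0;
}
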
typedef struct {
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
grpc_endpoint *ep;
char *peer_string;
grpc_combiner *combiner;
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
uint8_t closed;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
/** maps stream id to grpc_chttp2_stream objects */
grpc_chttp2_stream_map stream_map;
grpc_closure write_action_begin_locked;
grpc_closure write_action;
grpc_closure write_action_end;
grpc_closure write_action_end_locked;
grpc_closure read_action_begin;
grpc_closure read_action_locked;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
grpc_chttp2_stream **accepting_stream;
struct {
/* accept stream callback */
void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
/** connectivity tracking */
grpc_connectivity_state_tracker state_tracker;
} channel_callback;
/** data to write now */
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
int64_t outgoing_window;
/** is this a client? */
uint8_t is_client;
/** data to write next write */
gpr_slice_buffer qbuf;
/** window available for us to send to peer */
int64_t outgoing_window;
/** window available to announce to peer */
int64_t announce_incoming_window;
/** how much window would we like to have for incoming_window */
@@ -194,8 +251,6 @@ typedef struct {
/** have we sent a goaway */
uint8_t sent_goaway;
/** is this transport a client? */
uint8_t is_client;
/** are the local settings dirty and need to be sent? */
uint8_t dirtied_local_settings;
/** have local settings been sent? */
@@ -212,49 +267,14 @@ typedef struct {
/** how far to lookahead in a stream? */
uint32_t stream_lookahead;
/** last received stream id */
uint32_t last_incoming_stream_id;
/** last new stream id */
uint32_t last_new_stream_id;
/** pings awaiting responses */
grpc_chttp2_outstanding_ping pings;
/** next payload for an outgoing ping */
uint64_t ping_counter;
/** concurrent stream count: updated when not parsing,
so this is a strict over-estimation on the client */
uint32_t concurrent_stream_count;
} grpc_chttp2_transport_global;
typedef struct {
/** data to write now */
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
int64_t outgoing_window;
/** is this a client? */
uint8_t is_client;
/** callback for when writing is done */
grpc_closure done_cb;
/** maximum frame size */
uint32_t max_frame_size;
} grpc_chttp2_transport_writing;
struct grpc_chttp2_transport_parsing {
/** is this transport a client? (boolean) */
uint8_t is_client;
/** were settings updated? */
uint8_t settings_updated;
/** was a settings ack received? */
uint8_t settings_ack_received;
/** was a goaway frame received? */
uint8_t goaway_received;
/** initial window change */
int64_t initial_window_update;
/** data to write later - after parsing */
gpr_slice_buffer qbuf;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
/** simple one shot parsers */
@@ -267,13 +287,12 @@ struct grpc_chttp2_transport_parsing {
/** parser for goaway frames */
grpc_chttp2_goaway_parser goaway_parser;
/** initial window change */
int64_t initial_window_update;
/** window available for peer to send to us */
int64_t incoming_window;
/** next stream id available at the time of beginning parsing */
uint32_t next_stream_id;
uint32_t last_incoming_stream_id;
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
uint8_t incoming_frame_type;
@@ -284,135 +303,42 @@ struct grpc_chttp2_transport_parsing {
uint32_t incoming_frame_size;
uint32_t incoming_stream_id;
/* current max frame size */
uint32_t max_frame_size;
/* active parser */
void *parser_data;
grpc_chttp2_stream_parsing *incoming_stream;
grpc_chttp2_stream *incoming_stream;
grpc_error *(*parser)(grpc_exec_ctx *exec_ctx, void *parser_user_data,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
/* received settings */
uint32_t settings[GRPC_CHTTP2_NUM_SETTINGS];
/* last settings that were sent */
uint32_t last_sent_settings[GRPC_CHTTP2_NUM_SETTINGS];
/* goaway data */
grpc_status_code goaway_error;
uint32_t goaway_last_stream_index;
gpr_slice goaway_text;
int64_t outgoing_window;
grpc_chttp2_write_cb *write_cb_pool;
/* if non-NULL, close the transport with this error when writes are finished
*/
grpc_error *close_transport_on_writes_finished;
};
typedef enum {
/** no writing activity allowed */
GRPC_CHTTP2_WRITES_CORKED,
/** no writing activity */
GRPC_CHTTP2_WRITING_INACTIVE,
/** write has been requested and scheduled against the workqueue */
GRPC_CHTTP2_WRITE_SCHEDULED,
/** write has been initiated after being reaped from the workqueue */
GRPC_CHTTP2_WRITING,
/** write has been initiated, AND another write needs to be started once it's
done */
GRPC_CHTTP2_WRITING_STALE_WITH_POLLER,
GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
} grpc_chttp2_write_state;
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
grpc_endpoint *ep;
char *peer_string;
/** when this drops to zero it's safe to shutdown the endpoint */
gpr_refcount shutdown_ep_refs;
struct {
grpc_combiner *combiner;
/** is a thread currently in the global lock */
bool global_active;
/** is a thread currently parsing */
bool parsing_active;
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** has a check_read_ops been scheduled */
bool check_read_ops_scheduled;
} executor;
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
uint8_t closed;
GRPC_METADATA_NOT_PUBLISHED,
GRPC_METADATA_SYNTHESIZED_FROM_FAKE,
GRPC_METADATA_PUBLISHED_FROM_WIRE,
GPRC_METADATA_PUBLISHED_AT_CLOSE
} grpc_published_metadata_method;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
/** global state for reading/writing */
grpc_chttp2_transport_global global;
/** state only accessible by the chain of execution that
set writing_state >= GRPC_WRITING, and only by the writing closure
chain. */
grpc_chttp2_transport_writing writing;
/** state only accessible by the chain of execution that
set parsing_active=1 */
grpc_chttp2_transport_parsing parsing;
/** maps stream id to grpc_chttp2_stream objects;
owned by the parsing thread when parsing */
grpc_chttp2_stream_map parsing_stream_map;
/** streams created by the client (possibly during parsing);
merged with parsing_stream_map during unlock when no
parsing is occurring */
grpc_chttp2_stream_map new_stream_map;
/** closure to execute writing */
grpc_closure writing_action;
/** closure to start reading from the endpoint */
grpc_closure reading_action;
grpc_closure reading_action_locked;
grpc_closure post_parse_locked;
/** closure to actually do parsing */
grpc_closure parsing_action;
/** closure to initiate writing */
grpc_closure initiate_writing;
/** closure to finish writing */
grpc_closure terminate_writing;
/** closure to flush read state up the stack */
grpc_closure initiate_read_flush_locked;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
grpc_chttp2_stream **accepting_stream;
struct {
/* accept stream callback */
void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
/** connectivity tracking */
grpc_connectivity_state_tracker state_tracker;
} channel_callback;
grpc_closure destroy_stream;
void *destroy_stream_arg;
/** Transport op to be applied post-parsing */
grpc_transport_op *post_parsing_op;
};
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
@@ -422,20 +348,22 @@ typedef struct {
As the upper layer offers more bytes, this value increases.
As bytes are read, this value decreases. */
uint32_t max_recv_bytes;
/** The number of bytes the upper layer has offered to read but we have
not yet announced to HTTP2 flow control.
As the upper layers offer to read more bytes, this value increases.
As we advertise incoming flow control window, this value decreases. */
uint32_t unannounced_incoming_window_for_parse;
uint32_t unannounced_incoming_window_for_writing;
/** things the upper layers would like to send */
grpc_metadata_batch *send_initial_metadata;
grpc_closure *send_initial_metadata_finished;
grpc_byte_stream *send_message;
grpc_closure *send_message_finished;
grpc_metadata_batch *send_trailing_metadata;
grpc_closure *send_trailing_metadata_finished;
grpc_byte_stream *fetching_send_message;
uint32_t fetched_send_message_length;
gpr_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
bool complete_fetch_covered_by_poller;
grpc_closure complete_fetch;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *recv_initial_metadata_ready;
grpc_byte_stream **recv_message;
@@ -455,95 +383,44 @@ typedef struct {
bool read_closed;
/** Are all published incoming byte streams closed. */
bool all_incoming_byte_streams_finished;
/** Is this stream in the stream map. */
bool in_stream_map;
/** Has this stream seen an error.
If true, then pending incoming frames can be thrown away. */
bool seen_error;
bool exceeded_metadata_size;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
/** the error that resulted in this stream being write-closed */
grpc_error *write_closed_error;
bool published_initial_metadata;
bool published_trailing_metadata;
grpc_published_metadata_method published_metadata[2];
bool final_metadata_requested;
grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
grpc_chttp2_incoming_frame_queue incoming_frames;
gpr_timespec deadline;
} grpc_chttp2_stream_global;
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
uint8_t fetching;
bool sent_initial_metadata;
uint8_t sent_message;
uint8_t sent_trailing_metadata;
uint8_t read_closed;
/** send this initial metadata */
grpc_metadata_batch *send_initial_metadata;
grpc_byte_stream *send_message;
grpc_metadata_batch *send_trailing_metadata;
int64_t outgoing_window;
/** how much window should we announce? */
uint32_t announce_window;
gpr_slice_buffer flow_controlled_buffer;
gpr_slice fetching_slice;
size_t stream_fetched;
grpc_closure finished_fetch;
/** stats gathered during the write */
grpc_transport_one_way_stats stats;
} grpc_chttp2_stream_writing;
struct grpc_chttp2_stream_parsing {
/** saw some stream level error */
grpc_error *forced_close_error;
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
/** has this stream received a close */
uint8_t received_close;
/** how many header frames have we received? */
uint8_t header_frames_received;
/** which metadata did we get (on this parse) */
uint8_t got_metadata_on_parse[2];
/** should we raise the seen_error flag in transport_global */
bool seen_error;
bool exceeded_metadata_size;
/** window available for peer to send to us */
int64_t incoming_window;
/** parsing state for data frames */
grpc_chttp2_data_parser data_parser;
/** amount of window given */
int64_t outgoing_window;
/** number of bytes received - reset at end of parse thread execution */
int64_t received_bytes;
/** stats gathered during the parse */
grpc_transport_stream_stats stats;
/** incoming metadata */
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
};
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
grpc_chttp2_stream_global global;
grpc_chttp2_stream_writing writing;
grpc_chttp2_stream_parsing parsing;
grpc_closure init_stream;
grpc_closure destroy_stream;
void *destroy_stream_arg;
bool sent_initial_metadata;
bool sent_trailing_metadata;
/** how much window should we announce? */
uint32_t announce_window;
gpr_slice_buffer flow_controlled_buffer;
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
};
/** Transport writing call flow:
@@ -559,162 +436,71 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason);
/** Someone is unlocking the transport mutex: check to see if writes
are required, and schedule them if so */
int grpc_chttp2_unlocking_check_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint);
void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
void *transport_writing, grpc_error *error);
void grpc_chttp2_cleanup_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_prepare_to_read(grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
are required, and frame them if so */
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
/** Process one slice of incoming data; return 1 if the connection is still
viable after reading, or 0 if the connection should be torn down */
grpc_error *grpc_chttp2_perform_read(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice);
void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, gpr_slice slice);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
/** Get a writable stream
returns non-zero if there was a stream available */
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) GRPC_MUST_USE_RESULT;
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing);
int grpc_chttp2_list_pop_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_add_written_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing);
int grpc_chttp2_list_pop_parsing_seen_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_add_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_check_read_ops(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
bool grpc_chttp2_list_remove_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
bool grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing);
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_remove_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
grpc_chttp2_transport_parsing *transport_parsing, uint32_t id);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
uint32_t id);
void grpc_chttp2_add_incoming_goaway(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
uint32_t goaway_error, gpr_slice goaway_text);
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
/* returns 1 if this is the last stream, 0 otherwise */
int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
int grpc_chttp2_has_streams(grpc_chttp2_transport *t);
void grpc_chttp2_for_all_streams(
grpc_chttp2_transport_global *transport_global, void *user_data,
void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
grpc_chttp2_stream_global *stream_global));
void grpc_chttp2_parsing_become_skip_parser(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
void grpc_chttp2_complete_closure_step(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_closure **pclosure,
grpc_error *error);
grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t);
int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
uint32_t id);
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t id);
void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t goaway_error,
gpr_slice goaway_text);
void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_closure **pclosure,
grpc_error *error, const char *desc);
#define GRPC_CHTTP2_CLIENT_CONNECT_STRING "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
@@ -805,57 +591,83 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
const char *var2, int is_client,
uint32_t stream_id, int64_t val1, int64_t val2);
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream,
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *stream,
grpc_status_code status, gpr_slice *details);
void grpc_chttp2_mark_stream_closed(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, int close_reads, int close_writes,
grpc_error *error);
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int close_reads,
int close_writes, grpc_error *error);
void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global);
grpc_chttp2_transport *t);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global, reason)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
const char *reason);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global,
#define GRPC_CHTTP2_STREAM_REF(stream, reason) \
grpc_chttp2_stream_ref(stream, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream, reason)
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
const char *reason);
#else
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global);
#define GRPC_CHTTP2_STREAM_REF(stream, reason) grpc_chttp2_stream_ref(stream)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream)
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
#endif
//#define GRPC_CHTTP2_REFCOUNTING_DEBUG 1
#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) \
grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
grpc_chttp2_unref_transport(cl, t, r, __FILE__, __LINE__)
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, const char *reason,
const char *file, int line);
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
const char *file, int line);
#else
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) grpc_chttp2_unref_transport(cl, t)
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t);
#endif
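Both the stream and transport ref/unref helpers above follow the same conditional-compilation pattern: when the debug symbol is defined, the macro forwards a reason string (and, for the transport, __FILE__/__LINE__) to a traced variant, while release builds compile the extra arguments away entirely. A small self-contained sketch of that macro shape on a toy object:

#include <stdio.h>

typedef struct { int refs; } obj;

/* Define REFCOUNT_DEBUG to get a traced variant; without it the macro
   collapses to a bare increment with no per-callsite overhead. */
#define REFCOUNT_DEBUG 1

#ifdef REFCOUNT_DEBUG
#define OBJ_REF(o, reason) obj_ref(o, reason, __FILE__, __LINE__)
static void obj_ref(obj *o, const char *reason, const char *file, int line) {
  o->refs++;
  fprintf(stderr, "%s:%d ref %p -> %d (%s)\n", file, line, (void *)o, o->refs,
          reason);
}
#else
#define OBJ_REF(o, reason) obj_ref(o)
static void obj_ref(obj *o) { o->refs++; }
#endif

int main(void) {
  obj o = {1};
  OBJ_REF(&o, "example"); /* in debug builds this logs file, line and reason */
  return 0;
}
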
grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
uint32_t frame_size, uint32_t flags);
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
gpr_slice slice);
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
grpc_error *error, int from_parsing_thread);
grpc_error *error);
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *parsing,
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const uint8_t *opaque_8bytes);
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
bool covered_by_poller, const char *reason);
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, bool covered_by_poller,
const char *reason);
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_error *due_to_error);
void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
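The outgoing_window fields declared above (one on the transport, one per stream) jointly bound how much DATA a stream may emit in a single write: the writer clamps to the minimum of the acked max frame size, the stream window and the transport window, then debits both windows by the bytes actually framed, as the GRPC_CHTTP2_FLOW_DEBIT_* calls later in this diff do. A small arithmetic sketch of that clamp (illustrative names):

#include <stdint.h>
#include <stdio.h>

#define MIN64(a, b) ((a) < (b) ? (a) : (b))

/* Decide how many flow-controlled bytes a stream may send right now and
   debit both windows accordingly. Returns the number of bytes to frame. */
static uint32_t take_send_quota(int64_t *transport_window,
                                int64_t *stream_window,
                                uint32_t max_frame_size, uint32_t buffered) {
  int64_t allowed = MIN64((int64_t)max_frame_size,
                          MIN64(*stream_window, *transport_window));
  if (allowed <= 0) return 0; /* stalled by stream or transport flow control */
  uint32_t send_bytes = (uint32_t)MIN64(allowed, (int64_t)buffered);
  *stream_window -= send_bytes;
  *transport_window -= send_bytes;
  return send_bytes;
}

int main(void) {
  int64_t t_window = 65535, s_window = 5000;
  uint32_t n = take_send_quota(&t_window, &s_window, 16384, 20000);
  printf("send %u bytes; stream window %lld, transport window %lld\n", n,
         (long long)s_window, (long long)t_window);
  return 0;
}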

File diff suppressed because it is too large

@@ -35,27 +35,6 @@
#include <grpc/support/log.h>
#define TRANSPORT_FROM_GLOBAL(tg) \
((grpc_chttp2_transport *)((char *)(tg)-offsetof(grpc_chttp2_transport, \
global)))
#define STREAM_FROM_GLOBAL(sg) \
((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
#define TRANSPORT_FROM_WRITING(tw) \
((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \
writing)))
#define STREAM_FROM_WRITING(sw) \
((grpc_chttp2_stream *)((char *)(sw)-offsetof(grpc_chttp2_stream, writing)))
#define TRANSPORT_FROM_PARSING(tp) \
((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
parsing)))
#define STREAM_FROM_PARSING(sp) \
((grpc_chttp2_stream *)((char *)(sp)-offsetof(grpc_chttp2_stream, parsing)))
/* core list management */
static int stream_list_empty(grpc_chttp2_transport *t,
@@ -139,321 +118,57 @@ static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
/* wrappers for specializations */
bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
return stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE);
}
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WRITABLE);
if (r != 0) {
*stream_global = &stream->global;
*stream_writing = &stream->writing;
}
return r;
}
bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE);
}
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
GPR_ASSERT(stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITING));
}
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing) {
return !stream_list_empty(TRANSPORT_FROM_WRITING(transport_writing),
GRPC_CHTTP2_LIST_WRITING);
}
int grpc_chttp2_list_pop_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITING);
if (r != 0) {
*stream_writing = &stream->writing;
}
return r;
}
void grpc_chttp2_list_add_written_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITTEN);
}
int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITTEN);
if (r != 0) {
*stream_global = &stream->global;
*stream_writing = &stream->writing;
}
return r;
}
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(
TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r =
stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
}
return r;
}
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing) {
stream_list_add(TRANSPORT_FROM_PARSING(transport_parsing),
STREAM_FROM_PARSING(stream_parsing),
GRPC_CHTTP2_LIST_PARSING_SEEN);
}
int grpc_chttp2_list_pop_parsing_seen_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_PARSING(transport_parsing), &stream,
GRPC_CHTTP2_LIST_PARSING_SEEN);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
}
return r;
}
void grpc_chttp2_list_add_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_check_read_ops(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
if (!t->executor.check_read_ops_scheduled) {
grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
&t->initiate_read_flush_locked,
GRPC_ERROR_NONE, false);
t->executor.check_read_ops_scheduled = true;
}
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
bool grpc_chttp2_list_remove_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
GPR_ASSERT(s->id != 0);
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
grpc_chttp2_stream *stream = STREAM_FROM_WRITING(stream_writing);
gpr_log(GPR_DEBUG, "writing stalled %d", stream->global.id);
if (!stream->included[GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT]) {
GRPC_CHTTP2_STREAM_REF(&stream->global, "chttp2_writing_stalled");
}
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing), stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT);
int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
bool grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream *stream;
bool out = false;
grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing);
while (stream_list_pop(transport, &stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) {
gpr_log(GPR_DEBUG, "move %d from writing stalled to just stalled",
stream->global.id);
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
&stream->writing);
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &stream->global,
"chttp2_writing_stalled");
out = true;
}
return out;
bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
gpr_log(GPR_DEBUG, "stalled %d", stream_writing->id);
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITING);
}
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) {
return !stream_list_empty(t, GRPC_CHTTP2_LIST_WRITING);
}
void grpc_chttp2_list_remove_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITING);
}
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add_tail(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
return stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
int grpc_chttp2_has_streams(grpc_chttp2_transport *t) {
return !stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
void grpc_chttp2_for_all_streams(
grpc_chttp2_transport_global *transport_global, void *user_data,
void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
grpc_chttp2_stream_global *stream_global)) {
grpc_chttp2_stream *s;
grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
for (s = t->lists[GRPC_CHTTP2_LIST_ALL_STREAMS].head; s != NULL;
s = s->links[GRPC_CHTTP2_LIST_ALL_STREAMS].next) {
cb(transport_global, user_data, &s->global);
}
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}

@@ -77,6 +77,7 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
GPR_ASSERT(count == 0 || keys[count - 1] < key);
GPR_ASSERT(value);
GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
if (count == capacity) {
if (map->free > capacity / 4) {
@@ -96,40 +97,6 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
map->count = count + 1;
}
void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
grpc_chttp2_stream_map *dst) {
/* if src is empty we dont need to do anything */
if (src->count == src->free) {
return;
}
/* if dst is empty we simply need to swap */
if (dst->count == dst->free) {
GPR_SWAP(grpc_chttp2_stream_map, *src, *dst);
return;
}
/* the first element of src must be greater than the last of dst...
* however the maps may need compacting for this property to hold */
if (src->keys[0] <= dst->keys[dst->count - 1]) {
src->count = compact(src->keys, src->values, src->count);
src->free = 0;
dst->count = compact(dst->keys, dst->values, dst->count);
dst->free = 0;
}
GPR_ASSERT(src->keys[0] > dst->keys[dst->count - 1]);
/* if dst doesn't have capacity, resize */
if (dst->count + src->count > dst->capacity) {
dst->capacity = GPR_MAX(dst->capacity * 3 / 2, dst->count + src->count);
dst->keys = gpr_realloc(dst->keys, dst->capacity * sizeof(uint32_t));
dst->values = gpr_realloc(dst->values, dst->capacity * sizeof(void *));
}
memcpy(dst->keys + dst->count, src->keys, src->count * sizeof(uint32_t));
memcpy(dst->values + dst->count, src->values, src->count * sizeof(void *));
dst->count += src->count;
dst->free += src->free;
src->count = 0;
src->free = 0;
}
static void **find(grpc_chttp2_stream_map *map, uint32_t key) {
size_t min_idx = 0;
size_t max_idx = map->count;
@@ -170,6 +137,7 @@ void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key) {
if (map->free == map->count) {
map->free = map->count = 0;
}
GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
}
return out;
}

@@ -65,10 +65,6 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
or NULL otherwise */
void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key);
/* Move all elements of src into dst */
void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
grpc_chttp2_stream_map *dst);
/* Return an existing key, or NULL if it does not exist */
void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key);
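grpc_chttp2_stream_map keeps parallel, id-sorted key/value arrays: ids are only ever added in increasing order (hence the assertion in stream_map.c above), lookups are a binary search, and deletions just null out the value and count a free slot until a later compaction. A minimal sketch of the lookup side under those assumptions (toy struct, not the real one):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint32_t *keys; /* strictly increasing stream ids */
  void **values;  /* NULL for lazily deleted entries */
  size_t count;
} stream_map;

/* Binary-search the sorted key array; returns NULL if absent or deleted. */
static void *map_find(stream_map *map, uint32_t key) {
  size_t lo = 0, hi = map->count;
  while (lo < hi) {
    size_t mid = lo + (hi - lo) / 2;
    if (map->keys[mid] == key) return map->values[mid];
    if (map->keys[mid] < key) lo = mid + 1; else hi = mid;
  }
  return NULL;
}

int main(void) {
  uint32_t keys[] = {1, 3, 5, 7};
  int a = 10, c = 30, d = 40;
  void *values[] = {&a, NULL /* id 3 deleted */, &c, &d};
  stream_map map = {keys, values, 4};
  printf("id 5 -> %d\n", *(int *)map_find(&map, 5));
  printf("id 3 present? %s\n", map_find(&map, 3) ? "yes" : "no");
  return 0;
}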

@@ -40,349 +40,221 @@
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing);
static void add_to_write_list(grpc_chttp2_write_cb **list,
grpc_chttp2_write_cb *cb) {
cb->next = *list;
*list = cb;
}
static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb,
grpc_error *error) {
grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error,
"finish_write_cb");
cb->next = t->write_cb_pool;
t->write_cb_pool = cb;
}
int grpc_chttp2_unlocking_check_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_writing *stream_writing;
static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int64_t send_bytes,
grpc_chttp2_write_cb **list, grpc_error *error) {
grpc_chttp2_write_cb *cb = *list;
*list = NULL;
s->flow_controlled_bytes_written += send_bytes;
while (cb) {
grpc_chttp2_write_cb *next = cb->next;
if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
} else {
add_to_write_list(list, cb);
}
cb = next;
}
GRPC_ERROR_UNREF(error);
}
GPR_TIMER_BEGIN("grpc_chttp2_unlocking_check_writes", 0);
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
transport_writing->max_frame_size =
transport_global->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
if (transport_global->dirtied_local_settings &&
!transport_global->sent_local_settings) {
if (t->dirtied_local_settings && !t->sent_local_settings) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
&t->outbuf,
grpc_chttp2_settings_create(
transport_global->settings[GRPC_SENT_SETTINGS],
transport_global->settings[GRPC_LOCAL_SETTINGS],
transport_global->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
transport_global->force_send_settings = 0;
transport_global->dirtied_local_settings = 0;
transport_global->sent_local_settings = 1;
t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS],
t->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
t->force_send_settings = 0;
t->dirtied_local_settings = 0;
t->sent_local_settings = 1;
}
/* simple writes are queued to qbuf, and flushed here */
gpr_slice_buffer_move_into(&transport_global->qbuf,
&transport_writing->outbuf);
GPR_ASSERT(transport_global->qbuf.count == 0);
gpr_slice_buffer_move_into(&t->qbuf, &t->outbuf);
GPR_ASSERT(t->qbuf.count == 0);
grpc_chttp2_hpack_compressor_set_max_table_size(
&transport_writing->hpack_compressor,
transport_global->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
&t->hpack_compressor,
t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
transport_global, outgoing_window);
if (transport_writing->outgoing_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
&stream_global)) {
grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
false, "transport.read_flow_control");
if (t->outgoing_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"transport.read_flow_control");
}
}
/* for each grpc_chttp2_stream that's become writable, frame it's data
(according to available window sizes) and add to the output buffer */
while (grpc_chttp2_list_pop_writable_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
bool sent_initial_metadata = stream_writing->sent_initial_metadata;
bool become_writable = false;
while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
bool sent_initial_metadata = s->sent_initial_metadata;
bool now_writing = false;
stream_writing->id = stream_global->id;
stream_writing->read_closed = stream_global->read_closed;
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t,
t->is_client ? "CLIENT" : "SERVER", s->id, sent_initial_metadata,
s->send_initial_metadata != NULL, s->announce_window));
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_writing, stream_writing,
outgoing_window, stream_global,
outgoing_window);
if (!sent_initial_metadata && stream_global->send_initial_metadata) {
stream_writing->send_initial_metadata =
stream_global->send_initial_metadata;
stream_global->send_initial_metadata = NULL;
become_writable = true;
/* send initial metadata if it's available */
if (!sent_initial_metadata && s->send_initial_metadata) {
grpc_chttp2_encode_header(
&t->hpack_compressor, s->id, s->send_initial_metadata, 0,
t->settings[GRPC_ACKED_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
&s->stats.outgoing, &t->outbuf);
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
now_writing = true;
}
/* send any window updates */
if (s->announce_window > 0) {
uint32_t announce = s->announce_window;
gpr_slice_buffer_add(&t->outbuf,
grpc_chttp2_window_update_create(
s->id, s->announce_window, &s->stats.outgoing));
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, announce_window, announce);
}
if (sent_initial_metadata) {
if (stream_global->send_message != NULL) {
gpr_slice hdr = gpr_slice_malloc(5);
uint8_t *p = GPR_SLICE_START_PTR(hdr);
uint32_t len = stream_global->send_message->length;
GPR_ASSERT(stream_writing->send_message == NULL);
p[0] = (stream_global->send_message->flags &
GRPC_WRITE_INTERNAL_COMPRESS) != 0;
p[1] = (uint8_t)(len >> 24);
p[2] = (uint8_t)(len >> 16);
p[3] = (uint8_t)(len >> 8);
p[4] = (uint8_t)(len);
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer, hdr);
if (stream_global->send_message->length > 0) {
stream_writing->send_message = stream_global->send_message;
} else {
stream_writing->send_message = NULL;
/* send any body bytes, if allowed by flow control */
if (s->flow_controlled_buffer.length > 0) {
uint32_t max_outgoing =
(uint32_t)GPR_MIN(t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
GPR_MIN(s->outgoing_window, t->outgoing_window));
if (max_outgoing > 0) {
uint32_t send_bytes =
(uint32_t)GPR_MIN(max_outgoing, s->flow_controlled_buffer.length);
bool is_last_data_frame =
s->fetching_send_message == NULL &&
send_bytes == s->flow_controlled_buffer.length;
bool is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, send_bytes,
is_last_frame, &s->stats.outgoing,
&t->outbuf);
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, outgoing_window,
send_bytes);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, outgoing_window,
send_bytes);
if (is_last_frame) {
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_CHTTP2_NO_ERROR,
&s->stats.outgoing));
}
}
s->sending_bytes += send_bytes;
now_writing = true;
if (s->flow_controlled_buffer.length > 0) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
} else if (t->outgoing_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
}
stream_writing->stream_fetched = 0;
stream_global->send_message = NULL;
}
if ((stream_writing->send_message != NULL ||
stream_writing->flow_controlled_buffer.length > 0) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
become_writable = true;
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0) {
if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
&s->stats.outgoing, &t->outbuf);
} else {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_encode_header(
&t->hpack_compressor, s->id, s->send_trailing_metadata, true,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
&s->stats.outgoing, &t->outbuf);
}
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
gpr_slice_buffer_add(
&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_CHTTP2_NO_ERROR, &s->stats.outgoing));
}
now_writing = true;
}
if (stream_global->send_trailing_metadata) {
stream_writing->send_trailing_metadata =
stream_global->send_trailing_metadata;
stream_global->send_trailing_metadata = NULL;
become_writable = true;
}
}
if (!stream_global->read_closed &&
stream_global->unannounced_incoming_window_for_writing > 1024) {
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_global, stream_writing,
announce_window, stream_global,
unannounced_incoming_window_for_writing);
become_writable = true;
}
if (become_writable) {
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
if (now_writing) {
if (!grpc_chttp2_list_add_writing_stream(t, s)) {
/* already in writing list: drop ref */
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
}
} else {
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write");
}
}
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
if (transport_global->announce_incoming_window > 0) {
uint32_t announced = (uint32_t)GPR_MIN(
transport_global->announce_incoming_window, UINT32_MAX);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_global,
announce_incoming_window, announced);
if (t->announce_incoming_window > 0) {
uint32_t announced =
(uint32_t)GPR_MIN(t->announce_incoming_window, UINT32_MAX);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, announce_incoming_window,
announced);
grpc_transport_one_way_stats throwaway_stats;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(0, announced, &throwaway_stats));
}
GPR_TIMER_END("grpc_chttp2_unlocking_check_writes", 0);
return transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing);
}
void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint) {
GPR_ASSERT(transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing));
finalize_outbuf(exec_ctx, transport_writing);
GPR_ASSERT(endpoint);
if (transport_writing->outbuf.count > 0) {
grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
&transport_writing->done_cb);
} else {
grpc_exec_ctx_sched(exec_ctx, &transport_writing->done_cb, GRPC_ERROR_NONE,
NULL);
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_window_update_create(
0, announced, &throwaway_stats));
}
}
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
GPR_TIMER_BEGIN("finalize_outbuf", 0);
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
bool is_first_data_frame = true;
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
uint32_t max_outgoing =
(uint32_t)GPR_MIN(transport_writing->max_frame_size,
GPR_MIN(stream_writing->outgoing_window,
transport_writing->outgoing_window));
/* send initial metadata if it's available */
if (stream_writing->send_initial_metadata != NULL) {
grpc_chttp2_encode_header(
&transport_writing->hpack_compressor, stream_writing->id,
stream_writing->send_initial_metadata, 0,
transport_writing->max_frame_size, &stream_writing->stats,
&transport_writing->outbuf);
stream_writing->send_initial_metadata = NULL;
stream_writing->sent_initial_metadata = 1;
}
/* send any window updates */
if (stream_writing->announce_window > 0 &&
stream_writing->send_initial_metadata == NULL) {
uint32_t announce = stream_writing->announce_window;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(stream_writing->id,
stream_writing->announce_window,
&stream_writing->stats));
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing, stream_writing,
announce_window, announce);
stream_writing->announce_window = 0;
}
/* fetch any body bytes */
while (!stream_writing->fetching && stream_writing->send_message &&
stream_writing->flow_controlled_buffer.length < max_outgoing &&
stream_writing->stream_fetched <
stream_writing->send_message->length) {
if (grpc_byte_stream_next(exec_ctx, stream_writing->send_message,
&stream_writing->fetching_slice, max_outgoing,
&stream_writing->finished_fetch)) {
stream_writing->stream_fetched +=
GPR_SLICE_LENGTH(stream_writing->fetching_slice);
if (stream_writing->stream_fetched ==
stream_writing->send_message->length) {
stream_writing->send_message = NULL;
}
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer,
stream_writing->fetching_slice);
} else {
stream_writing->fetching = 1;
}
}
/* send any body bytes */
if (stream_writing->flow_controlled_buffer.length > 0) {
if (max_outgoing > 0) {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, stream_writing->flow_controlled_buffer.length);
int is_last_data_frame =
stream_writing->send_message == NULL &&
send_bytes == stream_writing->flow_controlled_buffer.length;
int is_last_frame = is_last_data_frame &&
stream_writing->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata);
grpc_chttp2_encode_data(
stream_writing->id, &stream_writing->flow_controlled_buffer,
send_bytes, is_last_frame, &stream_writing->stats,
&transport_writing->outbuf);
if (is_first_data_frame) {
/* TODO(dgq): this is a hack. It'll be fixed in a future refactoring */
stream_writing->stats.data_bytes -= 5; /* discount grpc framing */
is_first_data_frame = false;
}
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing,
stream_writing, outgoing_window,
send_bytes);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_writing,
outgoing_window, send_bytes);
if (is_last_frame) {
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
if (is_last_data_frame) {
GPR_ASSERT(stream_writing->send_message == NULL);
stream_writing->sent_message = 1;
}
} else if (transport_writing->outgoing_window == 0) {
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
/* send trailing metadata if it's available and we're ready for it */
if (stream_writing->send_message == NULL &&
stream_writing->flow_controlled_buffer.length == 0 &&
stream_writing->send_trailing_metadata != NULL) {
if (grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata)) {
grpc_chttp2_encode_data(
stream_writing->id, &stream_writing->flow_controlled_buffer, 0, 1,
&stream_writing->stats, &transport_writing->outbuf);
} else {
grpc_chttp2_encode_header(
&transport_writing->hpack_compressor, stream_writing->id,
stream_writing->send_trailing_metadata, 1,
transport_writing->max_frame_size, &stream_writing->stats,
&transport_writing->outbuf);
}
if (!transport_writing->is_client && !stream_writing->read_closed) {
gpr_slice_buffer_add(&transport_writing->outbuf,
grpc_chttp2_rst_stream_create(
stream_writing->id, GRPC_CHTTP2_NO_ERROR,
&stream_writing->stats));
}
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
/* if there's more to write, then loop, otherwise prepare to finish the
* write */
if ((stream_writing->flow_controlled_buffer.length > 0 ||
(stream_writing->send_message && !stream_writing->fetching)) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
} else {
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
} else {
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
GPR_TIMER_END("finalize_outbuf", 0);
return t->outbuf.count > 0;
}
void grpc_chttp2_cleanup_writing(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
GPR_TIMER_BEGIN("grpc_chttp2_cleanup_writing", 0);
grpc_chttp2_stream_writing *stream_writing;
grpc_chttp2_stream_global *stream_global;
if (grpc_chttp2_list_flush_writing_stalled_by_transport(exec_ctx,
transport_writing)) {
grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
"resume_stalled_stream");
}
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0);
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
if (stream_writing->sent_initial_metadata) {
while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
if (s->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_initial_metadata_finished, GRPC_ERROR_NONE);
exec_ctx, t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_REF(error), "send_initial_metadata_finished");
}
grpc_transport_move_one_way_stats(&stream_writing->stats,
&stream_global->stats.outgoing);
if (stream_writing->sent_message) {
GPR_ASSERT(stream_writing->send_message == NULL);
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_message_finished, GRPC_ERROR_NONE);
stream_writing->sent_message = 0;
if (s->sending_bytes != 0) {
update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
&s->on_write_finished_cbs, GRPC_ERROR_REF(error));
s->sending_bytes = 0;
}
if (stream_writing->sent_trailing_metadata) {
if (s->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_trailing_metadata_finished, GRPC_ERROR_NONE);
}
if (stream_writing->sent_trailing_metadata) {
grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
!transport_global->is_client, 1,
GRPC_ERROR_NONE);
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_REF(error));
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
}
gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
GPR_TIMER_END("grpc_chttp2_cleanup_writing", 0);
gpr_slice_buffer_reset_and_unref(&t->outbuf);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_chttp2_end_write", 0);
}

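The writing path above builds the 5-byte gRPC message prefix inline before queuing body bytes: one flag byte (1 if the message is compressed) followed by the payload length as a big-endian 32-bit integer. A minimal standalone sketch of that layout, with an illustrative helper name:

#include <stdint.h>

static void write_grpc_message_prefix(uint8_t prefix[5], int compressed,
                                      uint32_t payload_length) {
  prefix[0] = (uint8_t)(compressed != 0);      /* compression flag byte */
  prefix[1] = (uint8_t)(payload_length >> 24); /* length, big-endian */
  prefix[2] = (uint8_t)(payload_length >> 16);
  prefix[3] = (uint8_t)(payload_length >> 8);
  prefix[4] = (uint8_t)(payload_length);
}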
@@ -543,7 +543,7 @@ static void create_grpc_frame(gpr_slice_buffer *write_slice_buffer,
static void convert_metadata_to_cronet_headers(
grpc_linked_mdelem *head, const char *host, char **pp_url,
cronet_bidirectional_stream_header **pp_headers, size_t *p_num_headers,
const char ** method) {
const char **method) {
grpc_linked_mdelem *curr = head;
/* Walk the linked list and get number of header fields */
size_t num_headers_available = 0;

@@ -272,6 +272,18 @@ int grpc_channel_args_compare(const grpc_channel_args *a,
return 0;
}
const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
const char *name) {
if (args != NULL) {
for (size_t i = 0; i < args->num_args; ++i) {
if (strcmp(args->args[i].key, name) == 0) {
return &args->args[i];
}
}
}
return NULL;
}
int grpc_channel_arg_get_integer(grpc_arg *arg, grpc_integer_options options) {
if (arg->type != GRPC_ARG_INTEGER) {
gpr_log(GPR_ERROR, "%s ignored: it must be an integer", arg->key);

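The new grpc_channel_args_find helper returns NULL when the key is absent, so callers must check both presence and type before reading a value. A hypothetical wrapper sketch (lookup_integer_arg is illustrative, not part of this change):

#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/channel/channel_args.h"

static int lookup_integer_arg(const grpc_channel_args *args, const char *key,
                              int default_value) {
  const grpc_arg *arg = grpc_channel_args_find(args, key);
  /* fall back to the default if the arg is missing or not an integer */
  if (arg == NULL || arg->type != GRPC_ARG_INTEGER) return default_value;
  return arg->value.integer;
}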
@@ -89,6 +89,10 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
int grpc_channel_args_compare(const grpc_channel_args *a,
const grpc_channel_args *b);
/** Returns the value of argument \a name from \a args, or NULL if not found. */
const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
const char *name);
typedef struct grpc_integer_options {
int default_value; // Return this if value is outside of expected bounds.
int min_value;

@@ -162,7 +162,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
gpr_timespec deadline, grpc_call_stack *call_stack) {
grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
size_t count = channel_stack->count;
@@ -179,10 +179,12 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
args.start_time = gpr_now(GPR_CLOCK_MONOTONIC);
for (i = 0; i < count; i++) {
args.call_stack = call_stack;
args.server_transport_data = transport_server_data;
args.context = context;
args.path = path;
args.deadline = deadline;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;

@@ -74,6 +74,8 @@ typedef struct {
grpc_call_stack *call_stack;
const void *server_transport_data;
grpc_call_context_element *context;
grpc_mdstr *path;
gpr_timespec start_time;
gpr_timespec deadline;
} grpc_call_element_args;
@@ -225,7 +227,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
gpr_timespec deadline, grpc_call_stack *call_stack);
grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
* op is started */
void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,

@@ -64,30 +64,49 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
}
// Starts the deadline timer.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// Note: We do not start the timer if there is already a timer
// pending. This should be okay, because this is only called from two
// functions exported by this module: grpc_deadline_state_start(), which
// starts the initial timer, and grpc_deadline_state_reset(), which
// cancels any pre-existing timer before starting a new one. In
// particular, we want to ensure that if grpc_deadline_state_start()
// winds up trying to start the timer after grpc_deadline_state_reset()
// has already done so, we ignore the value from the former.
if (!deadline_state->timer_pending &&
gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// Take a reference to the call stack, to be owned by the timer.
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
gpr_mu_lock(&deadline_state->timer_mu);
deadline_state->timer_pending = true;
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
elem, gpr_now(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(&deadline_state->timer_mu);
}
}
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
gpr_mu_lock(&deadline_state->timer_mu);
start_timer_if_needed_locked(exec_ctx, elem, deadline);
gpr_mu_unlock(&deadline_state->timer_mu);
}
// Cancels the deadline timer.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
gpr_mu_lock(&deadline_state->timer_mu);
static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
if (deadline_state->timer_pending) {
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
deadline_state->timer_pending = false;
}
}
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
gpr_mu_lock(&deadline_state->timer_mu);
cancel_timer_if_needed_locked(exec_ctx, deadline_state);
gpr_mu_unlock(&deadline_state->timer_mu);
}
@@ -108,6 +127,21 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
op->on_complete = &deadline_state->on_complete;
}
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack) {
grpc_deadline_state* deadline_state = elem->call_data;
memset(deadline_state, 0, sizeof(*deadline_state));
deadline_state->call_stack = call_stack;
gpr_mu_init(&deadline_state->timer_mu);
}
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
grpc_deadline_state* deadline_state = elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
gpr_mu_destroy(&deadline_state->timer_mu);
}
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
@@ -122,16 +156,11 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
gpr_free(state);
}
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_element_args* args) {
grpc_deadline_state* deadline_state = elem->call_data;
memset(deadline_state, 0, sizeof(*deadline_state));
deadline_state->call_stack = args->call_stack;
gpr_mu_init(&deadline_state->timer_mu);
void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec deadline) {
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
const gpr_timespec deadline =
gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// When the deadline passes, we indicate the failure by sending down
// an op with cancel_error set. However, we can't send down any ops
@@ -148,11 +177,13 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
}
}
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
gpr_mu_destroy(&deadline_state->timer_mu);
gpr_mu_lock(&deadline_state->timer_mu);
cancel_timer_if_needed_locked(exec_ctx, deadline_state);
start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
gpr_mu_unlock(&deadline_state->timer_mu);
}
void grpc_deadline_state_client_start_transport_stream_op(
@@ -209,7 +240,8 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element_args* args) {
// Note: size of call data is different between client and server.
memset(elem->call_data, 0, elem->filter->sizeof_call_data);
grpc_deadline_state_init(exec_ctx, elem, args);
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
grpc_deadline_state_start(exec_ctx, elem, args->deadline);
return GRPC_ERROR_NONE;
}

@@ -54,18 +54,37 @@ typedef struct grpc_deadline_state {
grpc_closure* next_on_complete;
} grpc_deadline_state;
// To be used in a filter's init_call_elem(), destroy_call_elem(), and
// start_transport_stream_op() methods to enforce call deadlines.
//
// REQUIRES: The first field in elem->call_data is a grpc_deadline_state.
// NOTE: All of these functions require that the first field in
// elem->call_data is a grpc_deadline_state.
//
// For grpc_deadline_state_client_start_transport_stream_op(), it is the
// caller's responsibility to chain to the next filter if necessary
// after the function returns.
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_element_args* args);
grpc_call_stack* call_stack);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
// Starts the timer with the specified deadline.
// Should be called from the filter's init_call_elem() method.
void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec deadline);
// Cancels the existing timer and starts a new one with new_deadline.
//
// Note: It is generally safe to call this with an earlier deadline
// value than the current one, but not the reverse. No checks are done
// to ensure that the timer callback is not invoked while it is in the
// process of being reset, which means that attempting to increase the
// deadline may result in the timer being called twice.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline);
// To be called from the client-side filter's start_transport_stream_op()
// method. Ensures that the deadline timer is cancelled when the call
// is completed.
//
// Note: It is the caller's responsibility to chain to the next filter if
// necessary after this function returns.
void grpc_deadline_state_client_start_transport_stream_op(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op* op);

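A minimal sketch of the layout contract documented above: grpc_deadline_state must be the first field of the filter's call data so the shared helpers can treat elem->call_data as a grpc_deadline_state*. All names other than grpc_deadline_state are illustrative.

#include <stdbool.h>
#include "src/core/lib/channel/deadline_filter.h"

typedef struct hypothetical_call_data {
  grpc_deadline_state deadline_state; /* must be the first field */
  grpc_closure recv_done;
  bool seen_trailing_metadata;
} hypothetical_call_data;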
@@ -183,7 +183,7 @@ void grpc_handshake_manager_do_handshake(
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
grpc_handshaker_done_cb cb, void* user_data) {
grpc_channel_args* args_copy = grpc_channel_args_copy(args);
gpr_slice_buffer* read_buffer = malloc(sizeof(*read_buffer));
gpr_slice_buffer* read_buffer = gpr_malloc(sizeof(*read_buffer));
gpr_slice_buffer_init(read_buffer);
if (mgr->count == 0) {
// No handshakers registered, so we just immediately call the done

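The handshake-manager fix above replaces a bare malloc with gpr_malloc; allocations made through gpr_malloc must be paired with gpr_free, since gRPC can install custom allocation functions. A trivial sketch:

#include <grpc/support/alloc.h>

static void alloc_example(void) {
  char *buf = gpr_malloc(64);
  /* ... use buf ... */
  gpr_free(buf);
}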
@@ -38,6 +38,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/client_channel/method_config.h"
#include "src/core/lib/channel/channel_args.h"
#define DEFAULT_MAX_SEND_MESSAGE_LENGTH -1 // Unlimited.
@@ -45,6 +46,8 @@
#define DEFAULT_MAX_RECV_MESSAGE_LENGTH (4 * 1024 * 1024)
typedef struct call_data {
int max_send_size;
int max_recv_size;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
// call our next_recv_message_ready member after handling it.
@@ -58,6 +61,8 @@ typedef struct call_data {
typedef struct channel_data {
int max_send_size;
int max_recv_size;
// Method config table.
grpc_method_config_table* method_config_table;
} channel_data;
// Callback invoked when we receive a message. Here we check the max
@@ -66,13 +71,12 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
grpc_error* error) {
grpc_call_element* elem = user_data;
call_data* calld = elem->call_data;
channel_data* chand = elem->channel_data;
if (*calld->recv_message != NULL && chand->max_recv_size >= 0 &&
(*calld->recv_message)->length > (size_t)chand->max_recv_size) {
if (*calld->recv_message != NULL && calld->max_recv_size >= 0 &&
(*calld->recv_message)->length > (size_t)calld->max_recv_size) {
char* message_string;
gpr_asprintf(&message_string,
"Received message larger than max (%u vs. %d)",
(*calld->recv_message)->length, chand->max_recv_size);
(*calld->recv_message)->length, calld->max_recv_size);
grpc_error* new_error = grpc_error_set_int(
GRPC_ERROR_CREATE(message_string), GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_INVALID_ARGUMENT);
@@ -93,13 +97,12 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_transport_stream_op* op) {
call_data* calld = elem->call_data;
channel_data* chand = elem->channel_data;
// Check max send message size.
if (op->send_message != NULL && chand->max_send_size >= 0 &&
op->send_message->length > (size_t)chand->max_send_size) {
if (op->send_message != NULL && calld->max_send_size >= 0 &&
op->send_message->length > (size_t)calld->max_send_size) {
char* message_string;
gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
op->send_message->length, chand->max_send_size);
op->send_message->length, calld->max_send_size);
gpr_slice message = gpr_slice_from_copied_string(message_string);
gpr_free(message_string);
grpc_call_element_send_close_with_message(
@@ -119,9 +122,37 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_call_element_args* args) {
channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;
grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem);
// Get max sizes from channel data, then merge in per-method config values.
// Note: Per-method config is only available on the client, so we
// apply the max request size to the send limit and the max response
// size to the receive limit.
calld->max_send_size = chand->max_send_size;
calld->max_recv_size = chand->max_recv_size;
if (chand->method_config_table != NULL) {
grpc_method_config* method_config =
grpc_method_config_table_get_method_config(chand->method_config_table,
args->path);
if (method_config != NULL) {
const int32_t* max_request_message_bytes =
grpc_method_config_get_max_request_message_bytes(method_config);
if (max_request_message_bytes != NULL &&
(*max_request_message_bytes < calld->max_send_size ||
calld->max_send_size < 0)) {
calld->max_send_size = *max_request_message_bytes;
}
const int32_t* max_response_message_bytes =
grpc_method_config_get_max_response_message_bytes(method_config);
if (max_response_message_bytes != NULL &&
(*max_response_message_bytes < calld->max_recv_size ||
calld->max_recv_size < 0)) {
calld->max_recv_size = *max_response_message_bytes;
}
}
}
return GRPC_ERROR_NONE;
}
@@ -155,11 +186,22 @@ static void init_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_arg_get_integer(&args->channel_args->args[i], options);
}
}
// Get method config table from channel args.
const grpc_arg* channel_arg =
grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG);
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
chand->method_config_table = grpc_method_config_table_ref(
(grpc_method_config_table*)channel_arg->value.pointer.p);
}
}
// Destructor for channel_data.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem) {}
grpc_channel_element* elem) {
channel_data* chand = elem->channel_data;
grpc_method_config_table_unref(chand->method_config_table);
}
const grpc_channel_filter grpc_message_size_filter = {
start_transport_stream_op,

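The init_call_elem() logic above merges channel-level and per-method message-size limits: -1 means unlimited, and a per-method value may only tighten the channel-level limit. A compact restatement under those assumptions (the helper name is illustrative):

#include <stddef.h>
#include <stdint.h>

static int merge_size_limit(int channel_limit, const int32_t *method_limit) {
  if (method_limit == NULL) return channel_limit;
  if (channel_limit < 0 || *method_limit < channel_limit) {
    return (int)*method_limit;
  }
  return channel_limit;
}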
@@ -35,6 +35,8 @@
#include <grpc/support/alloc.h>
#include "src/core/lib/profiling/timers.h"
void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
@@ -51,7 +53,7 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
GRPC_ERROR_UNREF(error);
return;
}
closure->error = error;
closure->error_data.error = error;
closure->next_data.next = NULL;
if (closure_list->head == NULL) {
closure_list->head = closure;
@@ -64,8 +66,8 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
void grpc_closure_list_fail_all(grpc_closure_list *list,
grpc_error *forced_failure) {
for (grpc_closure *c = list->head; c != NULL; c = c->next_data.next) {
if (c->error == GRPC_ERROR_NONE) {
c->error = GRPC_ERROR_REF(forced_failure);
if (c->error_data.error == GRPC_ERROR_NONE) {
c->error_data.error = GRPC_ERROR_REF(forced_failure);
}
}
GRPC_ERROR_UNREF(forced_failure);
@@ -110,3 +112,13 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
return &wc->wrapper;
}
void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_closure_run", 0);
if (c != NULL) {
c->cb(exec_ctx, c->cb_arg, error);
}
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_closure_run", 0);
}

@@ -76,7 +76,10 @@ struct grpc_closure {
void *cb_arg;
/** Once queued, the result of the closure. Before then: scratch space */
grpc_error *error;
union {
grpc_error *error;
uintptr_t scratch;
} error_data;
};
/** Initializes \a closure with \a cb and \a cb_arg. */
@@ -106,4 +109,10 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
/** return whether \a list is empty. */
bool grpc_closure_list_empty(grpc_closure_list list);
/** Run a closure directly. Caller ensures that no locks are being held above.
* Note that calling this at the end of a closure callback function itself is
* by definition safe. */
void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
#endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */

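A usage sketch for the declarations above: grpc_closure_init binds a callback to its argument, and the new grpc_closure_run invokes the callback inline, taking ownership of the error and unreffing it afterwards. The pending_op type and function names are illustrative.

typedef struct pending_op {
  int refs;
} pending_op;

static void on_op_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  pending_op *op = arg;
  (void)exec_ctx;
  (void)error; /* owned (and unreffed) by grpc_closure_run */
  op->refs--;
}

static void finish_op(grpc_exec_ctx *exec_ctx, pending_op *op) {
  grpc_closure done;
  grpc_closure_init(&done, on_op_done, op);
  grpc_closure_run(exec_ctx, &done, GRPC_ERROR_NONE);
}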
@@ -50,25 +50,57 @@ int grpc_combiner_trace = 0;
} \
} while (0)
#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2
struct grpc_combiner {
grpc_combiner *next_combiner_on_this_exec_ctx;
grpc_workqueue *optional_workqueue;
gpr_mpscq queue;
// state is:
// lower bit - zero if orphaned
// other bits - number of items queued on the lock
// lower bit - zero if orphaned (STATE_UNORPHANED)
// other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
gpr_atm state;
bool take_async_break_before_final_list;
// number of elements in the list that are covered by a poller: if >0, we can
// offload safely
gpr_atm elements_covered_by_poller;
bool time_to_execute_final_list;
bool final_list_covered_by_poller;
grpc_closure_list final_list;
grpc_closure continue_finishing;
grpc_closure offload;
};
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
typedef struct {
grpc_error *error;
bool covered_by_poller;
} error_data;
static uintptr_t pack_error_data(error_data d) {
return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
}
static error_data unpack_error_data(uintptr_t p) {
return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
}
static bool is_covered_by_poller(grpc_combiner *lock) {
return lock->final_list_covered_by_poller ||
gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
}
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
lock->next_combiner_on_this_exec_ctx = NULL;
lock->time_to_execute_final_list = false;
lock->optional_workqueue = optional_workqueue;
gpr_atm_no_barrier_store(&lock->state, 1);
lock->final_list_covered_by_poller = false;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
lock->take_async_break_before_final_list = false;
grpc_closure_list_init(&lock->final_list);
grpc_closure_init(&lock->offload, offload, lock);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -82,7 +114,7 @@ static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -1);
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
if (old_state == 1) {
@@ -90,170 +122,186 @@ void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock) {
lock->next_combiner_on_this_exec_ctx = NULL;
if (exec_ctx->active_combiner == NULL) {
exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
} else {
exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
exec_ctx->last_combiner = lock;
}
}
static void continue_finishing_mainline(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
GPR_TIMER_BEGIN("combiner.continue_executing_mainline", 0);
grpc_combiner *lock = arg;
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p continue_finishing_mainline", lock));
GPR_ASSERT(exec_ctx->active_combiner == NULL);
static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock) {
lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner;
exec_ctx->active_combiner = lock;
if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
GPR_TIMER_END("combiner.continue_executing_mainline", 0);
if (lock->next_combiner_on_this_exec_ctx == NULL) {
exec_ctx->last_combiner = lock;
}
}
static void execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.execute_final", 0);
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
lock->take_async_break_before_final_list = false;
int loops = 0;
while (c != NULL) {
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
loops++;
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *cl, grpc_error *error,
bool covered_by_poller) {
GPR_TIMER_BEGIN("combiner.execute", 0);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
cl, covered_by_poller, last));
GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed
cl->error_data.scratch =
pack_error_data((error_data){error, covered_by_poller});
if (covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1);
}
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
if (last == 1) {
// first element on this list: add it to the list of combiner locks
// executing within this exec_ctx
push_last_on_exec_ctx(exec_ctx, lock);
}
GPR_TIMER_END("combiner.execute_final", 0);
GPR_TIMER_END("combiner.execute", 0);
}
static void continue_executing_final(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
GPR_TIMER_BEGIN("combiner.continue_executing_final", 0);
grpc_combiner *lock = arg;
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p continue_executing_final", lock));
GPR_ASSERT(exec_ctx->active_combiner == NULL);
exec_ctx->active_combiner = lock;
// quick peek to see if new things have turned up on the queue: if so, go back
// to executing them before the final list
if ((gpr_atm_acq_load(&lock->state) >> 1) > 1) {
if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
} else {
execute_final(exec_ctx, lock);
finish(exec_ctx, lock);
static void move_next(grpc_exec_ctx *exec_ctx) {
exec_ctx->active_combiner =
exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
if (exec_ctx->active_combiner == NULL) {
exec_ctx->last_combiner = NULL;
}
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
GPR_TIMER_END("combiner.continue_executing_final", 0);
}
static bool start_execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.start_execute_final", 0);
GPR_ASSERT(exec_ctx->active_combiner == lock);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG,
"C:%p start_execute_final take_async_break_before_final_list=%d",
lock, lock->take_async_break_before_final_list));
if (lock->take_async_break_before_final_list) {
grpc_closure_init(&lock->continue_finishing, continue_executing_final,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
GPR_TIMER_END("combiner.start_execute_final", 0);
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_combiner *lock = arg;
push_last_on_exec_ctx(exec_ctx, lock);
}
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
move_next(exec_ctx);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
lock->optional_workqueue));
grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload,
GRPC_ERROR_NONE);
}
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
grpc_combiner *lock = exec_ctx->active_combiner;
if (lock == NULL) {
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return false;
} else {
execute_final(exec_ctx, lock);
GPR_TIMER_END("combiner.start_execute_final", 0);
return true;
}
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.maybe_finish_one", 0);
gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
GPR_ASSERT(exec_ctx->active_combiner == lock);
if (n == NULL) {
// Queue is in a transiently inconsistent state: a new item is being queued
// but is not visible to this thread yet.
// Use this as a cue that we should go off and do something else for a while
// (and come back later)
grpc_closure_init(&lock->continue_finishing, continue_finishing_mainline,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
GPR_TIMER_END("combiner.maybe_finish_one", 0);
return false;
gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
"is_covered_by_poller=%d exec_ctx_ready_to_finish=%d "
"time_to_execute_final_list=%d",
lock, lock->optional_workqueue, is_covered_by_poller(lock),
grpc_exec_ctx_ready_to_finish(exec_ctx),
lock->time_to_execute_final_list));
if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
grpc_exec_ctx_ready_to_finish(exec_ctx)) {
GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
// this execution context wants to move on, and we have a workqueue (and
// so can help the execution context out): schedule remaining work to be
// picked up on the workqueue
queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
grpc_closure *cl = (grpc_closure *)n;
grpc_error *error = cl->error;
cl->cb(exec_ctx, cl->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("combiner.maybe_finish_one", 0);
return true;
}
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
bool (*executor)(grpc_exec_ctx * exec_ctx, grpc_combiner * lock);
GPR_TIMER_BEGIN("combiner.finish", 0);
int loops = 0;
do {
executor = maybe_finish_one;
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -2);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p finish[%d] old_state=%" PRIdPTR, lock,
loops, old_state));
switch (old_state) {
default:
// we have multiple queued work items: just continue executing them
break;
case 5: // we're down to one queued item: if it's the final list we
case 4: // should do that
if (!grpc_closure_list_empty(lock->final_list)) {
executor = start_execute_final;
}
break;
case 3: // had one count, one unorphaned --> unlocked unorphaned
GPR_TIMER_END("combiner.finish", 0);
return;
case 2: // and one count, one orphaned --> unlocked and orphaned
really_destroy(exec_ctx, lock);
GPR_TIMER_END("combiner.finish", 0);
return;
case 1:
case 0:
// these values are illegal - representing an already unlocked or
// deleted lock
GPR_UNREACHABLE_CODE(return );
if (!lock->time_to_execute_final_list ||
// peek to see if something new has shown up, and execute that with
// priority
(gpr_atm_acq_load(&lock->state) >> 1) > 1) {
gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
if (n == NULL) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
GPR_TIMER_MARK("delay_busy", 0);
if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
queue_offload(exec_ctx, lock);
}
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
loops++;
} while (executor(exec_ctx, lock));
GPR_TIMER_END("combiner.finish", 0);
}
GPR_TIMER_BEGIN("combiner.exec1", 0);
grpc_closure *cl = (grpc_closure *)n;
error_data err = unpack_error_data(cl->error_data.scratch);
cl->cb(exec_ctx, cl->cb_arg, err.error);
if (err.covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
}
GRPC_ERROR_UNREF(err.error);
GPR_TIMER_END("combiner.exec1", 0);
} else {
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
lock->final_list_covered_by_poller = false;
int loops = 0;
while (c != NULL) {
GPR_TIMER_BEGIN("combiner.exec_1final", 0);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
GPR_TIMER_END("combiner.exec_1final", 0);
}
}
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *cl, grpc_error *error) {
GPR_TIMER_MARK("unref", 0);
move_next(exec_ctx);
lock->time_to_execute_final_list = false;
gpr_atm old_state =
gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p grpc_combiner_execute c=%p", lock, cl));
GPR_TIMER_BEGIN("combiner.execute", 0);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, 2);
GPR_ASSERT(last & 1); // ensure lock has not been destroyed
if (last == 1) {
exec_ctx->active_combiner = lock;
GPR_TIMER_BEGIN("combiner.execute_first_cb", 0);
cl->cb(exec_ctx, cl->cb_arg, error);
GPR_TIMER_END("combiner.execute_first_cb", 0);
GRPC_ERROR_UNREF(error);
finish(exec_ctx, lock);
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
} else {
cl->error = error;
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \
(((orphaned) ? 0 : STATE_UNORPHANED) | \
((elem_count)*STATE_ELEM_COUNT_LOW_BIT))
// Depending on what the previous state was, we need to perform different
// actions.
switch (old_state) {
default:
// we have multiple queued work items: just continue executing them
break;
case OLD_STATE_WAS(false, 2):
case OLD_STATE_WAS(true, 2):
// we're down to one queued item: if it's the final list we should do that
if (!grpc_closure_list_empty(lock->final_list)) {
lock->time_to_execute_final_list = true;
}
break;
case OLD_STATE_WAS(false, 1):
// had one count, one unorphaned --> unlocked unorphaned
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
case OLD_STATE_WAS(true, 1):
// and one count, one orphaned --> unlocked and orphaned
really_destroy(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
case OLD_STATE_WAS(false, 0):
case OLD_STATE_WAS(true, 0):
// these values are illegal - representing an already unlocked or
// deleted lock
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
GPR_UNREACHABLE_CODE(return true);
}
GPR_TIMER_END("combiner.execute", 0);
push_first_on_exec_ctx(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
@@ -264,30 +312,26 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool force_async_break) {
bool covered_by_poller) {
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG,
"C:%p grpc_combiner_execute_finally c=%p force_async_break=%d; ac=%p",
lock, closure, force_async_break, exec_ctx->active_combiner));
GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
closure, exec_ctx->active_combiner, covered_by_poller));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
grpc_combiner_execute(exec_ctx, lock,
grpc_closure_create(enqueue_finally, closure), error);
grpc_closure_create(enqueue_finally, closure), error,
false);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
if (force_async_break) {
lock->take_async_break_before_final_list = true;
}
if (grpc_closure_list_empty(lock->final_list)) {
gpr_atm_full_fetch_add(&lock->state, 2);
gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
}
if (covered_by_poller) {
lock->final_list_covered_by_poller = true;
}
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
void grpc_combiner_force_async_finally(grpc_combiner *lock) {
lock->take_async_break_before_final_list = true;
}

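pack_error_data()/unpack_error_data() above depend on grpc_error pointers being at least 2-byte aligned (the special error constants are made even in the error.h hunk further down), which leaves the low bit free to carry the covered_by_poller flag. A generic sketch of that pointer-tagging trick:

#include <stdbool.h>
#include <stdint.h>

typedef struct tagged_ptr {
  void *ptr;
  bool flag;
} tagged_ptr;

static uintptr_t pack_tagged(tagged_ptr t) {
  /* the low bit of an aligned pointer is zero, so it can hold the flag */
  return (uintptr_t)t.ptr | (t.flag ? (uintptr_t)1 : 0);
}

static tagged_ptr unpack_tagged(uintptr_t p) {
  tagged_ptr t = {(void *)(p & ~(uintptr_t)1), (p & 1) != 0};
  return t;
}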
@@ -52,19 +52,14 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
// Execute \a action within the lock.
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error);
grpc_closure *closure, grpc_error *error,
bool covered_by_poller);
// Execute \a action within the lock just prior to unlocking.
// if \a hint_async_break is true, the combiner tries to hand execution to
// another thread before finishing the primary queue of combined closures and
// executing the finally list.
// Deprecation warning: \a hint_async_break will be removed in a future version
// Takes a very slow and round-about path if not called from a
// grpc_combiner_execute closure.
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool hint_async_break);
// Deprecated: force the finally list execution onto another thread
void grpc_combiner_force_async_finally(grpc_combiner *lock);
bool covered_by_poller);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
extern int grpc_combiner_trace;

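A hypothetical caller of the revised API above: the added covered_by_poller argument tells the combiner whether a poller will see the resulting work, which it uses to decide when offloading to its workqueue is safe.

static void schedule_under_lock(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
                                grpc_closure *cl) {
  /* run cl under the combiner lock; mark it as covered by a poller */
  grpc_combiner_execute(exec_ctx, lock, cl, GRPC_ERROR_NONE,
                        true /* covered_by_poller */);
}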
@@ -120,6 +120,8 @@ static const char *error_int_name(grpc_error_ints key) {
return "http_status";
case GRPC_ERROR_INT_LIMIT:
return "limit";
case GRPC_ERROR_INT_OCCURRED_DURING_WRITE:
return "occurred_during_write";
}
GPR_UNREACHABLE_CODE(return "unknown");
}
@@ -144,6 +146,8 @@ static const char *error_str_name(grpc_error_strs key) {
return "tsi_error";
case GRPC_ERROR_STR_FILENAME:
return "filename";
case GRPC_ERROR_STR_QUEUED_BUFFERS:
return "queued_buffers";
}
GPR_UNREACHABLE_CODE(return "unknown");
}
@@ -265,7 +269,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
} else {
out = gpr_malloc(sizeof(*out));
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
gpr_log(GPR_DEBUG, "%p create copying", out);
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
#endif
out->ints = gpr_avl_ref(in->ints);
out->strs = gpr_avl_ref(in->strs);
@@ -523,21 +527,25 @@ static char *fmt_time(void *p) {
return out;
}
static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap) {
static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap,
bool *first) {
if (n == NULL) return;
add_errs(n->left, s, sz, cap);
add_errs(n->left, s, sz, cap, first);
if (!*first) append_chr(',', s, sz, cap);
*first = false;
const char *e = grpc_error_string(n->value);
append_str(e, s, sz, cap);
grpc_error_free_string(e);
add_errs(n->right, s, sz, cap);
add_errs(n->right, s, sz, cap, first);
}
static char *errs_string(grpc_error *err) {
char *s = NULL;
size_t sz = 0;
size_t cap = 0;
bool first = true;
append_chr('[', &s, &sz, &cap);
add_errs(err->errs.root, &s, &sz, &cap);
add_errs(err->errs.root, &s, &sz, &cap, &first);
append_chr(']', &s, &sz, &cap);
append_chr(0, &s, &sz, &cap);
return s;

@@ -100,6 +100,8 @@ typedef enum {
GRPC_ERROR_INT_HTTP_STATUS,
/// context sensitive limit associated with the error
GRPC_ERROR_INT_LIMIT,
/// chttp2: did the error occur while a write was in progress
GRPC_ERROR_INT_OCCURRED_DURING_WRITE,
} grpc_error_ints;
typedef enum {
@@ -121,6 +123,8 @@ typedef enum {
GRPC_ERROR_STR_TSI_ERROR,
/// filename that we were trying to read/write when this error occurred
GRPC_ERROR_STR_FILENAME,
/// which data was queued for writing when the error occurred
GRPC_ERROR_STR_QUEUED_BUFFERS
} grpc_error_strs;
typedef enum {
@@ -128,9 +132,13 @@ typedef enum {
GRPC_ERROR_TIME_CREATED,
} grpc_error_times;
/// The following "special" errors can be propagated without allocating memory.
/// They are always even so that other code (particularly combiner locks) can
/// safely use the lower bit for themselves.
#define GRPC_ERROR_NONE ((grpc_error *)NULL)
#define GRPC_ERROR_OOM ((grpc_error *)1)
#define GRPC_ERROR_CANCELLED ((grpc_error *)2)
#define GRPC_ERROR_OOM ((grpc_error *)2)
#define GRPC_ERROR_CANCELLED ((grpc_error *)4)
const char *grpc_error_string(grpc_error *error);
void grpc_error_free_string(const char *str);

@@ -152,20 +152,20 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
//#define GRPC_PI_REF_COUNT_DEBUG
#ifdef GRPC_PI_REF_COUNT_DEBUG
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
#else /* defined(GRPC_PI_REF_COUNT_DEBUG) */
#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
#endif /* !defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
@@ -185,8 +185,17 @@ typedef struct polling_island {
* (except mu and ref_count) are invalid and must be ignored. */
gpr_atm merged_to;
/* The workqueue associated with this polling island */
grpc_workqueue *workqueue;
/* Number of threads currently polling on this island */
gpr_atm poller_count;
/* Mutex guarding the read end of the workqueue (must be held to pop from
* workqueue_items) */
gpr_mu workqueue_read_mu;
/* Queue of closures to be executed */
gpr_mpscq workqueue_items;
/* Count of items in workqueue_items */
gpr_atm workqueue_item_count;
/* Wakeup fd used to wake pollers to check the contents of workqueue_items */
grpc_wakeup_fd workqueue_wakeup_fd;
/* The fd of the underlying epoll set */
int epoll_fd;
@@ -275,6 +284,10 @@ static bool append_error(grpc_error **composite, grpc_error *error,
threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
static grpc_wakeup_fd polling_island_wakeup_fd;
/* The polling island being polled right now.
See comments in workqueue_maybe_wakeup for why this is tracked. */
static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
@@ -289,12 +302,12 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
#ifdef GRPC_PI_REF_COUNT_DEBUG
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
int line) {
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_add_ref(pi);
gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
@@ -302,12 +315,42 @@ static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
char *reason, char *file, int line) {
const char *reason, const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_unref(exec_ctx, pi);
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
(void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
if (workqueue != NULL) {
pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
if (workqueue != NULL) {
pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
}
}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_add_ref((polling_island *)workqueue);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_unref(exec_ctx, (polling_island *)workqueue);
}
}
#endif
static void pi_add_ref(polling_island *pi) {
@@ -315,10 +358,7 @@ static void pi_add_ref(polling_island *pi) {
}
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
/* If ref count went to one, we're back to just the workqueue owning a ref.
Unref the workqueue to break the loop.
If ref count went to zero, delete the polling island.
/* If ref count went to zero, delete the polling island.
Note that this deletion need not be done under a lock. Once the ref count goes
to zero, we are guaranteed that no one else holds a reference to the
polling island (and that there is no racing pi_add_ref() call either).
@@ -326,20 +366,12 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
Also, if we are deleting the polling island and the merged_to field is
non-empty, we should remove a ref to the merged_to polling island
*/
switch (gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
case 2: /* last external ref: the only one now owned is by the workqueue */
GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
break;
case 1: {
polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
polling_island_delete(exec_ctx, pi);
if (next != NULL) {
PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
}
break;
if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
polling_island_delete(exec_ctx, pi);
if (next != NULL) {
PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
}
case 0:
GPR_UNREACHABLE_CODE(return );
}
}
@@ -488,11 +520,20 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
pi->workqueue = NULL;
gpr_mu_init(&pi->workqueue_read_mu);
gpr_mpscq_init(&pi->workqueue_items);
gpr_atm_rel_store(&pi->workqueue_item_count, 0);
gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
err_desc)) {
goto done;
}
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@@ -501,26 +542,14 @@
}
polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
done:
if (*error != GRPC_ERROR_NONE) {
polling_island_delete(exec_ctx, pi);
pi = NULL;
}
@ -533,7 +562,11 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
gpr_mu_destroy(&pi->workqueue_read_mu);
gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_free(pi->fds);
gpr_free(pi);
}
@ -678,6 +711,45 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
static void workqueue_maybe_wakeup(polling_island *pi) {
/* If this thread is the current poller, then it may be that it's about to
decrement the current poller count, so we need to look past this thread */
bool is_current_poller = (g_current_thread_polling_island == pi);
gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
/* Only issue a wakeup if it's likely that some poller could come in and take
it right now. Note that since we do an anticipatory mpscq_pop every poll
loop, it's ok if we miss the wakeup here, as we'll get the work item when
the next poller enters anyway. */
if (current_pollers > min_current_pollers_for_wakeup) {
GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
}
}
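For example, if the enqueuing thread is itself currently the only poller of this island (g_current_thread_polling_island == pi and poller_count == 1), min_current_pollers_for_wakeup is 1 and no wakeup is written: that thread will pop the item itself on its next pass through maybe_do_workqueue_work rather than paying for a redundant wakeup-fd write.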
static void workqueue_move_items_to_parent(polling_island *q) {
polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
if (p == NULL) {
return;
}
gpr_mu_lock(&q->workqueue_read_mu);
int num_added = 0;
while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
if (n != NULL) {
gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
gpr_mpscq_push(&p->workqueue_items, n);
num_added++;
}
}
gpr_mu_unlock(&q->workqueue_read_mu);
if (num_added > 0) {
workqueue_maybe_wakeup(p);
}
workqueue_move_items_to_parent(p);
}
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@ -702,6 +774,8 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
workqueue_move_items_to_parent(q);
}
/* else if p == q, nothing needs to be done */
@ -712,6 +786,26 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue, grpc_closure *closure,
grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
/* take a ref to the workqueue: otherwise it can happen that whatever events
* this kicks off ends up destroying the workqueue before this function
* completes */
GRPC_WORKQUEUE_REF(workqueue, "enqueue");
polling_island *pi = (polling_island *)workqueue;
gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
closure->error_data.error = error;
gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
if (last == 0) {
workqueue_maybe_wakeup(pi);
}
workqueue_move_items_to_parent(pi);
GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
GPR_TIMER_END("workqueue.enqueue", 0);
}
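A typical consumer of this path grabs the workqueue backing an fd's polling island and offloads a closure to it instead of running it inline. A minimal hedged sketch; the wrapper function and its names are hypothetical:

/* Hypothetical helper: offload `on_done` to the island that polls `fd`.
   fd_get_workqueue() returns the island with one ref taken, so the caller
   drops that ref once the closure has been handed off. */
static void offload_to_fd_island(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                                 grpc_closure *on_done) {
  grpc_workqueue *wq = fd_get_workqueue(fd);
  grpc_workqueue_enqueue(exec_ctx, wq, on_done, GRPC_ERROR_NONE);
  GRPC_WORKQUEUE_UNREF(exec_ctx, wq, "offload");
}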
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@ -1042,11 +1136,8 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
gpr_mu_lock(&fd->mu);
  grpc_workqueue *workqueue = GRPC_WORKQUEUE_REF(
      (grpc_workqueue *)fd->polling_island, "fd_get_workqueue");
gpr_mu_unlock(&fd->mu);
return workqueue;
}
@ -1299,7 +1390,29 @@ static void pollset_reset(grpc_pollset *pollset) {
GPR_ASSERT(pollset->polling_island == NULL);
}
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
polling_island *pi) {
if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
gpr_mu_unlock(&pi->workqueue_read_mu);
if (n != NULL) {
if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) {
workqueue_maybe_wakeup(pi);
}
grpc_closure *c = (grpc_closure *)n;
grpc_closure_run(exec_ctx, c, c->error_data.error);
return true;
} else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
/* n == NULL might mean there's work but it's not available to be popped
* yet - try to ensure another workqueue wakes up to check shortly if so
*/
workqueue_maybe_wakeup(pi);
}
}
return false;
}
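Note that the trylock on workqueue_read_mu is what makes the gpr_mpscq_pop above safe: the queue is multi-producer but single-consumer, so only one poller may pop at a time, and a poller that loses the race simply proceeds to epoll_pwait() and relies on workqueue_maybe_wakeup() to kick another poller if items are still pending.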
#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
@ -1354,7 +1467,13 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->mu);
do {
/* If we get some workqueue work to do, it might end up completing an item on
the completion queue, so there's no need to poll... so we skip that and
redo the complete loop to verify */
if (!maybe_do_workqueue_work(exec_ctx, pi)) {
gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
g_current_thread_polling_island = pi;
GRPC_SCHEDULING_START_BLOCKING_REGION;
ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
sig_mask);
@ -1386,6 +1505,11 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
append_error(error,
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
err_desc);
} else if (data_ptr == &pi->workqueue_wakeup_fd) {
append_error(error,
                     grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
err_desc);
maybe_do_workqueue_work(exec_ctx, pi);
} else if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
@ -1408,7 +1532,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
}
}
}
} while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
g_current_thread_polling_island = NULL;
gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
}
GPR_ASSERT(pi != NULL);
@ -1868,6 +1995,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_enqueue = workqueue_enqueue,
.shutdown_engine = shutdown_engine,
};
@ -1892,6 +2023,10 @@ const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
return NULL;
}
if (!grpc_has_wakeup_fd()) {
return NULL;
}
if (!is_epoll_available()) {
return NULL;
}

@ -1988,6 +1988,32 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
/*******************************************************************************
* workqueue stubs
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue, grpc_closure *closure,
grpc_error *error) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
}
/*******************************************************************************
* event engine binding
*/
@ -2029,6 +2055,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_enqueue = workqueue_enqueue,
.shutdown_engine = shutdown_engine,
};

@ -47,10 +47,12 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/thd.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@ -245,6 +247,28 @@ struct grpc_pollset_set {
grpc_fd **fds;
};
/*******************************************************************************
* condition variable polling definitions
*/
#define CV_POLL_PERIOD_MS 1000
#define CV_DEFAULT_TABLE_SIZE 16
typedef enum poll_status_t { INPROGRESS, COMPLETED, CANCELLED } poll_status_t;
typedef struct poll_args {
gpr_refcount refcount;
gpr_cv *cv;
struct pollfd *fds;
nfds_t nfds;
int timeout;
int retval;
int err;
gpr_atm status;
} poll_args;
cv_fd_table g_cvfds;
/*******************************************************************************
* fd_posix.c
*/
@ -961,8 +985,15 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (errno != EINTR) {
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
}
for (i = 2; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
// Wake up all the file descriptors, if we have an invalid one
// we can identify it on the next pollset_work()
fd_end_poll(exec_ctx, &watchers[i], 1, 1, pollset);
}
}
} else if (r == 0) {
for (i = 2; i < pfd_count; i++) {
@ -1235,11 +1266,238 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
/*******************************************************************************
* workqueue stubs
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue, grpc_closure *closure,
grpc_error *error) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
}
/*******************************************************************************
* Condition Variable polling extensions
*/
static void decref_poll_args(poll_args *args) {
if (gpr_unref(&args->refcount)) {
gpr_free(args->fds);
gpr_cv_destroy(args->cv);
gpr_free(args->cv);
gpr_free(args);
}
}
// Poll in a background thread
static void run_poll(void *arg) {
int timeout, retval;
poll_args *pargs = (poll_args *)arg;
while (gpr_atm_no_barrier_load(&pargs->status) == INPROGRESS) {
if (pargs->timeout < 0) {
timeout = CV_POLL_PERIOD_MS;
} else {
timeout = GPR_MIN(CV_POLL_PERIOD_MS, pargs->timeout);
pargs->timeout -= timeout;
}
retval = g_cvfds.poll(pargs->fds, pargs->nfds, timeout);
if (retval != 0 || pargs->timeout == 0) {
pargs->retval = retval;
pargs->err = errno;
break;
}
}
gpr_mu_lock(&g_cvfds.mu);
if (gpr_atm_no_barrier_load(&pargs->status) == INPROGRESS) {
// Signal main thread that the poll completed
gpr_atm_no_barrier_store(&pargs->status, COMPLETED);
gpr_cv_signal(pargs->cv);
}
decref_poll_args(pargs);
g_cvfds.pollcount--;
if (g_cvfds.shutdown && g_cvfds.pollcount == 0) {
gpr_cv_signal(&g_cvfds.shutdown_complete);
}
gpr_mu_unlock(&g_cvfds.mu);
}
// This function overrides poll() to handle condition variable wakeup fds
static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
unsigned int i;
int res, idx;
gpr_cv *pollcv;
cv_node *cvn, *prev;
nfds_t nsockfds = 0;
gpr_thd_id t_id;
gpr_thd_options opt;
poll_args *pargs = NULL;
gpr_mu_lock(&g_cvfds.mu);
pollcv = gpr_malloc(sizeof(gpr_cv));
gpr_cv_init(pollcv);
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
idx = FD_TO_IDX(fds[i].fd);
cvn = gpr_malloc(sizeof(cv_node));
cvn->cv = pollcv;
cvn->next = g_cvfds.cvfds[idx].cvs;
g_cvfds.cvfds[idx].cvs = cvn;
// We should return immediately if there are pending events,
// but we still need to call poll() to check for socket events
if (g_cvfds.cvfds[idx].is_set) {
timeout = 0;
}
} else if (fds[i].fd >= 0) {
nsockfds++;
}
}
if (nsockfds > 0) {
pargs = gpr_malloc(sizeof(struct poll_args));
// Both the main thread and calling thread get a reference
gpr_ref_init(&pargs->refcount, 2);
pargs->cv = pollcv;
pargs->fds = gpr_malloc(sizeof(struct pollfd) * nsockfds);
pargs->nfds = nsockfds;
pargs->timeout = timeout;
pargs->retval = 0;
pargs->err = 0;
gpr_atm_no_barrier_store(&pargs->status, INPROGRESS);
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd >= 0) {
pargs->fds[idx].fd = fds[i].fd;
pargs->fds[idx].events = fds[i].events;
pargs->fds[idx].revents = 0;
idx++;
}
}
g_cvfds.pollcount++;
opt = gpr_thd_options_default();
gpr_thd_options_set_detached(&opt);
gpr_thd_new(&t_id, &run_poll, pargs, &opt);
// We want the poll() thread to trigger the deadline, so wait forever here
gpr_cv_wait(pollcv, &g_cvfds.mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
if (gpr_atm_no_barrier_load(&pargs->status) == COMPLETED) {
res = pargs->retval;
errno = pargs->err;
} else {
res = 0;
errno = 0;
gpr_atm_no_barrier_store(&pargs->status, CANCELLED);
}
} else {
gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME);
deadline =
gpr_time_add(deadline, gpr_time_from_millis(timeout, GPR_TIMESPAN));
gpr_cv_wait(pollcv, &g_cvfds.mu, deadline);
res = 0;
}
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
cvn = g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs;
prev = NULL;
while (cvn->cv != pollcv) {
prev = cvn;
cvn = cvn->next;
GPR_ASSERT(cvn);
}
if (!prev) {
g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs = cvn->next;
} else {
prev->next = cvn->next;
}
gpr_free(cvn);
if (g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].is_set) {
fds[i].revents = POLLIN;
if (res >= 0) res++;
}
} else if (fds[i].fd >= 0 &&
gpr_atm_no_barrier_load(&pargs->status) == COMPLETED) {
fds[i].revents = pargs->fds[idx].revents;
idx++;
}
}
if (pargs) {
decref_poll_args(pargs);
} else {
gpr_cv_destroy(pollcv);
gpr_free(pollcv);
}
gpr_mu_unlock(&g_cvfds.mu);
return res;
}
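To make the fd split concrete, here is a hedged sketch of the caller-side view once cvfd_poll has been installed as grpc_poll_function; the function name and sock_fd are illustrative, and the cv fd is assumed to have already been handed out by cv_fd_init:

/* Sketch: one condition-variable backed fd plus one real socket.
   The negative fd is handled purely via g_cvfds; only the socket is
   forwarded to the background run_poll() thread. */
static int example_mixed_poll(int sock_fd) {
  struct pollfd fds[2];
  fds[0].fd = IDX_TO_FD(0); /* slot 0 of g_cvfds, i.e. fd -1 (assumed allocated) */
  fds[0].events = POLLIN;
  fds[1].fd = sock_fd;      /* ordinary socket */
  fds[1].events = POLLIN;
  return grpc_poll_function(fds, 2, 1000 /* ms timeout */);
}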
static void global_cv_fd_table_init() {
gpr_mu_init(&g_cvfds.mu);
gpr_mu_lock(&g_cvfds.mu);
gpr_cv_init(&g_cvfds.shutdown_complete);
g_cvfds.shutdown = 0;
g_cvfds.pollcount = 0;
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
g_cvfds.cvfds = gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.free_fds = NULL;
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = NULL;
g_cvfds.cvfds[i].next_free = g_cvfds.free_fds;
g_cvfds.free_fds = &g_cvfds.cvfds[i];
}
// Override the poll function with one that supports cvfds
g_cvfds.poll = grpc_poll_function;
grpc_poll_function = &cvfd_poll;
gpr_mu_unlock(&g_cvfds.mu);
}
static void global_cv_fd_table_shutdown() {
gpr_mu_lock(&g_cvfds.mu);
g_cvfds.shutdown = 1;
// Attempt to wait for all abandoned poll() threads to terminate
// Not doing so will result in reported memory leaks
if (g_cvfds.pollcount > 0) {
int res = gpr_cv_wait(&g_cvfds.shutdown_complete, &g_cvfds.mu,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_seconds(3, GPR_TIMESPAN)));
GPR_ASSERT(res == 0);
}
gpr_cv_destroy(&g_cvfds.shutdown_complete);
grpc_poll_function = g_cvfds.poll;
gpr_free(g_cvfds.cvfds);
gpr_mu_unlock(&g_cvfds.mu);
gpr_mu_destroy(&g_cvfds.mu);
}
/*******************************************************************************
* event engine binding
*/
static void shutdown_engine(void) {
pollset_global_shutdown();
if (grpc_cv_wakeup_fds_enabled()) {
global_cv_fd_table_shutdown();
}
}
static const grpc_event_engine_vtable vtable = {
.pollset_size = sizeof(grpc_pollset),
@ -1273,11 +1531,29 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_enqueue = workqueue_enqueue,
.shutdown_engine = shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_posix(void) {
if (!grpc_has_wakeup_fd()) {
return NULL;
}
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
return NULL;
}
return &vtable;
}
const grpc_event_engine_vtable *grpc_init_poll_cv_posix(void) {
global_cv_fd_table_init();
grpc_enable_cv_wakeup_fds(1);
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
global_cv_fd_table_shutdown();
grpc_enable_cv_wakeup_fds(0);
return NULL;
}
return &vtable;

@ -37,5 +37,6 @@
#include "src/core/lib/iomgr/ev_posix.h"
const grpc_event_engine_vtable *grpc_init_poll_posix(void);
const grpc_event_engine_vtable *grpc_init_poll_cv_posix(void);
#endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_POSIX_H */

@ -66,6 +66,7 @@ typedef struct {
static const event_engine_factory g_factories[] = {
{"epoll", grpc_init_epoll_linux},
{"poll", grpc_init_poll_posix},
{"poll-cv", grpc_init_poll_cv_posix},
{"legacy", grpc_init_poll_and_epoll_posix},
};
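The string in each entry is what the GRPC_POLL_STRATEGY environment variable (see doc/environment_variables.md) is matched against, so running with GRPC_POLL_STRATEGY=poll-cv is how a process opts into the new condition-variable engine when eventfd/pipe wakeups are unavailable or undesired.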
@ -258,4 +259,27 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
grpc_error *grpc_kick_poller(void) { return g_event_engine->kick_poller(); }
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason) {
return g_event_engine->workqueue_ref(workqueue, file, line, reason);
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
g_event_engine->workqueue_unref(exec_ctx, workqueue, file, line, reason);
}
#else
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
return g_event_engine->workqueue_ref(workqueue);
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
g_event_engine->workqueue_unref(exec_ctx, workqueue);
}
#endif
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
grpc_closure *closure, grpc_error *error) {
g_event_engine->workqueue_enqueue(exec_ctx, workqueue, closure, error);
}
#endif // GPR_POSIX_SOCKET

@ -40,6 +40,7 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
typedef struct grpc_fd grpc_fd;
@ -95,6 +96,18 @@ typedef struct grpc_event_engine_vtable {
grpc_error *(*kick_poller)(void);
void (*shutdown_engine)(void);
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue, const char *file,
int line, const char *reason);
void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
void (*workqueue_enqueue)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
grpc_closure *closure, grpc_error *error);
} grpc_event_engine_vtable;
void grpc_event_engine_init(void);

@ -37,6 +37,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
@ -60,18 +61,43 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
for (;;) {
if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
did_something = true;
grpc_closure_run(exec_ctx, c, c->error_data.error);
c = next;
}
} else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) {
break;
}
}
GPR_ASSERT(exec_ctx->active_combiner == NULL);
if (exec_ctx->stealing_from_workqueue != NULL) {
if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue,
exec_ctx->stolen_closure,
exec_ctx->stolen_closure->error_data.error);
GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
"exec_ctx_sched");
exec_ctx->stealing_from_workqueue = NULL;
exec_ctx->stolen_closure = NULL;
} else {
grpc_closure *c = exec_ctx->stolen_closure;
GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
"exec_ctx_sched");
exec_ctx->stealing_from_workqueue = NULL;
exec_ctx->stolen_closure = NULL;
grpc_error *error = c->error_data.error;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0);
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0);
grpc_exec_ctx_flush(exec_ctx);
did_something = true;
}
}
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
@ -86,12 +112,25 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error,
grpc_workqueue *offload_target_or_null) {
GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0);
if (offload_target_or_null == NULL) {
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
} else if (exec_ctx->stealing_from_workqueue == NULL) {
exec_ctx->stealing_from_workqueue = offload_target_or_null;
closure->error_data.error = error;
exec_ctx->stolen_closure = closure;
} else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) {
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
} else { /* stealing_from_workqueue == offload_target_or_null */
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null,
exec_ctx->stolen_closure,
exec_ctx->stolen_closure->error_data.error);
closure->error_data.error = error;
exec_ctx->stolen_closure = closure;
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
}
GPR_TIMER_END("grpc_exec_ctx_sched", 0);
}
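The three branches above implement the 1-deep steal documented in exec_ctx.h below; a hedged walk-through with two hypothetical closures c1 and c2, both aimed at the same workqueue wq:

/* Illustrative sequence (c1, c2, wq hypothetical):
   grpc_exec_ctx_sched(&exec_ctx, c1, GRPC_ERROR_NONE, wq);
     -> wq becomes stealing_from_workqueue and c1 the stolen closure; at
        flush time c1 runs inline unless the exec_ctx is already finishing,
        in which case it is handed back to wq.
   grpc_exec_ctx_sched(&exec_ctx, c2, GRPC_ERROR_NONE, wq);
     -> c1 is pushed onto wq first (preserving submission order), c2 takes
        its place as the stolen closure, and the workqueue ref taken for
        this call is dropped. */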
void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,

@ -66,15 +66,33 @@ typedef struct grpc_combiner grpc_combiner;
#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
struct grpc_exec_ctx {
grpc_closure_list closure_list;
/** The workqueue we're stealing work from.
As items are queued to the execution context, we try to steal one
workqueue item and execute it inline (assuming the exec_ctx is not
finished) - doing so does not invalidate the workqueue's contract, and
provides a small latency win in cases where we get a hit */
grpc_workqueue *stealing_from_workqueue;
/** The workqueue item that was stolen from the workqueue above. When new
items are scheduled to be offloaded to that workqueue, we need to update
this like a 1-deep fifo to maintain the invariant that workqueue items
queued by one thread are started in order */
grpc_closure *stolen_closure;
/** currently active combiner: updated only via combiner.c */
grpc_combiner *active_combiner;
/** last active combiner in the active combiner list */
grpc_combiner *last_combiner;
bool cached_ready_to_finish;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
};
/* initializer for grpc_exec_ctx:
prefer to use GRPC_EXEC_CTX_INIT whenever possible */
#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
{ \
GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
finish_check \
}
#else
struct grpc_exec_ctx {
bool cached_ready_to_finish;
@ -85,8 +103,10 @@ struct grpc_exec_ctx {
{ false, finish_check_arg, finish_check }
#endif
/* initialize an execution context at the top level of an API call into grpc
(this is safe to use elsewhere, though possibly not as efficient) */
#define GRPC_EXEC_CTX_INIT \
GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.

@ -112,6 +112,14 @@ void grpc_iomgr_shutdown(void) {
continue;
}
if (g_root_object.next != &g_root_object) {
if (grpc_iomgr_abort_on_leaks()) {
gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
" iomgr objects before shutdown deadline: "
"memory leaks are likely",
count_objects());
dump_objects("LEAKED");
abort();
}
gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
@ -122,9 +130,6 @@ void grpc_iomgr_shutdown(void) {
"memory leaks are likely",
count_objects());
dump_objects("LEAKED");
}
break;
}

@ -177,7 +177,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
tcp->read_cb = NULL;
tcp->incoming_buffer = NULL;
grpc_closure_run(exec_ctx, cb, error);
}
#define MAX_READ_IOVEC 4
@ -209,11 +209,11 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
GPR_TIMER_BEGIN("recvmsg", 1);
GPR_TIMER_BEGIN("recvmsg", 0);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
GPR_TIMER_END("recvmsg", 0);
GPR_TIMER_END("recvmsg", read_bytes >= 0);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
@ -392,11 +392,8 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error_free_string(str);
}
GPR_TIMER_BEGIN("tcp_handle_write.cb", 0);
cb->cb(exec_ctx, cb->cb_arg, error);
GPR_TIMER_END("tcp_handle_write.cb", 0);
grpc_closure_run(exec_ctx, cb, error);
TCP_UNREF(exec_ctx, tcp, "write");
GRPC_ERROR_UNREF(error);
}
}

@ -101,8 +101,8 @@ grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s);
void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
grpc_closure *shutdown_starting);
/* If the refcount drops to zero, enqueue calls on exec_ctx to
   shutdown_listeners and delete s. */
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s);
/* Shutdown the fds of listeners. */

@ -191,6 +191,9 @@ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
}
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_mu_lock(&s->mu);
GPR_ASSERT(s->shutdown);
gpr_mu_unlock(&s->mu);
if (s->shutdown_complete != NULL) {
grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
}
@ -652,6 +655,7 @@ unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
unsigned port_index) {
unsigned num_fds = 0;
grpc_tcp_listener *sp;
gpr_mu_lock(&s->mu);
for (sp = s->head; sp && port_index != 0; sp = sp->next) {
if (!sp->is_sibling) {
--port_index;
@ -659,12 +663,15 @@ unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
}
for (; sp; sp = sp->sibling, ++num_fds)
;
gpr_mu_unlock(&s->mu);
return num_fds;
}
int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
unsigned fd_index) {
grpc_tcp_listener *sp;
int fd;
gpr_mu_lock(&s->mu);
for (sp = s->head; sp && port_index != 0; sp = sp->next) {
if (!sp->is_sibling) {
--port_index;
@ -673,10 +680,12 @@ int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
for (; sp && fd_index != 0; sp = sp->sibling, --fd_index)
;
  if (sp) {
    fd = sp->fd;
  } else {
    fd = -1;
  }
gpr_mu_unlock(&s->mu);
return fd;
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
@ -722,7 +731,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
}
grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
gpr_ref_non_zero(&s->refs);
return s;
}
@ -736,19 +745,11 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
/* Complete shutdown_starting work before destroying. */
    grpc_tcp_server_shutdown_listeners(exec_ctx, s);
    gpr_mu_lock(&s->mu);
    grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
    gpr_mu_unlock(&s->mu);
tcp_server_destroy(exec_ctx, s);
}
}
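Since the exec_ctx == NULL escape hatch is gone, callers are now expected to always supply an execution context and flush it themselves. A minimal hedged usage sketch; the wrapper name is hypothetical:

/* Sketch: dropping the last ref with a caller-owned exec_ctx. */
static void example_release_server(grpc_tcp_server *server) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_tcp_server_unref(&exec_ctx, server); /* may enqueue shutdown work */
  grpc_exec_ctx_finish(&exec_ctx);          /* runs it before returning  */
}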

@ -139,7 +139,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
gpr_ref_non_zero(&s->refs);
return s;
}
@ -174,19 +174,11 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (gpr_unref(&s->refs)) {
/* Complete shutdown_starting work before destroying. */
    grpc_tcp_server_shutdown_listeners(exec_ctx, s);
    gpr_mu_lock(&s->mu);
    grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
    gpr_mu_unlock(&s->mu);
tcp_server_destroy(exec_ctx, s);
}
}

@ -49,11 +49,11 @@ typedef struct grpc_timer {
} grpc_timer;
/* Initialize *timer. When expired or canceled, timer_cb will be called with
*timer_cb_arg and error set to indicate if it expired (GRPC_ERROR_NONE) or
was canceled (GRPC_ERROR_CANCELLED). timer_cb is guaranteed to be called
exactly once, and application code should check the error to determine
how it was invoked. The application callback is also responsible for
maintaining information about when to free up any user-level state. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
void *timer_cb_arg, gpr_timespec now);
@ -74,8 +74,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
In all of these cases, the cancellation is still considered successful.
They are essentially distinguished in that the timer_cb will be run
exactly once from either the cancellation (with error GRPC_ERROR_CANCELLED)
or from the activation (with error GRPC_ERROR_NONE).
Note carefully that the callback function MAY occur in the same callstack
as grpc_timer_cancel. It's expected that most timers will be cancelled (their
@ -83,14 +83,13 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
that cancellation costs as little as possible. Making callbacks run inline
matches this aim.
Requires: cancel() must happen after init() on a given timer */
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer);
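A hedged usage sketch of the contract described above; the callback name and surrounding helpers are illustrative, not part of the API:

/* The callback fires exactly once and distinguishes expiry from
   cancellation by the error it receives. */
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  if (error == GRPC_ERROR_NONE) {
    /* the deadline elapsed */
  } else if (error == GRPC_ERROR_CANCELLED) {
    /* grpc_timer_cancel() ran before expiry */
  }
  /* per the comment above, any user-level state tied to the timer is
     released here, in the single guaranteed invocation */
}

/* arming and cancelling (timer storage owned by the caller):
     grpc_timer_init(exec_ctx, &timer, deadline, on_alarm, arg,
                     gpr_now(GPR_CLOCK_MONOTONIC));
     grpc_timer_cancel(exec_ctx, &timer);  */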
/* iomgr internal api for dealing with timers */
/* Check for timers to be run, and run them.
Return true if timer callbacks were executed.
Drops drop_mu if it is non-null before executing callbacks.
If next is non-null, TRY to update *next with the next running timer
IF that timer occurs before *next current value.
*next is never guaranteed to be updated on any given execution; however,

@ -0,0 +1,118 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_WAKEUP_FD
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include <errno.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#define MAX_TABLE_RESIZE 256
extern cv_fd_table g_cvfds;
static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
unsigned int i, newsize;
int idx;
gpr_mu_lock(&g_cvfds.mu);
if (!g_cvfds.free_fds) {
newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
g_cvfds.cvfds = gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
for (i = g_cvfds.size; i < newsize; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = NULL;
g_cvfds.cvfds[i].next_free = g_cvfds.free_fds;
g_cvfds.free_fds = &g_cvfds.cvfds[i];
}
g_cvfds.size = newsize;
}
idx = (int)(g_cvfds.free_fds - g_cvfds.cvfds);
g_cvfds.free_fds = g_cvfds.free_fds->next_free;
g_cvfds.cvfds[idx].cvs = NULL;
g_cvfds.cvfds[idx].is_set = 0;
fd_info->read_fd = IDX_TO_FD(idx);
fd_info->write_fd = -1;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
cv_node* cvn;
gpr_mu_lock(&g_cvfds.mu);
g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 1;
cvn = g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs;
while (cvn) {
gpr_cv_signal(cvn->cv);
cvn = cvn->next;
}
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
static grpc_error* cv_fd_consume(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 0;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
static void cv_fd_destroy(grpc_wakeup_fd* fd_info) {
if (fd_info->read_fd == 0) {
return;
}
gpr_mu_lock(&g_cvfds.mu);
// Assert that there are no active pollers
GPR_ASSERT(!g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs);
g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
g_cvfds.free_fds = &g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)];
gpr_mu_unlock(&g_cvfds.mu);
}
static int cv_check_availability(void) { return 1; }
const grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable = {
cv_fd_init, cv_fd_consume, cv_fd_wakeup, cv_fd_destroy,
cv_check_availability};
#endif /* GPR_POSIX_WAKEUP_FD */

@ -0,0 +1,80 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* wakeup_fd_cv uses condition variables to implement wakeup fds.
*
* It is intended for use only in cases when eventfd() and pipe() are not
* available. It can only be used with the "poll" engine.
*
* Implementation:
 * A global table of cv wakeup fds is maintained. A cv wakeup fd is a negative
 * file descriptor. poll() is then run in a background thread with only the
 * real socket fds while we wait on a condition variable triggered by either
 * the poll() completion or a wakeup_fd() call.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H
#define GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H
#include <grpc/support/sync.h>
#include "src/core/lib/iomgr/ev_posix.h"
#define FD_TO_IDX(fd) (-(fd)-1)
#define IDX_TO_FD(idx) (-(idx)-1)
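These macros are mutual inverses over the negative fd space: slot 0 maps to fd -1, slot 5 to fd -6, and FD_TO_IDX(IDX_TO_FD(i)) == i, which is why a negative fds[i].fd seen by cvfd_poll can be translated straight back into its g_cvfds table entry.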
typedef struct cv_node {
gpr_cv* cv;
struct cv_node* next;
} cv_node;
typedef struct fd_node {
int is_set;
cv_node* cvs;
struct fd_node* next_free;
} fd_node;
typedef struct cv_fd_table {
gpr_mu mu;
int pollcount;
int shutdown;
gpr_cv shutdown_complete;
fd_node* cvfds;
fd_node* free_fds;
unsigned int size;
grpc_poll_function_type poll;
} cv_fd_table;
#endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H */

@ -47,11 +47,10 @@
static grpc_error* pipe_init(grpc_wakeup_fd* fd_info) {
int pipefd[2];
/* TODO(klempner): Make this nonfatal */
int r = pipe(pipefd);
if (0 != r) {
gpr_log(GPR_ERROR, "pipe creation failed (%d): %s", errno, strerror(errno));
return GRPC_OS_ERROR(errno, "pipe");
}
grpc_error* err;
err = grpc_set_socket_nonblocking(pipefd[0], 1);
@ -95,8 +94,13 @@ static void pipe_destroy(grpc_wakeup_fd* fd_info) {
}
static int pipe_check_availability(void) {
grpc_wakeup_fd fd;
if (pipe_init(&fd) == GRPC_ERROR_NONE) {
pipe_destroy(&fd);
return 1;
} else {
return 0;
}
}
const grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable = {

@ -36,37 +36,66 @@
#ifdef GPR_POSIX_WAKEUP_FD
#include <stddef.h>
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_pipe.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
extern grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable;
static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL;
int grpc_allow_specialized_wakeup_fd = 1;
int grpc_allow_pipe_wakeup_fd = 1;
int has_real_wakeup_fd = 1;
int cv_wakeup_fds_enabled = 0;
void grpc_wakeup_fd_global_init(void) {
if (grpc_allow_specialized_wakeup_fd &&
grpc_specialized_wakeup_fd_vtable.check_availability()) {
wakeup_fd_vtable = &grpc_specialized_wakeup_fd_vtable;
} else if (grpc_allow_pipe_wakeup_fd &&
grpc_pipe_wakeup_fd_vtable.check_availability()) {
wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
} else {
has_real_wakeup_fd = 0;
}
}
void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = NULL; }
int grpc_has_wakeup_fd(void) { return has_real_wakeup_fd; }
int grpc_cv_wakeup_fds_enabled(void) { return cv_wakeup_fds_enabled; }
void grpc_enable_cv_wakeup_fds(int enable) { cv_wakeup_fds_enabled = enable; }
grpc_error *grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) {
if (cv_wakeup_fds_enabled) {
return grpc_cv_wakeup_fd_vtable.init(fd_info);
}
return wakeup_fd_vtable->init(fd_info);
}
grpc_error *grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd *fd_info) {
if (cv_wakeup_fds_enabled) {
return grpc_cv_wakeup_fd_vtable.consume(fd_info);
}
return wakeup_fd_vtable->consume(fd_info);
}
grpc_error *grpc_wakeup_fd_wakeup(grpc_wakeup_fd *fd_info) {
if (cv_wakeup_fds_enabled) {
return grpc_cv_wakeup_fd_vtable.wakeup(fd_info);
}
return wakeup_fd_vtable->wakeup(fd_info);
}
void grpc_wakeup_fd_destroy(grpc_wakeup_fd *fd_info) {
if (cv_wakeup_fds_enabled) {
grpc_cv_wakeup_fd_vtable.destroy(fd_info);
} else {
wakeup_fd_vtable->destroy(fd_info);
}
}
#endif /* GPR_POSIX_WAKEUP_FD */
