Merge branch 'master' into new_scenario

pull/8215/head
Sree Kuchibhotla 9 years ago
commit b73a654450
  1. 24
      BUILD
  2. 9
      CMakeLists.txt
  3. 165
      Makefile
  4. 3
      binding.gyp
  5. 28
      build.yaml
  6. 3
      config.m4
  7. 2
      doc/environment_variables.md
  8. 121
      doc/epoll-polling-engine.md
  9. BIN
      doc/images/new_epoll_impl.png
  10. BIN
      doc/images/old_epoll_impl.png
  11. 42
      examples/python/helloworld/run_codegen.py
  12. 9
      gRPC-Core.podspec
  13. 6
      grpc.gemspec
  14. 2
      include/grpc++/impl/codegen/server_context.h
  15. 67
      include/grpc++/test/server_context_test_spouse.h
  16. 3
      include/grpc/impl/codegen/grpc_types.h
  17. 6
      include/grpc/support/alloc.h
  18. 6
      package.xml
  19. 4
      src/core/ext/census/grpc_filter.c
  20. 193
      src/core/ext/client_config/client_channel.c
  21. 296
      src/core/ext/client_config/method_config.c
  22. 116
      src/core/ext/client_config/method_config.h
  23. 9
      src/core/ext/client_config/resolver_result.h
  24. 8
      src/core/ext/client_config/subchannel.c
  25. 3
      src/core/ext/client_config/subchannel.h
  26. 5
      src/core/ext/lb_policy/grpclb/grpclb.c
  27. 1
      src/core/ext/lb_policy/pick_first/pick_first.c
  28. 1
      src/core/ext/lb_policy/round_robin/round_robin.c
  29. 6
      src/core/ext/resolver/sockaddr/sockaddr_resolver.c
  30. 3
      src/core/ext/transport/chttp2/transport/chttp2_plugin.c
  31. 2214
      src/core/ext/transport/chttp2/transport/chttp2_transport.c
  32. 4
      src/core/ext/transport/chttp2/transport/frame.h
  33. 68
      src/core/ext/transport/chttp2/transport/frame_data.c
  34. 9
      src/core/ext/transport/chttp2/transport/frame_data.h
  35. 18
      src/core/ext/transport/chttp2/transport/frame_goaway.c
  36. 9
      src/core/ext/transport/chttp2/transport/frame_goaway.h
  37. 13
      src/core/ext/transport/chttp2/transport/frame_ping.c
  38. 8
      src/core/ext/transport/chttp2/transport/frame_ping.h
  39. 38
      src/core/ext/transport/chttp2/transport/frame_rst_stream.c
  40. 9
      src/core/ext/transport/chttp2/transport/frame_rst_stream.h
  41. 23
      src/core/ext/transport/chttp2/transport/frame_settings.c
  42. 9
      src/core/ext/transport/chttp2/transport/frame_settings.h
  43. 34
      src/core/ext/transport/chttp2/transport/frame_window_update.c
  44. 5
      src/core/ext/transport/chttp2/transport/frame_window_update.h
  45. 492
      src/core/ext/transport/chttp2/transport/hpack_parser.c
  46. 17
      src/core/ext/transport/chttp2/transport/hpack_parser.h
  47. 654
      src/core/ext/transport/chttp2/transport/internal.h
  48. 910
      src/core/ext/transport/chttp2/transport/parsing.c
  49. 351
      src/core/ext/transport/chttp2/transport/stream_lists.c
  50. 36
      src/core/ext/transport/chttp2/transport/stream_map.c
  51. 4
      src/core/ext/transport/chttp2/transport/stream_map.h
  52. 466
      src/core/ext/transport/chttp2/transport/writing.c
  53. 12
      src/core/lib/channel/channel_args.c
  54. 4
      src/core/lib/channel/channel_args.h
  55. 4
      src/core/lib/channel/channel_stack.c
  56. 4
      src/core/lib/channel/channel_stack.h
  57. 76
      src/core/lib/channel/deadline_filter.c
  58. 33
      src/core/lib/channel/deadline_filter.h
  59. 2
      src/core/lib/channel/handshaker.c
  60. 60
      src/core/lib/channel/message_size_filter.c
  61. 18
      src/core/lib/iomgr/closure.c
  62. 11
      src/core/lib/iomgr/closure.h
  63. 372
      src/core/lib/iomgr/combiner.c
  64. 15
      src/core/lib/iomgr/combiner.h
  65. 18
      src/core/lib/iomgr/error.c
  66. 12
      src/core/lib/iomgr/error.h
  67. 227
      src/core/lib/iomgr/ev_epoll_linux.c
  68. 30
      src/core/lib/iomgr/ev_poll_and_epoll_posix.c
  69. 39
      src/core/lib/iomgr/ev_poll_posix.c
  70. 23
      src/core/lib/iomgr/ev_posix.c
  71. 13
      src/core/lib/iomgr/ev_posix.h
  72. 61
      src/core/lib/iomgr/exec_ctx.c
  73. 24
      src/core/lib/iomgr/exec_ctx.h
  74. 11
      src/core/lib/iomgr/iomgr.c
  75. 11
      src/core/lib/iomgr/tcp_posix.c
  76. 17
      src/core/lib/iomgr/timer.h
  77. 16
      src/core/lib/iomgr/workqueue.h
  78. 196
      src/core/lib/iomgr/workqueue_posix.c
  79. 10
      src/core/lib/iomgr/workqueue_windows.c
  80. 11
      src/core/lib/profiling/basic_timers.c
  81. 2
      src/core/lib/profiling/timers.h
  82. 9
      src/core/lib/support/log.c
  83. 11
      src/core/lib/support/string.c
  84. 4
      src/core/lib/support/string.h
  85. 129
      src/core/lib/surface/call.c
  86. 32
      src/core/lib/surface/call.h
  87. 18
      src/core/lib/surface/channel.c
  88. 167
      src/core/lib/surface/completion_queue.c
  89. 3
      src/core/lib/surface/completion_queue.h
  90. 4
      src/core/lib/surface/init.c
  91. 35
      src/core/lib/surface/server.c
  92. 3
      src/core/lib/surface/server.h
  93. 3
      src/core/lib/transport/connectivity_state.c
  94. 142
      src/core/lib/transport/mdstr_hash_table.c
  95. 83
      src/core/lib/transport/mdstr_hash_table.h
  96. 35
      src/core/lib/transport/transport.c
  97. 9
      src/core/lib/transport/transport.h
  98. 105
      src/core/lib/transport/transport_op_string.c
  99. 49
      src/cpp/test/server_context_test_spouse.cc
  100. 33
      src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
  Some files were not shown because too many files have changed in this diff.

24
BUILD

@ -221,7 +221,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -240,6 +239,7 @@ cc_library(
"src/core/lib/surface/server.h",
"src/core/lib/transport/byte_stream.h",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/mdstr_hash_table.h",
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/static_metadata.h",
@ -299,6 +299,7 @@ cc_library(
"src/core/ext/client_config/lb_policy.h",
"src/core/ext/client_config/lb_policy_factory.h",
"src/core/ext/client_config/lb_policy_registry.h",
"src/core/ext/client_config/method_config.h",
"src/core/ext/client_config/parse_address.h",
"src/core/ext/client_config/resolver.h",
"src/core/ext/client_config/resolver_factory.h",
@ -385,7 +386,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -411,6 +411,7 @@ cc_library(
"src/core/lib/surface/version.c",
"src/core/lib/transport/byte_stream.c",
"src/core/lib/transport/connectivity_state.c",
"src/core/lib/transport/mdstr_hash_table.c",
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/static_metadata.c",
@ -480,6 +481,7 @@ cc_library(
"src/core/ext/client_config/lb_policy.c",
"src/core/ext/client_config/lb_policy_factory.c",
"src/core/ext/client_config/lb_policy_registry.c",
"src/core/ext/client_config/method_config.c",
"src/core/ext/client_config/parse_address.c",
"src/core/ext/client_config/resolver.c",
"src/core/ext/client_config/resolver_factory.c",
@ -624,7 +626,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -643,6 +644,7 @@ cc_library(
"src/core/lib/surface/server.h",
"src/core/lib/transport/byte_stream.h",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/mdstr_hash_table.h",
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/static_metadata.h",
@ -679,6 +681,7 @@ cc_library(
"src/core/ext/client_config/lb_policy.h",
"src/core/ext/client_config/lb_policy_factory.h",
"src/core/ext/client_config/lb_policy_registry.h",
"src/core/ext/client_config/method_config.h",
"src/core/ext/client_config/parse_address.h",
"src/core/ext/client_config/resolver.h",
"src/core/ext/client_config/resolver_factory.h",
@ -773,7 +776,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -799,6 +801,7 @@ cc_library(
"src/core/lib/surface/version.c",
"src/core/lib/transport/byte_stream.c",
"src/core/lib/transport/connectivity_state.c",
"src/core/lib/transport/mdstr_hash_table.c",
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/static_metadata.c",
@ -842,6 +845,7 @@ cc_library(
"src/core/ext/client_config/lb_policy.c",
"src/core/ext/client_config/lb_policy_factory.c",
"src/core/ext/client_config/lb_policy_registry.c",
"src/core/ext/client_config/method_config.c",
"src/core/ext/client_config/parse_address.c",
"src/core/ext/client_config/resolver.c",
"src/core/ext/client_config/resolver_factory.c",
@ -982,7 +986,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -1001,6 +1004,7 @@ cc_library(
"src/core/lib/surface/server.h",
"src/core/lib/transport/byte_stream.h",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/mdstr_hash_table.h",
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/static_metadata.h",
@ -1036,6 +1040,7 @@ cc_library(
"src/core/ext/client_config/lb_policy.h",
"src/core/ext/client_config/lb_policy_factory.h",
"src/core/ext/client_config/lb_policy_registry.h",
"src/core/ext/client_config/method_config.h",
"src/core/ext/client_config/parse_address.h",
"src/core/ext/client_config/resolver.h",
"src/core/ext/client_config/resolver_factory.h",
@ -1123,7 +1128,6 @@ cc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -1149,6 +1153,7 @@ cc_library(
"src/core/lib/surface/version.c",
"src/core/lib/transport/byte_stream.c",
"src/core/lib/transport/connectivity_state.c",
"src/core/lib/transport/mdstr_hash_table.c",
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/static_metadata.c",
@ -1192,6 +1197,7 @@ cc_library(
"src/core/ext/client_config/lb_policy.c",
"src/core/ext/client_config/lb_policy_factory.c",
"src/core/ext/client_config/lb_policy_registry.c",
"src/core/ext/client_config/method_config.c",
"src/core/ext/client_config/parse_address.c",
"src/core/ext/client_config/resolver.c",
"src/core/ext/client_config/resolver_factory.c",
@ -1887,7 +1893,6 @@ objc_library(
"src/core/lib/iomgr/wakeup_fd_nospecial.c",
"src/core/lib/iomgr/wakeup_fd_pipe.c",
"src/core/lib/iomgr/wakeup_fd_posix.c",
"src/core/lib/iomgr/workqueue_posix.c",
"src/core/lib/iomgr/workqueue_windows.c",
"src/core/lib/json/json.c",
"src/core/lib/json/json_reader.c",
@ -1913,6 +1918,7 @@ objc_library(
"src/core/lib/surface/version.c",
"src/core/lib/transport/byte_stream.c",
"src/core/lib/transport/connectivity_state.c",
"src/core/lib/transport/mdstr_hash_table.c",
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/static_metadata.c",
@ -1982,6 +1988,7 @@ objc_library(
"src/core/ext/client_config/lb_policy.c",
"src/core/ext/client_config/lb_policy_factory.c",
"src/core/ext/client_config/lb_policy_registry.c",
"src/core/ext/client_config/method_config.c",
"src/core/ext/client_config/parse_address.c",
"src/core/ext/client_config/resolver.c",
"src/core/ext/client_config/resolver_factory.c",
@ -2105,7 +2112,6 @@ objc_library(
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/iomgr/workqueue.h",
"src/core/lib/iomgr/workqueue_posix.h",
"src/core/lib/iomgr/workqueue_windows.h",
"src/core/lib/json/json.h",
"src/core/lib/json/json_common.h",
@ -2124,6 +2130,7 @@ objc_library(
"src/core/lib/surface/server.h",
"src/core/lib/transport/byte_stream.h",
"src/core/lib/transport/connectivity_state.h",
"src/core/lib/transport/mdstr_hash_table.h",
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/static_metadata.h",
@ -2183,6 +2190,7 @@ objc_library(
"src/core/ext/client_config/lb_policy.h",
"src/core/ext/client_config/lb_policy_factory.h",
"src/core/ext/client_config/lb_policy_registry.h",
"src/core/ext/client_config/method_config.h",
"src/core/ext/client_config/parse_address.h",
"src/core/ext/client_config/resolver.h",
"src/core/ext/client_config/resolver_factory.h",

@ -351,7 +351,6 @@ add_library(grpc
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -377,6 +376,7 @@ add_library(grpc
src/core/lib/surface/version.c
src/core/lib/transport/byte_stream.c
src/core/lib/transport/connectivity_state.c
src/core/lib/transport/mdstr_hash_table.c
src/core/lib/transport/metadata.c
src/core/lib/transport/metadata_batch.c
src/core/lib/transport/static_metadata.c
@ -446,6 +446,7 @@ add_library(grpc
src/core/ext/client_config/lb_policy.c
src/core/ext/client_config/lb_policy_factory.c
src/core/ext/client_config/lb_policy_registry.c
src/core/ext/client_config/method_config.c
src/core/ext/client_config/parse_address.c
src/core/ext/client_config/resolver.c
src/core/ext/client_config/resolver_factory.c
@ -611,7 +612,6 @@ add_library(grpc_cronet
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -637,6 +637,7 @@ add_library(grpc_cronet
src/core/lib/surface/version.c
src/core/lib/transport/byte_stream.c
src/core/lib/transport/connectivity_state.c
src/core/lib/transport/mdstr_hash_table.c
src/core/lib/transport/metadata.c
src/core/lib/transport/metadata_batch.c
src/core/lib/transport/static_metadata.c
@ -680,6 +681,7 @@ add_library(grpc_cronet
src/core/ext/client_config/lb_policy.c
src/core/ext/client_config/lb_policy_factory.c
src/core/ext/client_config/lb_policy_registry.c
src/core/ext/client_config/method_config.c
src/core/ext/client_config/parse_address.c
src/core/ext/client_config/resolver.c
src/core/ext/client_config/resolver_factory.c
@ -843,7 +845,6 @@ add_library(grpc_unsecure
src/core/lib/iomgr/wakeup_fd_nospecial.c
src/core/lib/iomgr/wakeup_fd_pipe.c
src/core/lib/iomgr/wakeup_fd_posix.c
src/core/lib/iomgr/workqueue_posix.c
src/core/lib/iomgr/workqueue_windows.c
src/core/lib/json/json.c
src/core/lib/json/json_reader.c
@ -869,6 +870,7 @@ add_library(grpc_unsecure
src/core/lib/surface/version.c
src/core/lib/transport/byte_stream.c
src/core/lib/transport/connectivity_state.c
src/core/lib/transport/mdstr_hash_table.c
src/core/lib/transport/metadata.c
src/core/lib/transport/metadata_batch.c
src/core/lib/transport/static_metadata.c
@ -912,6 +914,7 @@ add_library(grpc_unsecure
src/core/ext/client_config/lb_policy.c
src/core/ext/client_config/lb_policy_factory.c
src/core/ext/client_config/lb_policy_registry.c
src/core/ext/client_config/method_config.c
src/core/ext/client_config/parse_address.c
src/core/ext/client_config/resolver.c
src/core/ext/client_config/resolver_factory.c

@ -1072,6 +1072,7 @@ reconnect_interop_server: $(BINDIR)/$(CONFIG)/reconnect_interop_server
secure_auth_context_test: $(BINDIR)/$(CONFIG)/secure_auth_context_test
secure_sync_unary_ping_pong_test: $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test
server_builder_plugin_test: $(BINDIR)/$(CONFIG)/server_builder_plugin_test
server_context_test_spouse_test: $(BINDIR)/$(CONFIG)/server_context_test_spouse_test
server_crash_test: $(BINDIR)/$(CONFIG)/server_crash_test
server_crash_test_client: $(BINDIR)/$(CONFIG)/server_crash_test_client
shutdown_test: $(BINDIR)/$(CONFIG)/shutdown_test
@ -1132,6 +1133,7 @@ bad_ssl_cert_server: $(BINDIR)/$(CONFIG)/bad_ssl_cert_server
bad_ssl_cert_test: $(BINDIR)/$(CONFIG)/bad_ssl_cert_test
h2_census_test: $(BINDIR)/$(CONFIG)/h2_census_test
h2_compress_test: $(BINDIR)/$(CONFIG)/h2_compress_test
h2_fake_resolver_test: $(BINDIR)/$(CONFIG)/h2_fake_resolver_test
h2_fakesec_test: $(BINDIR)/$(CONFIG)/h2_fakesec_test
h2_fd_test: $(BINDIR)/$(CONFIG)/h2_fd_test
h2_full_test: $(BINDIR)/$(CONFIG)/h2_full_test
@ -1150,6 +1152,7 @@ h2_ssl_proxy_test: $(BINDIR)/$(CONFIG)/h2_ssl_proxy_test
h2_uds_test: $(BINDIR)/$(CONFIG)/h2_uds_test
h2_census_nosec_test: $(BINDIR)/$(CONFIG)/h2_census_nosec_test
h2_compress_nosec_test: $(BINDIR)/$(CONFIG)/h2_compress_nosec_test
h2_fake_resolver_nosec_test: $(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test
h2_fd_nosec_test: $(BINDIR)/$(CONFIG)/h2_fd_nosec_test
h2_full_nosec_test: $(BINDIR)/$(CONFIG)/h2_full_nosec_test
h2_full+pipe_nosec_test: $(BINDIR)/$(CONFIG)/h2_full+pipe_nosec_test
@ -1357,6 +1360,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/bad_ssl_cert_test \
$(BINDIR)/$(CONFIG)/h2_census_test \
$(BINDIR)/$(CONFIG)/h2_compress_test \
$(BINDIR)/$(CONFIG)/h2_fake_resolver_test \
$(BINDIR)/$(CONFIG)/h2_fakesec_test \
$(BINDIR)/$(CONFIG)/h2_fd_test \
$(BINDIR)/$(CONFIG)/h2_full_test \
@ -1375,6 +1379,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/h2_uds_test \
$(BINDIR)/$(CONFIG)/h2_census_nosec_test \
$(BINDIR)/$(CONFIG)/h2_compress_nosec_test \
$(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test \
$(BINDIR)/$(CONFIG)/h2_fd_nosec_test \
$(BINDIR)/$(CONFIG)/h2_full_nosec_test \
$(BINDIR)/$(CONFIG)/h2_full+pipe_nosec_test \
@ -1441,6 +1446,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/secure_auth_context_test \
$(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test \
$(BINDIR)/$(CONFIG)/server_builder_plugin_test \
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test \
$(BINDIR)/$(CONFIG)/server_crash_test \
$(BINDIR)/$(CONFIG)/server_crash_test_client \
$(BINDIR)/$(CONFIG)/shutdown_test \
@ -1528,6 +1534,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/secure_auth_context_test \
$(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test \
$(BINDIR)/$(CONFIG)/server_builder_plugin_test \
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test \
$(BINDIR)/$(CONFIG)/server_crash_test \
$(BINDIR)/$(CONFIG)/server_crash_test_client \
$(BINDIR)/$(CONFIG)/shutdown_test \
@ -1832,6 +1839,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test || ( echo test secure_sync_unary_ping_pong_test failed ; exit 1 )
$(E) "[RUN] Testing server_builder_plugin_test"
$(Q) $(BINDIR)/$(CONFIG)/server_builder_plugin_test || ( echo test server_builder_plugin_test failed ; exit 1 )
$(E) "[RUN] Testing server_context_test_spouse_test"
$(Q) $(BINDIR)/$(CONFIG)/server_context_test_spouse_test || ( echo test server_context_test_spouse_test failed ; exit 1 )
$(E) "[RUN] Testing server_crash_test"
$(Q) $(BINDIR)/$(CONFIG)/server_crash_test || ( echo test server_crash_test failed ; exit 1 )
$(E) "[RUN] Testing shutdown_test"
@ -2599,7 +2608,6 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -2625,6 +2633,7 @@ LIBGRPC_SRC = \
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -2694,6 +2703,7 @@ LIBGRPC_SRC = \
src/core/ext/client_config/lb_policy.c \
src/core/ext/client_config/lb_policy_factory.c \
src/core/ext/client_config/lb_policy_registry.c \
src/core/ext/client_config/method_config.c \
src/core/ext/client_config/parse_address.c \
src/core/ext/client_config/resolver.c \
src/core/ext/client_config/resolver_factory.c \
@ -2877,7 +2887,6 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -2903,6 +2912,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -2946,6 +2956,7 @@ LIBGRPC_CRONET_SRC = \
src/core/ext/client_config/lb_policy.c \
src/core/ext/client_config/lb_policy_factory.c \
src/core/ext/client_config/lb_policy_registry.c \
src/core/ext/client_config/method_config.c \
src/core/ext/client_config/parse_address.c \
src/core/ext/client_config/resolver.c \
src/core/ext/client_config/resolver_factory.c \
@ -3145,7 +3156,6 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -3171,6 +3181,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -3340,7 +3351,6 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -3366,6 +3376,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -3409,6 +3420,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/client_config/lb_policy.c \
src/core/ext/client_config/lb_policy_factory.c \
src/core/ext/client_config/lb_policy_registry.c \
src/core/ext/client_config/method_config.c \
src/core/ext/client_config/parse_address.c \
src/core/ext/client_config/resolver.c \
src/core/ext/client_config/resolver_factory.c \
@ -3956,6 +3968,55 @@ endif
endif
LIBGRPC++_TEST_SRC = \
src/cpp/test/server_context_test_spouse.cc \
PUBLIC_HEADERS_CXX += \
LIBGRPC++_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure libraries if you don't have OpenSSL.
$(LIBDIR)/$(CONFIG)/libgrpc++_test.a: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build a C++ library if you don't have protobuf - a bit overreached, but still okay.
$(LIBDIR)/$(CONFIG)/libgrpc++_test.a: protobuf_dep_error
else
$(LIBDIR)/$(CONFIG)/libgrpc++_test.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTOBUF_DEP) $(LIBGRPC++_TEST_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
$(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc++_test.a
$(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBGRPC++_TEST_OBJS)
ifeq ($(SYSTEM),Darwin)
$(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc++_test.a
endif
endif
endif
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(LIBGRPC++_TEST_OBJS:.o=.dep)
endif
endif
LIBGRPC++_TEST_CONFIG_SRC = \
test/cpp/util/test_config_cc.cc \
@ -12753,6 +12814,49 @@ endif
endif
SERVER_CONTEXT_TEST_SPOUSE_TEST_SRC = \
test/cpp/test/server_context_test_spouse_test.cc \
SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(SERVER_CONTEXT_TEST_SPOUSE_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/server_context_test_spouse_test: $(PROTOBUF_DEP) $(SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/server_context_test_spouse_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/test/server_context_test_spouse_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_server_context_test_spouse_test: $(SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(SERVER_CONTEXT_TEST_SPOUSE_TEST_OBJS:.o=.dep)
endif
endif
SERVER_CRASH_TEST_SRC = \
test/cpp/end2end/server_crash_test.cc \
@ -14469,6 +14573,38 @@ endif
endif
H2_FAKE_RESOLVER_TEST_SRC = \
test/core/end2end/fixtures/h2_fake_resolver.c \
H2_FAKE_RESOLVER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(H2_FAKE_RESOLVER_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/h2_fake_resolver_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/h2_fake_resolver_test: $(H2_FAKE_RESOLVER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(H2_FAKE_RESOLVER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/h2_fake_resolver_test
endif
$(OBJDIR)/$(CONFIG)/test/core/end2end/fixtures/h2_fake_resolver.o: $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_h2_fake_resolver_test: $(H2_FAKE_RESOLVER_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(H2_FAKE_RESOLVER_TEST_OBJS:.o=.dep)
endif
endif
H2_FAKESEC_TEST_SRC = \
test/core/end2end/fixtures/h2_fakesec.c \
@ -15021,6 +15157,26 @@ ifneq ($(NO_DEPS),true)
endif
H2_FAKE_RESOLVER_NOSEC_TEST_SRC = \
test/core/end2end/fixtures/h2_fake_resolver.c \
H2_FAKE_RESOLVER_NOSEC_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(H2_FAKE_RESOLVER_NOSEC_TEST_SRC))))
$(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test: $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o $(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test
$(OBJDIR)/$(CONFIG)/test/core/end2end/fixtures/h2_fake_resolver.o: $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_h2_fake_resolver_nosec_test: $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS:.o=.dep)
ifneq ($(NO_DEPS),true)
-include $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS:.o=.dep)
endif
H2_FD_NOSEC_TEST_SRC = \
test/core/end2end/fixtures/h2_fd.c \
@ -15714,6 +15870,7 @@ src/cpp/ext/proto_server_reflection_plugin.cc: $(OPENSSL_DEP)
src/cpp/ext/reflection.grpc.pb.cc: $(OPENSSL_DEP)
src/cpp/ext/reflection.pb.cc: $(OPENSSL_DEP)
src/cpp/server/secure_server_credentials.cc: $(OPENSSL_DEP)
src/cpp/test/server_context_test_spouse.cc: $(OPENSSL_DEP)
src/csharp/ext/grpc_csharp_ext.c: $(OPENSSL_DEP)
test/core/bad_client/bad_client.c: $(OPENSSL_DEP)
test/core/bad_ssl/server_common.c: $(OPENSSL_DEP)

@ -626,7 +626,6 @@
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_posix.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
@ -652,6 +651,7 @@
'src/core/lib/surface/version.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
'src/core/lib/transport/mdstr_hash_table.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/static_metadata.c',
@ -721,6 +721,7 @@
'src/core/ext/client_config/lb_policy.c',
'src/core/ext/client_config/lb_policy_factory.c',
'src/core/ext/client_config/lb_policy_registry.c',
'src/core/ext/client_config/method_config.c',
'src/core/ext/client_config/parse_address.c',
'src/core/ext/client_config/resolver.c',
'src/core/ext/client_config/resolver_factory.c',

@ -225,7 +225,6 @@ filegroups:
- src/core/lib/iomgr/wakeup_fd_pipe.h
- src/core/lib/iomgr/wakeup_fd_posix.h
- src/core/lib/iomgr/workqueue.h
- src/core/lib/iomgr/workqueue_posix.h
- src/core/lib/iomgr/workqueue_windows.h
- src/core/lib/json/json.h
- src/core/lib/json/json_common.h
@ -244,6 +243,7 @@ filegroups:
- src/core/lib/surface/server.h
- src/core/lib/transport/byte_stream.h
- src/core/lib/transport/connectivity_state.h
- src/core/lib/transport/mdstr_hash_table.h
- src/core/lib/transport/metadata.h
- src/core/lib/transport/metadata_batch.h
- src/core/lib/transport/static_metadata.h
@ -312,7 +312,6 @@ filegroups:
- src/core/lib/iomgr/wakeup_fd_nospecial.c
- src/core/lib/iomgr/wakeup_fd_pipe.c
- src/core/lib/iomgr/wakeup_fd_posix.c
- src/core/lib/iomgr/workqueue_posix.c
- src/core/lib/iomgr/workqueue_windows.c
- src/core/lib/json/json.c
- src/core/lib/json/json_reader.c
@ -338,6 +337,7 @@ filegroups:
- src/core/lib/surface/version.c
- src/core/lib/transport/byte_stream.c
- src/core/lib/transport/connectivity_state.c
- src/core/lib/transport/mdstr_hash_table.c
- src/core/lib/transport/metadata.c
- src/core/lib/transport/metadata_batch.c
- src/core/lib/transport/static_metadata.c
@ -358,6 +358,7 @@ filegroups:
- src/core/ext/client_config/lb_policy.h
- src/core/ext/client_config/lb_policy_factory.h
- src/core/ext/client_config/lb_policy_registry.h
- src/core/ext/client_config/method_config.h
- src/core/ext/client_config/parse_address.h
- src/core/ext/client_config/resolver.h
- src/core/ext/client_config/resolver_factory.h
@ -378,6 +379,7 @@ filegroups:
- src/core/ext/client_config/lb_policy.c
- src/core/ext/client_config/lb_policy_factory.c
- src/core/ext/client_config/lb_policy_registry.c
- src/core/ext/client_config/method_config.c
- src/core/ext/client_config/parse_address.c
- src/core/ext/client_config/resolver.c
- src/core/ext/client_config/resolver_factory.c
@ -1024,6 +1026,15 @@ libs:
language: c++
src:
- src/proto/grpc/reflection/v1alpha/reflection.proto
- name: grpc++_test
build: test
language: c++
headers:
- include/grpc++/test/server_context_test_spouse.h
src:
- src/cpp/test/server_context_test_spouse.cc
deps:
- grpc++
- name: grpc++_test_config
build: private
language: c++
@ -3241,6 +3252,19 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: server_context_test_spouse_test
gtest: true
build: test
language: c++
src:
- test/cpp/test/server_context_test_spouse_test.cc
deps:
- grpc_test_util
- grpc++_test
- grpc++
- grpc
- gpr_test_util
- gpr
- name: server_crash_test
gtest: true
cpu_cost: 0.1

@ -145,7 +145,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/wakeup_fd_nospecial.c \
src/core/lib/iomgr/wakeup_fd_pipe.c \
src/core/lib/iomgr/wakeup_fd_posix.c \
src/core/lib/iomgr/workqueue_posix.c \
src/core/lib/iomgr/workqueue_windows.c \
src/core/lib/json/json.c \
src/core/lib/json/json_reader.c \
@ -171,6 +170,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/surface/version.c \
src/core/lib/transport/byte_stream.c \
src/core/lib/transport/connectivity_state.c \
src/core/lib/transport/mdstr_hash_table.c \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/static_metadata.c \
@ -240,6 +240,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/client_config/lb_policy.c \
src/core/ext/client_config/lb_policy_factory.c \
src/core/ext/client_config/lb_policy_registry.c \
src/core/ext/client_config/method_config.c \
src/core/ext/client_config/parse_address.c \
src/core/ext/client_config/resolver.c \
src/core/ext/client_config/resolver_factory.c \

@ -51,6 +51,7 @@ some configuration as environment variables that can be set.
- glb - traces the grpclb load balancer
- queue_pluck
- queue_timeout
- server_channel - lightweight trace of significant server channel events
- secure_endpoint - traces bytes flowing through encrypted channels
- transport_security - traces metadata about secure channel establishment
- tcp - traces bytes in and out of a channel
@ -64,4 +65,3 @@ some configuration as environment variables that can be set.
- DEBUG - log all gRPC messages
- INFO - log INFO and ERROR message
- ERROR - log only errors

@ -0,0 +1,121 @@
# `epoll`-based pollset implementation in gRPC
Sree Kuchibhotla (sreek@) [May - 2016]
(Design input from Craig Tiller and David Klempner)
> Status: As of June 2016, this change is implemented and merged.
> * The bulk of the functionality is in: [ev_epoll_linux.c](https://github.com/grpc/grpc/blob/master/src/core/lib/iomgr/ev_epoll_linux.c)
> * Pull request: https://github.com/grpc/grpc/pull/6803
## 1. Introduction
This document describes the proposed changes to the `epoll`-based implementation of pollsets in gRPC. Section 2 gives an overview of the current implementation, Section 3 discusses the problems in the current implementation, and Section 4 describes the proposed changes.
## 2. Current `epoll`-based implementation in gRPC
![image](images/old_epoll_impl.png)
**Figure 1: Current implementation**
A gRPC client or a server can have more than one completion queue. Each completion queue creates a pollset.
The gRPC core library does not create any threads[^1] on its own and relies on the application using the library to provide them. A thread starts to poll for events by calling the gRPC core surface APIs `grpc_completion_queue_next()` or `grpc_completion_queue_pluck()`. More than one thread can call `grpc_completion_queue_next()` on the same completion queue[^2].
A file descriptor can be in more than one completion queue. There are examples in the next section that show how this can happen.
When an event of interest happens in a pollset, multiple threads are woken up and there are no guarantees on which thread actually ends up performing the work, i.e. executing the callbacks associated with that event. The thread that performs the work finally queues a completion event `grpc_cq_completion` on the appropriate completion queue and "kicks" (i.e. wakes up) the thread that is actually interested in that event (which can be itself, in which case there is no thread hop).
For example, in **Figure 1**, if `fd1` becomes readable, any one of the threads, i.e. *Thread 1* to *Thread K* or *Thread P*, might be woken up. Let's say *Thread P* was calling `grpc_completion_queue_pluck()` and was actually interested in the event on `fd1`, but *Thread 1* woke up. In this case, *Thread 1* executes the callbacks and finally kicks *Thread P* by signalling `event_fd_P`. *Thread P* wakes up, realizes that there is a new completion event for it, and returns from `grpc_completion_queue_pluck()` to its caller.
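To make the threading model above concrete, here is a minimal sketch of an application-provided poller thread that drives a completion queue. This is illustration code, not part of this change; `handle_completion` is a hypothetical application callback.
```c
#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Hypothetical application callback that processes a completed operation. */
static void handle_completion(void *tag, int success) { (void)tag; (void)success; }

/* The core library never spawns its own polling threads; the application
   runs a loop like this on one or more of its own threads. */
static void poller_thread(grpc_completion_queue *cq) {
  for (;;) {
    grpc_event ev = grpc_completion_queue_next(
        cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) break; /* the queue was shut down */
    if (ev.type == GRPC_OP_COMPLETE) {
      handle_completion(ev.tag, ev.success);
    }
  }
}
```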
## 3. Issues in the current architecture
### _Thundering Herds_
If multiple threads concurrently call `epoll_wait()`, we are guaranteed that only one thread is woken up if one of the `fds` in the set becomes readable/writable. However, in our current implementation, the threads do not directly call a blocking `epoll_wait()`[^3]. Instead, they call `poll()` on the set containing `[event_fd`[^4]`, epoll_fd]`. **(see Figure 1)**
Considering the fact that an `fd` can be in multiple `pollsets` and that each `pollset` might have multiple poller threads, it means that whenever an `fd` becomes readable/writable, all the threads in all the `pollsets` (in which that `fd` is present) are woken up.
The performance impact of this would be more conspicuous on the server side. Here are two examples of thundering herds on the server side.
Example 1: Listening fds on server
* A gRPC server can have multiple server completion queues (i.e. completion queues used to listen for incoming channels).
* A gRPC server can also listen on more than one TCP-port.
* A listening socket is created for each port the gRPC server would be listening on.
* Every listening socket's fd is added to all the server completion queues' pollsets. (Currently we do not do any sharding of the listening fds across these pollsets).
This means that for every incoming new channel, all the threads waiting on all the pollsets are woken up.
Example 2: New Incoming-channel fds on server
* Currently, every new incoming channel's `fd` (i.e. the socket `fd` returned by `accept()` on the new incoming channel) is added to all the server completion queues' pollsets[^5].
* Clearly, this would also cause a thundering herd problem for every read on that fd.
There are other scenarios, especially on the client side, where an `fd` can end up in multiple pollsets, which would cause thundering herds on the clients.
## 4. Proposed changes to the current `epoll`-based polling implementation:
The main idea in this proposal is to group 'related' `fds` into a single epoll-based set. This would ensure that only one thread wakes up in case of an event on one of the `fds` in the epoll set.
To accomplish this, we introduce a new abstraction called `polling_island` which will have an epoll set underneath (See **Figure 2** below). A `polling_island` contains the following:
* `epoll_fd`: The file descriptor of the underlying epoll set
* `fd_set`: The set of `fds` in the polling island, i.e. in the epoll set (the polling-island merge operation described later requires the list of fds in the island, and currently there is no API available to enumerate all the fds in an epoll set)
* `event_fd`: A level-triggered _event fd_ that is used to wake up all the threads waiting on this epoll set (Note: this `event_fd` is added to the underlying epoll set during polling-island creation. This is useful in the polling-island merge operation described later)
* `merged_to`: The polling island into which this one was merged. See Section 4.2 (Case 2) for more details. Also note that once `merged_to` is set, none of the other fields in this polling island are used anymore
In this new model, only one thread wakes up whenever an event of interest happens in an epoll set.
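A rough C sketch of the fields listed above follows. This is illustrative only; the actual `polling_island` in `ev_epoll_linux.c` also carries locks, reference counts and other bookkeeping.
```c
#include <stddef.h>

/* Simplified sketch of a polling island; field names follow the list above. */
typedef struct polling_island {
  int epoll_fd;   /* file descriptor of the underlying epoll set */
  int *fds;       /* fds currently in the epoll set (kept so islands can be merged) */
  size_t fd_count;
  int event_fd;   /* level-triggered eventfd, itself part of the epoll set; signalled
                     to wake every thread polling on this island */
  struct polling_island *merged_to; /* non-NULL once this island has merged into
                                       another; the remaining fields are then unused */
} polling_island;
```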
![drawing](images/new_epoll_impl.png)
**Figure 2: Proposed changes**
### 4.1 Relation between `fd`, `pollset` and `polling_island`:
* An `fd` may belong to multiple `pollsets` but belongs to exactly one `polling_island`
* A `pollset` belongs to exactly one `polling_island`
* An `fd` and the `pollset(s)` it belongs to have the same `polling_island`
### 4.2 Algorithm to add an `fd` to a `pollset`
There are two cases to check here:
* **Case 1:** Both `fd` and `pollset` already belong to the same `polling_island`
* This is straightforward and nothing really needs to be done here
* **Case 2:** The `fd` and `pollset` point to different `polling_islands`. In this case we _merge_ the two polling islands (a sketch of the merge follows this list), i.e.:
* Add all the `fds` from the smaller `polling_island` to the larger `polling_island` and update the `merged_to` pointer on the smaller island to point to the larger island.
* Wake up all the threads waiting on the smaller `polling_island`'s `epoll_fd` (by signalling the `event_fd` on that island) and make them now wait on the larger `polling_island`'s `epoll_fd`
* Update `fd` and `pollset` to now point to the larger `polling_island`
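As promised above, here is a hedged sketch of the Case 2 merge, written against the simplified `polling_island` struct from Section 4. The real code in `ev_epoll_linux.c` additionally handles locking, reference counting and error checking.
```c
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/epoll.h>

/* Merge the smaller island into the larger one. */
static void merge_into(polling_island *small, polling_island *large) {
  /* 1. Add every fd of the smaller island to the larger island's epoll set. */
  for (size_t i = 0; i < small->fd_count; i++) {
    struct epoll_event ev;
    ev.events = EPOLLIN | EPOLLOUT;
    ev.data.fd = small->fds[i];
    epoll_ctl(large->epoll_fd, EPOLL_CTL_ADD, small->fds[i], &ev);
  }
  /* 2. Redirect the smaller island so that later lookups land on the larger one. */
  small->merged_to = large;
  /* 3. Signal the smaller island's event_fd: every thread blocked on its epoll set
        wakes up, observes merged_to, and starts waiting on large->epoll_fd instead. */
  uint64_t one = 1;
  ssize_t unused = write(small->event_fd, &one, sizeof(one));
  (void)unused;
}
```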
### 4.3 Directed wakeups:
The new implementation, just like the current implementation, does not provide us any guarantees that the thread that is woken up is the thread that is actually interested in the event. So the thread that woke up executes the callbacks and finally has to 'kick' the appropriate polling thread interested in the event.
In the current implementation, every polling thread also has an `event_fd` on which it listens, and hence waking it up is as simple as signalling that `event_fd`. However, using an `event_fd` also means that every thread has to use `poll()` (on `event_fd` and `epoll_fd`) instead of doing an `epoll_wait()`, and this results in the thundering herd problems described above.
The proposal here is to use signals: kicking a thread would just mean sending a signal to that thread. Unfortunately there are only a few signals available on POSIX systems and most of them have pre-determined behavior, leaving only a few signals, `SIGUSR1`, `SIGUSR2` and `SIGRTx` (`SIGRTMIN` to `SIGRTMAX`), for custom use.
The calling application might have registered other signal handlers for these signals. We will provide a new API through which applications can give a signal number to the gRPC library to use for this purpose:
```
void grpc_use_signal(int signal_num)
```
If the calling application does not provide a signal number, the gRPC library will fall back to a model similar to the current implementation (where every thread does a blocking `poll()` on its `wakeup_fd` and the `epoll_fd`). The function `psi_wait()` in Figure 2 implements this logic.
>> (**NOTE**: Alternatively, if we do not get a signal number from the application, we can implement turnstile polling, i.e. have only one thread call `epoll_wait()` on the epoll set at any time while all the other threads call `poll()` on their `wakeup_fds`.)
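For illustration, an application might hand a real-time signal to gRPC roughly as follows. This is a sketch under assumptions: the API is assumed to be exposed from `grpc/grpc_posix.h`, and calling it before `grpc_init()` (so it takes effect before any polling starts) is an assumption rather than a documented requirement.
```c
#include <signal.h>
#include <grpc/grpc.h>
#include <grpc/grpc_posix.h>  /* assumed header exposing grpc_use_signal() */

int main(void) {
  /* Dedicate a real-time signal to gRPC's directed wakeups (assumption:
     do this before grpc_init() so no polling has started yet). */
  grpc_use_signal(SIGRTMIN + 2);
  grpc_init();
  /* ... create channels / servers / completion queues and run the application ... */
  grpc_shutdown();
  return 0;
}
```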
## Notes
[^1]: The only exception is name resolution
[^2]: However, `grpc_completion_queue_next()` and `grpc_completion_queue_pluck()` must not be called in parallel on the same completion queue
[^3]: The threads first do a blocking `poll()` with `[wakeup_fd, epoll_fd]`. If the `poll()` returns due to an event of interest in the epoll set, they then call a non-blocking, i.e. zero-timeout, `epoll_wait()` on the `epoll_fd`
[^4]: `event_fd` is the Linux-specific implementation of `grpc_wakeup_fd`. A `wakeup_fd` is used to wake up polling threads, typically when the event the polling thread is waiting for has already been completed by some other thread. It is also used to wake up the polling threads in case of shutdowns or to re-evaluate the poller's interest in the fds to poll (the last scenario applies only to the `poll`-based, not `epoll`-based, implementation of `pollsets`).
[^5]: See more details about the issue here https://github.com/grpc/grpc/issues/5470 and for a proposed fix here: https://github.com/grpc/grpc/pull/6149

Binary file not shown (image added, 52 KiB).

Binary file not shown (image added, 44 KiB).

@ -1,42 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Runs protoc with the gRPC plugin to generate messages and gRPC stubs."""
from grpc.tools import protoc
protoc.main(
(
'',
'-I../../protos',
'--python_out=.',
'--grpc_python_out=.',
'../../protos/helloworld.proto',
)
)

@ -308,7 +308,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/iomgr/workqueue.h',
'src/core/lib/iomgr/workqueue_posix.h',
'src/core/lib/iomgr/workqueue_windows.h',
'src/core/lib/json/json.h',
'src/core/lib/json/json_common.h',
@ -327,6 +326,7 @@ Pod::Spec.new do |s|
'src/core/lib/surface/server.h',
'src/core/lib/transport/byte_stream.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/mdstr_hash_table.h',
'src/core/lib/transport/metadata.h',
'src/core/lib/transport/metadata_batch.h',
'src/core/lib/transport/static_metadata.h',
@ -386,6 +386,7 @@ Pod::Spec.new do |s|
'src/core/ext/client_config/lb_policy.h',
'src/core/ext/client_config/lb_policy_factory.h',
'src/core/ext/client_config/lb_policy_registry.h',
'src/core/ext/client_config/method_config.h',
'src/core/ext/client_config/parse_address.h',
'src/core/ext/client_config/resolver.h',
'src/core/ext/client_config/resolver_factory.h',
@ -476,7 +477,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_posix.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
@ -502,6 +502,7 @@ Pod::Spec.new do |s|
'src/core/lib/surface/version.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
'src/core/lib/transport/mdstr_hash_table.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/static_metadata.c',
@ -571,6 +572,7 @@ Pod::Spec.new do |s|
'src/core/ext/client_config/lb_policy.c',
'src/core/ext/client_config/lb_policy_factory.c',
'src/core/ext/client_config/lb_policy_registry.c',
'src/core/ext/client_config/method_config.c',
'src/core/ext/client_config/parse_address.c',
'src/core/ext/client_config/resolver.c',
'src/core/ext/client_config/resolver_factory.c',
@ -683,7 +685,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/iomgr/workqueue.h',
'src/core/lib/iomgr/workqueue_posix.h',
'src/core/lib/iomgr/workqueue_windows.h',
'src/core/lib/json/json.h',
'src/core/lib/json/json_common.h',
@ -702,6 +703,7 @@ Pod::Spec.new do |s|
'src/core/lib/surface/server.h',
'src/core/lib/transport/byte_stream.h',
'src/core/lib/transport/connectivity_state.h',
'src/core/lib/transport/mdstr_hash_table.h',
'src/core/lib/transport/metadata.h',
'src/core/lib/transport/metadata_batch.h',
'src/core/lib/transport/static_metadata.h',
@ -761,6 +763,7 @@ Pod::Spec.new do |s|
'src/core/ext/client_config/lb_policy.h',
'src/core/ext/client_config/lb_policy_factory.h',
'src/core/ext/client_config/lb_policy_registry.h',
'src/core/ext/client_config/method_config.h',
'src/core/ext/client_config/parse_address.h',
'src/core/ext/client_config/resolver.h',
'src/core/ext/client_config/resolver_factory.h',

@ -228,7 +228,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_posix.h )
s.files += %w( src/core/lib/iomgr/workqueue.h )
s.files += %w( src/core/lib/iomgr/workqueue_posix.h )
s.files += %w( src/core/lib/iomgr/workqueue_windows.h )
s.files += %w( src/core/lib/json/json.h )
s.files += %w( src/core/lib/json/json_common.h )
@ -247,6 +246,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/surface/server.h )
s.files += %w( src/core/lib/transport/byte_stream.h )
s.files += %w( src/core/lib/transport/connectivity_state.h )
s.files += %w( src/core/lib/transport/mdstr_hash_table.h )
s.files += %w( src/core/lib/transport/metadata.h )
s.files += %w( src/core/lib/transport/metadata_batch.h )
s.files += %w( src/core/lib/transport/static_metadata.h )
@ -306,6 +306,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/client_config/lb_policy.h )
s.files += %w( src/core/ext/client_config/lb_policy_factory.h )
s.files += %w( src/core/ext/client_config/lb_policy_registry.h )
s.files += %w( src/core/ext/client_config/method_config.h )
s.files += %w( src/core/ext/client_config/parse_address.h )
s.files += %w( src/core/ext/client_config/resolver.h )
s.files += %w( src/core/ext/client_config/resolver_factory.h )
@ -396,7 +397,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/wakeup_fd_nospecial.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.c )
s.files += %w( src/core/lib/iomgr/wakeup_fd_posix.c )
s.files += %w( src/core/lib/iomgr/workqueue_posix.c )
s.files += %w( src/core/lib/iomgr/workqueue_windows.c )
s.files += %w( src/core/lib/json/json.c )
s.files += %w( src/core/lib/json/json_reader.c )
@ -422,6 +422,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/surface/version.c )
s.files += %w( src/core/lib/transport/byte_stream.c )
s.files += %w( src/core/lib/transport/connectivity_state.c )
s.files += %w( src/core/lib/transport/mdstr_hash_table.c )
s.files += %w( src/core/lib/transport/metadata.c )
s.files += %w( src/core/lib/transport/metadata_batch.c )
s.files += %w( src/core/lib/transport/static_metadata.c )
@ -491,6 +492,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/client_config/lb_policy.c )
s.files += %w( src/core/ext/client_config/lb_policy_factory.c )
s.files += %w( src/core/ext/client_config/lb_policy_registry.c )
s.files += %w( src/core/ext/client_config/method_config.c )
s.files += %w( src/core/ext/client_config/parse_address.c )
s.files += %w( src/core/ext/client_config/resolver.c )
s.files += %w( src/core/ext/client_config/resolver_factory.c )

@ -86,6 +86,7 @@ class ServerInterface;
namespace testing {
class InteropServerContextInspector;
class ServerContextTestSpouse;
} // namespace testing
// Interface of server side rpc context.
@ -173,6 +174,7 @@ class ServerContext {
private:
friend class ::grpc::testing::InteropServerContextInspector;
friend class ::grpc::testing::ServerContextTestSpouse;
friend class ::grpc::ServerInterface;
friend class ::grpc::Server;
template <class W, class R>

@ -0,0 +1,67 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_TEST_SERVER_CONTEXT_TEST_SPOUSE_H
#define GRPCXX_TEST_SERVER_CONTEXT_TEST_SPOUSE_H
#include <map>
#include <grpc++/server_context.h>
namespace grpc {
namespace testing {
// A test-only class to access private members and methods of ServerContext.
class ServerContextTestSpouse {
public:
explicit ServerContextTestSpouse(ServerContext* ctx) : ctx_(ctx) {}
// Inject client metadata to the ServerContext for the test. The test spouse
// must be alive when ServerContext::client_metadata is called.
void AddClientMetadata(const grpc::string& key, const grpc::string& value);
std::multimap<grpc::string, grpc::string> GetInitialMetadata() const {
return ctx_->initial_metadata_;
}
std::multimap<grpc::string, grpc::string> GetTrailingMetadata() const {
return ctx_->trailing_metadata_;
}
private:
ServerContext* ctx_; // not owned
std::multimap<grpc::string, grpc::string> client_metadata_storage_;
};
} // namespace testing
} // namespace grpc
#endif // GRPCXX_TEST_SERVER_CONTEXT_TEST_SPOUSE_H

@ -201,6 +201,9 @@ typedef struct {
#define GRPC_ARG_MAX_METADATA_SIZE "grpc.max_metadata_size"
/** If non-zero, allow the use of SO_REUSEPORT if it's available (default 1) */
#define GRPC_ARG_ALLOW_REUSEPORT "grpc.so_reuseport"
/** Service config data, to be passed to subchannels.
Not intended for external use. */
#define GRPC_ARG_SERVICE_CONFIG "grpc.service_config"
/** \} */
/** Result of a grpc call. If the caller satisfies the prerequisites of a

@ -48,7 +48,11 @@ typedef struct gpr_allocation_functions {
void (*free_fn)(void *ptr);
} gpr_allocation_functions;
/* malloc, never returns NULL */
/* malloc.
* If size==0, always returns NULL. Otherwise this function never returns NULL.
* The pointer returned is suitably aligned for any kind of variable it could
* contain.
*/
GPRAPI void *gpr_malloc(size_t size);
/* free */
GPRAPI void gpr_free(void *ptr);
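A small, hedged usage sketch of the contract documented above (illustrative only, not part of this change): `gpr_malloc(0)` returns NULL, while any non-zero request never returns NULL, so only the zero-size case needs special handling.
```c
#include <string.h>
#include <grpc/support/alloc.h>

/* Illustrative helper: duplicate a byte buffer using gpr allocation.
   Per the comment above, gpr_malloc(0) yields NULL and any other size
   never yields NULL. */
static void *dup_bytes(const void *src, size_t len) {
  void *copy = gpr_malloc(len);  /* NULL only when len == 0 */
  if (copy != NULL) {
    memcpy(copy, src, len);
  }
  return copy;                   /* caller releases with gpr_free() */
}
```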

@ -235,7 +235,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_windows.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_common.h" role="src" />
@ -254,6 +253,7 @@
<file baseinstalldir="/" name="src/core/lib/surface/server.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/byte_stream.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/mdstr_hash_table.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata_batch.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/static_metadata.h" role="src" />
@ -313,6 +313,7 @@
<file baseinstalldir="/" name="src/core/ext/client_config/lb_policy.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/lb_policy_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/lb_policy_registry.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/method_config.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/parse_address.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/resolver.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/resolver_factory.h" role="src" />
@ -403,7 +404,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_nospecial.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/workqueue_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json_reader.c" role="src" />
@ -429,6 +429,7 @@
<file baseinstalldir="/" name="src/core/lib/surface/version.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/byte_stream.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/connectivity_state.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/mdstr_hash_table.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata_batch.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/static_metadata.c" role="src" />
@ -498,6 +499,7 @@
<file baseinstalldir="/" name="src/core/ext/client_config/lb_policy.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/lb_policy_factory.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/lb_policy_registry.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/method_config.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/parse_address.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/resolver.c" role="src" />
<file baseinstalldir="/" name="src/core/ext/client_config/resolver_factory.c" role="src" />

@ -133,7 +133,7 @@ static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
d->start_ts = args->start_time;
return GRPC_ERROR_NONE;
}
@ -152,7 +152,7 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
d->start_ts = args->start_time;
/* TODO(hongyu): call census_tracing_start_op here. */
grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
return GRPC_ERROR_NONE;

@ -43,6 +43,7 @@
#include <grpc/support/useful.h>
#include "src/core/ext/client_config/lb_policy_registry.h"
#include "src/core/ext/client_config/method_config.h"
#include "src/core/ext/client_config/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
@ -53,6 +54,9 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/static_metadata.h"
/* Client channel implementation */
@ -68,12 +72,13 @@ typedef struct client_channel_channel_data {
/** client channel factory */
grpc_client_channel_factory *client_channel_factory;
/** mutex protecting client configuration, including all
variables below in this data structure */
/** mutex protecting all variables below in this data structure */
gpr_mu mu;
/** currently active load balancer - guarded by mu */
/** currently active load balancer */
grpc_lb_policy *lb_policy;
/** incoming resolver result - set by resolver.next(), guarded by mu */
/** method config table */
grpc_method_config_table *method_config_table;
/** incoming resolver result - set by resolver.next() */
grpc_resolver_result *resolver_result;
/** a list of closures that are all waiting for config to come in */
grpc_closure_list waiting_for_config_closures;
@ -172,6 +177,7 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
channel_data *chand = arg;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
grpc_method_config_table *method_config_table = NULL;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
bool exit_idle = false;
grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
@ -220,6 +226,13 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
state =
grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
}
const grpc_arg *channel_arg = grpc_channel_args_find(
lb_policy_args.additional_args, GRPC_ARG_SERVICE_CONFIG);
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
method_config_table = grpc_method_config_table_ref(
(grpc_method_config_table *)channel_arg->value.pointer.p);
}
grpc_resolver_result_unref(exec_ctx, chand->resolver_result);
chand->resolver_result = NULL;
}
@ -232,6 +245,10 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&chand->mu);
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
if (chand->method_config_table != NULL) {
grpc_method_config_table_unref(chand->method_config_table);
}
chand->method_config_table = method_config_table;
if (lb_policy != NULL) {
grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
NULL);
@ -392,6 +409,9 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
if (chand->method_config_table != NULL) {
grpc_method_config_table_unref(chand->method_config_table);
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(chand->interested_parties);
gpr_mu_destroy(&chand->mu);
@ -424,7 +444,16 @@ typedef struct client_channel_call_data {
// stack and each has its own mutex. If/when we have time, find a way
// to avoid this without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
grpc_mdstr *path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
enum {
WAIT_FOR_READY_UNSET,
WAIT_FOR_READY_FALSE,
WAIT_FOR_READY_TRUE
} wait_for_ready_from_service_config;
grpc_closure read_service_config;
grpc_error *cancel_error;
@ -532,10 +561,11 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_CREATE_REFERENCING(
"Cancelled before creating subchannel", &error, 1));
} else {
/* Create call on subchannel. */
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent, calld->deadline,
&subchannel_call);
exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
calld->deadline, &subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
@ -584,11 +614,15 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
} else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags,
cpa->connected_subchannel, cpa->on_ready,
GRPC_ERROR_NONE)) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
} else {
call_data *calld = cpa->elem->call_data;
gpr_mu_lock(&calld->mu);
if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags, cpa->connected_subchannel,
cpa->on_ready, GRPC_ERROR_NONE)) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
}
gpr_mu_unlock(&calld->mu);
}
gpr_free(cpa);
}
@ -631,18 +665,33 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GPR_ASSERT(error == GRPC_ERROR_NONE);
if (chand->lb_policy != NULL) {
grpc_lb_policy *lb_policy = chand->lb_policy;
int r;
GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
gpr_mu_unlock(&chand->mu);
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
const bool wait_for_ready_set_from_api =
initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
const bool wait_for_ready_set_from_service_config =
calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET;
if (!wait_for_ready_set_from_api &&
wait_for_ready_set_from_service_config) {
if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
// TODO(dgq): make this deadline configurable somehow.
const grpc_lb_policy_pick_args inputs = {
initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
gpr_inf_future(GPR_CLOCK_MONOTONIC)};
r = grpc_lb_policy_pick(exec_ctx, lb_policy, &inputs, connected_subchannel,
NULL, on_ready);
const bool result = grpc_lb_policy_pick(
exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
GPR_TIMER_END("pick_subchannel", 0);
return r;
return result;
}
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = true;
@ -768,8 +817,8 @@ retry:
calld->connected_subchannel != NULL) {
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent, calld->deadline,
&subchannel_call);
exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
calld->deadline, &subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
@ -786,13 +835,69 @@ retry:
GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
// Gets data from the service config. Invoked when the resolver returns
// its initial result.
static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
// If this is an error, there's no point in looking at the service config.
if (error == GRPC_ERROR_NONE) {
// Get the method config table from channel data.
gpr_mu_lock(&chand->mu);
grpc_method_config_table *method_config_table = NULL;
if (chand->method_config_table != NULL) {
method_config_table =
grpc_method_config_table_ref(chand->method_config_table);
}
gpr_mu_unlock(&chand->mu);
// If the method config table was present, use it.
if (method_config_table != NULL) {
const grpc_method_config *method_config =
grpc_method_config_table_get_method_config(method_config_table,
calld->path);
if (method_config != NULL) {
const gpr_timespec *per_method_timeout =
grpc_method_config_get_timeout(method_config);
const bool *wait_for_ready =
grpc_method_config_get_wait_for_ready(method_config);
if (per_method_timeout != NULL || wait_for_ready != NULL) {
gpr_mu_lock(&calld->mu);
if (per_method_timeout != NULL) {
gpr_timespec per_method_deadline =
gpr_time_add(calld->call_start_time, *per_method_timeout);
if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
calld->deadline = per_method_deadline;
// Reset deadline timer.
grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
}
}
if (wait_for_ready != NULL) {
calld->wait_for_ready_from_service_config =
*wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE;
}
gpr_mu_unlock(&calld->mu);
}
}
grpc_method_config_table_unref(method_config_table);
}
}
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
}
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_deadline_state_init(exec_ctx, elem, args);
calld->deadline = args->deadline;
// Initialize data members.
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
calld->path = GRPC_MDSTR_REF(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
calld->cancel_error = GRPC_ERROR_NONE;
gpr_atm_rel_store(&calld->subchannel_call, 0);
gpr_mu_init(&calld->mu);
@ -803,6 +908,55 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
calld->owning_call = args->call_stack;
calld->pollent = NULL;
// If the resolver has already returned results, then we can access
// the service config parameters immediately. Otherwise, we need to
// defer that work until the resolver returns an initial result.
// TODO(roth): This code is almost but not quite identical to the code
// in read_service_config() above. It would be nice to find a way to
// combine them, to avoid having to maintain it twice.
gpr_mu_lock(&chand->mu);
if (chand->lb_policy != NULL) {
// We already have a resolver result, so check for service config.
if (chand->method_config_table != NULL) {
grpc_method_config_table *method_config_table =
grpc_method_config_table_ref(chand->method_config_table);
gpr_mu_unlock(&chand->mu);
grpc_method_config *method_config =
grpc_method_config_table_get_method_config(method_config_table,
args->path);
if (method_config != NULL) {
const gpr_timespec *per_method_timeout =
grpc_method_config_get_timeout(method_config);
if (per_method_timeout != NULL) {
gpr_timespec per_method_deadline =
gpr_time_add(calld->call_start_time, *per_method_timeout);
calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
}
const bool *wait_for_ready =
grpc_method_config_get_wait_for_ready(method_config);
if (wait_for_ready != NULL) {
calld->wait_for_ready_from_service_config =
*wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE;
}
}
grpc_method_config_table_unref(method_config_table);
} else {
gpr_mu_unlock(&chand->mu);
}
} else {
// We don't yet have a resolver result, so register a callback to
// get the service config data once the resolver returns.
// Take a reference to the call stack to be owned by the callback.
GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
grpc_closure_init(&calld->read_service_config, read_service_config, elem);
grpc_closure_list_append(&chand->waiting_for_config_closures,
&calld->read_service_config, GRPC_ERROR_NONE);
gpr_mu_unlock(&chand->mu);
}
// Start the deadline timer with the current deadline value. If we
// do not yet have service config data, then the timer may be reset
// later.
grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
return GRPC_ERROR_NONE;
}
@ -813,6 +967,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
void *and_free_memory) {
call_data *calld = elem->call_data;
grpc_deadline_state_destroy(exec_ctx, elem);
GRPC_MDSTR_UNREF(calld->path);
GRPC_ERROR_UNREF(calld->cancel_error);
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {
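To summarize the two per-method overrides wired in above, here is a standalone sketch with hypothetical names (EXAMPLE_*, example_*); it is not the channel's actual code, only the rules it implements: an explicit per-call wait_for_ready always wins over the service config, and a per-method timeout can only tighten the deadline the application already set.
#include <stdbool.h>
#include <stdint.h>
#include <grpc/support/time.h>
#define EXAMPLE_MD_WAIT_FOR_READY (1u << 0)
#define EXAMPLE_MD_WAIT_FOR_READY_EXPLICITLY_SET (1u << 1)
typedef enum { EX_WFR_UNSET, EX_WFR_FALSE, EX_WFR_TRUE } example_wfr;
/* Apply the service-config value only when the API did not set one. */
static uint32_t example_apply_wait_for_ready(uint32_t flags, example_wfr cfg) {
  bool set_from_api =
      (flags & EXAMPLE_MD_WAIT_FOR_READY_EXPLICITLY_SET) != 0;
  if (!set_from_api && cfg != EX_WFR_UNSET) {
    if (cfg == EX_WFR_TRUE) {
      flags |= EXAMPLE_MD_WAIT_FOR_READY;
    } else {
      flags &= ~EXAMPLE_MD_WAIT_FOR_READY;
    }
  }
  return flags;
}
/* The per-method timeout never extends the caller's deadline. */
static gpr_timespec example_apply_timeout(gpr_timespec call_start_time,
                                          gpr_timespec deadline,
                                          gpr_timespec per_method_timeout) {
  return gpr_time_min(deadline,
                      gpr_time_add(call_start_time, per_method_timeout));
}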

@ -0,0 +1,296 @@
//
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "src/core/ext/client_config/method_config.h"
#include <string.h>
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/transport/mdstr_hash_table.h"
#include "src/core/lib/transport/metadata.h"
//
// grpc_method_config
//
// bool vtable
static void* bool_copy(void* valuep) {
bool value = *(bool*)valuep;
bool* new_value = gpr_malloc(sizeof(bool));
*new_value = value;
return new_value;
}
static int bool_cmp(void* v1, void* v2) {
bool b1 = *(bool*)v1;
bool b2 = *(bool*)v2;
if (!b1 && b2) return -1;
if (b1 && !b2) return 1;
return 0;
}
static grpc_mdstr_hash_table_vtable bool_vtable = {gpr_free, bool_copy,
bool_cmp};
// timespec vtable
static void* timespec_copy(void* valuep) {
gpr_timespec value = *(gpr_timespec*)valuep;
gpr_timespec* new_value = gpr_malloc(sizeof(gpr_timespec));
*new_value = value;
return new_value;
}
static int timespec_cmp(void* v1, void* v2) {
return gpr_time_cmp(*(gpr_timespec*)v1, *(gpr_timespec*)v2);
}
static grpc_mdstr_hash_table_vtable timespec_vtable = {gpr_free, timespec_copy,
timespec_cmp};
// int32 vtable
static void* int32_copy(void* valuep) {
int32_t value = *(int32_t*)valuep;
int32_t* new_value = gpr_malloc(sizeof(int32_t));
*new_value = value;
return new_value;
}
static int int32_cmp(void* v1, void* v2) {
int32_t i1 = *(int32_t*)v1;
int32_t i2 = *(int32_t*)v2;
if (i1 < i2) return -1;
if (i1 > i2) return 1;
return 0;
}
static grpc_mdstr_hash_table_vtable int32_vtable = {gpr_free, int32_copy,
int32_cmp};
// Hash table keys.
#define GRPC_METHOD_CONFIG_WAIT_FOR_READY "grpc.wait_for_ready" // bool
#define GRPC_METHOD_CONFIG_TIMEOUT "grpc.timeout" // gpr_timespec
#define GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES \
"grpc.max_request_message_bytes" // int32
#define GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES \
"grpc.max_response_message_bytes" // int32
struct grpc_method_config {
grpc_mdstr_hash_table* table;
grpc_mdstr* wait_for_ready_key;
grpc_mdstr* timeout_key;
grpc_mdstr* max_request_message_bytes_key;
grpc_mdstr* max_response_message_bytes_key;
};
grpc_method_config* grpc_method_config_create(
bool* wait_for_ready, gpr_timespec* timeout,
int32_t* max_request_message_bytes, int32_t* max_response_message_bytes) {
grpc_method_config* method_config = gpr_malloc(sizeof(grpc_method_config));
memset(method_config, 0, sizeof(grpc_method_config));
method_config->wait_for_ready_key =
grpc_mdstr_from_string(GRPC_METHOD_CONFIG_WAIT_FOR_READY);
method_config->timeout_key =
grpc_mdstr_from_string(GRPC_METHOD_CONFIG_TIMEOUT);
method_config->max_request_message_bytes_key =
grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES);
method_config->max_response_message_bytes_key =
grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES);
grpc_mdstr_hash_table_entry entries[4];
size_t num_entries = 0;
if (wait_for_ready != NULL) {
entries[num_entries].key = method_config->wait_for_ready_key;
entries[num_entries].value = wait_for_ready;
entries[num_entries].vtable = &bool_vtable;
++num_entries;
}
if (timeout != NULL) {
entries[num_entries].key = method_config->timeout_key;
entries[num_entries].value = timeout;
entries[num_entries].vtable = &timespec_vtable;
++num_entries;
}
if (max_request_message_bytes != NULL) {
entries[num_entries].key = method_config->max_request_message_bytes_key;
entries[num_entries].value = max_request_message_bytes;
entries[num_entries].vtable = &int32_vtable;
++num_entries;
}
if (max_response_message_bytes != NULL) {
entries[num_entries].key = method_config->max_response_message_bytes_key;
entries[num_entries].value = max_response_message_bytes;
entries[num_entries].vtable = &int32_vtable;
++num_entries;
}
method_config->table = grpc_mdstr_hash_table_create(num_entries, entries);
return method_config;
}
grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config) {
grpc_mdstr_hash_table_ref(method_config->table);
return method_config;
}
void grpc_method_config_unref(grpc_method_config* method_config) {
if (grpc_mdstr_hash_table_unref(method_config->table)) {
GRPC_MDSTR_UNREF(method_config->wait_for_ready_key);
GRPC_MDSTR_UNREF(method_config->timeout_key);
GRPC_MDSTR_UNREF(method_config->max_request_message_bytes_key);
GRPC_MDSTR_UNREF(method_config->max_response_message_bytes_key);
gpr_free(method_config);
}
}
int grpc_method_config_cmp(const grpc_method_config* method_config1,
const grpc_method_config* method_config2) {
return grpc_mdstr_hash_table_cmp(method_config1->table,
method_config2->table);
}
const bool* grpc_method_config_get_wait_for_ready(
const grpc_method_config* method_config) {
return grpc_mdstr_hash_table_get(method_config->table,
method_config->wait_for_ready_key);
}
const gpr_timespec* grpc_method_config_get_timeout(
const grpc_method_config* method_config) {
return grpc_mdstr_hash_table_get(method_config->table,
method_config->timeout_key);
}
const int32_t* grpc_method_config_get_max_request_message_bytes(
const grpc_method_config* method_config) {
return grpc_mdstr_hash_table_get(
method_config->table, method_config->max_request_message_bytes_key);
}
const int32_t* grpc_method_config_get_max_response_message_bytes(
const grpc_method_config* method_config) {
return grpc_mdstr_hash_table_get(
method_config->table, method_config->max_response_message_bytes_key);
}
//
// grpc_method_config_table
//
static void method_config_unref(void* valuep) {
grpc_method_config_unref(valuep);
}
static void* method_config_ref(void* valuep) {
return grpc_method_config_ref(valuep);
}
static int method_config_cmp(void* valuep1, void* valuep2) {
return grpc_method_config_cmp(valuep1, valuep2);
}
static const grpc_mdstr_hash_table_vtable method_config_table_vtable = {
method_config_unref, method_config_ref, method_config_cmp};
grpc_method_config_table* grpc_method_config_table_create(
size_t num_entries, grpc_method_config_table_entry* entries) {
grpc_mdstr_hash_table_entry* hash_table_entries =
gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) * num_entries);
for (size_t i = 0; i < num_entries; ++i) {
hash_table_entries[i].key = entries[i].method_name;
hash_table_entries[i].value = entries[i].method_config;
hash_table_entries[i].vtable = &method_config_table_vtable;
}
grpc_method_config_table* method_config_table =
grpc_mdstr_hash_table_create(num_entries, hash_table_entries);
gpr_free(hash_table_entries);
return method_config_table;
}
grpc_method_config_table* grpc_method_config_table_ref(
grpc_method_config_table* table) {
return grpc_mdstr_hash_table_ref(table);
}
void grpc_method_config_table_unref(grpc_method_config_table* table) {
grpc_mdstr_hash_table_unref(table);
}
int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
const grpc_method_config_table* table2) {
return grpc_mdstr_hash_table_cmp(table1, table2);
}
grpc_method_config* grpc_method_config_table_get_method_config(
const grpc_method_config_table* table, const grpc_mdstr* path) {
grpc_method_config* method_config = grpc_mdstr_hash_table_get(table, path);
// If we didn't find a match for the path, try looking for a wildcard
// entry (i.e., change "/service/method" to "/service/*").
if (method_config == NULL) {
const char* path_str = grpc_mdstr_as_c_string(path);
const char* sep = strrchr(path_str, '/') + 1;
const size_t len = (size_t)(sep - path_str);
char* buf = gpr_malloc(len + 2); // '*' and NUL
memcpy(buf, path_str, len);
buf[len] = '*';
buf[len + 1] = '\0';
grpc_mdstr* wildcard_path = grpc_mdstr_from_string(buf);
gpr_free(buf);
method_config = grpc_mdstr_hash_table_get(table, wildcard_path);
GRPC_MDSTR_UNREF(wildcard_path);
}
return method_config;
}
static void* copy_arg(void* p) { return grpc_method_config_table_ref(p); }
static void destroy_arg(void* p) { grpc_method_config_table_unref(p); }
static int cmp_arg(void* p1, void* p2) {
return grpc_method_config_table_cmp(p1, p2);
}
static grpc_arg_pointer_vtable arg_vtable = {copy_arg, destroy_arg, cmp_arg};
grpc_arg grpc_method_config_table_create_channel_arg(
grpc_method_config_table* table) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
arg.key = GRPC_ARG_SERVICE_CONFIG;
arg.value.pointer.p = table;
arg.value.pointer.vtable = &arg_vtable;
return arg;
}
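A minimal sketch of handing a table to a channel via the arg built by the function above; the helper name is hypothetical, and it assumes grpc_channel_args_copy_and_add from channel_args.h, whose copy of the pointer arg takes its own table reference through the vtable.
#include "src/core/ext/client_config/method_config.h"
#include "src/core/lib/channel/channel_args.h"
/* Wrap a table (see the usage sketch after method_config.h below) into a
   channel-args list under GRPC_ARG_SERVICE_CONFIG. The caller keeps its
   own reference to `table`. */
static grpc_channel_args *example_attach_service_config(
    grpc_method_config_table *table) {
  grpc_arg arg = grpc_method_config_table_create_channel_arg(table);
  return grpc_channel_args_copy_and_add(NULL, &arg, 1);
}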

@ -0,0 +1,116 @@
//
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_METHOD_CONFIG_H
#define GRPC_CORE_EXT_CLIENT_CONFIG_METHOD_CONFIG_H
#include <stdbool.h>
#include <grpc/impl/codegen/gpr_types.h>
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/transport/mdstr_hash_table.h"
#include "src/core/lib/transport/metadata.h"
/// Per-method configuration.
typedef struct grpc_method_config grpc_method_config;
/// Creates a grpc_method_config with the specified parameters.
/// Any parameter may be NULL to indicate that the value is unset.
///
/// \a wait_for_ready indicates whether the client should wait until the
/// request deadline for the channel to become ready, even if there is a
/// temporary failure before the deadline while attempting to connect.
///
/// \a timeout indicates the timeout for calls.
///
/// \a max_request_message_bytes and \a max_response_message_bytes
/// indicate the maximum sizes of the request (checked when sending) and
/// response (checked when receiving) messages.
grpc_method_config* grpc_method_config_create(
bool* wait_for_ready, gpr_timespec* timeout,
int32_t* max_request_message_bytes, int32_t* max_response_message_bytes);
grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config);
void grpc_method_config_unref(grpc_method_config* method_config);
/// Compares two grpc_method_configs.
/// The sort order is stable but undefined.
int grpc_method_config_cmp(const grpc_method_config* method_config1,
const grpc_method_config* method_config2);
/// These methods return NULL if the requested field is unset.
/// The caller does NOT take ownership of the result.
const bool* grpc_method_config_get_wait_for_ready(
const grpc_method_config* method_config);
const gpr_timespec* grpc_method_config_get_timeout(
const grpc_method_config* method_config);
const int32_t* grpc_method_config_get_max_request_message_bytes(
const grpc_method_config* method_config);
const int32_t* grpc_method_config_get_max_response_message_bytes(
const grpc_method_config* method_config);
/// A table of method configs.
typedef grpc_mdstr_hash_table grpc_method_config_table;
typedef struct grpc_method_config_table_entry {
/// The name is of one of the following forms:
/// service/method -- specifies exact service and method name
/// service/* -- matches all methods for the specified service
grpc_mdstr* method_name;
grpc_method_config* method_config;
} grpc_method_config_table_entry;
/// Takes new references to all keys and values in \a entries.
grpc_method_config_table* grpc_method_config_table_create(
size_t num_entries, grpc_method_config_table_entry* entries);
grpc_method_config_table* grpc_method_config_table_ref(
grpc_method_config_table* table);
void grpc_method_config_table_unref(grpc_method_config_table* table);
/// Compares two grpc_method_config_tables.
/// The sort order is stable but undefined.
int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
const grpc_method_config_table* table2);
/// Gets the method config for the specified \a path, which should be of
/// the form "/service/method".
/// Returns NULL if the method has no config.
/// Caller does NOT own a reference to the result.
grpc_method_config* grpc_method_config_table_get_method_config(
const grpc_method_config_table* table, const grpc_mdstr* path);
/// Returns a channel arg containing \a table.
grpc_arg grpc_method_config_table_create_channel_arg(
grpc_method_config_table* table);
#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_METHOD_CONFIG_H */
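A usage sketch of this API with a hypothetical function name; it assumes grpc_init() has already been called so mdstr interning is available, and relies on the wildcard fallback described above for the lookup.
#include <stdbool.h>
#include <grpc/support/time.h>
#include "src/core/ext/client_config/method_config.h"
/* Apply a 30s timeout to every method of "myservice", then look up one
   concrete method, which falls back to the "/myservice/*" entry. */
static void example_method_config_usage(void) {
  bool wait_for_ready = true;
  gpr_timespec timeout = gpr_time_from_seconds(30, GPR_TIMESPAN);
  grpc_method_config *config = grpc_method_config_create(
      &wait_for_ready, &timeout, NULL /* max request size */,
      NULL /* max response size */);
  grpc_method_config_table_entry entry = {
      grpc_mdstr_from_string("/myservice/*"), config};
  grpc_method_config_table *table =
      grpc_method_config_table_create(1, &entry);
  /* The table took its own references to the key and the config. */
  GRPC_MDSTR_UNREF(entry.method_name);
  grpc_method_config_unref(config);
  grpc_mdstr *path = grpc_mdstr_from_string("/myservice/SayHello");
  grpc_method_config *found =
      grpc_method_config_table_get_method_config(table, path);
  if (found != NULL) {
    const gpr_timespec *t = grpc_method_config_get_timeout(found);
    (void)t; /* 30 seconds here */
  }
  GRPC_MDSTR_UNREF(path);
  grpc_method_config_table_unref(table);
}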

@ -52,22 +52,17 @@ typedef struct grpc_resolver_result grpc_resolver_result;
grpc_resolver_result* grpc_resolver_result_create(
const char* server_name, grpc_lb_addresses* addresses,
const char* lb_policy_name, grpc_channel_args* lb_policy_args);
void grpc_resolver_result_ref(grpc_resolver_result* result);
void grpc_resolver_result_unref(grpc_exec_ctx* exec_ctx,
grpc_resolver_result* result);
/// Caller does NOT take ownership of result.
/// Accessors. Caller does NOT take ownership of results.
const char* grpc_resolver_result_get_server_name(grpc_resolver_result* result);
/// Caller does NOT take ownership of result.
grpc_lb_addresses* grpc_resolver_result_get_addresses(
grpc_resolver_result* result);
/// Caller does NOT take ownership of result.
const char* grpc_resolver_result_get_lb_policy_name(
grpc_resolver_result* result);
/// Caller does NOT take ownership of result.
grpc_channel_args* grpc_resolver_result_get_lb_policy_args(
grpc_resolver_result* result);

@ -220,8 +220,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"SUBCHANNEL: %p %12s 0x%08d -> 0x%08d [%s]", c, purpose, (int)old_val,
(int)(old_val + delta), reason);
"SUBCHANNEL: %p %s 0x%08" PRIxPTR " -> 0x%08" PRIxPTR " [%s]", c,
purpose, old_val, old_val + delta, reason);
#endif
return old_val;
}
@ -704,7 +704,7 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
grpc_polling_entity *pollent, gpr_timespec deadline,
grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
*call = gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
@ -712,7 +712,7 @@ grpc_error *grpc_connected_subchannel_create_call(
(*call)->connection = con; // Ref is added below.
grpc_error *error =
grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, *call,
NULL, NULL, deadline, callstk);
NULL, NULL, path, deadline, callstk);
if (error != GRPC_ERROR_NONE) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);

@ -38,6 +38,7 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
/** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */
@ -110,7 +111,7 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
/** construct a subchannel call */
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
grpc_polling_entity *pollent, gpr_timespec deadline,
grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **subchannel_call);
/** process a transport level op */

@ -113,6 +113,7 @@
#include "src/core/ext/client_config/parse_address.h"
#include "src/core/ext/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/support/string.h"
@ -270,6 +271,7 @@ typedef struct glb_lb_policy {
/** who the client is trying to communicate with */
const char *server_name;
grpc_client_channel_factory *cc_factory;
grpc_channel_args *args;
/** deadline for the LB's call */
gpr_timespec deadline;
@ -457,6 +459,7 @@ static grpc_lb_policy *create_rr_locked(
args.server_name = glb_policy->server_name;
args.client_channel_factory = glb_policy->cc_factory;
args.addresses = process_serverlist(serverlist);
args.additional_args = glb_policy->args;
grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
@ -584,6 +587,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
* Create a client channel over them to communicate with a LB service */
glb_policy->server_name = gpr_strdup(args->server_name);
glb_policy->cc_factory = args->client_channel_factory;
glb_policy->args = grpc_channel_args_copy(args->additional_args);
GPR_ASSERT(glb_policy->cc_factory != NULL);
/* construct a target from the addresses in args, given in the form
@ -650,6 +654,7 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GPR_ASSERT(glb_policy->pending_picks == NULL);
GPR_ASSERT(glb_policy->pending_pings == NULL);
gpr_free((void *)glb_policy->server_name);
grpc_channel_args_destroy(glb_policy->args);
grpc_channel_destroy(glb_policy->lb_channel);
glb_policy->lb_channel = NULL;
grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);

@ -466,6 +466,7 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
sc_args.addr =
(struct sockaddr *)(&args->addresses->addresses[i].address.addr);
sc_args.addr_len = args->addresses->addresses[i].address.len;
sc_args.args = args->additional_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);

@ -629,6 +629,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
sc_args.addr =
(struct sockaddr *)(&args->addresses->addresses[i].address.addr);
sc_args.addr_len = args->addresses->addresses[i].address.len;
sc_args.args = args->additional_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);

@ -33,6 +33,7 @@
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
@ -42,6 +43,7 @@
#include "src/core/ext/client_config/parse_address.h"
#include "src/core/ext/client_config/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
@ -119,7 +121,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
*r->target_result = grpc_resolver_result_create(
r->target_name,
grpc_lb_addresses_copy(r->addresses, NULL /* user_data_copy */),
NULL /* lb_policy_name */, NULL);
NULL /* lb_policy_name */, NULL /* lb_policy_args */);
grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
}
@ -128,8 +130,8 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
sockaddr_resolver *r = (sockaddr_resolver *)gr;
gpr_mu_destroy(&r->mu);
grpc_lb_addresses_destroy(r->addresses, NULL /* user_data_destroy */);
gpr_free(r->target_name);
grpc_lb_addresses_destroy(r->addresses, NULL /* user_data_destroy */);
gpr_free(r);
}

@ -36,14 +36,11 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/metadata.h"
extern int grpc_http_write_state_trace;
void grpc_chttp2_plugin_init(void) {
grpc_chttp2_base64_encode_and_huffman_compress =
grpc_chttp2_base64_encode_and_huffman_compress_impl;
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
grpc_register_tracer("http_write_state", &grpc_http_write_state_trace);
}
void grpc_chttp2_plugin_shutdown(void) {}

File diff suppressed because it is too large

@ -40,8 +40,8 @@
#include "src/core/lib/iomgr/error.h"
/* defined in internal.h */
typedef struct grpc_chttp2_stream_parsing grpc_chttp2_stream_parsing;
typedef struct grpc_chttp2_transport_parsing grpc_chttp2_transport_parsing;
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
#define GRPC_CHTTP2_FRAME_DATA 0
#define GRPC_CHTTP2_FRAME_HEADER 1

@ -51,16 +51,11 @@ grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser) {
void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser) {
grpc_byte_stream *bs;
if (parser->parsing_frame) {
if (parser->parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"),
1);
}
while (
(bs = grpc_chttp2_incoming_frame_queue_pop(&parser->incoming_frames))) {
grpc_byte_stream_destroy(exec_ctx, bs);
exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"));
}
GRPC_ERROR_UNREF(parser->error);
}
grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
@ -145,22 +140,17 @@ void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
stats->data_bytes += write_bytes;
}
grpc_error *grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *p,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_data_parser *p = parser;
uint32_t message_flags;
grpc_chttp2_incoming_byte_stream *incoming_byte_stream;
char *msg;
if (is_last && p->is_last_frame) {
stream_parsing->received_close = 1;
}
if (cur == end) {
return GRPC_ERROR_NONE;
}
@ -171,7 +161,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
return GRPC_ERROR_REF(p->error);
fh_0:
case GRPC_CHTTP2_DATA_FH_0:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_type = *cur;
switch (p->frame_type) {
case 0:
@ -184,7 +174,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
gpr_asprintf(&msg, "Bad GRPC frame type 0x%02x", p->frame_type);
p->error = GRPC_ERROR_CREATE(msg);
p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID,
(intptr_t)stream_parsing->id);
(intptr_t)s->id);
gpr_free(msg);
msg = gpr_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
p->error =
@ -201,7 +191,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_1:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size = ((uint32_t)*cur) << 24;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_2;
@ -209,7 +199,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_2:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 16;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_3;
@ -217,7 +207,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_3:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_4;
@ -225,7 +215,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_4:
stream_parsing->stats.incoming.framing_bytes++;
s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur);
p->state = GRPC_CHTTP2_DATA_FRAME;
++cur;
@ -234,35 +224,32 @@ grpc_error *grpc_chttp2_data_parser_parse(
message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
p->parsing_frame = incoming_byte_stream =
grpc_chttp2_incoming_byte_stream_create(
exec_ctx, transport_parsing, stream_parsing, p->frame_size,
message_flags, &p->incoming_frames);
grpc_chttp2_incoming_byte_stream_create(exec_ctx, t, s, p->frame_size,
message_flags);
/* fallthrough */
case GRPC_CHTTP2_DATA_FRAME:
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
if (cur == end) {
return GRPC_ERROR_NONE;
}
uint32_t remaining = (uint32_t)(end - cur);
if (remaining == p->frame_size) {
stream_parsing->stats.incoming.data_bytes += p->frame_size;
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE, 1);
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
p->state = GRPC_CHTTP2_DATA_FH_0;
return GRPC_ERROR_NONE;
} else if (remaining > p->frame_size) {
stream_parsing->stats.incoming.data_bytes += p->frame_size;
s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg),
(size_t)(cur + p->frame_size - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
GRPC_ERROR_NONE, 1);
GRPC_ERROR_NONE);
p->parsing_frame = NULL;
cur += p->frame_size;
goto fh_0; /* loop */
@ -272,10 +259,25 @@ grpc_error *grpc_chttp2_data_parser_parse(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
p->frame_size -= remaining;
stream_parsing->stats.incoming.data_bytes += remaining;
s->stats.incoming.data_bytes += remaining;
return GRPC_ERROR_NONE;
}
}
GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
}
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
grpc_chttp2_data_parser *p = parser;
grpc_error *error = parse_inner(exec_ctx, p, t, s, slice);
if (is_last && p->is_last_frame) {
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
GRPC_ERROR_NONE);
}
return error;
}
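For reference, a standalone sketch (hypothetical helper name) of the 5-byte message prefix that the GRPC_CHTTP2_DATA_FH_0..FH_4 states above walk through: one flags byte (bit 0 = compressed) followed by a 4-byte big-endian payload length.
#include <stdint.h>
/* Write the gRPC message prefix exactly as the parser above reads it. */
static void example_write_message_prefix(uint8_t prefix[5], int compressed,
                                         uint32_t payload_length) {
  prefix[0] = compressed ? 1 : 0;
  prefix[1] = (uint8_t)(payload_length >> 24);
  prefix[2] = (uint8_t)(payload_length >> 16);
  prefix[3] = (uint8_t)(payload_length >> 8);
  prefix[4] = (uint8_t)(payload_length);
}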

@ -69,7 +69,6 @@ typedef struct {
grpc_error *error;
int is_frame_compressed;
grpc_chttp2_incoming_frame_queue incoming_frames;
grpc_chttp2_incoming_byte_stream *parsing_frame;
} grpc_chttp2_data_parser;
@ -92,10 +91,10 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
/* handle a slice of a data frame - is_last indicates the last slice of a
frame */
grpc_error *grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
uint32_t write_bytes, int is_eof,

@ -67,10 +67,11 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_goaway_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -148,12 +149,9 @@ grpc_error *grpc_chttp2_goaway_parser_parse(
p->debug_pos += (uint32_t)(end - cur);
p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
if (is_last) {
transport_parsing->goaway_received = 1;
transport_parsing->goaway_last_stream_index = p->last_stream_id;
gpr_slice_unref(transport_parsing->goaway_text);
transport_parsing->goaway_error = (grpc_status_code)p->error_code;
transport_parsing->goaway_text =
gpr_slice_new(p->debug_data, p->debug_length, gpr_free);
grpc_chttp2_add_incoming_goaway(
exec_ctx, t, (uint32_t)p->error_code,
gpr_slice_new(p->debug_data, p->debug_length, gpr_free));
p->debug_data = NULL;
}
return GRPC_ERROR_NONE;

@ -65,10 +65,11 @@ void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p);
void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
grpc_error *grpc_chttp2_goaway_parser_begin_frame(
grpc_chttp2_goaway_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_goaway_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
gpr_slice debug_data,

@ -73,10 +73,10 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_ping_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -91,10 +91,11 @@ grpc_error *grpc_chttp2_ping_parser_parse(
if (p->byte == 8) {
GPR_ASSERT(is_last);
if (p->is_ack) {
grpc_chttp2_ack_ping(exec_ctx, transport_parsing, p->opaque_8bytes);
grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
gpr_slice_buffer_add(&transport_parsing->qbuf,
gpr_slice_buffer_add(&t->qbuf,
grpc_chttp2_ping_create(1, p->opaque_8bytes));
grpc_chttp2_initiate_write(exec_ctx, t, false, "ping response");
}
}

@ -48,9 +48,9 @@ gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes);
grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_ping_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */

@ -39,6 +39,8 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
gpr_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
grpc_transport_one_way_stats *stats) {
@ -83,10 +85,11 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
return GRPC_ERROR_NONE;
}
grpc_error *grpc_chttp2_rst_stream_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -97,19 +100,28 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(
cur++;
p->byte++;
}
stream_parsing->stats.incoming.framing_bytes += (uint64_t)(end - cur);
s->stats.incoming.framing_bytes += (uint64_t)(end - cur);
if (p->byte == 4) {
GPR_ASSERT(is_last);
stream_parsing->received_close = 1;
if (stream_parsing->forced_close_error == GRPC_ERROR_NONE) {
stream_parsing->forced_close_error = grpc_error_set_int(
GRPC_ERROR_CREATE("RST_STREAM"), GRPC_ERROR_INT_HTTP2_ERROR,
(intptr_t)((((uint32_t)p->reason_bytes[0]) << 24) |
(((uint32_t)p->reason_bytes[1]) << 16) |
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]))));
uint32_t reason = (((uint32_t)p->reason_bytes[0]) << 24) |
(((uint32_t)p->reason_bytes[1]) << 16) |
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]));
grpc_error *error = GRPC_ERROR_NONE;
if (reason != GRPC_CHTTP2_NO_ERROR) {
error = grpc_error_set_int(GRPC_ERROR_CREATE("RST_STREAM"),
GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)reason);
grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
(grpc_chttp2_error_code)reason, s->deadline);
char *status_details;
gpr_asprintf(&status_details, "Received RST_STREAM with error code %d",
reason);
gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
gpr_free(status_details);
grpc_chttp2_fake_status(exec_ctx, t, s, status_code, &slice_details);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, error);
}
return GRPC_ERROR_NONE;

@ -49,9 +49,10 @@ gpr_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_rst_stream_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */

@ -143,10 +143,10 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
}
}
grpc_error *grpc_chttp2_settings_parser_parse(
grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
const uint8_t *cur = GPR_SLICE_START_PTR(slice);
const uint8_t *end = GPR_SLICE_END_PTR(slice);
@ -162,11 +162,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
if (cur == end) {
parser->state = GRPC_CHTTP2_SPS_ID0;
if (is_last) {
transport_parsing->settings_updated = 1;
memcpy(parser->target_settings, parser->incoming_settings,
GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
gpr_slice_buffer_add(&transport_parsing->qbuf,
grpc_chttp2_settings_ack_create());
gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
}
return GRPC_ERROR_NONE;
}
@ -226,9 +224,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
break;
case GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE:
grpc_chttp2_goaway_append(
transport_parsing->last_incoming_stream_id, sp->error_value,
t->last_new_stream_id, sp->error_value,
gpr_slice_from_static_string("HTTP2 settings error"),
&transport_parsing->qbuf);
&t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
grpc_error *err = GRPC_ERROR_CREATE(msg);
@ -238,18 +236,17 @@ grpc_error *grpc_chttp2_settings_parser_parse(
}
if (parser->id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE &&
parser->incoming_settings[parser->id] != parser->value) {
transport_parsing->initial_window_update =
t->initial_window_update =
(int64_t)parser->value - parser->incoming_settings[parser->id];
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "adding %d for initial_window change",
(int)transport_parsing->initial_window_update);
(int)t->initial_window_update);
}
}
parser->incoming_settings[parser->id] = parser->value;
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "CHTTP2:%s: got setting %d = %d",
transport_parsing->is_client ? "CLI" : "SVR", parser->id,
parser->value);
t->is_client ? "CLI" : "SVR", parser->id, parser->value);
}
} else if (grpc_http_trace) {
gpr_log(GPR_ERROR, "CHTTP2: Ignoring unknown setting %d (value %d)",

@ -95,9 +95,10 @@ gpr_slice grpc_chttp2_settings_ack_create(void);
grpc_error *grpc_chttp2_settings_parser_begin_frame(
grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
uint32_t *settings);
grpc_error *grpc_chttp2_settings_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
void *parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */

@ -80,9 +80,8 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
}
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@ -94,8 +93,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
p->byte++;
}
if (stream_parsing != NULL) {
stream_parsing->stats.incoming.framing_bytes += (uint32_t)(end - cur);
if (s != NULL) {
s->stats.incoming.framing_bytes += (uint32_t)(end - cur);
}
if (p->byte == 4) {
@ -109,17 +108,26 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
}
GPR_ASSERT(is_last);
if (transport_parsing->incoming_stream_id != 0) {
if (stream_parsing != NULL) {
GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", transport_parsing,
stream_parsing, outgoing_window,
if (t->incoming_stream_id != 0) {
if (s != NULL) {
bool was_zero = s->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", t, s, outgoing_window,
received_update);
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
bool is_zero = s->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"stream.read_flow_control");
}
}
} else {
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", transport_parsing,
outgoing_window, received_update);
bool was_zero = t->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", t, outgoing_window,
received_update);
bool is_zero = t->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_initiate_write(exec_ctx, t, false,
"new_global_flow_control");
}
}
}
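The rewritten WINDOW_UPDATE path above credits the stream or transport outgoing window and only starts new work when that window crosses from exhausted to available (the was_zero/is_zero pair). A minimal self-contained sketch of that edge-trigger, where fake_stream and credit_window are made-up illustrative names rather than gRPC types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t outgoing_window; } fake_stream;

/* Credit the window and report whether it just became usable again; a true
   result is the condition under which the parser would request a new write
   (grpc_chttp2_become_writable / grpc_chttp2_initiate_write). */
static bool credit_window(fake_stream *s, int64_t received_update) {
  bool was_zero = s->outgoing_window <= 0;
  s->outgoing_window += received_update;
  bool is_zero = s->outgoing_window <= 0;
  return was_zero && !is_zero;
}

int main(void) {
  fake_stream s = {0};
  printf("kick write: %d\n", credit_window(&s, 65535)); /* 1: window opened */
  printf("kick write: %d\n", credit_window(&s, 1024));  /* 0: already open  */
  return 0;
}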

@@ -51,8 +51,7 @@ gpr_slice grpc_chttp2_window_update_create(uint32_t id, uint32_t window_delta,
grpc_error *grpc_chttp2_window_update_parser_begin_frame(
grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */

File diff suppressed because it is too large

@@ -45,7 +45,8 @@
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
typedef grpc_error *(*grpc_chttp2_hpack_parser_state)(
grpc_chttp2_hpack_parser *p, const uint8_t *beg, const uint8_t *end);
grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *beg,
const uint8_t *end);
typedef struct {
char *str;
@@ -55,7 +56,7 @@ typedef struct {
struct grpc_chttp2_hpack_parser {
/* user specified callback for each header output */
void (*on_header)(void *user_data, grpc_mdelem *md);
void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem *md);
void *on_header_user_data;
grpc_error *last_error;
@@ -104,15 +105,17 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
/* returns 1 on success, 0 on error */
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *beg,
const uint8_t *end);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
grpc_error *grpc_chttp2_header_parser_parse(
grpc_exec_ctx *exec_ctx, void *hpack_parser,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */

@@ -53,31 +53,25 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/transport_impl.h"
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
/* streams are kept in various linked lists depending on what things need to
happen to them... this enum labels each list */
typedef enum {
GRPC_CHTTP2_LIST_ALL_STREAMS,
GRPC_CHTTP2_LIST_CHECK_READ_OPS,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE,
GRPC_CHTTP2_LIST_WRITABLE,
GRPC_CHTTP2_LIST_WRITING,
GRPC_CHTTP2_LIST_WRITTEN,
GRPC_CHTTP2_LIST_PARSING_SEEN,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
/* streams waiting for the outgoing window in the writing path, they will be
* merged to the stalled list or writable list under transport lock. */
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
STREAM_LIST_COUNT /* must be last */
} grpc_chttp2_stream_list_id;
typedef enum {
GRPC_CHTTP2_WRITE_STATE_IDLE,
GRPC_CHTTP2_WRITE_STATE_WRITING,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
} grpc_chttp2_write_state;
/* deframer state for the overall http2 stream of bytes */
typedef enum {
/* prefix: one entry per http2 connection prefix byte */
@@ -152,6 +146,12 @@ typedef struct grpc_chttp2_outstanding_ping {
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
typedef struct grpc_chttp2_write_cb {
int64_t call_at_byte;
grpc_closure *closure;
struct grpc_chttp2_write_cb *next;
} grpc_chttp2_write_cb;
/* forward declared in frame_data.h */
struct grpc_chttp2_incoming_byte_stream {
grpc_byte_stream base;
@@ -161,12 +161,13 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_chttp2_transport *transport;
grpc_chttp2_stream *stream;
int is_tail;
bool is_tail;
gpr_mu slice_mu; // protects slices, on_next
gpr_slice_buffer slices;
grpc_closure *on_next;
gpr_slice *next;
uint32_t remaining_bytes;
struct {
grpc_closure closure;
@@ -178,12 +179,68 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_closure finished_action;
};
typedef struct {
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
grpc_endpoint *ep;
char *peer_string;
grpc_combiner *combiner;
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
uint8_t closed;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
/** maps stream id to grpc_chttp2_stream objects */
grpc_chttp2_stream_map stream_map;
grpc_closure write_action_begin_locked;
grpc_closure write_action;
grpc_closure write_action_end;
grpc_closure write_action_end_locked;
grpc_closure read_action_begin;
grpc_closure read_action_locked;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
grpc_chttp2_stream **accepting_stream;
struct {
/* accept stream callback */
void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
/** connectivity tracking */
grpc_connectivity_state_tracker state_tracker;
} channel_callback;
/** data to write now */
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
int64_t outgoing_window;
/** is this a client? */
uint8_t is_client;
/** data to write next write */
gpr_slice_buffer qbuf;
/** window available for us to send to peer */
int64_t outgoing_window;
/** window available to announce to peer */
int64_t announce_incoming_window;
/** how much window would we like to have for incoming_window */
@@ -194,8 +251,6 @@ typedef struct {
/** have we sent a goaway */
uint8_t sent_goaway;
/** is this transport a client? */
uint8_t is_client;
/** are the local settings dirty and need to be sent? */
uint8_t dirtied_local_settings;
/** have local settings been sent? */
@@ -212,49 +267,14 @@ typedef struct {
/** how far to lookahead in a stream? */
uint32_t stream_lookahead;
/** last received stream id */
uint32_t last_incoming_stream_id;
/** last new stream id */
uint32_t last_new_stream_id;
/** pings awaiting responses */
grpc_chttp2_outstanding_ping pings;
/** next payload for an outgoing ping */
uint64_t ping_counter;
/** concurrent stream count: updated when not parsing,
so this is a strict over-estimation on the client */
uint32_t concurrent_stream_count;
} grpc_chttp2_transport_global;
typedef struct {
/** data to write now */
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
int64_t outgoing_window;
/** is this a client? */
uint8_t is_client;
/** callback for when writing is done */
grpc_closure done_cb;
/** maximum frame size */
uint32_t max_frame_size;
} grpc_chttp2_transport_writing;
struct grpc_chttp2_transport_parsing {
/** is this transport a client? (boolean) */
uint8_t is_client;
/** were settings updated? */
uint8_t settings_updated;
/** was a settings ack received? */
uint8_t settings_ack_received;
/** was a goaway frame received? */
uint8_t goaway_received;
/** initial window change */
int64_t initial_window_update;
/** data to write later - after parsing */
gpr_slice_buffer qbuf;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
/** simple one shot parsers */
@@ -267,13 +287,12 @@ struct grpc_chttp2_transport_parsing {
/** parser for goaway frames */
grpc_chttp2_goaway_parser goaway_parser;
/** initial window change */
int64_t initial_window_update;
/** window available for peer to send to us */
int64_t incoming_window;
/** next stream id available at the time of beginning parsing */
uint32_t next_stream_id;
uint32_t last_incoming_stream_id;
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
uint8_t incoming_frame_type;
@@ -284,135 +303,42 @@ struct grpc_chttp2_transport_parsing {
uint32_t incoming_frame_size;
uint32_t incoming_stream_id;
/* current max frame size */
uint32_t max_frame_size;
/* active parser */
void *parser_data;
grpc_chttp2_stream_parsing *incoming_stream;
grpc_chttp2_stream *incoming_stream;
grpc_error *(*parser)(grpc_exec_ctx *exec_ctx, void *parser_user_data,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
/* received settings */
uint32_t settings[GRPC_CHTTP2_NUM_SETTINGS];
/* last settings that were sent */
uint32_t last_sent_settings[GRPC_CHTTP2_NUM_SETTINGS];
/* goaway data */
grpc_status_code goaway_error;
uint32_t goaway_last_stream_index;
gpr_slice goaway_text;
int64_t outgoing_window;
grpc_chttp2_write_cb *write_cb_pool;
/* if non-NULL, close the transport with this error when writes are finished
*/
grpc_error *close_transport_on_writes_finished;
};
typedef enum {
/** no writing activity allowed */
GRPC_CHTTP2_WRITES_CORKED,
/** no writing activity */
GRPC_CHTTP2_WRITING_INACTIVE,
/** write has been requested and scheduled against the workqueue */
GRPC_CHTTP2_WRITE_SCHEDULED,
/** write has been initiated after being reaped from the workqueue */
GRPC_CHTTP2_WRITING,
/** write has been initiated, AND another write needs to be started once it's
done */
GRPC_CHTTP2_WRITING_STALE_WITH_POLLER,
GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
} grpc_chttp2_write_state;
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
grpc_endpoint *ep;
char *peer_string;
/** when this drops to zero it's safe to shutdown the endpoint */
gpr_refcount shutdown_ep_refs;
struct {
grpc_combiner *combiner;
/** is a thread currently in the global lock */
bool global_active;
/** is a thread currently parsing */
bool parsing_active;
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** has a check_read_ops been scheduled */
bool check_read_ops_scheduled;
} executor;
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
uint8_t closed;
GRPC_METADATA_NOT_PUBLISHED,
GRPC_METADATA_SYNTHESIZED_FROM_FAKE,
GRPC_METADATA_PUBLISHED_FROM_WIRE,
GPRC_METADATA_PUBLISHED_AT_CLOSE
} grpc_published_metadata_method;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
/** various lists of streams */
grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
/** global state for reading/writing */
grpc_chttp2_transport_global global;
/** state only accessible by the chain of execution that
set writing_state >= GRPC_WRITING, and only by the writing closure
chain. */
grpc_chttp2_transport_writing writing;
/** state only accessible by the chain of execution that
set parsing_active=1 */
grpc_chttp2_transport_parsing parsing;
/** maps stream id to grpc_chttp2_stream objects;
owned by the parsing thread when parsing */
grpc_chttp2_stream_map parsing_stream_map;
/** streams created by the client (possibly during parsing);
merged with parsing_stream_map during unlock when no
parsing is occurring */
grpc_chttp2_stream_map new_stream_map;
/** closure to execute writing */
grpc_closure writing_action;
/** closure to start reading from the endpoint */
grpc_closure reading_action;
grpc_closure reading_action_locked;
grpc_closure post_parse_locked;
/** closure to actually do parsing */
grpc_closure parsing_action;
/** closure to initiate writing */
grpc_closure initiate_writing;
/** closure to finish writing */
grpc_closure terminate_writing;
/** closure to flush read state up the stack */
grpc_closure initiate_read_flush_locked;
/** incoming read bytes */
gpr_slice_buffer read_buffer;
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
grpc_chttp2_stream **accepting_stream;
struct {
/* accept stream callback */
void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
/** connectivity tracking */
grpc_connectivity_state_tracker state_tracker;
} channel_callback;
grpc_closure destroy_stream;
void *destroy_stream_arg;
/** Transport op to be applied post-parsing */
grpc_transport_op *post_parsing_op;
};
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
@@ -422,20 +348,22 @@ typedef struct {
As the upper layer offers more bytes, this value increases.
As bytes are read, this value decreases. */
uint32_t max_recv_bytes;
/** The number of bytes the upper layer has offered to read but we have
not yet announced to HTTP2 flow control.
As the upper layers offer to read more bytes, this value increases.
As we advertise incoming flow control window, this value decreases. */
uint32_t unannounced_incoming_window_for_parse;
uint32_t unannounced_incoming_window_for_writing;
/** things the upper layers would like to send */
grpc_metadata_batch *send_initial_metadata;
grpc_closure *send_initial_metadata_finished;
grpc_byte_stream *send_message;
grpc_closure *send_message_finished;
grpc_metadata_batch *send_trailing_metadata;
grpc_closure *send_trailing_metadata_finished;
grpc_byte_stream *fetching_send_message;
uint32_t fetched_send_message_length;
gpr_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
bool complete_fetch_covered_by_poller;
grpc_closure complete_fetch;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *recv_initial_metadata_ready;
grpc_byte_stream **recv_message;
@@ -455,95 +383,44 @@ typedef struct {
bool read_closed;
/** Are all published incoming byte streams closed. */
bool all_incoming_byte_streams_finished;
/** Is this stream in the stream map. */
bool in_stream_map;
/** Has this stream seen an error.
If true, then pending incoming frames can be thrown away. */
bool seen_error;
bool exceeded_metadata_size;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
/** the error that resulted in this stream being write-closed */
grpc_error *write_closed_error;
bool published_initial_metadata;
bool published_trailing_metadata;
grpc_published_metadata_method published_metadata[2];
bool final_metadata_requested;
grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
grpc_chttp2_incoming_frame_queue incoming_frames;
gpr_timespec deadline;
} grpc_chttp2_stream_global;
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
uint8_t fetching;
bool sent_initial_metadata;
uint8_t sent_message;
uint8_t sent_trailing_metadata;
uint8_t read_closed;
/** send this initial metadata */
grpc_metadata_batch *send_initial_metadata;
grpc_byte_stream *send_message;
grpc_metadata_batch *send_trailing_metadata;
int64_t outgoing_window;
/** how much window should we announce? */
uint32_t announce_window;
gpr_slice_buffer flow_controlled_buffer;
gpr_slice fetching_slice;
size_t stream_fetched;
grpc_closure finished_fetch;
/** stats gathered during the write */
grpc_transport_one_way_stats stats;
} grpc_chttp2_stream_writing;
struct grpc_chttp2_stream_parsing {
/** saw some stream level error */
grpc_error *forced_close_error;
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
/** has this stream received a close */
uint8_t received_close;
/** how many header frames have we received? */
uint8_t header_frames_received;
/** which metadata did we get (on this parse) */
uint8_t got_metadata_on_parse[2];
/** should we raise the seen_error flag in transport_global */
bool seen_error;
bool exceeded_metadata_size;
/** window available for peer to send to us */
int64_t incoming_window;
/** parsing state for data frames */
grpc_chttp2_data_parser data_parser;
/** amount of window given */
int64_t outgoing_window;
/** number of bytes received - reset at end of parse thread execution */
int64_t received_bytes;
/** stats gathered during the parse */
grpc_transport_stream_stats stats;
/** incoming metadata */
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
};
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
grpc_chttp2_stream_global global;
grpc_chttp2_stream_writing writing;
grpc_chttp2_stream_parsing parsing;
grpc_closure init_stream;
grpc_closure destroy_stream;
void *destroy_stream_arg;
bool sent_initial_metadata;
bool sent_trailing_metadata;
/** how much window should we announce? */
uint32_t announce_window;
gpr_slice_buffer flow_controlled_buffer;
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
};
/** Transport writing call flow:
@@ -559,162 +436,71 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason);
/** Someone is unlocking the transport mutex: check to see if writes
are required, and schedule them if so */
int grpc_chttp2_unlocking_check_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint);
void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
void *transport_writing, grpc_error *error);
void grpc_chttp2_cleanup_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_writing *writing);
void grpc_chttp2_prepare_to_read(grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
are required, and frame them if so */
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
/** Process one slice of incoming data; return 1 if the connection is still
viable after reading, or 0 if the connection should be torn down */
grpc_error *grpc_chttp2_perform_read(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
gpr_slice slice);
void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *global,
grpc_chttp2_transport_parsing *parsing);
bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, gpr_slice slice);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
/** Get a writable stream
returns non-zero if there was a stream available */
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) GRPC_MUST_USE_RESULT;
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing);
int grpc_chttp2_list_pop_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_add_written_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing);
int grpc_chttp2_list_pop_parsing_seen_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_add_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_check_read_ops(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
bool grpc_chttp2_list_remove_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
bool grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing);
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_remove_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
grpc_chttp2_transport_parsing *transport_parsing, uint32_t id);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
uint32_t id);
void grpc_chttp2_add_incoming_goaway(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
uint32_t goaway_error, gpr_slice goaway_text);
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
/* returns 1 if this is the last stream, 0 otherwise */
int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
int grpc_chttp2_has_streams(grpc_chttp2_transport *t);
void grpc_chttp2_for_all_streams(
grpc_chttp2_transport_global *transport_global, void *user_data,
void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
grpc_chttp2_stream_global *stream_global));
void grpc_chttp2_parsing_become_skip_parser(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
void grpc_chttp2_complete_closure_step(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_closure **pclosure,
grpc_error *error);
grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t);
int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
uint32_t id);
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t id);
void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t goaway_error,
gpr_slice goaway_text);
void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_closure **pclosure,
grpc_error *error, const char *desc);
#define GRPC_CHTTP2_CLIENT_CONNECT_STRING "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
@@ -805,57 +591,83 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
const char *var2, int is_client,
uint32_t stream_id, int64_t val1, int64_t val2);
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream,
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *stream,
grpc_status_code status, gpr_slice *details);
void grpc_chttp2_mark_stream_closed(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, int close_reads, int close_writes,
grpc_error *error);
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int close_reads,
int close_writes, grpc_error *error);
void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global);
grpc_chttp2_transport *t);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global, reason)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
const char *reason);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global,
#define GRPC_CHTTP2_STREAM_REF(stream, reason) \
grpc_chttp2_stream_ref(stream, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream, reason)
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
const char *reason);
#else
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global);
#define GRPC_CHTTP2_STREAM_REF(stream, reason) grpc_chttp2_stream_ref(stream)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream)
void grpc_chttp2_stream_ref(grpc_chttp2_stream *s);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
#endif
//#define GRPC_CHTTP2_REFCOUNTING_DEBUG 1
#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) \
grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
grpc_chttp2_unref_transport(cl, t, r, __FILE__, __LINE__)
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, const char *reason,
const char *file, int line);
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
const char *file, int line);
#else
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) grpc_chttp2_unref_transport(cl, t)
void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t);
#endif
grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
uint32_t frame_size, uint32_t flags);
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
gpr_slice slice);
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
grpc_error *error, int from_parsing_thread);
grpc_error *error);
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *parsing,
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const uint8_t *opaque_8bytes);
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
bool covered_by_poller, const char *reason);
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, bool covered_by_poller,
const char *reason);
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_error *due_to_error);
void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
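The grpc_chttp2_write_state enum near the top of this header collapses the old global/writing/parsing split into a single per-transport write state machine: a write requested while another write is in flight is remembered rather than re-queued, and re-armed when the in-flight write ends. A simplified, standalone sketch of the assumed transitions (write_state, initiate_write, and end_write are illustrative names, not the gRPC functions, and the COVERED_BY_POLLER variant is omitted):

#include <stdio.h>

typedef enum { WS_IDLE, WS_WRITING, WS_WRITING_WITH_MORE } write_state;

/* returns 1 when the caller should actually start the write loop */
static int initiate_write(write_state *st) {
  switch (*st) {
    case WS_IDLE:              *st = WS_WRITING; return 1;
    case WS_WRITING:           *st = WS_WRITING_WITH_MORE; return 0;
    case WS_WRITING_WITH_MORE: break; /* already coalesced */
  }
  return 0;
}

/* returns 1 when another write must be started immediately */
static int end_write(write_state *st) {
  if (*st == WS_WRITING_WITH_MORE) { *st = WS_WRITING; return 1; }
  *st = WS_IDLE;
  return 0;
}

int main(void) {
  write_state st = WS_IDLE;
  printf("%d ", initiate_write(&st)); /* 1: start writing         */
  printf("%d ", initiate_write(&st)); /* 0: coalesced into flight */
  printf("%d ", end_write(&st));      /* 1: more was queued       */
  printf("%d\n", end_write(&st));     /* 0: back to idle          */
  return 0;
}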

File diff suppressed because it is too large

@@ -35,27 +35,6 @@
#include <grpc/support/log.h>
#define TRANSPORT_FROM_GLOBAL(tg) \
((grpc_chttp2_transport *)((char *)(tg)-offsetof(grpc_chttp2_transport, \
global)))
#define STREAM_FROM_GLOBAL(sg) \
((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
#define TRANSPORT_FROM_WRITING(tw) \
((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \
writing)))
#define STREAM_FROM_WRITING(sw) \
((grpc_chttp2_stream *)((char *)(sw)-offsetof(grpc_chttp2_stream, writing)))
#define TRANSPORT_FROM_PARSING(tp) \
((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
parsing)))
#define STREAM_FROM_PARSING(sp) \
((grpc_chttp2_stream *)((char *)(sp)-offsetof(grpc_chttp2_stream, parsing)))
/* core list management */
static int stream_list_empty(grpc_chttp2_transport *t,
@@ -139,321 +118,57 @@ static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
/* wrappers for specializations */
bool grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
return stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE);
}
int grpc_chttp2_list_pop_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WRITABLE);
if (r != 0) {
*stream_global = &stream->global;
*stream_writing = &stream->writing;
}
return r;
}
bool grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE);
}
void grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
GPR_ASSERT(stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITING));
}
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing) {
return !stream_list_empty(TRANSPORT_FROM_WRITING(transport_writing),
GRPC_CHTTP2_LIST_WRITING);
}
int grpc_chttp2_list_pop_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITING);
if (r != 0) {
*stream_writing = &stream->writing;
}
return r;
}
void grpc_chttp2_list_add_written_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITTEN);
}
int grpc_chttp2_list_pop_written_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
GRPC_CHTTP2_LIST_WRITTEN);
if (r != 0) {
*stream_global = &stream->global;
*stream_writing = &stream->writing;
}
return r;
}
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(
TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r =
stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
}
return r;
}
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing) {
stream_list_add(TRANSPORT_FROM_PARSING(transport_parsing),
STREAM_FROM_PARSING(stream_parsing),
GRPC_CHTTP2_LIST_PARSING_SEEN);
}
int grpc_chttp2_list_pop_parsing_seen_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_PARSING(transport_parsing), &stream,
GRPC_CHTTP2_LIST_PARSING_SEEN);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
}
return r;
}
void grpc_chttp2_list_add_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_check_read_ops(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
if (!t->executor.check_read_ops_scheduled) {
grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
&t->initiate_read_flush_locked,
GRPC_ERROR_NONE, false);
t->executor.check_read_ops_scheduled = true;
}
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
bool grpc_chttp2_list_remove_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
GPR_ASSERT(s->id != 0);
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_writing_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
grpc_chttp2_stream *stream = STREAM_FROM_WRITING(stream_writing);
gpr_log(GPR_DEBUG, "writing stalled %d", stream->global.id);
if (!stream->included[GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT]) {
GRPC_CHTTP2_STREAM_REF(&stream->global, "chttp2_writing_stalled");
}
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing), stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT);
int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
bool grpc_chttp2_list_flush_writing_stalled_by_transport(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream *stream;
bool out = false;
grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing);
while (stream_list_pop(transport, &stream,
GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) {
gpr_log(GPR_DEBUG, "move %d from writing stalled to just stalled",
stream->global.id);
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
&stream->writing);
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &stream->global,
"chttp2_writing_stalled");
out = true;
}
return out;
bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
gpr_log(GPR_DEBUG, "stalled %d", stream_writing->id);
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITING);
}
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) {
return !stream_list_empty(t, GRPC_CHTTP2_LIST_WRITING);
}
void grpc_chttp2_list_remove_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITING);
}
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
void grpc_chttp2_list_add_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
int grpc_chttp2_list_pop_closed_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_add_tail(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
return stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
int grpc_chttp2_has_streams(grpc_chttp2_transport *t) {
return !stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
}
void grpc_chttp2_for_all_streams(
grpc_chttp2_transport_global *transport_global, void *user_data,
void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
grpc_chttp2_stream_global *stream_global)) {
grpc_chttp2_stream *s;
grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
for (s = t->lists[GRPC_CHTTP2_LIST_ALL_STREAMS].head; s != NULL;
s = s->links[GRPC_CHTTP2_LIST_ALL_STREAMS].next) {
cb(transport_global, user_data, &s->global);
}
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
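Every wrapper in this file reduces to the same intrusive-list pattern: each stream embeds one link per list plus an included flag, so adding, popping, and duplicate detection cost no allocation and no search. A simplified, hypothetical sketch of that pattern (the real lists keep head and tail pointers and are doubly linked; this LIFO version only illustrates the links[]/included[] idea):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { LIST_WRITABLE, LIST_WRITING, LIST_COUNT };

typedef struct stream {
  int id;
  struct stream *next[LIST_COUNT]; /* one link per list */
  bool included[LIST_COUNT];       /* membership flag per list */
} stream;

typedef struct { stream *head[LIST_COUNT]; } transport;

static bool list_add(transport *t, stream *s, int which) {
  if (s->included[which]) return false; /* duplicate add is a no-op */
  s->included[which] = true;
  s->next[which] = t->head[which];
  t->head[which] = s;
  return true;
}

static stream *list_pop(transport *t, int which) {
  stream *s = t->head[which];
  if (s == NULL) return NULL;
  t->head[which] = s->next[which];
  s->included[which] = false;
  return s;
}

int main(void) {
  transport t = {{NULL}};
  stream a = {.id = 1}, b = {.id = 3};
  list_add(&t, &a, LIST_WRITABLE);
  list_add(&t, &b, LIST_WRITABLE);
  list_add(&t, &a, LIST_WRITABLE); /* ignored: already included */
  stream *s;
  while ((s = list_pop(&t, LIST_WRITABLE)) != NULL) {
    printf("pop stream %d\n", s->id);
  }
  return 0;
}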

@@ -77,6 +77,7 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
GPR_ASSERT(count == 0 || keys[count - 1] < key);
GPR_ASSERT(value);
GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
if (count == capacity) {
if (map->free > capacity / 4) {
@@ -96,40 +97,6 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
map->count = count + 1;
}
void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
grpc_chttp2_stream_map *dst) {
/* if src is empty we don't need to do anything */
if (src->count == src->free) {
return;
}
/* if dst is empty we simply need to swap */
if (dst->count == dst->free) {
GPR_SWAP(grpc_chttp2_stream_map, *src, *dst);
return;
}
/* the first element of src must be greater than the last of dst...
* however the maps may need compacting for this property to hold */
if (src->keys[0] <= dst->keys[dst->count - 1]) {
src->count = compact(src->keys, src->values, src->count);
src->free = 0;
dst->count = compact(dst->keys, dst->values, dst->count);
dst->free = 0;
}
GPR_ASSERT(src->keys[0] > dst->keys[dst->count - 1]);
/* if dst doesn't have capacity, resize */
if (dst->count + src->count > dst->capacity) {
dst->capacity = GPR_MAX(dst->capacity * 3 / 2, dst->count + src->count);
dst->keys = gpr_realloc(dst->keys, dst->capacity * sizeof(uint32_t));
dst->values = gpr_realloc(dst->values, dst->capacity * sizeof(void *));
}
memcpy(dst->keys + dst->count, src->keys, src->count * sizeof(uint32_t));
memcpy(dst->values + dst->count, src->values, src->count * sizeof(void *));
dst->count += src->count;
dst->free += src->free;
src->count = 0;
src->free = 0;
}
static void **find(grpc_chttp2_stream_map *map, uint32_t key) {
size_t min_idx = 0;
size_t max_idx = map->count;
@@ -170,6 +137,7 @@ void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key) {
if (map->free == map->count) {
map->free = map->count = 0;
}
GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
}
return out;
}
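For context, the map asserted on above keeps its keys in ascending order (stream ids only grow), so grpc_chttp2_stream_map_find is a straight binary search; deletions NULL out the value and bump a free counter so the arrays can be compacted later. A small self-contained sketch of the lookup, with hypothetical names (map_find is not the gRPC function):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Binary search over sorted keys; returns the matching value or NULL. */
static void *map_find(const uint32_t *keys, void **values, size_t count,
                      uint32_t key) {
  size_t lo = 0, hi = count;
  while (lo < hi) {
    size_t mid = lo + (hi - lo) / 2;
    if (keys[mid] < key) {
      lo = mid + 1;
    } else if (keys[mid] > key) {
      hi = mid;
    } else {
      return values[mid];
    }
  }
  return NULL;
}

int main(void) {
  uint32_t keys[] = {1, 3, 5, 7};
  void *values[] = {"s1", "s3", "s5", "s7"};
  printf("key 5 -> %s\n", (char *)map_find(keys, values, 4, 5));
  printf("key 4 -> %p\n", map_find(keys, values, 4, 4)); /* not found: NULL */
  return 0;
}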

@@ -65,10 +65,6 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
or NULL otherwise */
void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key);
/* Move all elements of src into dst */
void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
grpc_chttp2_stream_map *dst);
/* Return an existing key, or NULL if it does not exist */
void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key);

@@ -40,349 +40,221 @@
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing);
static void add_to_write_list(grpc_chttp2_write_cb **list,
grpc_chttp2_write_cb *cb) {
cb->next = *list;
*list = cb;
}
static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb,
grpc_error *error) {
grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error,
"finish_write_cb");
cb->next = t->write_cb_pool;
t->write_cb_pool = cb;
}
int grpc_chttp2_unlocking_check_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_writing *stream_writing;
static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int64_t send_bytes,
grpc_chttp2_write_cb **list, grpc_error *error) {
grpc_chttp2_write_cb *cb = *list;
*list = NULL;
s->flow_controlled_bytes_written += send_bytes;
while (cb) {
grpc_chttp2_write_cb *next = cb->next;
if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
} else {
add_to_write_list(list, cb);
}
cb = next;
}
GRPC_ERROR_UNREF(error);
}
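Together with the grpc_chttp2_write_cb struct added to internal.h, these helpers implement completion-by-byte-offset: each queued callback records the flow-controlled byte (call_at_byte) at which it may fire, and after every write the list is rebuilt, firing callbacks whose offset has been reached and keeping the rest pending. A standalone sketch under those assumptions (simplified types; printf stands in for the closure, and the pooling of freed nodes is omitted):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct write_cb {
  int64_t call_at_byte; /* fire once this many bytes have been written */
  struct write_cb *next;
} write_cb;

static void update_list(write_cb **list, int64_t *bytes_written,
                        int64_t send_bytes) {
  *bytes_written += send_bytes;
  write_cb *cb = *list;
  *list = NULL;
  while (cb != NULL) {
    write_cb *next = cb->next;
    if (cb->call_at_byte <= *bytes_written) {
      printf("complete cb at byte %lld\n", (long long)cb->call_at_byte);
      free(cb);
    } else {
      cb->next = *list; /* not reached yet: keep for a later write */
      *list = cb;
    }
    cb = next;
  }
}

int main(void) {
  write_cb *pending = NULL;
  int64_t written = 0;
  for (int64_t at = 100; at <= 300; at += 100) { /* cbs at bytes 100/200/300 */
    write_cb *cb = malloc(sizeof(*cb));
    cb->call_at_byte = at;
    cb->next = pending;
    pending = cb;
  }
  update_list(&pending, &written, 150); /* fires only the 100-byte callback */
  update_list(&pending, &written, 250); /* fires the 200- and 300-byte ones */
  return 0;
}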
GPR_TIMER_BEGIN("grpc_chttp2_unlocking_check_writes", 0);
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
transport_writing->max_frame_size =
transport_global->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
if (transport_global->dirtied_local_settings &&
!transport_global->sent_local_settings) {
if (t->dirtied_local_settings && !t->sent_local_settings) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
&t->outbuf,
grpc_chttp2_settings_create(
transport_global->settings[GRPC_SENT_SETTINGS],
transport_global->settings[GRPC_LOCAL_SETTINGS],
transport_global->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
transport_global->force_send_settings = 0;
transport_global->dirtied_local_settings = 0;
transport_global->sent_local_settings = 1;
t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS],
t->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
t->force_send_settings = 0;
t->dirtied_local_settings = 0;
t->sent_local_settings = 1;
}
/* simple writes are queued to qbuf, and flushed here */
gpr_slice_buffer_move_into(&transport_global->qbuf,
&transport_writing->outbuf);
GPR_ASSERT(transport_global->qbuf.count == 0);
gpr_slice_buffer_move_into(&t->qbuf, &t->outbuf);
GPR_ASSERT(t->qbuf.count == 0);
grpc_chttp2_hpack_compressor_set_max_table_size(
&transport_writing->hpack_compressor,
transport_global->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
&t->hpack_compressor,
t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
transport_global, outgoing_window);
if (transport_writing->outgoing_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
&stream_global)) {
grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
false, "transport.read_flow_control");
if (t->outgoing_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"transport.read_flow_control");
}
}
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (grpc_chttp2_list_pop_writable_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
bool sent_initial_metadata = stream_writing->sent_initial_metadata;
bool become_writable = false;
while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
bool sent_initial_metadata = s->sent_initial_metadata;
bool now_writing = false;
stream_writing->id = stream_global->id;
stream_writing->read_closed = stream_global->read_closed;
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t,
t->is_client ? "CLIENT" : "SERVER", s->id, sent_initial_metadata,
s->send_initial_metadata != NULL, s->announce_window));
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_writing, stream_writing,
outgoing_window, stream_global,
outgoing_window);
if (!sent_initial_metadata && stream_global->send_initial_metadata) {
stream_writing->send_initial_metadata =
stream_global->send_initial_metadata;
stream_global->send_initial_metadata = NULL;
become_writable = true;
/* send initial metadata if it's available */
if (!sent_initial_metadata && s->send_initial_metadata) {
grpc_chttp2_encode_header(
&t->hpack_compressor, s->id, s->send_initial_metadata, 0,
t->settings[GRPC_ACKED_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
&s->stats.outgoing, &t->outbuf);
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
now_writing = true;
}
/* send any window updates */
if (s->announce_window > 0) {
uint32_t announce = s->announce_window;
gpr_slice_buffer_add(&t->outbuf,
grpc_chttp2_window_update_create(
s->id, s->announce_window, &s->stats.outgoing));
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, announce_window, announce);
}
if (sent_initial_metadata) {
if (stream_global->send_message != NULL) {
gpr_slice hdr = gpr_slice_malloc(5);
uint8_t *p = GPR_SLICE_START_PTR(hdr);
uint32_t len = stream_global->send_message->length;
GPR_ASSERT(stream_writing->send_message == NULL);
p[0] = (stream_global->send_message->flags &
GRPC_WRITE_INTERNAL_COMPRESS) != 0;
p[1] = (uint8_t)(len >> 24);
p[2] = (uint8_t)(len >> 16);
p[3] = (uint8_t)(len >> 8);
p[4] = (uint8_t)(len);
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer, hdr);
if (stream_global->send_message->length > 0) {
stream_writing->send_message = stream_global->send_message;
} else {
stream_writing->send_message = NULL;
/* send any body bytes, if allowed by flow control */
if (s->flow_controlled_buffer.length > 0) {
uint32_t max_outgoing =
(uint32_t)GPR_MIN(t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
GPR_MIN(s->outgoing_window, t->outgoing_window));
if (max_outgoing > 0) {
uint32_t send_bytes =
(uint32_t)GPR_MIN(max_outgoing, s->flow_controlled_buffer.length);
bool is_last_data_frame =
s->fetching_send_message == NULL &&
send_bytes == s->flow_controlled_buffer.length;
bool is_last_frame =
is_last_data_frame && s->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(s->send_trailing_metadata);
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, send_bytes,
is_last_frame, &s->stats.outgoing,
&t->outbuf);
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, outgoing_window,
send_bytes);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, outgoing_window,
send_bytes);
if (is_last_frame) {
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_CHTTP2_NO_ERROR,
&s->stats.outgoing));
}
}
s->sending_bytes += send_bytes;
now_writing = true;
if (s->flow_controlled_buffer.length > 0) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
} else if (t->outgoing_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
}
stream_writing->stream_fetched = 0;
stream_global->send_message = NULL;
}
if ((stream_writing->send_message != NULL ||
stream_writing->flow_controlled_buffer.length > 0) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
become_writable = true;
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0) {
if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
&s->stats.outgoing, &t->outbuf);
} else {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_encode_header(
&t->hpack_compressor, s->id, s->send_trailing_metadata, true,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
&s->stats.outgoing, &t->outbuf);
}
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
if (!t->is_client && !s->read_closed) {
gpr_slice_buffer_add(
&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_CHTTP2_NO_ERROR, &s->stats.outgoing));
}
now_writing = true;
}
if (stream_global->send_trailing_metadata) {
stream_writing->send_trailing_metadata =
stream_global->send_trailing_metadata;
stream_global->send_trailing_metadata = NULL;
become_writable = true;
}
}
if (!stream_global->read_closed &&
stream_global->unannounced_incoming_window_for_writing > 1024) {
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_global, stream_writing,
announce_window, stream_global,
unannounced_incoming_window_for_writing);
become_writable = true;
}
if (become_writable) {
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
if (now_writing) {
if (!grpc_chttp2_list_add_writing_stream(t, s)) {
/* already in writing list: drop ref */
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
}
} else {
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write");
}
}
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
if (transport_global->announce_incoming_window > 0) {
uint32_t announced = (uint32_t)GPR_MIN(
transport_global->announce_incoming_window, UINT32_MAX);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_global,
announce_incoming_window, announced);
if (t->announce_incoming_window > 0) {
uint32_t announced =
(uint32_t)GPR_MIN(t->announce_incoming_window, UINT32_MAX);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, announce_incoming_window,
announced);
grpc_transport_one_way_stats throwaway_stats;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(0, announced, &throwaway_stats));
}
GPR_TIMER_END("grpc_chttp2_unlocking_check_writes", 0);
return transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing);
}
void grpc_chttp2_perform_writes(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
grpc_endpoint *endpoint) {
GPR_ASSERT(transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing));
finalize_outbuf(exec_ctx, transport_writing);
GPR_ASSERT(endpoint);
if (transport_writing->outbuf.count > 0) {
grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
&transport_writing->done_cb);
} else {
grpc_exec_ctx_sched(exec_ctx, &transport_writing->done_cb, GRPC_ERROR_NONE,
NULL);
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_window_update_create(
0, announced, &throwaway_stats));
}
}
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
GPR_TIMER_BEGIN("finalize_outbuf", 0);
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
bool is_first_data_frame = true;
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
uint32_t max_outgoing =
(uint32_t)GPR_MIN(transport_writing->max_frame_size,
GPR_MIN(stream_writing->outgoing_window,
transport_writing->outgoing_window));
/* send initial metadata if it's available */
if (stream_writing->send_initial_metadata != NULL) {
grpc_chttp2_encode_header(
&transport_writing->hpack_compressor, stream_writing->id,
stream_writing->send_initial_metadata, 0,
transport_writing->max_frame_size, &stream_writing->stats,
&transport_writing->outbuf);
stream_writing->send_initial_metadata = NULL;
stream_writing->sent_initial_metadata = 1;
}
/* send any window updates */
if (stream_writing->announce_window > 0 &&
stream_writing->send_initial_metadata == NULL) {
uint32_t announce = stream_writing->announce_window;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(stream_writing->id,
stream_writing->announce_window,
&stream_writing->stats));
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing, stream_writing,
announce_window, announce);
stream_writing->announce_window = 0;
}
/* fetch any body bytes */
while (!stream_writing->fetching && stream_writing->send_message &&
stream_writing->flow_controlled_buffer.length < max_outgoing &&
stream_writing->stream_fetched <
stream_writing->send_message->length) {
if (grpc_byte_stream_next(exec_ctx, stream_writing->send_message,
&stream_writing->fetching_slice, max_outgoing,
&stream_writing->finished_fetch)) {
stream_writing->stream_fetched +=
GPR_SLICE_LENGTH(stream_writing->fetching_slice);
if (stream_writing->stream_fetched ==
stream_writing->send_message->length) {
stream_writing->send_message = NULL;
}
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer,
stream_writing->fetching_slice);
} else {
stream_writing->fetching = 1;
}
}
/* send any body bytes */
if (stream_writing->flow_controlled_buffer.length > 0) {
if (max_outgoing > 0) {
uint32_t send_bytes = (uint32_t)GPR_MIN(
max_outgoing, stream_writing->flow_controlled_buffer.length);
int is_last_data_frame =
stream_writing->send_message == NULL &&
send_bytes == stream_writing->flow_controlled_buffer.length;
int is_last_frame = is_last_data_frame &&
stream_writing->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata);
grpc_chttp2_encode_data(
stream_writing->id, &stream_writing->flow_controlled_buffer,
send_bytes, is_last_frame, &stream_writing->stats,
&transport_writing->outbuf);
if (is_first_data_frame) {
/* TODO(dgq): this is a hack. It'll be fixed in a future refactoring */
stream_writing->stats.data_bytes -= 5; /* discount grpc framing */
is_first_data_frame = false;
}
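/* Illustrative standalone sketch (not from the gRPC sources): the 5 bytes
   discounted above are the gRPC length-prefix header built earlier in this
   diff -- one flag byte (bit 0 = compressed) followed by the payload length
   as a 32-bit big-endian integer. The helper name below is hypothetical. */
#include <stdint.h>
#include <stdio.h>

static void write_grpc_frame_header(uint8_t hdr[5], int compressed,
                                    uint32_t len) {
  hdr[0] = (uint8_t)(compressed != 0);
  hdr[1] = (uint8_t)(len >> 24);
  hdr[2] = (uint8_t)(len >> 16);
  hdr[3] = (uint8_t)(len >> 8);
  hdr[4] = (uint8_t)(len);
}

int main(void) {
  uint8_t hdr[5];
  write_grpc_frame_header(hdr, 0, 1024); /* uncompressed, 1 KiB payload */
  for (int i = 0; i < 5; i++) printf("%02x ", hdr[i]); /* 00 00 00 04 00 */
  printf("\n");
  return 0;
}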
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing,
stream_writing, outgoing_window,
send_bytes);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_writing,
outgoing_window, send_bytes);
if (is_last_frame) {
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
if (is_last_data_frame) {
GPR_ASSERT(stream_writing->send_message == NULL);
stream_writing->sent_message = 1;
}
} else if (transport_writing->outgoing_window == 0) {
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
/* send trailing metadata if it's available and we're ready for it */
if (stream_writing->send_message == NULL &&
stream_writing->flow_controlled_buffer.length == 0 &&
stream_writing->send_trailing_metadata != NULL) {
if (grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata)) {
grpc_chttp2_encode_data(
stream_writing->id, &stream_writing->flow_controlled_buffer, 0, 1,
&stream_writing->stats, &transport_writing->outbuf);
} else {
grpc_chttp2_encode_header(
&transport_writing->hpack_compressor, stream_writing->id,
stream_writing->send_trailing_metadata, 1,
transport_writing->max_frame_size, &stream_writing->stats,
&transport_writing->outbuf);
}
if (!transport_writing->is_client && !stream_writing->read_closed) {
gpr_slice_buffer_add(&transport_writing->outbuf,
grpc_chttp2_rst_stream_create(
stream_writing->id, GRPC_CHTTP2_NO_ERROR,
&stream_writing->stats));
}
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
/* if there's more to write, then loop, otherwise prepare to finish the
* write */
if ((stream_writing->flow_controlled_buffer.length > 0 ||
(stream_writing->send_message && !stream_writing->fetching)) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
} else {
grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
} else {
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
GPR_TIMER_END("finalize_outbuf", 0);
return t->outbuf.count > 0;
}
void grpc_chttp2_cleanup_writing(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
GPR_TIMER_BEGIN("grpc_chttp2_cleanup_writing", 0);
grpc_chttp2_stream_writing *stream_writing;
grpc_chttp2_stream_global *stream_global;
if (grpc_chttp2_list_flush_writing_stalled_by_transport(exec_ctx,
transport_writing)) {
grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
"resume_stalled_stream");
}
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0);
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
if (stream_writing->sent_initial_metadata) {
while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
if (s->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_initial_metadata_finished, GRPC_ERROR_NONE);
exec_ctx, t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_REF(error), "send_initial_metadata_finished");
}
grpc_transport_move_one_way_stats(&stream_writing->stats,
&stream_global->stats.outgoing);
if (stream_writing->sent_message) {
GPR_ASSERT(stream_writing->send_message == NULL);
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_message_finished, GRPC_ERROR_NONE);
stream_writing->sent_message = 0;
if (s->sending_bytes != 0) {
update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
&s->on_write_finished_cbs, GRPC_ERROR_REF(error));
s->sending_bytes = 0;
}
if (stream_writing->sent_trailing_metadata) {
if (s->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, transport_global, stream_global,
&stream_global->send_trailing_metadata_finished, GRPC_ERROR_NONE);
}
if (stream_writing->sent_trailing_metadata) {
grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
!transport_global->is_client, 1,
GRPC_ERROR_NONE);
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_REF(error));
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
}
gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
GPR_TIMER_END("grpc_chttp2_cleanup_writing", 0);
gpr_slice_buffer_reset_and_unref(&t->outbuf);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_chttp2_end_write", 0);
}

@ -272,6 +272,18 @@ int grpc_channel_args_compare(const grpc_channel_args *a,
return 0;
}
const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
const char *name) {
if (args != NULL) {
for (size_t i = 0; i < args->num_args; ++i) {
if (strcmp(args->args[i].key, name) == 0) {
return &args->args[i];
}
}
}
return NULL;
}
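/* Illustrative usage sketch (not part of this diff): how a filter might use
   the new grpc_channel_args_find() helper to pull a pointer-typed argument
   out of its channel args, as the message-size filter does further down with
   GRPC_ARG_SERVICE_CONFIG. Assumes the in-tree header
   "src/core/lib/channel/channel_args.h"; lookup_pointer_arg is hypothetical. */
#include <grpc/support/log.h>
#include "src/core/lib/channel/channel_args.h"

static void *lookup_pointer_arg(const grpc_channel_args *args,
                                const char *name) {
  const grpc_arg *arg = grpc_channel_args_find(args, name);
  if (arg == NULL) return NULL; /* not set: caller falls back to a default */
  GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
  return arg->value.pointer.p;
}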
int grpc_channel_arg_get_integer(grpc_arg *arg, grpc_integer_options options) {
if (arg->type != GRPC_ARG_INTEGER) {
gpr_log(GPR_ERROR, "%s ignored: it must be an integer", arg->key);

@ -89,6 +89,10 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
int grpc_channel_args_compare(const grpc_channel_args *a,
const grpc_channel_args *b);
/** Returns the value of argument \a name from \a args, or NULL if not found. */
const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
const char *name);
typedef struct grpc_integer_options {
int default_value; // Return this if value is outside of expected bounds.
int min_value;

@ -162,7 +162,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
gpr_timespec deadline, grpc_call_stack *call_stack) {
grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
size_t count = channel_stack->count;
@ -179,10 +179,12 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
args.start_time = gpr_now(GPR_CLOCK_MONOTONIC);
for (i = 0; i < count; i++) {
args.call_stack = call_stack;
args.server_transport_data = transport_server_data;
args.context = context;
args.path = path;
args.deadline = deadline;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;

@ -74,6 +74,8 @@ typedef struct {
grpc_call_stack *call_stack;
const void *server_transport_data;
grpc_call_context_element *context;
grpc_mdstr *path;
gpr_timespec start_time;
gpr_timespec deadline;
} grpc_call_element_args;
@ -225,7 +227,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
gpr_timespec deadline, grpc_call_stack *call_stack);
grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
* op is started */
void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,

@ -64,30 +64,49 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
}
// Starts the deadline timer.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// Note: We do not start the timer if there is already a timer
// pending. This should be okay, because this is only called from two
// functions exported by this module: grpc_deadline_state_start(), which
// starts the initial timer, and grpc_deadline_state_reset(), which
// cancels any pre-existing timer before starting a new one. In
// particular, we want to ensure that if grpc_deadline_state_start()
// winds up trying to start the timer after grpc_deadline_state_reset()
// has already done so, we ignore the value from the former.
if (!deadline_state->timer_pending &&
gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// Take a reference to the call stack, to be owned by the timer.
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
gpr_mu_lock(&deadline_state->timer_mu);
deadline_state->timer_pending = true;
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
elem, gpr_now(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(&deadline_state->timer_mu);
}
}
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
gpr_mu_lock(&deadline_state->timer_mu);
start_timer_if_needed_locked(exec_ctx, elem, deadline);
gpr_mu_unlock(&deadline_state->timer_mu);
}
// Cancels the deadline timer.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
gpr_mu_lock(&deadline_state->timer_mu);
static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
if (deadline_state->timer_pending) {
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
deadline_state->timer_pending = false;
}
}
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
gpr_mu_lock(&deadline_state->timer_mu);
cancel_timer_if_needed_locked(exec_ctx, deadline_state);
gpr_mu_unlock(&deadline_state->timer_mu);
}
@ -108,6 +127,21 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
op->on_complete = &deadline_state->on_complete;
}
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack) {
grpc_deadline_state* deadline_state = elem->call_data;
memset(deadline_state, 0, sizeof(*deadline_state));
deadline_state->call_stack = call_stack;
gpr_mu_init(&deadline_state->timer_mu);
}
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
grpc_deadline_state* deadline_state = elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
gpr_mu_destroy(&deadline_state->timer_mu);
}
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
@ -122,16 +156,11 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
gpr_free(state);
}
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_element_args* args) {
grpc_deadline_state* deadline_state = elem->call_data;
memset(deadline_state, 0, sizeof(*deadline_state));
deadline_state->call_stack = args->call_stack;
gpr_mu_init(&deadline_state->timer_mu);
void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec deadline) {
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
const gpr_timespec deadline =
gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// When the deadline passes, we indicate the failure by sending down
// an op with cancel_error set. However, we can't send down any ops
@ -148,11 +177,13 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
}
}
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
gpr_mu_destroy(&deadline_state->timer_mu);
gpr_mu_lock(&deadline_state->timer_mu);
cancel_timer_if_needed_locked(exec_ctx, deadline_state);
start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
gpr_mu_unlock(&deadline_state->timer_mu);
}
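/* Illustrative standalone sketch (not from the gRPC sources) of the "_locked"
   convention introduced above: the public entry points take the mutex and
   delegate to *_locked helpers, so a compound operation such as reset
   (cancel + start) can run both steps under a single critical section. The
   my_timer_state type and helpers below are hypothetical. */
#include <pthread.h>
#include <stdbool.h>

typedef struct {
  pthread_mutex_t mu;
  bool pending;
} my_timer_state;

static void my_cancel_locked(my_timer_state *st) { st->pending = false; }
static void my_start_locked(my_timer_state *st) { st->pending = true; }

static void my_reset(my_timer_state *st) {
  pthread_mutex_lock(&st->mu);
  my_cancel_locked(st); /* both steps happen atomically w.r.t. other callers */
  my_start_locked(st);
  pthread_mutex_unlock(&st->mu);
}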
void grpc_deadline_state_client_start_transport_stream_op(
@ -209,7 +240,8 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element_args* args) {
// Note: size of call data is different between client and server.
memset(elem->call_data, 0, elem->filter->sizeof_call_data);
grpc_deadline_state_init(exec_ctx, elem, args);
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
grpc_deadline_state_start(exec_ctx, elem, args->deadline);
return GRPC_ERROR_NONE;
}

@ -54,18 +54,37 @@ typedef struct grpc_deadline_state {
grpc_closure* next_on_complete;
} grpc_deadline_state;
// To be used in a filter's init_call_elem(), destroy_call_elem(), and
// start_transport_stream_op() methods to enforce call deadlines.
//
// REQUIRES: The first field in elem->call_data is a grpc_deadline_state.
// NOTE: All of these functions require that the first field in
// elem->call_data is a grpc_deadline_state.
//
// For grpc_deadline_state_client_start_transport_stream_op(), it is the
// caller's responsibility to chain to the next filter if necessary
// after the function returns.
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_element_args* args);
grpc_call_stack* call_stack);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
// Starts the timer with the specified deadline.
// Should be called from the filter's init_call_elem() method.
void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec deadline);
// Cancels the existing timer and starts a new one with new_deadline.
//
// Note: It is generally safe to call this with an earlier deadline
// value than the current one, but not the reverse. No checks are done
// to ensure that the timer callback is not invoked while it is in the
// process of being reset, which means that attempting to increase the
// deadline may result in the timer being called twice.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline);
// To be called from the client-side filter's start_transport_stream_op()
// method. Ensures that the deadline timer is cancelled when the call
// is completed.
//
// Note: It is the caller's responsibility to chain to the next filter if
// necessary after this function returns.
void grpc_deadline_state_client_start_transport_stream_op(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op* op);

@ -183,7 +183,7 @@ void grpc_handshake_manager_do_handshake(
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
grpc_handshaker_done_cb cb, void* user_data) {
grpc_channel_args* args_copy = grpc_channel_args_copy(args);
gpr_slice_buffer* read_buffer = malloc(sizeof(*read_buffer));
gpr_slice_buffer* read_buffer = gpr_malloc(sizeof(*read_buffer));
gpr_slice_buffer_init(read_buffer);
if (mgr->count == 0) {
// No handshakers registered, so we just immediately call the done

@ -38,6 +38,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/client_config/method_config.h"
#include "src/core/lib/channel/channel_args.h"
#define DEFAULT_MAX_SEND_MESSAGE_LENGTH -1 // Unlimited.
@ -45,6 +46,8 @@
#define DEFAULT_MAX_RECV_MESSAGE_LENGTH (4 * 1024 * 1024)
typedef struct call_data {
int max_send_size;
int max_recv_size;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
// call our next_recv_message_ready member after handling it.
@ -58,6 +61,8 @@ typedef struct call_data {
typedef struct channel_data {
int max_send_size;
int max_recv_size;
// Method config table.
grpc_method_config_table* method_config_table;
} channel_data;
// Callback invoked when we receive a message. Here we check the max
@ -66,13 +71,12 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
grpc_error* error) {
grpc_call_element* elem = user_data;
call_data* calld = elem->call_data;
channel_data* chand = elem->channel_data;
if (*calld->recv_message != NULL && chand->max_recv_size >= 0 &&
(*calld->recv_message)->length > (size_t)chand->max_recv_size) {
if (*calld->recv_message != NULL && calld->max_recv_size >= 0 &&
(*calld->recv_message)->length > (size_t)calld->max_recv_size) {
char* message_string;
gpr_asprintf(&message_string,
"Received message larger than max (%u vs. %d)",
(*calld->recv_message)->length, chand->max_recv_size);
(*calld->recv_message)->length, calld->max_recv_size);
grpc_error* new_error = grpc_error_set_int(
GRPC_ERROR_CREATE(message_string), GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_INVALID_ARGUMENT);
@ -93,13 +97,12 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_transport_stream_op* op) {
call_data* calld = elem->call_data;
channel_data* chand = elem->channel_data;
// Check max send message size.
if (op->send_message != NULL && chand->max_send_size >= 0 &&
op->send_message->length > (size_t)chand->max_send_size) {
if (op->send_message != NULL && calld->max_send_size >= 0 &&
op->send_message->length > (size_t)calld->max_send_size) {
char* message_string;
gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
op->send_message->length, chand->max_send_size);
op->send_message->length, calld->max_send_size);
gpr_slice message = gpr_slice_from_copied_string(message_string);
gpr_free(message_string);
grpc_call_element_send_close_with_message(
@ -119,9 +122,37 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_call_element_args* args) {
channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;
grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem);
// Get max sizes from channel data, then merge in per-method config values.
// Note: Per-method config is only available on the client, so we
// apply the max request size to the send limit and the max response
// size to the receive limit.
calld->max_send_size = chand->max_send_size;
calld->max_recv_size = chand->max_recv_size;
if (chand->method_config_table != NULL) {
grpc_method_config* method_config =
grpc_method_config_table_get_method_config(chand->method_config_table,
args->path);
if (method_config != NULL) {
const int32_t* max_request_message_bytes =
grpc_method_config_get_max_request_message_bytes(method_config);
if (max_request_message_bytes != NULL &&
(*max_request_message_bytes < calld->max_send_size ||
calld->max_send_size < 0)) {
calld->max_send_size = *max_request_message_bytes;
}
const int32_t* max_response_message_bytes =
grpc_method_config_get_max_response_message_bytes(method_config);
if (max_response_message_bytes != NULL &&
(*max_response_message_bytes < calld->max_recv_size ||
calld->max_recv_size < 0)) {
calld->max_recv_size = *max_response_message_bytes;
}
}
}
return GRPC_ERROR_NONE;
}
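/* Illustrative standalone sketch (not part of this diff) of the limit-merging
   rule used in init_call_elem() above: a limit of -1 means "unlimited", and a
   per-method limit may only tighten (never loosen) the channel-level limit.
   merge_max_size is a hypothetical name. */
#include <assert.h>
#include <stddef.h>

static int merge_max_size(int channel_limit, const int *method_limit) {
  int limit = channel_limit;
  if (method_limit != NULL && (*method_limit < limit || limit < 0)) {
    limit = *method_limit;
  }
  return limit;
}

int main(void) {
  int m = 4096;
  assert(merge_max_size(-1, &m) == 4096);   /* unlimited tightened to 4096 */
  assert(merge_max_size(1024, &m) == 1024); /* larger method limit ignored */
  assert(merge_max_size(1024, NULL) == 1024);
  return 0;
}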
@ -155,11 +186,22 @@ static void init_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_arg_get_integer(&args->channel_args->args[i], options);
}
}
// Get method config table from channel args.
const grpc_arg* channel_arg =
grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG);
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
chand->method_config_table = grpc_method_config_table_ref(
(grpc_method_config_table*)channel_arg->value.pointer.p);
}
}
// Destructor for channel_data.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem) {}
grpc_channel_element* elem) {
channel_data* chand = elem->channel_data;
grpc_method_config_table_unref(chand->method_config_table);
}
const grpc_channel_filter grpc_message_size_filter = {
start_transport_stream_op,

@ -35,6 +35,8 @@
#include <grpc/support/alloc.h>
#include "src/core/lib/profiling/timers.h"
void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
@ -51,7 +53,7 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
GRPC_ERROR_UNREF(error);
return;
}
closure->error = error;
closure->error_data.error = error;
closure->next_data.next = NULL;
if (closure_list->head == NULL) {
closure_list->head = closure;
@ -64,8 +66,8 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
void grpc_closure_list_fail_all(grpc_closure_list *list,
grpc_error *forced_failure) {
for (grpc_closure *c = list->head; c != NULL; c = c->next_data.next) {
if (c->error == GRPC_ERROR_NONE) {
c->error = GRPC_ERROR_REF(forced_failure);
if (c->error_data.error == GRPC_ERROR_NONE) {
c->error_data.error = GRPC_ERROR_REF(forced_failure);
}
}
GRPC_ERROR_UNREF(forced_failure);
@ -110,3 +112,13 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
return &wc->wrapper;
}
void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_closure_run", 0);
if (c != NULL) {
c->cb(exec_ctx, c->cb_arg, error);
}
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_closure_run", 0);
}
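/* Illustrative usage sketch (not part of this diff): grpc_closure_run() added
   above invokes the callback inline on the calling thread instead of queueing
   it on the exec_ctx, and consumes one reference on the error it is given.
   Assumes the in-tree closure.h/exec_ctx.h headers; on_done and run_inline
   are hypothetical. */
#include <grpc/support/log.h>
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  gpr_log(GPR_DEBUG, "closure ran inline (arg=%p)", arg);
}

static void run_inline(grpc_exec_ctx *exec_ctx) {
  grpc_closure c;
  grpc_closure_init(&c, on_done, NULL);
  grpc_closure_run(exec_ctx, &c, GRPC_ERROR_NONE); /* no locks held here */
}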

@ -76,7 +76,10 @@ struct grpc_closure {
void *cb_arg;
/** Once queued, the result of the closure. Before then: scratch space */
grpc_error *error;
union {
grpc_error *error;
uintptr_t scratch;
} error_data;
};
/** Initializes \a closure with \a cb and \a cb_arg. */
@ -106,4 +109,10 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
/** return whether \a list is empty. */
bool grpc_closure_list_empty(grpc_closure_list list);
/** Run a closure directly. Caller ensures that no locks are being held above.
* Note that calling this at the end of a closure callback function itself is
* by definition safe. */
void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error);
#endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */

@ -50,25 +50,57 @@ int grpc_combiner_trace = 0;
} \
} while (0)
#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2
struct grpc_combiner {
grpc_combiner *next_combiner_on_this_exec_ctx;
grpc_workqueue *optional_workqueue;
gpr_mpscq queue;
// state is:
// lower bit - zero if orphaned
// other bits - number of items queued on the lock
// lower bit - zero if orphaned (STATE_UNORPHANED)
// other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
gpr_atm state;
bool take_async_break_before_final_list;
// number of elements in the list that are covered by a poller: if >0, we can
// offload safely
gpr_atm elements_covered_by_poller;
bool time_to_execute_final_list;
bool final_list_covered_by_poller;
grpc_closure_list final_list;
grpc_closure continue_finishing;
grpc_closure offload;
};
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
typedef struct {
grpc_error *error;
bool covered_by_poller;
} error_data;
static uintptr_t pack_error_data(error_data d) {
return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
}
static error_data unpack_error_data(uintptr_t p) {
return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
}
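/* Illustrative standalone sketch (not from the gRPC sources) of the pointer
   tagging that pack_error_data()/unpack_error_data() rely on: heap pointers
   (and the "special" grpc_error values, made even later in this diff) have a
   zero low bit, so that bit is free to carry the covered_by_poller flag. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  void *ptr;
  bool flag;
} tagged;

static uintptr_t pack(tagged t) {
  return (uintptr_t)t.ptr | (t.flag ? 1u : 0u);
}

static tagged unpack(uintptr_t p) {
  return (tagged){(void *)(p & ~(uintptr_t)1), (p & 1) != 0};
}

int main(void) {
  static long storage; /* any naturally aligned object has an even address */
  tagged out = unpack(pack((tagged){&storage, true}));
  assert(out.ptr == &storage && out.flag);
  assert(!unpack(pack((tagged){&storage, false})).flag);
  return 0;
}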
static bool is_covered_by_poller(grpc_combiner *lock) {
return lock->final_list_covered_by_poller ||
gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
}
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
lock->next_combiner_on_this_exec_ctx = NULL;
lock->time_to_execute_final_list = false;
lock->optional_workqueue = optional_workqueue;
gpr_atm_no_barrier_store(&lock->state, 1);
lock->final_list_covered_by_poller = false;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
lock->take_async_break_before_final_list = false;
grpc_closure_list_init(&lock->final_list);
grpc_closure_init(&lock->offload, offload, lock);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@ -82,7 +114,7 @@ static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -1);
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
if (old_state == 1) {
@ -90,170 +122,186 @@ void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock) {
lock->next_combiner_on_this_exec_ctx = NULL;
if (exec_ctx->active_combiner == NULL) {
exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
} else {
exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
exec_ctx->last_combiner = lock;
}
}
static void continue_finishing_mainline(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
GPR_TIMER_BEGIN("combiner.continue_executing_mainline", 0);
grpc_combiner *lock = arg;
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p continue_finishing_mainline", lock));
GPR_ASSERT(exec_ctx->active_combiner == NULL);
static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
grpc_combiner *lock) {
lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner;
exec_ctx->active_combiner = lock;
if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
GPR_TIMER_END("combiner.continue_executing_mainline", 0);
if (lock->next_combiner_on_this_exec_ctx == NULL) {
exec_ctx->last_combiner = lock;
}
}
static void execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.execute_final", 0);
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
lock->take_async_break_before_final_list = false;
int loops = 0;
while (c != NULL) {
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
loops++;
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *cl, grpc_error *error,
bool covered_by_poller) {
GPR_TIMER_BEGIN("combiner.execute", 0);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
cl, covered_by_poller, last));
GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed
cl->error_data.scratch =
pack_error_data((error_data){error, covered_by_poller});
if (covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1);
}
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
if (last == 1) {
// first element on this list: add it to the list of combiner locks
// executing within this exec_ctx
push_last_on_exec_ctx(exec_ctx, lock);
}
GPR_TIMER_END("combiner.execute_final", 0);
GPR_TIMER_END("combiner.execute", 0);
}
static void continue_executing_final(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
GPR_TIMER_BEGIN("combiner.continue_executing_final", 0);
grpc_combiner *lock = arg;
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p continue_executing_final", lock));
GPR_ASSERT(exec_ctx->active_combiner == NULL);
exec_ctx->active_combiner = lock;
// quick peek to see if new things have turned up on the queue: if so, go back
// to executing them before the final list
if ((gpr_atm_acq_load(&lock->state) >> 1) > 1) {
if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
} else {
execute_final(exec_ctx, lock);
finish(exec_ctx, lock);
static void move_next(grpc_exec_ctx *exec_ctx) {
exec_ctx->active_combiner =
exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
if (exec_ctx->active_combiner == NULL) {
exec_ctx->last_combiner = NULL;
}
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
GPR_TIMER_END("combiner.continue_executing_final", 0);
}
static bool start_execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.start_execute_final", 0);
GPR_ASSERT(exec_ctx->active_combiner == lock);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG,
"C:%p start_execute_final take_async_break_before_final_list=%d",
lock, lock->take_async_break_before_final_list));
if (lock->take_async_break_before_final_list) {
grpc_closure_init(&lock->continue_finishing, continue_executing_final,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
GPR_TIMER_END("combiner.start_execute_final", 0);
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_combiner *lock = arg;
push_last_on_exec_ctx(exec_ctx, lock);
}
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
move_next(exec_ctx);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
lock->optional_workqueue));
grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload,
GRPC_ERROR_NONE);
}
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
grpc_combiner *lock = exec_ctx->active_combiner;
if (lock == NULL) {
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return false;
} else {
execute_final(exec_ctx, lock);
GPR_TIMER_END("combiner.start_execute_final", 0);
return true;
}
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
GPR_TIMER_BEGIN("combiner.maybe_finish_one", 0);
gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
GPR_ASSERT(exec_ctx->active_combiner == lock);
if (n == NULL) {
// Queue is in a transiently inconsistent state: a new item is being queued
// but is not visible to this thread yet.
// Use this as a cue that we should go off and do something else for a while
// (and come back later)
grpc_closure_init(&lock->continue_finishing, continue_finishing_mainline,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
GPR_TIMER_END("combiner.maybe_finish_one", 0);
return false;
gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
"is_covered_by_poller=%d exec_ctx_ready_to_finish=%d "
"time_to_execute_final_list=%d",
lock, lock->optional_workqueue, is_covered_by_poller(lock),
grpc_exec_ctx_ready_to_finish(exec_ctx),
lock->time_to_execute_final_list));
if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
grpc_exec_ctx_ready_to_finish(exec_ctx)) {
GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
// this execution context wants to move on, and we have a workqueue (and
// so can help the execution context out): schedule remaining work to be
// picked up on the workqueue
queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
grpc_closure *cl = (grpc_closure *)n;
grpc_error *error = cl->error;
cl->cb(exec_ctx, cl->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("combiner.maybe_finish_one", 0);
return true;
}
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
bool (*executor)(grpc_exec_ctx * exec_ctx, grpc_combiner * lock);
GPR_TIMER_BEGIN("combiner.finish", 0);
int loops = 0;
do {
executor = maybe_finish_one;
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -2);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p finish[%d] old_state=%" PRIdPTR, lock,
loops, old_state));
switch (old_state) {
default:
// we have multiple queued work items: just continue executing them
break;
case 5: // we're down to one queued item: if it's the final list we
case 4: // should do that
if (!grpc_closure_list_empty(lock->final_list)) {
executor = start_execute_final;
}
break;
case 3: // had one count, one unorphaned --> unlocked unorphaned
GPR_TIMER_END("combiner.finish", 0);
return;
case 2: // and one count, one orphaned --> unlocked and orphaned
really_destroy(exec_ctx, lock);
GPR_TIMER_END("combiner.finish", 0);
return;
case 1:
case 0:
// these values are illegal - representing an already unlocked or
// deleted lock
GPR_UNREACHABLE_CODE(return );
if (!lock->time_to_execute_final_list ||
// peek to see if something new has shown up, and execute that with
// priority
(gpr_atm_acq_load(&lock->state) >> 1) > 1) {
gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
if (n == NULL) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
GPR_TIMER_MARK("delay_busy", 0);
if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
queue_offload(exec_ctx, lock);
}
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
loops++;
} while (executor(exec_ctx, lock));
GPR_TIMER_END("combiner.finish", 0);
}
GPR_TIMER_BEGIN("combiner.exec1", 0);
grpc_closure *cl = (grpc_closure *)n;
error_data err = unpack_error_data(cl->error_data.scratch);
cl->cb(exec_ctx, cl->cb_arg, err.error);
if (err.covered_by_poller) {
gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
}
GRPC_ERROR_UNREF(err.error);
GPR_TIMER_END("combiner.exec1", 0);
} else {
grpc_closure *c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
lock->final_list_covered_by_poller = false;
int loops = 0;
while (c != NULL) {
GPR_TIMER_BEGIN("combiner.exec_1final", 0);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
GPR_TIMER_END("combiner.exec_1final", 0);
}
}
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *cl, grpc_error *error) {
GPR_TIMER_MARK("unref", 0);
move_next(exec_ctx);
lock->time_to_execute_final_list = false;
gpr_atm old_state =
gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p grpc_combiner_execute c=%p", lock, cl));
GPR_TIMER_BEGIN("combiner.execute", 0);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, 2);
GPR_ASSERT(last & 1); // ensure lock has not been destroyed
if (last == 1) {
exec_ctx->active_combiner = lock;
GPR_TIMER_BEGIN("combiner.execute_first_cb", 0);
cl->cb(exec_ctx, cl->cb_arg, error);
GPR_TIMER_END("combiner.execute_first_cb", 0);
GRPC_ERROR_UNREF(error);
finish(exec_ctx, lock);
GPR_ASSERT(exec_ctx->active_combiner == lock);
exec_ctx->active_combiner = NULL;
} else {
cl->error = error;
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \
(((orphaned) ? 0 : STATE_UNORPHANED) | \
((elem_count)*STATE_ELEM_COUNT_LOW_BIT))
// Depending on what the previous state was, we need to perform different
// actions.
switch (old_state) {
default:
// we have multiple queued work items: just continue executing them
break;
case OLD_STATE_WAS(false, 2):
case OLD_STATE_WAS(true, 2):
// we're down to one queued item: if it's the final list we should do that
if (!grpc_closure_list_empty(lock->final_list)) {
lock->time_to_execute_final_list = true;
}
break;
case OLD_STATE_WAS(false, 1):
// had one count, one unorphaned --> unlocked unorphaned
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
case OLD_STATE_WAS(true, 1):
// and one count, one orphaned --> unlocked and orphaned
really_destroy(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
case OLD_STATE_WAS(false, 0):
case OLD_STATE_WAS(true, 0):
// these values are illegal - representing an already unlocked or
// deleted lock
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
GPR_UNREACHABLE_CODE(return true);
}
GPR_TIMER_END("combiner.execute", 0);
push_first_on_exec_ctx(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
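/* Illustrative standalone sketch (not from the gRPC sources) of the combiner
   state word used above: bit 0 (STATE_UNORPHANED) records whether the lock
   still has an owner, and the remaining bits count queued closures in units
   of STATE_ELEM_COUNT_LOW_BIT. Enqueue adds 2, finishing a closure subtracts
   2, and grpc_combiner_destroy() subtracts 1; hitting zero means "unlocked
   and orphaned", which is when really_destroy() runs. */
#include <assert.h>
#include <stdint.h>

#define UNORPHANED 1
#define ELEM_LOW_BIT 2

int main(void) {
  intptr_t state = UNORPHANED; /* freshly created, nothing queued     */
  state += ELEM_LOW_BIT;       /* first closure queued  -> 3          */
  state += ELEM_LOW_BIT;       /* second closure queued -> 5          */
  state -= ELEM_LOW_BIT;       /* one closure finished  -> 3          */
  state -= UNORPHANED;         /* combiner destroyed    -> 2          */
  state -= ELEM_LOW_BIT;       /* last closure finished -> 0          */
  assert(state == 0);          /* unlocked and orphaned: safe to free */
  return 0;
}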
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
@ -264,30 +312,26 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool force_async_break) {
bool covered_by_poller) {
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG,
"C:%p grpc_combiner_execute_finally c=%p force_async_break=%d; ac=%p",
lock, closure, force_async_break, exec_ctx->active_combiner));
GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
closure, exec_ctx->active_combiner, covered_by_poller));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
grpc_combiner_execute(exec_ctx, lock,
grpc_closure_create(enqueue_finally, closure), error);
grpc_closure_create(enqueue_finally, closure), error,
false);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
if (force_async_break) {
lock->take_async_break_before_final_list = true;
}
if (grpc_closure_list_empty(lock->final_list)) {
gpr_atm_full_fetch_add(&lock->state, 2);
gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
}
if (covered_by_poller) {
lock->final_list_covered_by_poller = true;
}
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
void grpc_combiner_force_async_finally(grpc_combiner *lock) {
lock->take_async_break_before_final_list = true;
}

@ -52,19 +52,14 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
// Execute \a action within the lock.
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error);
grpc_closure *closure, grpc_error *error,
bool covered_by_poller);
// Execute \a action within the lock just prior to unlocking.
// if \a hint_async_break is true, the combiner tries to hand execution to
// another thread before finishing the primary queue of combined closures and
// executing the finally list.
// Deprecation warning: \a hint_async_break will be removed in a future version
// Takes a very slow and round-about path if not called from a
// grpc_combiner_execute closure.
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool hint_async_break);
// Deprecated: force the finally list execution onto another thread
void grpc_combiner_force_async_finally(grpc_combiner *lock);
bool covered_by_poller);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
extern int grpc_combiner_trace;

@ -120,6 +120,8 @@ static const char *error_int_name(grpc_error_ints key) {
return "http_status";
case GRPC_ERROR_INT_LIMIT:
return "limit";
case GRPC_ERROR_INT_OCCURRED_DURING_WRITE:
return "occurred_during_write";
}
GPR_UNREACHABLE_CODE(return "unknown");
}
@ -144,6 +146,8 @@ static const char *error_str_name(grpc_error_strs key) {
return "tsi_error";
case GRPC_ERROR_STR_FILENAME:
return "filename";
case GRPC_ERROR_STR_QUEUED_BUFFERS:
return "queued_buffers";
}
GPR_UNREACHABLE_CODE(return "unknown");
}
@ -265,7 +269,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
} else {
out = gpr_malloc(sizeof(*out));
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
gpr_log(GPR_DEBUG, "%p create copying", out);
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
#endif
out->ints = gpr_avl_ref(in->ints);
out->strs = gpr_avl_ref(in->strs);
@ -523,21 +527,25 @@ static char *fmt_time(void *p) {
return out;
}
static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap) {
static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap,
bool *first) {
if (n == NULL) return;
add_errs(n->left, s, sz, cap);
add_errs(n->left, s, sz, cap, first);
if (!*first) append_chr(',', s, sz, cap);
*first = false;
const char *e = grpc_error_string(n->value);
append_str(e, s, sz, cap);
grpc_error_free_string(e);
add_errs(n->right, s, sz, cap);
add_errs(n->right, s, sz, cap, first);
}
static char *errs_string(grpc_error *err) {
char *s = NULL;
size_t sz = 0;
size_t cap = 0;
bool first = true;
append_chr('[', &s, &sz, &cap);
add_errs(err->errs.root, &s, &sz, &cap);
add_errs(err->errs.root, &s, &sz, &cap, &first);
append_chr(']', &s, &sz, &cap);
append_chr(0, &s, &sz, &cap);
return s;

@ -100,6 +100,8 @@ typedef enum {
GRPC_ERROR_INT_HTTP_STATUS,
/// context sensitive limit associated with the error
GRPC_ERROR_INT_LIMIT,
/// chttp2: did the error occur while a write was in progress
GRPC_ERROR_INT_OCCURRED_DURING_WRITE,
} grpc_error_ints;
typedef enum {
@ -121,6 +123,8 @@ typedef enum {
GRPC_ERROR_STR_TSI_ERROR,
/// filename that we were trying to read/write when this error occurred
GRPC_ERROR_STR_FILENAME,
/// which data was queued for writing when the error occurred
GRPC_ERROR_STR_QUEUED_BUFFERS
} grpc_error_strs;
typedef enum {
@ -128,9 +132,13 @@ typedef enum {
GRPC_ERROR_TIME_CREATED,
} grpc_error_times;
/// The following "special" errors can be propagated without allocating memory.
/// They are always even so that other code (particularly combiner locks) can
/// safely use the lower bit for themselves.
#define GRPC_ERROR_NONE ((grpc_error *)NULL)
#define GRPC_ERROR_OOM ((grpc_error *)1)
#define GRPC_ERROR_CANCELLED ((grpc_error *)2)
#define GRPC_ERROR_OOM ((grpc_error *)2)
#define GRPC_ERROR_CANCELLED ((grpc_error *)4)
const char *grpc_error_string(grpc_error *error);
void grpc_error_free_string(const char *str);

@ -152,20 +152,20 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
//#define GRPC_PI_REF_COUNT_DEBUG
#ifdef GRPC_PI_REF_COUNT_DEBUG
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
#else /* defined(GRPC_PI_REF_COUNT_DEBUG) */
#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
#endif /* !defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
@ -185,8 +185,17 @@ typedef struct polling_island {
* (except mu and ref_count) are invalid and must be ignored. */
gpr_atm merged_to;
/* The workqueue associated with this polling island */
grpc_workqueue *workqueue;
/* Number of threads currently polling on this island */
gpr_atm poller_count;
/* Mutex guarding the read end of the workqueue (must be held to pop from
* workqueue_items) */
gpr_mu workqueue_read_mu;
/* Queue of closures to be executed */
gpr_mpscq workqueue_items;
/* Count of items in workqueue_items */
gpr_atm workqueue_item_count;
/* Wakeup fd used to wake pollers to check the contents of workqueue_items */
grpc_wakeup_fd workqueue_wakeup_fd;
/* The fd of the underlying epoll set */
int epoll_fd;
@ -275,6 +284,10 @@ static bool append_error(grpc_error **composite, grpc_error *error,
threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
static grpc_wakeup_fd polling_island_wakeup_fd;
/* The polling island being polled right now.
See comments in workqueue_maybe_wakeup for why this is tracked. */
static __thread polling_island *g_current_thread_polling_island;
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
@ -289,12 +302,12 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
#ifdef GRPC_PI_REF_COUNT_DEBUG
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
int line) {
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_add_ref(pi);
gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
@ -302,12 +315,42 @@ static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
char *reason, char *file, int line) {
const char *reason, const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_unref(exec_ctx, pi);
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
(void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
if (workqueue != NULL) {
pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
if (workqueue != NULL) {
pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
}
}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_add_ref((polling_island *)workqueue);
}
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
if (workqueue != NULL) {
pi_unref(exec_ctx, (polling_island *)workqueue);
}
}
#endif
static void pi_add_ref(polling_island *pi) {
@ -315,10 +358,7 @@ static void pi_add_ref(polling_island *pi) {
}
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
/* If ref count went to one, we're back to just the workqueue owning a ref.
Unref the workqueue to break the loop.
If ref count went to zero, delete the polling island.
/* If ref count went to zero, delete the polling island.
Note that this deletion need not be done under a lock. Once the ref count goes
to zero, we are guaranteed that no one else holds a reference to the
polling island (and that there is no racing pi_add_ref() call either).
@ -326,20 +366,12 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
Also, if we are deleting the polling island and the merged_to field is
non-empty, we should remove a ref to the merged_to polling island
*/
switch (gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
case 2: /* last external ref: the only one now owned is by the workqueue */
GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
break;
case 1: {
polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
polling_island_delete(exec_ctx, pi);
if (next != NULL) {
PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
}
break;
if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
polling_island_delete(exec_ctx, pi);
if (next != NULL) {
PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
}
case 0:
GPR_UNREACHABLE_CODE(return );
}
}
@ -488,11 +520,20 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
pi->workqueue = NULL;
gpr_mu_init(&pi->workqueue_read_mu);
gpr_mpscq_init(&pi->workqueue_items);
gpr_atm_rel_store(&pi->workqueue_item_count, 0);
gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
err_desc)) {
goto done;
}
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@ -501,26 +542,14 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
}
polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
if (append_error(error, grpc_workqueue_create(exec_ctx, &pi->workqueue),
err_desc) &&
*error == GRPC_ERROR_NONE) {
polling_island_add_fds_locked(pi, &pi->workqueue->wakeup_read_fd, 1, true,
error);
GPR_ASSERT(pi->workqueue->wakeup_read_fd->polling_island == NULL);
pi->workqueue->wakeup_read_fd->polling_island = pi;
PI_ADD_REF(pi, "fd");
}
done:
if (*error != GRPC_ERROR_NONE) {
if (pi->workqueue != NULL) {
GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
}
polling_island_delete(exec_ctx, pi);
pi = NULL;
}
@ -533,7 +562,11 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
gpr_mu_destroy(&pi->workqueue_read_mu);
gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_free(pi->fds);
gpr_free(pi);
}
@ -678,6 +711,45 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
static void workqueue_maybe_wakeup(polling_island *pi) {
/* If this thread is the current poller, then it may be that it's about to
decrement the current poller count, so we need to look past this thread */
bool is_current_poller = (g_current_thread_polling_island == pi);
gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
/* Only issue a wakeup if it's likely that some poller could come in and take
it right now. Note that since we do an anticipatory mpscq_pop every poll
loop, it's ok if we miss the wakeup here, as we'll get the work item when
the next poller enters anyway. */
if (current_pollers > min_current_pollers_for_wakeup) {
GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
}
}
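/* Illustrative standalone sketch (not from the gRPC sources) of the wakeup
   heuristic above: a workqueue wakeup is only worth issuing if some *other*
   thread is currently parked in epoll_pwait(), so the caller discounts itself
   from the poller count before comparing. should_wake is hypothetical. */
#include <stdatomic.h>
#include <stdbool.h>

static bool should_wake(atomic_long *poller_count, bool caller_is_polling) {
  long min_needed = caller_is_polling ? 1 : 0;
  return atomic_load_explicit(poller_count, memory_order_relaxed) > min_needed;
}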
static void workqueue_move_items_to_parent(polling_island *q) {
polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
if (p == NULL) {
return;
}
gpr_mu_lock(&q->workqueue_read_mu);
int num_added = 0;
while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
if (n != NULL) {
gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
gpr_mpscq_push(&p->workqueue_items, n);
num_added++;
}
}
gpr_mu_unlock(&q->workqueue_read_mu);
if (num_added > 0) {
workqueue_maybe_wakeup(p);
}
workqueue_move_items_to_parent(p);
}
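/* Editor's note (not part of this change): if islands were merged as
   q -> p -> r (each merged_to pointing at the next), calling this on q first
   moves q's pending items onto p, wakes a poller on p if anything moved, and
   then recurses so the items continue on to r, the island that still has
   live pollers attached. */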
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@ -702,6 +774,8 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
workqueue_move_items_to_parent(q);
}
/* else if p == q, nothing needs to be done */
@ -712,6 +786,26 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue, grpc_closure *closure,
grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
/* take a ref to the workqueue: otherwise whatever events this kicks off
 * could end up destroying the workqueue before this function
 * completes */
GRPC_WORKQUEUE_REF(workqueue, "enqueue");
polling_island *pi = (polling_island *)workqueue;
gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
closure->error_data.error = error;
gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
if (last == 0) {
workqueue_maybe_wakeup(pi);
}
workqueue_move_items_to_parent(pi);
GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
GPR_TIMER_END("workqueue.enqueue", 0);
}
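/* Editor's sketch (not part of this change): enqueueing a closure onto a
   polling island's workqueue. 'my_cb' and 'enqueue_example' are hypothetical;
   grpc_closure_init and grpc_workqueue_enqueue are the APIs this diff wires
   up. Since the wakeup above only fires on the 0 -> 1 item-count transition,
   a burst of enqueues issues at most one producer-side wakeup. */
static void my_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* hypothetical callback: runs later on one of the island's poller threads */
}
static void enqueue_example(grpc_exec_ctx *exec_ctx, grpc_workqueue *wq,
                            grpc_closure *c) {
  grpc_closure_init(c, my_cb, NULL);
  grpc_workqueue_enqueue(exec_ctx, wq, c, GRPC_ERROR_NONE);
}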
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@ -1042,11 +1136,8 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
gpr_mu_lock(&fd->mu);
grpc_workqueue *workqueue = NULL;
if (fd->polling_island != NULL) {
workqueue =
GRPC_WORKQUEUE_REF(fd->polling_island->workqueue, "get_workqueue");
}
grpc_workqueue *workqueue = GRPC_WORKQUEUE_REF(
(grpc_workqueue *)fd->polling_island, "fd_get_workqueue");
gpr_mu_unlock(&fd->mu);
return workqueue;
}
@ -1299,7 +1390,29 @@ static void pollset_reset(grpc_pollset *pollset) {
GPR_ASSERT(pollset->polling_island == NULL);
}
#define GRPC_EPOLL_MAX_EVENTS 1000
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
polling_island *pi) {
if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
gpr_mu_unlock(&pi->workqueue_read_mu);
if (n != NULL) {
if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) {
workqueue_maybe_wakeup(pi);
}
grpc_closure *c = (grpc_closure *)n;
grpc_closure_run(exec_ctx, c, c->error_data.error);
return true;
} else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
/* n == NULL might mean there's work but it's not yet available to be
 * popped - try to ensure another poller wakes up to check again shortly
 * if so */
workqueue_maybe_wakeup(pi);
}
}
return false;
}
#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
@ -1354,7 +1467,13 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->mu);
do {
/* If we get some workqueue work to do, it might end up completing an item on
the completion queue, so there's no need to poll: skip the poll and redo
the whole loop to verify */
if (!maybe_do_workqueue_work(exec_ctx, pi)) {
gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
g_current_thread_polling_island = pi;
GRPC_SCHEDULING_START_BLOCKING_REGION;
ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
sig_mask);
@ -1386,6 +1505,11 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
append_error(error,
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
err_desc);
} else if (data_ptr == &pi->workqueue_wakeup_fd) {
append_error(error,
grpc_wakeup_fd_consume_wakeup(&pi->workqueue_wakeup_fd),
err_desc);
maybe_do_workqueue_work(exec_ctx, pi);
} else if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
@ -1408,7 +1532,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
}
}
}
} while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
g_current_thread_polling_island = NULL;
gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
}
GPR_ASSERT(pi != NULL);
@ -1868,6 +1995,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_enqueue = workqueue_enqueue,
.shutdown_engine = shutdown_engine,
};

@ -1988,6 +1988,32 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
/*******************************************************************************
* workqueue stubs
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue, grpc_closure *closure,
grpc_error *error) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
}
/*******************************************************************************
* event engine binding
*/
@ -2029,6 +2055,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_enqueue = workqueue_enqueue,
.shutdown_engine = shutdown_engine,
};

@ -985,8 +985,15 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (errno != EINTR) {
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
}
for (i = 2; i < pfd_count; i++) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
// Wake up all the file descriptors; if we have an invalid one,
// we can identify it on the next pollset_work()
fd_end_poll(exec_ctx, &watchers[i], 1, 1, pollset);
}
}
} else if (r == 0) {
for (i = 2; i < pfd_count; i++) {
@ -1259,6 +1266,32 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
/*******************************************************************************
* workqueue stubs
*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
const char *file, int line,
const char *reason) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
static void workqueue_unref(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {}
#endif
static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue, grpc_closure *closure,
grpc_error *error) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
}
/*******************************************************************************
* Condition Variable polling extensions
*/
@ -1498,6 +1531,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
.workqueue_ref = workqueue_ref,
.workqueue_unref = workqueue_unref,
.workqueue_enqueue = workqueue_enqueue,
.shutdown_engine = shutdown_engine,
};

@ -259,4 +259,27 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
grpc_error *grpc_kick_poller(void) { return g_event_engine->kick_poller(); }
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason) {
return g_event_engine->workqueue_ref(workqueue, file, line, reason);
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
g_event_engine->workqueue_unref(exec_ctx, workqueue, file, line, reason);
}
#else
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
return g_event_engine->workqueue_ref(workqueue);
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
g_event_engine->workqueue_unref(exec_ctx, workqueue);
}
#endif
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
grpc_closure *closure, grpc_error *error) {
g_event_engine->workqueue_enqueue(exec_ctx, workqueue, closure, error);
}
#endif // GPR_POSIX_SOCKET

@ -40,6 +40,7 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
typedef struct grpc_fd grpc_fd;
@ -95,6 +96,18 @@ typedef struct grpc_event_engine_vtable {
grpc_error *(*kick_poller)(void);
void (*shutdown_engine)(void);
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue, const char *file,
int line, const char *reason);
void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
void (*workqueue_enqueue)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
grpc_closure *closure, grpc_error *error);
} grpc_event_engine_vtable;
void grpc_event_engine_init(void);

@ -37,6 +37,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
@ -60,18 +61,43 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error;
did_something = true;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
for (;;) {
if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
did_something = true;
grpc_closure_run(exec_ctx, c, c->error_data.error);
c = next;
}
} else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) {
break;
}
}
GPR_ASSERT(exec_ctx->active_combiner == NULL);
if (exec_ctx->stealing_from_workqueue != NULL) {
if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue,
exec_ctx->stolen_closure,
exec_ctx->stolen_closure->error_data.error);
GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
"exec_ctx_sched");
exec_ctx->stealing_from_workqueue = NULL;
exec_ctx->stolen_closure = NULL;
} else {
grpc_closure *c = exec_ctx->stolen_closure;
GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
"exec_ctx_sched");
exec_ctx->stealing_from_workqueue = NULL;
exec_ctx->stolen_closure = NULL;
grpc_error *error = c->error_data.error;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0);
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0);
grpc_exec_ctx_flush(exec_ctx);
did_something = true;
}
}
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
@ -86,12 +112,25 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error,
grpc_workqueue *offload_target_or_null) {
GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0);
if (offload_target_or_null == NULL) {
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
} else {
} else if (exec_ctx->stealing_from_workqueue == NULL) {
exec_ctx->stealing_from_workqueue = offload_target_or_null;
closure->error_data.error = error;
exec_ctx->stolen_closure = closure;
} else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) {
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
} else { /* stealing_from_workqueue == offload_target_or_null */
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null,
exec_ctx->stolen_closure,
exec_ctx->stolen_closure->error_data.error);
closure->error_data.error = error;
exec_ctx->stolen_closure = closure;
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
}
GPR_TIMER_END("grpc_exec_ctx_sched", 0);
}
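/* Editor's worked example (not part of this change): the steal-slot logic
   above, traced for three closures A, B, C scheduled from one thread, all
   with the same offload target W:
     sched(A, W): no current steal target  -> A is held in stolen_closure.
     sched(B, W): W is already the target  -> A is pushed onto W's queue and
                  B replaces it in stolen_closure.
     sched(C, W): same case again          -> B is pushed onto W, C is held.
   Items therefore reach W in the order they were scheduled (A, then B), with
   only the most recently scheduled closure held back for possible inline
   execution at flush time. */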
void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,

@ -66,15 +66,33 @@ typedef struct grpc_combiner grpc_combiner;
#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
struct grpc_exec_ctx {
grpc_closure_list closure_list;
/** The workqueue we're stealing work from.
As items are queued to the execution context, we try to steal one
workqueue item and execute it inline (assuming the exec_ctx is not
finished) - doing so does not invalidate the workqueue's contract, and
provides a small latency win in cases where we get a hit */
grpc_workqueue *stealing_from_workqueue;
/** The workqueue item that was stolen from the workqueue above. When new
items are scheduled to be offloaded to that workqueue, we need to update
this like a 1-deep fifo to maintain the invariant that workqueue items
queued by one thread are started in order */
grpc_closure *stolen_closure;
/** currently active combiner: updated only via combiner.c */
grpc_combiner *active_combiner;
/** last active combiner in the active combiner list */
grpc_combiner *last_combiner;
bool cached_ready_to_finish;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
};
/* initializer for grpc_exec_ctx:
prefer to use GRPC_EXEC_CTX_INIT whenever possible */
#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
{ GRPC_CLOSURE_LIST_INIT, NULL, false, finish_check_arg, finish_check }
{ \
GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
finish_check \
}
#else
struct grpc_exec_ctx {
bool cached_ready_to_finish;
@ -85,8 +103,10 @@ struct grpc_exec_ctx {
{ false, finish_check_arg, finish_check }
#endif
/* initialize an execution context at the top level of an API call into grpc
(this is safe to use elsewhere, though possibly not as efficient) */
#define GRPC_EXEC_CTX_INIT \
GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_never_ready_to_finish, NULL)
GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
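/* Editor's sketch (not part of this change): the usual exec_ctx lifecycle at
   an API boundary. 'hypothetical_api_call' and 'do_something' are
   illustrative only; grpc_exec_ctx_finish is the existing teardown call that
   flushes whatever closures were appended. With GRPC_EXEC_CTX_INIT now based
   on grpc_always_ready_to_finish, a stolen workqueue item is handed back to
   its workqueue at flush time instead of being run inline. */
void hypothetical_api_call(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  do_something(&exec_ctx); /* may append closures to exec_ctx->closure_list */
  grpc_exec_ctx_finish(&exec_ctx);
}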
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.

@ -112,6 +112,14 @@ void grpc_iomgr_shutdown(void) {
continue;
}
if (g_root_object.next != &g_root_object) {
if (grpc_iomgr_abort_on_leaks()) {
gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
" iomgr objects before shutdown deadline: "
"memory leaks are likely",
count_objects());
dump_objects("LEAKED");
abort();
}
gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
@ -122,9 +130,6 @@ void grpc_iomgr_shutdown(void) {
"memory leaks are likely",
count_objects());
dump_objects("LEAKED");
if (grpc_iomgr_abort_on_leaks()) {
abort();
}
}
break;
}

@ -177,7 +177,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
tcp->read_cb = NULL;
tcp->incoming_buffer = NULL;
grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
grpc_closure_run(exec_ctx, cb, error);
}
#define MAX_READ_IOVEC 4
@ -209,11 +209,11 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
GPR_TIMER_BEGIN("recvmsg", 1);
GPR_TIMER_BEGIN("recvmsg", 0);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
GPR_TIMER_END("recvmsg", 0);
GPR_TIMER_END("recvmsg", read_bytes >= 0);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
@ -392,11 +392,8 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error_free_string(str);
}
GPR_TIMER_BEGIN("tcp_handle_write.cb", 0);
cb->cb(exec_ctx, cb->cb_arg, error);
GPR_TIMER_END("tcp_handle_write.cb", 0);
grpc_closure_run(exec_ctx, cb, error);
TCP_UNREF(exec_ctx, tcp, "write");
GRPC_ERROR_UNREF(error);
}
}

@ -49,11 +49,11 @@ typedef struct grpc_timer {
} grpc_timer;
/* Initialize *timer. When expired or canceled, timer_cb will be called with
*timer_cb_arg and status to indicate if it expired (SUCCESS) or was
canceled (CANCELLED). timer_cb is guaranteed to be called exactly once,
and application code should check the status to determine how it was
invoked. The application callback is also responsible for maintaining
information about when to free up any user-level state. */
*timer_cb_arg and error set to indicate if it expired (GRPC_ERROR_NONE) or
was canceled (GRPC_ERROR_CANCELLED). timer_cb is guaranteed to be called
exactly once, and application code should check the error to determine
how it was invoked. The application callback is also responsible for
maintaining information about when to free up any user-level state. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
void *timer_cb_arg, gpr_timespec now);
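/* Editor's sketch (not part of this change): a timer callback written against
   the error-based contract described above. 'my_timer_cb' and 'arm_timeout'
   are hypothetical; grpc_timer_init is the declared API. */
static void my_timer_cb(grpc_exec_ctx *exec_ctx, void *arg,
                        grpc_error *error) {
  if (error == GRPC_ERROR_NONE) {
    /* deadline expired: do the timeout work */
  } else {
    /* typically GRPC_ERROR_CANCELLED, as described above: clean up only */
  }
}
static void arm_timeout(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                        gpr_timespec deadline) {
  grpc_timer_init(exec_ctx, timer, deadline, my_timer_cb, timer,
                  gpr_now(deadline.clock_type));
}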
@ -74,8 +74,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
In all of these cases, the cancellation is still considered successful.
They are essentially distinguished in that the timer_cb will be run
exactly once from either the cancellation (with status CANCELLED)
or from the activation (with status SUCCESS)
exactly once from either the cancellation (with error GRPC_ERROR_CANCELLED)
or from the activation (with error GRPC_ERROR_NONE).
Note carefully that the callback function MAY occur in the same callstack
as grpc_timer_cancel. It's expected that most timers will be cancelled (their
@ -83,14 +83,13 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
that cancellation costs as little as possible. Making callbacks run inline
matches this aim.
Requires: cancel() must happen after add() on a given timer */
Requires: cancel() must happen after init() on a given timer */
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer);
/* iomgr internal api for dealing with timers */
/* Check for timers to be run, and run them.
Return true if timer callbacks were executed.
Drops drop_mu if it is non-null before executing callbacks.
If next is non-null, TRY to update *next with the next running timer
IF that timer occurs before *next current value.
*next is never guaranteed to be updated on any given execution; however,

@ -40,10 +40,6 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#ifdef GPR_POSIX_SOCKET
#include "src/core/lib/iomgr/workqueue_posix.h"
#endif
#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/workqueue_windows.h"
#endif
@ -58,20 +54,20 @@
string will be printed alongside the refcount. When it is not defined, the
string will be discarded at compilation time. */
//#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
/*#define GRPC_WORKQUEUE_REFCOUNT_DEBUG*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define GRPC_WORKQUEUE_REF(p, r) \
(grpc_workqueue_ref((p), __FILE__, __LINE__, (r)), (p))
grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
const char *reason);
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
#define GRPC_WORKQUEUE_REF(p, r) (grpc_workqueue_ref((p)), (p))
#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
void grpc_workqueue_ref(grpc_workqueue *workqueue);
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif

@ -1,196 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_SOCKET
#include "src/core/lib/iomgr/workqueue.h"
#include <stdio.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/profiling/timers.h"
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
grpc_workqueue **workqueue) {
char name[32];
*workqueue = gpr_malloc(sizeof(grpc_workqueue));
gpr_ref_init(&(*workqueue)->refs, 1);
gpr_atm_no_barrier_store(&(*workqueue)->state, 1);
grpc_error *err = grpc_wakeup_fd_init(&(*workqueue)->wakeup_fd);
if (err != GRPC_ERROR_NONE) {
gpr_free(*workqueue);
return err;
}
sprintf(name, "workqueue:%p", (void *)(*workqueue));
(*workqueue)->wakeup_read_fd = grpc_fd_create(
GRPC_WAKEUP_FD_GET_READ_FD(&(*workqueue)->wakeup_fd), name);
gpr_mpscq_init(&(*workqueue)->queue);
grpc_closure_init(&(*workqueue)->read_closure, on_readable, *workqueue);
grpc_fd_notify_on_read(exec_ctx, (*workqueue)->wakeup_read_fd,
&(*workqueue)->read_closure);
return GRPC_ERROR_NONE;
}
static void workqueue_destroy(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
grpc_fd_shutdown(exec_ctx, workqueue->wakeup_read_fd);
}
static void workqueue_orphan(grpc_exec_ctx *exec_ctx,
grpc_workqueue *workqueue) {
if (gpr_atm_full_fetch_add(&workqueue->state, -1) == 1) {
workqueue_destroy(exec_ctx, workqueue);
}
}
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
const char *reason) {
if (workqueue == NULL) return;
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p ref %d -> %d %s",
workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count + 1,
reason);
gpr_ref(&workqueue->refs);
}
#else
void grpc_workqueue_ref(grpc_workqueue *workqueue) {
if (workqueue == NULL) return;
gpr_ref(&workqueue->refs);
}
#endif
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {
if (workqueue == NULL) return;
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p unref %d -> %d %s",
workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count - 1,
reason);
if (gpr_unref(&workqueue->refs)) {
workqueue_orphan(exec_ctx, workqueue);
}
}
#else
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
if (workqueue == NULL) return;
if (gpr_unref(&workqueue->refs)) {
workqueue_orphan(exec_ctx, workqueue);
}
}
#endif
static void drain(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
abort();
}
static void wakeup(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
GPR_TIMER_MARK("workqueue.wakeup", 0);
grpc_error *err = grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
if (!GRPC_LOG_IF_ERROR("wakeupfd_wakeup", err)) {
drain(exec_ctx, workqueue);
}
}
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.on_readable", 0);
grpc_workqueue *workqueue = arg;
if (error != GRPC_ERROR_NONE) {
/* HACK: let wakeup_fd code know that we stole the fd */
workqueue->wakeup_fd.read_fd = 0;
grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
GPR_ASSERT(gpr_atm_no_barrier_load(&workqueue->state) == 0);
gpr_free(workqueue);
} else {
error = grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
gpr_mpscq_node *n = gpr_mpscq_pop(&workqueue->queue);
if (error == GRPC_ERROR_NONE) {
grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
&workqueue->read_closure);
} else {
/* recurse to get error handling */
on_readable(exec_ctx, arg, error);
}
if (n == NULL) {
/* try again - queue in an inconsistant state */
wakeup(exec_ctx, workqueue);
} else {
switch (gpr_atm_full_fetch_add(&workqueue->state, -2)) {
case 3: // had one count, one unorphaned --> done, unorphaned
break;
case 2: // had one count, one orphaned --> done, orphaned
workqueue_destroy(exec_ctx, workqueue);
break;
case 1:
case 0:
// these values are illegal - representing an already done or
// deleted workqueue
GPR_UNREACHABLE_CODE(break);
default:
// schedule a wakeup since there's more to do
wakeup(exec_ctx, workqueue);
}
grpc_closure *cl = (grpc_closure *)n;
grpc_error *clerr = cl->error;
cl->cb(exec_ctx, cl->cb_arg, clerr);
GRPC_ERROR_UNREF(clerr);
}
}
GPR_TIMER_END("workqueue.on_readable", 0);
}
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
grpc_closure *closure, grpc_error *error) {
GPR_TIMER_BEGIN("workqueue.enqueue", 0);
gpr_atm last = gpr_atm_full_fetch_add(&workqueue->state, 2);
GPR_ASSERT(last & 1);
closure->error = error;
gpr_mpscq_push(&workqueue->queue, &closure->next_data.atm_next);
if (last == 1) {
wakeup(exec_ctx, workqueue);
}
GPR_TIMER_END("workqueue.enqueue", 0);
}
#endif /* GPR_POSIX_SOCKET */

@ -43,12 +43,16 @@
// workqueues.
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
const char *reason) {}
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
int line, const char *reason) {
return workqueue;
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
void grpc_workqueue_ref(grpc_workqueue *workqueue) {}
grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
return workqueue;
}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif

@ -83,6 +83,7 @@ static int g_shutdown;
static gpr_thd_id g_writing_thread;
static __thread int g_thread_id;
static int g_next_thread_id;
static int g_writing_enabled = 1;
static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
if (list->head == NULL) {
@ -177,7 +178,7 @@ static void flush_logs(gpr_timer_log_list *list) {
}
}
static void finish_writing() {
static void finish_writing(void) {
pthread_mutex_lock(&g_mu);
g_shutdown = 1;
pthread_cond_signal(&g_cv);
@ -230,6 +231,10 @@ static void gpr_timers_log_add(const char *tagstr, marker_type type,
int important, const char *file, int line) {
gpr_timer_entry *entry;
if (!g_writing_enabled) {
return;
}
if (g_thread_log == NULL || g_thread_log->num_entries == MAX_COUNT) {
rotate_log();
}
@ -261,6 +266,8 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
gpr_timers_log_add(tagstr, END, important, file, line);
}
void gpr_timer_set_enabled(int enabled) { g_writing_enabled = enabled; }
/* Basic profiler specific API functions. */
void gpr_timers_global_init(void) {}
@ -272,4 +279,6 @@ void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
void gpr_timers_set_log_filename(const char *filename) {}
void gpr_timer_set_enabled(int enabled) {}
#endif /* GRPC_BASIC_PROFILER */

@ -50,6 +50,8 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
void gpr_timers_set_log_filename(const char *filename);
void gpr_timer_set_enabled(int enabled);
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \

@ -60,8 +60,9 @@ const char *gpr_log_severity_string(gpr_log_severity severity) {
void gpr_log_message(const char *file, int line, gpr_log_severity severity,
const char *message) {
if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print))
if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print)) {
return;
}
gpr_log_func_args lfargs;
memset(&lfargs, 0, sizeof(lfargs));
@ -82,11 +83,11 @@ void gpr_log_verbosity_init() {
gpr_atm min_severity_to_print = GPR_LOG_SEVERITY_ERROR;
if (verbosity != NULL) {
if (strcmp(verbosity, "DEBUG") == 0) {
if (gpr_stricmp(verbosity, "DEBUG") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_DEBUG;
} else if (strcmp(verbosity, "INFO") == 0) {
} else if (gpr_stricmp(verbosity, "INFO") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_INFO;
} else if (strcmp(verbosity, "ERROR") == 0) {
} else if (gpr_stricmp(verbosity, "ERROR") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_ERROR;
}
gpr_free(verbosity);

@ -304,3 +304,14 @@ void gpr_strvec_add(gpr_strvec *sv, char *str) {
char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) {
return gpr_strjoin((const char **)sv->strs, sv->count, final_length);
}
int gpr_stricmp(const char *a, const char *b) {
int ca, cb;
do {
ca = tolower(*a);
cb = tolower(*b);
++a;
++b;
} while (ca == cb && ca && cb);
return ca - cb;
}
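/* Editor's sketch (not part of this change): expected behaviour of
   gpr_stricmp; 'gpr_stricmp_example' is hypothetical (GPR_ASSERT comes from
   <grpc/support/log.h>). This matches its use for the case-insensitive
   GRPC_VERBOSITY comparison elsewhere in this diff. */
static void gpr_stricmp_example(void) {
  GPR_ASSERT(gpr_stricmp("DEBUG", "debug") == 0); /* case is ignored */
  GPR_ASSERT(gpr_stricmp("INFO", "Info") == 0);
  GPR_ASSERT(gpr_stricmp("error", "errors") < 0); /* a prefix compares less */
}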

@ -118,6 +118,10 @@ void gpr_strvec_add(gpr_strvec *strs, char *add);
total_length as per gpr_strjoin */
char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length);
/** Case insensitive string comparison... return <0 if lower(a)<lower(b), ==0 if
lower(a)==lower(b), >0 if lower(a)>lower(b) */
int gpr_stricmp(const char *a, const char *b);
#ifdef __cplusplus
}
#endif

@ -223,33 +223,37 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error);
grpc_call *grpc_call_create(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative,
const void *server_transport_data, grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count, gpr_timespec send_deadline) {
grpc_error *grpc_call_create(const grpc_call_create_args *args,
grpc_call **out_call) {
size_t i, j;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_channel_stack *channel_stack =
grpc_channel_get_channel_stack(args->channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
*out_call = call;
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
call->channel = channel;
call->cq = cq;
call->parent = parent_call;
call->channel = args->channel;
call->cq = args->cq;
call->parent = args->parent_call;
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
call->is_client = server_transport_data == NULL;
call->is_client = args->server_transport_data == NULL;
grpc_mdstr *path = NULL;
if (call->is_client) {
GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
for (i = 0; i < add_initial_metadata_count; i++) {
call->send_extra_metadata[i].md = add_initial_metadata[i];
GPR_ASSERT(args->add_initial_metadata_count <
MAX_SEND_EXTRA_METADATA_COUNT);
for (i = 0; i < args->add_initial_metadata_count; i++) {
call->send_extra_metadata[i].md = args->add_initial_metadata[i];
if (args->add_initial_metadata[i]->key == GRPC_MDSTR_PATH) {
path = GRPC_MDSTR_REF(args->add_initial_metadata[i]->value);
}
}
call->send_extra_metadata_count = (int)add_initial_metadata_count;
call->send_extra_metadata_count = (int)args->add_initial_metadata_count;
} else {
GPR_ASSERT(add_initial_metadata_count == 0);
GPR_ASSERT(args->add_initial_metadata_count == 0);
call->send_extra_metadata_count = 0;
}
for (i = 0; i < 2; i++) {
@ -257,84 +261,87 @@ grpc_call *grpc_call_create(
call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
}
}
send_deadline = gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC);
gpr_timespec send_deadline =
gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC);
if (parent_call != NULL) {
GRPC_CALL_INTERNAL_REF(parent_call, "child");
if (args->parent_call != NULL) {
GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
GPR_ASSERT(call->is_client);
GPR_ASSERT(!parent_call->is_client);
GPR_ASSERT(!args->parent_call->is_client);
gpr_mu_lock(&parent_call->mu);
gpr_mu_lock(&args->parent_call->mu);
if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
gpr_convert_clock_type(send_deadline,
parent_call->send_deadline.clock_type),
parent_call->send_deadline);
args->parent_call->send_deadline.clock_type),
args->parent_call->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
/* TODO(ctiller): This should change to use the appropriate census start_op
* call. */
if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
parent_call->context[GRPC_CONTEXT_TRACING].value,
NULL);
if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
grpc_call_context_set(
call, GRPC_CONTEXT_TRACING,
args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
} else {
GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
}
if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
}
if (parent_call->first_child == NULL) {
parent_call->first_child = call;
if (args->parent_call->first_child == NULL) {
args->parent_call->first_child = call;
call->sibling_next = call->sibling_prev = call;
} else {
call->sibling_next = parent_call->first_child;
call->sibling_prev = parent_call->first_child->sibling_prev;
call->sibling_next = args->parent_call->first_child;
call->sibling_prev = args->parent_call->first_child->sibling_prev;
call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
call;
}
gpr_mu_unlock(&parent_call->mu);
gpr_mu_unlock(&args->parent_call->mu);
}
call->send_deadline = send_deadline;
GRPC_CHANNEL_INTERNAL_REF(channel, "call");
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
grpc_error *error = grpc_call_stack_init(
&exec_ctx, channel_stack, 1, destroy_call, call, call->context,
server_transport_data, send_deadline, CALL_STACK_FROM_CALL(call));
grpc_error *error =
grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
call->context, args->server_transport_data, path,
send_deadline, CALL_STACK_FROM_CALL(call));
if (error != GRPC_ERROR_NONE) {
grpc_status_code status;
const char *error_str;
grpc_error_get_status(error, &status, &error_str);
close_with_status(&exec_ctx, call, status, error_str);
GRPC_ERROR_UNREF(error);
}
if (cq != NULL) {
if (args->cq != NULL) {
GPR_ASSERT(
pollset_set_alternative == NULL &&
args->pollset_set_alternative == NULL &&
"Only one of 'cq' and 'pollset_set_alternative' should be non-NULL.");
GRPC_CQ_INTERNAL_REF(cq, "bind");
GRPC_CQ_INTERNAL_REF(args->cq, "bind");
call->pollent =
grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq));
grpc_polling_entity_create_from_pollset(grpc_cq_pollset(args->cq));
}
if (pollset_set_alternative != NULL) {
call->pollent =
grpc_polling_entity_create_from_pollset_set(pollset_set_alternative);
if (args->pollset_set_alternative != NULL) {
call->pollent = grpc_polling_entity_create_from_pollset_set(
args->pollset_set_alternative);
}
if (!grpc_polling_entity_is_empty(&call->pollent)) {
grpc_call_stack_set_pollset_or_pollset_set(
&exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
if (path != NULL) GRPC_MDSTR_UNREF(path);
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_create", 0);
return call;
return error;
}
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
@ -1038,9 +1045,14 @@ static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
static void post_batch_completion(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
grpc_error *error = bctl->error;
if (bctl->recv_final_op) {
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
grpc_exec_ctx_sched(exec_ctx, bctl->notify_tag, bctl->error, NULL);
grpc_closure_run(exec_ctx, bctl->notify_tag, error);
gpr_mu_lock(&call->mu);
bctl->call->used_batches =
(uint8_t)(bctl->call->used_batches &
@ -1049,7 +1061,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, bctl->error,
grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, error,
finish_batch_completion, bctl, &bctl->cq_completion);
}
}
@ -1198,6 +1210,14 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
}
}
static void add_batch_error(batch_control *bctl, grpc_error *error) {
if (error == GRPC_ERROR_NONE) return;
if (bctl->error == GRPC_ERROR_NONE) {
bctl->error = GRPC_ERROR_CREATE("Call batch operation failed");
}
bctl->error = grpc_error_add_child(bctl->error, error);
}
static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *bctlp, grpc_error *error) {
batch_control *bctl = bctlp;
@ -1205,9 +1225,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&call->mu);
if (error != GRPC_ERROR_NONE) {
bctl->error = GRPC_ERROR_REF(error);
} else {
add_batch_error(bctl, GRPC_ERROR_REF(error));
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
grpc_metadata_batch_filter(md, recv_initial_filter, call);
@ -1304,8 +1323,7 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
GRPC_ERROR_UNREF(bctl->error);
bctl->error = GRPC_ERROR_REF(error);
add_batch_error(bctl, GRPC_ERROR_REF(error));
gpr_mu_unlock(&call->mu);
if (gpr_unref(&bctl->steps_to_complete)) {
post_batch_completion(exec_ctx, bctl);
@ -1341,6 +1359,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *stream_op = &bctl->op;
memset(stream_op, 0, sizeof(*stream_op));
stream_op->covered_by_poller = true;
if (nops == 0) {
GRPC_CALL_INTERNAL_REF(call, "completion");

@ -49,15 +49,29 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *user_data);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask,
grpc_completion_queue *cq,
/* if not NULL, it'll be used in lieu of \a cq */
grpc_pollset_set *pollset_set_alternative,
const void *server_transport_data,
grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count,
gpr_timespec send_deadline);
typedef struct grpc_call_create_args {
grpc_channel *channel;
grpc_call *parent_call;
uint32_t propagation_mask;
grpc_completion_queue *cq;
/* if not NULL, it'll be used in lieu of cq */
grpc_pollset_set *pollset_set_alternative;
const void *server_transport_data;
grpc_mdelem **add_initial_metadata;
size_t add_initial_metadata_count;
gpr_timespec send_deadline;
} grpc_call_create_args;
/* Create a new call based on \a args.
Regardless of success or failure, *call is always set to a valid new call
object. */
grpc_error *grpc_call_create(const grpc_call_create_args *args,
grpc_call **call);
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);

@ -193,9 +193,21 @@ static grpc_call *grpc_channel_create_call_internal(
send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority);
}
return grpc_call_create(channel, parent_call, propagation_mask, cq,
pollset_set_alternative, NULL, send_metadata,
num_metadata, deadline);
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = channel;
args.parent_call = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
args.pollset_set_alternative = pollset_set_alternative;
args.server_transport_data = NULL;
args.add_initial_metadata = send_metadata;
args.add_initial_metadata_count = num_metadata;
args.send_deadline = deadline;
grpc_call *call;
GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call));
return call;
}
grpc_call *grpc_channel_create_call(grpc_channel *channel,

@ -39,6 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/pollset.h"
@ -50,6 +51,9 @@
#include "src/core/lib/surface/event_string.h"
int grpc_trace_operation_failures;
#ifndef NDEBUG
int grpc_trace_pending_tags;
#endif
typedef struct {
grpc_pollset_worker **worker;
@ -67,6 +71,9 @@ struct grpc_completion_queue {
gpr_refcount pending_events;
/** Once owning_refs drops to zero, we will destroy the cq */
gpr_refcount owning_refs;
/** counter of how many things have ever been queued on this completion queue;
useful for avoiding taking locks just to check the queue */
gpr_atm things_queued_ever;
/** 0 initially, 1 once we've begun shutting down */
int shutdown;
int shutdown_called;
@ -121,15 +128,6 @@ void grpc_cq_global_shutdown(void) {
}
}
struct grpc_cq_alarm {
grpc_timer alarm;
grpc_cq_completion completion;
/** completion queue where events about this alarm will be posted */
grpc_completion_queue *cq;
/** user supplied tag */
void *tag;
};
grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
grpc_completion_queue *cc;
GPR_ASSERT(!reserved);
@ -166,6 +164,7 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
cc->is_server_cq = 0;
cc->is_non_listening_server_cq = 0;
cc->num_pluckers = 0;
gpr_atm_no_barrier_store(&cc->things_queued_ever, 0);
#ifndef NDEBUG
cc->outstanding_tag_count = 0;
#endif
@ -276,6 +275,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GPR_ASSERT(found);
#endif
shutdown = gpr_unref(&cc->pending_events);
gpr_atm_no_barrier_fetch_add(&cc->things_queued_ever, 1);
if (!shutdown) {
cc->completed_tail->next =
((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
@ -313,13 +313,66 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GRPC_ERROR_UNREF(error);
}
typedef struct {
gpr_atm last_seen_things_queued_ever;
grpc_completion_queue *cq;
gpr_timespec deadline;
grpc_cq_completion *stolen_completion;
void *tag; /* for pluck */
bool first_loop;
} cq_is_finished_arg;
static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = arg;
grpc_completion_queue *cq = a->cq;
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cq->things_queued_ever);
if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
gpr_mu_lock(cq->mu);
a->last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cq->things_queued_ever);
if (cq->completed_tail != &cq->completed_head) {
a->stolen_completion = (grpc_cq_completion *)cq->completed_head.next;
cq->completed_head.next = a->stolen_completion->next & ~(uintptr_t)1;
if (a->stolen_completion == cq->completed_tail) {
cq->completed_tail = &cq->completed_head;
}
gpr_mu_unlock(cq->mu);
return true;
}
gpr_mu_unlock(cq->mu);
}
return !a->first_loop &&
gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
}
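/* Editor's note (not part of this change): this is the finish-check installed
   on the exec_ctx by grpc_completion_queue_next below. While the polling
   thread runs closures, it compares the cheap atomic things_queued_ever
   counter against the value cached at entry and only takes cq->mu, to try to
   steal a completion, when the counter has actually moved. */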
#ifndef NDEBUG
static void dump_pending_tags(grpc_completion_queue *cc) {
if (!grpc_trace_pending_tags) return;
gpr_strvec v;
gpr_strvec_init(&v);
gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:"));
for (size_t i = 0; i < cc->outstanding_tag_count; i++) {
char *s;
gpr_asprintf(&s, " %p", cc->outstanding_tags[i]);
gpr_strvec_add(&v, s);
}
char *out = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
gpr_log(GPR_DEBUG, "%s", out);
gpr_free(out);
}
#else
static void dump_pending_tags(grpc_completion_queue *cc) {}
#endif
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
grpc_pollset_worker *worker = NULL;
int first_loop = 1;
gpr_timespec now;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@ -333,11 +386,33 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
reserved));
GPR_ASSERT(!reserved);
dump_pending_tags(cc);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "next");
gpr_mu_lock(cc->mu);
cq_is_finished_arg is_finished_arg = {
.last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cc->things_queued_ever),
.cq = cc,
.deadline = deadline,
.stolen_completion = NULL,
.tag = NULL,
.first_loop = true};
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
cq_is_next_finished, &is_finished_arg);
for (;;) {
if (is_finished_arg.stolen_completion != NULL) {
gpr_mu_unlock(cc->mu);
grpc_cq_completion *c = is_finished_arg.stolen_completion;
is_finished_arg.stolen_completion = NULL;
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
c->done(&exec_ctx, c->done_arg, c);
break;
}
if (cc->completed_tail != &cc->completed_head) {
grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
cc->completed_head.next = c->next & ~(uintptr_t)1;
@ -358,13 +433,13 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
first_loop = 0;
/* Check alarms - these are a global resource so we just ping
each time through on every pollset.
May update deadline to ensure timely wakeups.
@ -387,13 +462,16 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
}
is_finished_arg.first_loop = false;
}
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_next", 0);
@ -424,6 +502,37 @@ static void del_plucker(grpc_completion_queue *cc, void *tag,
GPR_UNREACHABLE_CODE(return );
}
static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = arg;
grpc_completion_queue *cq = a->cq;
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cq->things_queued_ever);
if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
gpr_mu_lock(cq->mu);
a->last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cq->things_queued_ever);
grpc_cq_completion *c;
grpc_cq_completion *prev = &cq->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
&cq->completed_head) {
if (c->tag == a->tag) {
prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
if (c == cq->completed_tail) {
cq->completed_tail = prev;
}
gpr_mu_unlock(cq->mu);
a->stolen_completion = c;
return true;
}
prev = c;
}
gpr_mu_unlock(cq->mu);
}
return !a->first_loop &&
gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
}
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
@ -431,8 +540,6 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
int first_loop = 1;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@ -448,11 +555,33 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
}
GPR_ASSERT(!reserved);
dump_pending_tags(cc);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "pluck");
gpr_mu_lock(cc->mu);
cq_is_finished_arg is_finished_arg = {
.last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cc->things_queued_ever),
.cq = cc,
.deadline = deadline,
.stolen_completion = NULL,
.tag = tag,
.first_loop = true};
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
cq_is_pluck_finished, &is_finished_arg);
for (;;) {
if (is_finished_arg.stolen_completion != NULL) {
gpr_mu_unlock(cc->mu);
c = is_finished_arg.stolen_completion;
is_finished_arg.stolen_completion = NULL;
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
c->done(&exec_ctx, c->done_arg, c);
break;
}
prev = &cc->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
&cc->completed_head) {
@ -485,17 +614,18 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
memset(&ret, 0, sizeof(ret));
/* TODO(ctiller): should we use a different result here */
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
del_plucker(cc, tag, &worker);
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
first_loop = 0;
/* Check alarms - these are a global resource so we just ping
each time through on every pollset.
May update deadline to ensure timely wakeups.
@ -518,15 +648,18 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cc);
break;
}
}
is_finished_arg.first_loop = false;
del_plucker(cc, tag, &worker);
}
done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_pluck", 0);

@ -44,6 +44,9 @@
extern int grpc_cq_pluck_trace;
extern int grpc_cq_event_timeout_trace;
extern int grpc_trace_operation_failures;
#ifndef NDEBUG
extern int grpc_trace_pending_tags;
#endif
typedef struct grpc_cq_completion {
/** user supplied tag */

@ -184,12 +184,16 @@ void grpc_init(void) {
grpc_register_tracer("compression", &grpc_compression_trace);
grpc_register_tracer("queue_pluck", &grpc_cq_pluck_trace);
grpc_register_tracer("combiner", &grpc_combiner_trace);
grpc_register_tracer("server_channel", &grpc_server_channel_trace);
// Default pluck trace to 1
grpc_cq_pluck_trace = 1;
grpc_register_tracer("queue_timeout", &grpc_cq_event_timeout_trace);
// Default timeout trace to 1
grpc_cq_event_timeout_trace = 1;
grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
#ifndef NDEBUG
grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
#endif
grpc_security_pre_init();
grpc_iomgr_init();
grpc_executor_init();

@ -71,6 +71,8 @@ typedef struct registered_method registered_method;
typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
int grpc_server_channel_trace = 0;
typedef struct requested_call {
requested_call_type type;
size_t cq_idx;
@ -280,6 +282,7 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
grpc_channel_element *elem;
op->send_goaway = send_goaway;
op->set_accept_stream = true;
sc->slice = gpr_slice_from_copied_string("Server shutdown");
op->goaway_message = &sc->slice;
op->goaway_status = GRPC_STATUS_OK;
@ -439,6 +442,13 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
chand->finish_destroy_channel_closure.cb_arg = chand;
if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Disconnected client: %s", msg);
grpc_error_free_string(msg);
}
GRPC_ERROR_UNREF(error);
grpc_transport_op *op =
grpc_make_transport_op(&chand->finish_destroy_channel_closure);
op->set_accept_stream = true;
@ -446,13 +456,6 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_channel_stack_element(
grpc_channel_get_channel_stack(chand->channel), 0),
op);
if (error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Disconnected client: %s", msg);
grpc_error_free_string(msg);
}
GRPC_ERROR_UNREF(error);
}
static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
@ -773,8 +776,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_ERROR_CREATE_REFERENCING("Missing :authority or :path", &error, 1);
}
grpc_exec_ctx_sched(exec_ctx, calld->on_done_recv_initial_metadata, error,
NULL);
grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error);
}
static void server_mutate_op(grpc_call_element *elem,
@ -829,11 +831,20 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
const void *transport_server_data) {
channel_data *chand = cd;
/* create a call */
grpc_call *call = grpc_call_create(chand->channel, NULL, 0, NULL, NULL,
transport_server_data, NULL, 0,
gpr_inf_future(GPR_CLOCK_MONOTONIC));
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = chand->channel;
args.server_transport_data = transport_server_data;
args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
grpc_call *call;
grpc_error *error = grpc_call_create(&args, &call);
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
if (error != GRPC_ERROR_NONE) {
got_initial_metadata(exec_ctx, elem, error);
GRPC_ERROR_UNREF(error);
return;
}
call_data *calld = elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));

@ -40,6 +40,9 @@
extern const grpc_channel_filter grpc_server_top_filter;
/** Lightweight tracing of server channel state */
extern int grpc_server_channel_trace;
/* Add a listener to the server: when the server starts, it will call start,
and when it shuts down, it will call destroy */
void grpc_server_add_listener(

@ -180,7 +180,8 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
*w->current = tracker->current_state;
tracker->watchers = w->next;
if (grpc_connectivity_state_trace) {
gpr_log(GPR_DEBUG, "NOTIFY: %p", w->notify);
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
w->notify);
}
grpc_exec_ctx_sched(exec_ctx, w->notify,
GRPC_ERROR_REF(tracker->current_error), NULL);

@ -0,0 +1,142 @@
//
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "src/core/lib/transport/mdstr_hash_table.h"
#include <stdbool.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/transport/metadata.h"
struct grpc_mdstr_hash_table {
gpr_refcount refs;
size_t num_entries;
grpc_mdstr_hash_table_entry* entries;
};
// Helper function for insert and get operations that performs quadratic
// probing (https://en.wikipedia.org/wiki/Quadratic_probing).
static size_t grpc_mdstr_hash_table_find_index(
const grpc_mdstr_hash_table* table, const grpc_mdstr* key,
bool find_empty) {
for (size_t i = 0; i < table->num_entries; ++i) {
const size_t idx = (key->hash + i * i) % table->num_entries;
if (table->entries[idx].key == NULL)
return find_empty ? idx : table->num_entries;
if (table->entries[idx].key == key) return idx;
}
return table->num_entries; // Not found.
}
static void grpc_mdstr_hash_table_add(
grpc_mdstr_hash_table* table, grpc_mdstr* key, void* value,
const grpc_mdstr_hash_table_vtable* vtable) {
GPR_ASSERT(value != NULL);
const size_t idx =
grpc_mdstr_hash_table_find_index(table, key, true /* find_empty */);
GPR_ASSERT(idx != table->num_entries); // Table should never be full.
grpc_mdstr_hash_table_entry* entry = &table->entries[idx];
entry->key = GRPC_MDSTR_REF(key);
entry->value = vtable->copy_value(value);
entry->vtable = vtable;
}
grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
size_t num_entries, grpc_mdstr_hash_table_entry* entries) {
grpc_mdstr_hash_table* table = gpr_malloc(sizeof(*table));
memset(table, 0, sizeof(*table));
gpr_ref_init(&table->refs, 1);
// Quadratic probing gets best performance when the table is no more
// than half full.
table->num_entries = num_entries * 2;
const size_t entry_size =
sizeof(grpc_mdstr_hash_table_entry) * table->num_entries;
table->entries = gpr_malloc(entry_size);
memset(table->entries, 0, entry_size);
for (size_t i = 0; i < num_entries; ++i) {
grpc_mdstr_hash_table_entry* entry = &entries[i];
grpc_mdstr_hash_table_add(table, entry->key, entry->value, entry->vtable);
}
return table;
}
grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table) {
if (table != NULL) gpr_ref(&table->refs);
return table;
}
int grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table) {
if (table != NULL && gpr_unref(&table->refs)) {
for (size_t i = 0; i < table->num_entries; ++i) {
grpc_mdstr_hash_table_entry* entry = &table->entries[i];
if (entry->key != NULL) {
GRPC_MDSTR_UNREF(entry->key);
entry->vtable->destroy_value(entry->value);
}
}
gpr_free(table->entries);
gpr_free(table);
return 1;
}
return 0;
}
void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
const grpc_mdstr* key) {
const size_t idx =
grpc_mdstr_hash_table_find_index(table, key, false /* find_empty */);
if (idx == table->num_entries) return NULL; // Not found.
return table->entries[idx].value;
}
int grpc_mdstr_hash_table_cmp(const grpc_mdstr_hash_table* table1,
const grpc_mdstr_hash_table* table2) {
// Compare by num_entries.
if (table1->num_entries < table2->num_entries) return -1;
if (table1->num_entries > table2->num_entries) return 1;
for (size_t i = 0; i < table1->num_entries; ++i) {
grpc_mdstr_hash_table_entry* e1 = &table1->entries[i];
grpc_mdstr_hash_table_entry* e2 = &table2->entries[i];
// Compare keys by hash value.
if (e1->key->hash < e2->key->hash) return -1;
if (e1->key->hash > e2->key->hash) return 1;
// Compare by vtable (pointer equality).
if (e1->vtable < e2->vtable) return -1;
if (e1->vtable > e2->vtable) return 1;
// Compare values via vtable.
const int value_result = e1->vtable->compare_value(e1->value, e2->value);
if (value_result != 0) return value_result;
}
return 0;
}

@ -0,0 +1,83 @@
/*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H
#define GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H
#include "src/core/lib/transport/metadata.h"
/** Hash table implementation.
*
* This implementation uses open addressing
* (https://en.wikipedia.org/wiki/Open_addressing) with quadratic
* probing (https://en.wikipedia.org/wiki/Quadratic_probing).
*
* The keys are \a grpc_mdstr objects. The values are arbitrary pointers
* with a common vtable.
*
* Hash tables are intentionally immutable, to avoid the need for locking.
*/
typedef struct grpc_mdstr_hash_table grpc_mdstr_hash_table;
typedef struct grpc_mdstr_hash_table_vtable {
void (*destroy_value)(void* value);
void* (*copy_value)(void* value);
int (*compare_value)(void* value1, void* value2);
} grpc_mdstr_hash_table_vtable;
typedef struct grpc_mdstr_hash_table_entry {
grpc_mdstr* key;
void* value; /* Must not be NULL. */
const grpc_mdstr_hash_table_vtable* vtable;
} grpc_mdstr_hash_table_entry;
/** Creates a new hash table containing \a entries, which is an array
of length \a num_entries.
Creates its own copy of all keys and values from \a entries. */
grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
size_t num_entries, grpc_mdstr_hash_table_entry* entries);
grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table);
/** Returns 1 when \a table is destroyed. */
int grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table);
/** Returns the value from \a table associated with \a key.
Returns NULL if \a key is not found. */
void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
const grpc_mdstr* key);
/** Compares two hash tables.
The comparison defines a deterministic but otherwise arbitrary order. */
int grpc_mdstr_hash_table_cmp(const grpc_mdstr_hash_table* table1,
const grpc_mdstr_hash_table* table2);
#endif /* GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H */
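/* Illustrative usage sketch, not part of this commit: builds a one-entry
   table keyed by an interned metadata string. grpc_mdstr_from_string() and
   GRPC_MDSTR_UNREF() come from metadata.h and assume grpc_init() has run;
   the int-valued vtable callbacks are hypothetical helpers written only for
   this example. */
static void* example_copy_int(void* v) {
  int* copy = gpr_malloc(sizeof(int)); /* <grpc/support/alloc.h> */
  *copy = *(int*)v;
  return copy;
}
static void example_destroy_int(void* v) { gpr_free(v); }
static int example_cmp_int(void* a, void* b) { return *(int*)a - *(int*)b; }
static const grpc_mdstr_hash_table_vtable example_vtable = {
    example_destroy_int, example_copy_int, example_cmp_int};

static void example_usage(void) {
  int value = 42;
  grpc_mdstr_hash_table_entry entry = {grpc_mdstr_from_string("/svc/method"),
                                       &value, &example_vtable};
  grpc_mdstr_hash_table* table = grpc_mdstr_hash_table_create(1, &entry);
  /* create() takes its own refs/copies, so the caller drops its key ref. */
  GRPC_MDSTR_UNREF(entry.key);
  /* mdstrs are interned, so looking up an equal string finds the entry. */
  grpc_mdstr* lookup = grpc_mdstr_from_string("/svc/method");
  int* found = grpc_mdstr_hash_table_get(table, lookup);
  GPR_ASSERT(found != NULL && *found == 42); /* <grpc/support/log.h> */
  GRPC_MDSTR_UNREF(lookup);
  grpc_mdstr_hash_table_unref(table);
}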

@ -46,8 +46,9 @@
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "%s %p:%p REF %d->%d %s", refcount->object_type,
refcount, refcount->destroy.cb_arg, (int)val, (int)val + 1, reason);
gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
refcount->object_type, refcount, refcount->destroy.cb_arg, val,
val + 1, reason);
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
@ -58,8 +59,9 @@ void grpc_stream_ref(grpc_stream_refcount *refcount) {
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "%s %p:%p UNREF %d->%d %s", refcount->object_type,
refcount, refcount->destroy.cb_arg, (int)val, (int)val - 1, reason);
gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
refcount->object_type, refcount, refcount->destroy.cb_arg, val,
val - 1, reason);
#else
void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
@ -274,3 +276,28 @@ grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
op->op.on_consumed = &op->outer_on_complete;
return &op->op;
}
typedef struct {
grpc_closure outer_on_complete;
grpc_closure *inner_on_complete;
grpc_transport_stream_op op;
} made_transport_stream_op;
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
made_transport_stream_op *op = arg;
grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error),
NULL);
gpr_free(op);
}
grpc_transport_stream_op *grpc_make_transport_stream_op(
grpc_closure *on_complete) {
made_transport_stream_op *op = gpr_malloc(sizeof(*op));
grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
op);
op->inner_on_complete = on_complete;
memset(&op->op, 0, sizeof(op->op));
op->op.on_complete = &op->outer_on_complete;
return &op->op;
}

@ -113,6 +113,10 @@ typedef struct grpc_transport_stream_op {
have been completed. */
grpc_closure *on_complete;
/** Is the completion of this op covered by a poller? (If false, the op should
      complete independently of any pollset being polled.) */
bool covered_by_poller;
/** Send initial metadata to the peer, from the provided metadata batch.
idempotent_request MUST be set if this is non-null */
grpc_metadata_batch *send_initial_metadata;
@ -252,6 +256,7 @@ void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
gpr_slice *optional_message);
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
char *grpc_transport_op_string(grpc_transport_op *op);
/* Send a batch of operations on a transport
@ -293,6 +298,10 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
/* Allocate a grpc_transport_op whose on_consumed closure is preconfigured to
   run \a on_consumed and then free the returned transport op */
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed);
/* Allocate a grpc_transport_stream_op whose on_complete closure is
   preconfigured to run \a on_consumed and then free the returned transport
   stream op */
grpc_transport_stream_op *grpc_make_transport_stream_op(
grpc_closure *on_consumed);
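/* Illustrative sketch, not part of this commit: the op returned by
   grpc_make_transport_stream_op() frees itself after running its on_complete
   closure, so a caller only chains in the closure it cares about.
   start_example_cancel() is a hypothetical helper; grpc_closure_create() and
   GRPC_ERROR_CREATE() are existing core APIs. */
static void example_cancel_done(grpc_exec_ctx *exec_ctx, void *arg,
                                grpc_error *error) {
  const char *msg = grpc_error_string(error);
  gpr_log(GPR_DEBUG, "cancel completed: %s", msg);
  grpc_error_free_string(msg);
}

static void start_example_cancel(grpc_exec_ctx *exec_ctx,
                                 grpc_call_element *elem) {
  grpc_transport_stream_op *op = grpc_make_transport_stream_op(
      grpc_closure_create(example_cancel_done, NULL));
  op->cancel_error = GRPC_ERROR_CREATE("example cancellation");
  elem->filter->start_transport_stream_op(exec_ctx, elem, op);
}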
#ifdef __cplusplus
}

@ -41,6 +41,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/connectivity_state.h"
/* These routines are here to facilitate debugging - they produce string
representations of various transport data structures */
@ -72,56 +73,51 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
char *tmp;
char *out;
int first = 1;
gpr_strvec b;
gpr_strvec_init(&b);
gpr_strvec_add(
&b, gpr_strdup(op->covered_by_poller ? "[COVERED]" : "[UNCOVERED]"));
if (op->send_initial_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));
put_metadata_list(&b, *op->send_initial_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
if (op->send_message != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d",
op->send_message->flags, op->send_message->length);
gpr_strvec_add(&b, tmp);
}
if (op->send_trailing_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_TRAILING_METADATA{"));
put_metadata_list(&b, *op->send_trailing_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
if (op->recv_initial_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_INITIAL_METADATA"));
}
if (op->recv_message != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
}
if (op->recv_trailing_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_TRAILING_METADATA"));
}
if (op->cancel_error != GRPC_ERROR_NONE) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup(" "));
const char *msg = grpc_error_string(op->cancel_error);
gpr_asprintf(&tmp, "CANCEL:%s", msg);
grpc_error_free_string(msg);
@ -129,8 +125,7 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
}
if (op->close_error != GRPC_ERROR_NONE) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup(" "));
const char *msg = grpc_error_string(op->close_error);
gpr_asprintf(&tmp, "CLOSE:%s", msg);
grpc_error_free_string(msg);
@ -143,6 +138,82 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
return out;
}
char *grpc_transport_op_string(grpc_transport_op *op) {
char *tmp;
char *out;
bool first = true;
gpr_strvec b;
gpr_strvec_init(&b);
if (op->on_connectivity_state_change != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
if (op->connectivity_state != NULL) {
gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:from=%s",
op->on_connectivity_state_change,
grpc_connectivity_state_name(*op->connectivity_state));
gpr_strvec_add(&b, tmp);
} else {
gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:unsubscribe",
op->on_connectivity_state_change);
gpr_strvec_add(&b, tmp);
}
}
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
const char *err = grpc_error_string(op->disconnect_with_error);
gpr_asprintf(&tmp, "DISCONNECT:%s", err);
gpr_strvec_add(&b, tmp);
grpc_error_free_string(err);
}
if (op->send_goaway) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
char *msg = op->goaway_message == NULL
? "null"
: gpr_dump_slice(*op->goaway_message,
GPR_DUMP_ASCII | GPR_DUMP_HEX);
gpr_asprintf(&tmp, "SEND_GOAWAY:status=%d:msg=%s", op->goaway_status, msg);
if (op->goaway_message != NULL) gpr_free(msg);
gpr_strvec_add(&b, tmp);
}
if (op->set_accept_stream) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_asprintf(&tmp, "SET_ACCEPT_STREAM:%p(%p,...)", op->set_accept_stream_fn,
op->set_accept_stream_user_data);
gpr_strvec_add(&b, tmp);
}
if (op->bind_pollset != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET"));
}
if (op->bind_pollset_set != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET_SET"));
}
if (op->send_ping != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
gpr_strvec_add(&b, gpr_strdup("SEND_PING"));
}
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
return out;
}
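/* Illustrative sketch, not part of this commit: the new stringifier is meant
   for ad-hoc debug logging, e.g. via a hypothetical wrapper like this
   (grpc_transport_perform_op() and gpr_free() are existing core APIs). */
static void log_and_perform_op(grpc_exec_ctx *exec_ctx, grpc_transport *t,
                               grpc_transport_op *op) {
  char *str = grpc_transport_op_string(op);
  gpr_log(GPR_DEBUG, "perform_transport_op: %s", str);
  gpr_free(str);
  grpc_transport_perform_op(exec_ctx, t, op);
}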
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_transport_stream_op *op) {
char *str = grpc_transport_stream_op_string(op);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,31 +31,22 @@
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H
#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/support/mpscq.h"
struct grpc_fd;
struct grpc_workqueue {
gpr_refcount refs;
gpr_mpscq queue;
// state is:
// lower bit - zero if orphaned
// other bits - number of items enqueued
gpr_atm state;
grpc_wakeup_fd wakeup_fd;
struct grpc_fd *wakeup_read_fd;
grpc_closure read_closure;
};
/** Create a work queue. Returns an error if creation fails. If creation
succeeds, sets *workqueue to point to it. */
grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
grpc_workqueue **workqueue);
#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H */
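/* Illustrative decode of the packed state word above (assumption, not
   library code): the low bit tracks whether the queue is still owned, and the
   remaining bits count enqueued items, so an atomic add of 2 bumps the count
   without disturbing the orphaned flag. */
static bool workqueue_is_orphaned(gpr_atm state) { return (state & 1) == 0; }
static size_t workqueue_item_count(gpr_atm state) {
  return (size_t)(state >> 1);
}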
#include <grpc++/test/server_context_test_spouse.h>
namespace grpc {
namespace testing {
void ServerContextTestSpouse::AddClientMetadata(const grpc::string& key,
const grpc::string& value) {
client_metadata_storage_.insert(
std::pair<grpc::string, grpc::string>(key, value));
ctx_->client_metadata_.clear();
for (auto iter = client_metadata_storage_.begin();
iter != client_metadata_storage_.end(); ++iter) {
ctx_->client_metadata_.insert(std::pair<grpc::string_ref, grpc::string_ref>(
iter->first.c_str(), iter->second.c_str()));
}
}
} // namespace testing
} // namespace grpc

@ -277,20 +277,31 @@ namespace Grpc.Core.Internal
}
}
internal class NoSuchMethodCallHandler : IServerCallHandler
internal class UnimplementedMethodCallHandler : IServerCallHandler
{
public static readonly NoSuchMethodCallHandler Instance = new NoSuchMethodCallHandler();
public static readonly UnimplementedMethodCallHandler Instance = new UnimplementedMethodCallHandler();
public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
DuplexStreamingServerCallHandler<byte[], byte[]> callHandlerImpl;
public UnimplementedMethodCallHandler()
{
// We don't care about the payload type here.
var asyncCall = new AsyncCallServer<byte[], byte[]>(
(payload) => payload, (payload) => payload, newRpc.Server);
asyncCall.Initialize(newRpc.Call, cq);
var finishedTask = asyncCall.ServerSideCallAsync();
await asyncCall.SendStatusFromServerAsync(new Status(StatusCode.Unimplemented, ""), Metadata.Empty, null).ConfigureAwait(false);
await finishedTask.ConfigureAwait(false);
var marshaller = new Marshaller<byte[]>((payload) => payload, (payload) => payload);
var method = new Method<byte[], byte[]>(MethodType.DuplexStreaming, "", "", marshaller, marshaller);
this.callHandlerImpl = new DuplexStreamingServerCallHandler<byte[], byte[]>(method, new DuplexStreamingServerMethod<byte[], byte[]>(UnimplementedMethod));
}
/// <summary>
/// Handler used for an unimplemented method.
/// </summary>
private Task UnimplementedMethod(IAsyncStreamReader<byte[]> requestStream, IServerStreamWriter<byte[]> responseStream, ServerCallContext ctx)
{
ctx.Status = new Status(StatusCode.Unimplemented, "");
return Task.FromResult<object>(null);
}
public Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
return callHandlerImpl.HandleCall(newRpc, cq);
}
}
