Merge pull request #3993 from ctiller/new_op

Core transport & call cleanup

pull/4097/head · David G. Quintas · 9 years ago · commit 75b53d6a5d
Changed files (number of changed lines in parentheses):

  1. BUILD (42)
  2. Makefile (1000)
  3. binding.gyp (7)
  4. build.yaml (32)
  5. gRPC.podspec (21)
  6. include/grpc++/alarm.h (2)
  7. include/grpc/support/port_platform.h (2)
  8. include/grpc/support/slice_buffer.h (5)
  9. src/core/census/grpc_filter.c (69)
  10. src/core/channel/channel_stack.c (50)
  11. src/core/channel/channel_stack.h (58)
  12. src/core/channel/client_channel.c (494)
  13. src/core/channel/client_uchannel.c (355)
  14. src/core/channel/compress_filter.c (293)
  15. src/core/channel/connected_channel.c (29)
  16. src/core/channel/connected_channel.h (2)
  17. src/core/channel/http_client_filter.c (113)
  18. src/core/channel/http_server_filter.c (148)
  19. src/core/channel/noop_filter.c (22)
  20. src/core/channel/subchannel_call_holder.c (283)
  21. src/core/channel/subchannel_call_holder.h (98)
  22. src/core/client_config/lb_policies/pick_first.c (37)
  23. src/core/client_config/lb_policies/round_robin.c (40)
  24. src/core/client_config/lb_policy.c (17)
  25. src/core/client_config/lb_policy.h (19)
  26. src/core/client_config/subchannel.c (148)
  27. src/core/client_config/subchannel.h (34)
  28. src/core/iomgr/closure.c (23)
  29. src/core/iomgr/closure.h (19)
  30. src/core/iomgr/exec_ctx.c (5)
  31. src/core/iomgr/executor.c (13)
  32. src/core/iomgr/pollset.h (5)
  33. src/core/iomgr/pollset_multipoller_with_epoll.c (14)
  34. src/core/iomgr/pollset_multipoller_with_poll_posix.c (4)
  35. src/core/iomgr/pollset_posix.c (88)
  36. src/core/iomgr/pollset_posix.h (9)
  37. src/core/iomgr/pollset_windows.c (12)
  38. src/core/iomgr/workqueue_posix.c (2)
  39. src/core/profiling/basic_timers.c (6)
  40. src/core/security/client_auth_filter.c (109)
  41. src/core/security/credentials.c (3)
  42. src/core/security/credentials.h (2)
  43. src/core/security/security_context.c (6)
  44. src/core/security/server_auth_filter.c (93)
  45. src/core/security/server_secure_chttp2.c (3)
  46. src/core/support/slice_buffer.c (49)
  47. src/core/support/sync_posix.c (5)
  48. src/core/surface/byte_buffer_queue.c (97)
  49. src/core/surface/byte_buffer_queue.h (62)
  50. src/core/surface/call.c (1884)
  51. src/core/surface/call.h (67)
  52. src/core/surface/call_log_batch.c (3)
  53. src/core/surface/call_test_only.h (1)
  54. src/core/surface/completion_queue.c (82)
  55. src/core/surface/completion_queue.h (3)
  56. src/core/surface/init.c (5)
  57. src/core/surface/lame_client.c (74)
  58. src/core/surface/server.c (230)
  59. src/core/transport/byte_stream.c (76)
  60. src/core/transport/byte_stream.h (88)
  61. src/core/transport/chttp2/frame_data.c (99)
  62. src/core/transport/chttp2/frame_data.h (26)
  63. src/core/transport/chttp2/frame_window_update.c (15)
  64. src/core/transport/chttp2/hpack_encoder.c (289)
  65. src/core/transport/chttp2/hpack_encoder.h (22)
  66. src/core/transport/chttp2/hpack_parser.c (16)
  67. src/core/transport/chttp2/incoming_metadata.c (136)
  68. src/core/transport/chttp2/incoming_metadata.h (24)
  69. src/core/transport/chttp2/internal.h (346)
  70. src/core/transport/chttp2/parsing.c (368)
  71. src/core/transport/chttp2/stream_lists.c (112)
  72. src/core/transport/chttp2/writing.c (330)
  73. src/core/transport/chttp2_transport.c (841)
  74. src/core/transport/metadata.c (45)
  75. src/core/transport/metadata_batch.c (169)
  76. src/core/transport/metadata_batch.h (89)
  77. src/core/transport/transport.c (50)
  78. src/core/transport/transport.h (60)
  79. src/core/transport/transport_impl.h (8)
  80. src/core/transport/transport_op_string.c (80)
  81. src/cpp/common/alarm.cc (8)
  82. src/cpp/server/server.cc (2)
  83. test/core/channel/channel_stack_test.c (30)
  84. test/core/client_config/lb_policies_test.c (154)
  85. test/core/end2end/fixtures/proxy.c (6)
  86. test/core/end2end/gen_build_yaml.py (40)
  87. test/core/end2end/tests/cancel_with_status.c (183)
  88. test/core/end2end/tests/max_message_length.c (7)
  89. test/core/end2end/tests/negative_deadline.c (180)
  90. test/core/end2end/tests/request_with_flags.c (2)
  91. test/core/transport/chttp2/hpack_encoder_test.c (200)
  92. test/core/transport/chttp2/stream_encoder_test.c (359)
  93. test/core/transport/stream_op_test.c (116)
  94. test/cpp/end2end/end2end_test.cc (10)
  95. test/cpp/interop/interop_client.cc (2)
  96. tools/doxygen/Doxyfile.core.internal (14)
  97. tools/run_tests/run_tests.py (61)
  98. tools/run_tests/sources_and_headers.json (840)
  99. tools/run_tests/tests.json (856)
  100. vsprojects/buildtests_c.sln (1183)

Some files were not shown because too many files have changed in this diff.

42
BUILD

@@ -160,6 +160,7 @@ cc_library(
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/channel/subchannel_call_holder.h",
"src/core/client_config/client_config.h",
"src/core/client_config/connector.h",
"src/core/client_config/lb_policies/pick_first.h",
@@ -226,7 +227,6 @@ cc_library(
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
"src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/call_test_only.h",
"src/core/surface/channel.h",
@@ -235,6 +235,7 @@ cc_library(
"src/core/surface/init.h",
"src/core/surface/server.h",
"src/core/surface/surface_trace.h",
"src/core/transport/byte_stream.h",
"src/core/transport/chttp2/alpn.h",
"src/core/transport/chttp2/bin_encoder.h",
"src/core/transport/chttp2/frame.h",
@@ -244,6 +245,7 @@ cc_library(
"src/core/transport/chttp2/frame_rst_stream.h",
"src/core/transport/chttp2/frame_settings.h",
"src/core/transport/chttp2/frame_window_update.h",
"src/core/transport/chttp2/hpack_encoder.h",
"src/core/transport/chttp2/hpack_parser.h",
"src/core/transport/chttp2/hpack_table.h",
"src/core/transport/chttp2/http2_errors.h",
@@ -251,14 +253,13 @@ cc_library(
"src/core/transport/chttp2/incoming_metadata.h",
"src/core/transport/chttp2/internal.h",
"src/core/transport/chttp2/status_conversion.h",
"src/core/transport/chttp2/stream_encoder.h",
"src/core/transport/chttp2/stream_map.h",
"src/core/transport/chttp2/timeout_encoding.h",
"src/core/transport/chttp2/varint.h",
"src/core/transport/chttp2_transport.h",
"src/core/transport/connectivity_state.h",
"src/core/transport/metadata.h",
"src/core/transport/stream_op.h",
"src/core/transport/metadata_batch.h",
"src/core/transport/transport.h",
"src/core/transport/transport_impl.h",
"src/core/census/aggregation.h",
@@ -296,6 +297,7 @@ cc_library(
"src/core/channel/http_client_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/noop_filter.c",
"src/core/channel/subchannel_call_holder.c",
"src/core/client_config/client_config.c",
"src/core/client_config/connector.c",
"src/core/client_config/lb_policies/pick_first.c",
@@ -365,7 +367,6 @@ cc_library(
"src/core/json/json_writer.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
"src/core/surface/byte_buffer_reader.c",
"src/core/surface/call.c",
"src/core/surface/call_details.c",
@@ -382,6 +383,7 @@ cc_library(
"src/core/surface/server_chttp2.c",
"src/core/surface/server_create.c",
"src/core/surface/version.c",
"src/core/transport/byte_stream.c",
"src/core/transport/chttp2/alpn.c",
"src/core/transport/chttp2/bin_encoder.c",
"src/core/transport/chttp2/frame_data.c",
@@ -390,13 +392,13 @@ cc_library(
"src/core/transport/chttp2/frame_rst_stream.c",
"src/core/transport/chttp2/frame_settings.c",
"src/core/transport/chttp2/frame_window_update.c",
"src/core/transport/chttp2/hpack_encoder.c",
"src/core/transport/chttp2/hpack_parser.c",
"src/core/transport/chttp2/hpack_table.c",
"src/core/transport/chttp2/huffsyms.c",
"src/core/transport/chttp2/incoming_metadata.c",
"src/core/transport/chttp2/parsing.c",
"src/core/transport/chttp2/status_conversion.c",
"src/core/transport/chttp2/stream_encoder.c",
"src/core/transport/chttp2/stream_lists.c",
"src/core/transport/chttp2/stream_map.c",
"src/core/transport/chttp2/timeout_encoding.c",
@@ -405,7 +407,7 @@ cc_library(
"src/core/transport/chttp2_transport.c",
"src/core/transport/connectivity_state.c",
"src/core/transport/metadata.c",
"src/core/transport/stream_op.c",
"src/core/transport/metadata_batch.c",
"src/core/transport/transport.c",
"src/core/transport/transport_op_string.c",
"src/core/census/context.c",
@@ -448,6 +450,7 @@ cc_library(
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/channel/subchannel_call_holder.h",
"src/core/client_config/client_config.h",
"src/core/client_config/connector.h",
"src/core/client_config/lb_policies/pick_first.h",
@@ -514,7 +517,6 @@ cc_library(
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
"src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/call_test_only.h",
"src/core/surface/channel.h",
@@ -523,6 +525,7 @@ cc_library(
"src/core/surface/init.h",
"src/core/surface/server.h",
"src/core/surface/surface_trace.h",
"src/core/transport/byte_stream.h",
"src/core/transport/chttp2/alpn.h",
"src/core/transport/chttp2/bin_encoder.h",
"src/core/transport/chttp2/frame.h",
@@ -532,6 +535,7 @@ cc_library(
"src/core/transport/chttp2/frame_rst_stream.h",
"src/core/transport/chttp2/frame_settings.h",
"src/core/transport/chttp2/frame_window_update.h",
"src/core/transport/chttp2/hpack_encoder.h",
"src/core/transport/chttp2/hpack_parser.h",
"src/core/transport/chttp2/hpack_table.h",
"src/core/transport/chttp2/http2_errors.h",
@@ -539,14 +543,13 @@ cc_library(
"src/core/transport/chttp2/incoming_metadata.h",
"src/core/transport/chttp2/internal.h",
"src/core/transport/chttp2/status_conversion.h",
"src/core/transport/chttp2/stream_encoder.h",
"src/core/transport/chttp2/stream_map.h",
"src/core/transport/chttp2/timeout_encoding.h",
"src/core/transport/chttp2/varint.h",
"src/core/transport/chttp2_transport.h",
"src/core/transport/connectivity_state.h",
"src/core/transport/metadata.h",
"src/core/transport/stream_op.h",
"src/core/transport/metadata_batch.h",
"src/core/transport/transport.h",
"src/core/transport/transport_impl.h",
"src/core/census/aggregation.h",
@@ -564,6 +567,7 @@ cc_library(
"src/core/channel/http_client_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/noop_filter.c",
"src/core/channel/subchannel_call_holder.c",
"src/core/client_config/client_config.c",
"src/core/client_config/connector.c",
"src/core/client_config/lb_policies/pick_first.c",
@@ -633,7 +637,6 @@ cc_library(
"src/core/json/json_writer.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
"src/core/surface/byte_buffer_reader.c",
"src/core/surface/call.c",
"src/core/surface/call_details.c",
@@ -650,6 +653,7 @@ cc_library(
"src/core/surface/server_chttp2.c",
"src/core/surface/server_create.c",
"src/core/surface/version.c",
"src/core/transport/byte_stream.c",
"src/core/transport/chttp2/alpn.c",
"src/core/transport/chttp2/bin_encoder.c",
"src/core/transport/chttp2/frame_data.c",
@@ -658,13 +662,13 @@ cc_library(
"src/core/transport/chttp2/frame_rst_stream.c",
"src/core/transport/chttp2/frame_settings.c",
"src/core/transport/chttp2/frame_window_update.c",
"src/core/transport/chttp2/hpack_encoder.c",
"src/core/transport/chttp2/hpack_parser.c",
"src/core/transport/chttp2/hpack_table.c",
"src/core/transport/chttp2/huffsyms.c",
"src/core/transport/chttp2/incoming_metadata.c",
"src/core/transport/chttp2/parsing.c",
"src/core/transport/chttp2/status_conversion.c",
"src/core/transport/chttp2/stream_encoder.c",
"src/core/transport/chttp2/stream_lists.c",
"src/core/transport/chttp2/stream_map.c",
"src/core/transport/chttp2/timeout_encoding.c",
@@ -673,7 +677,7 @@ cc_library(
"src/core/transport/chttp2_transport.c",
"src/core/transport/connectivity_state.c",
"src/core/transport/metadata.c",
"src/core/transport/stream_op.c",
"src/core/transport/metadata_batch.c",
"src/core/transport/transport.c",
"src/core/transport/transport_op_string.c",
"src/core/census/context.c",
@@ -1092,6 +1096,7 @@ objc_library(
"src/core/channel/http_client_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/noop_filter.c",
"src/core/channel/subchannel_call_holder.c",
"src/core/client_config/client_config.c",
"src/core/client_config/connector.c",
"src/core/client_config/lb_policies/pick_first.c",
@@ -1161,7 +1166,6 @@ objc_library(
"src/core/json/json_writer.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
"src/core/surface/byte_buffer_reader.c",
"src/core/surface/call.c",
"src/core/surface/call_details.c",
@@ -1178,6 +1182,7 @@ objc_library(
"src/core/surface/server_chttp2.c",
"src/core/surface/server_create.c",
"src/core/surface/version.c",
"src/core/transport/byte_stream.c",
"src/core/transport/chttp2/alpn.c",
"src/core/transport/chttp2/bin_encoder.c",
"src/core/transport/chttp2/frame_data.c",
@@ -1186,13 +1191,13 @@ objc_library(
"src/core/transport/chttp2/frame_rst_stream.c",
"src/core/transport/chttp2/frame_settings.c",
"src/core/transport/chttp2/frame_window_update.c",
"src/core/transport/chttp2/hpack_encoder.c",
"src/core/transport/chttp2/hpack_parser.c",
"src/core/transport/chttp2/hpack_table.c",
"src/core/transport/chttp2/huffsyms.c",
"src/core/transport/chttp2/incoming_metadata.c",
"src/core/transport/chttp2/parsing.c",
"src/core/transport/chttp2/status_conversion.c",
"src/core/transport/chttp2/stream_encoder.c",
"src/core/transport/chttp2/stream_lists.c",
"src/core/transport/chttp2/stream_map.c",
"src/core/transport/chttp2/timeout_encoding.c",
@@ -1201,7 +1206,7 @@ objc_library(
"src/core/transport/chttp2_transport.c",
"src/core/transport/connectivity_state.c",
"src/core/transport/metadata.c",
"src/core/transport/stream_op.c",
"src/core/transport/metadata_batch.c",
"src/core/transport/transport.c",
"src/core/transport/transport_op_string.c",
"src/core/census/context.c",
@@ -1241,6 +1246,7 @@ objc_library(
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/channel/subchannel_call_holder.h",
"src/core/client_config/client_config.h",
"src/core/client_config/connector.h",
"src/core/client_config/lb_policies/pick_first.h",
@@ -1307,7 +1313,6 @@ objc_library(
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
"src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/call_test_only.h",
"src/core/surface/channel.h",
@@ -1316,6 +1321,7 @@ objc_library(
"src/core/surface/init.h",
"src/core/surface/server.h",
"src/core/surface/surface_trace.h",
"src/core/transport/byte_stream.h",
"src/core/transport/chttp2/alpn.h",
"src/core/transport/chttp2/bin_encoder.h",
"src/core/transport/chttp2/frame.h",
@@ -1325,6 +1331,7 @@ objc_library(
"src/core/transport/chttp2/frame_rst_stream.h",
"src/core/transport/chttp2/frame_settings.h",
"src/core/transport/chttp2/frame_window_update.h",
"src/core/transport/chttp2/hpack_encoder.h",
"src/core/transport/chttp2/hpack_parser.h",
"src/core/transport/chttp2/hpack_table.h",
"src/core/transport/chttp2/http2_errors.h",
@@ -1332,14 +1339,13 @@ objc_library(
"src/core/transport/chttp2/incoming_metadata.h",
"src/core/transport/chttp2/internal.h",
"src/core/transport/chttp2/status_conversion.h",
"src/core/transport/chttp2/stream_encoder.h",
"src/core/transport/chttp2/stream_map.h",
"src/core/transport/chttp2/timeout_encoding.h",
"src/core/transport/chttp2/varint.h",
"src/core/transport/chttp2_transport.h",
"src/core/transport/connectivity_state.h",
"src/core/transport/metadata.h",
"src/core/transport/stream_op.h",
"src/core/transport/metadata_batch.h",
"src/core/transport/transport.h",
"src/core/transport/transport_impl.h",
"src/core/census/aggregation.h",

1000
Makefile

File diff suppressed because one or more lines are too long

7
binding.gyp

@@ -183,6 +183,7 @@
'src/core/channel/http_client_filter.c',
'src/core/channel/http_server_filter.c',
'src/core/channel/noop_filter.c',
'src/core/channel/subchannel_call_holder.c',
'src/core/client_config/client_config.c',
'src/core/client_config/connector.c',
'src/core/client_config/lb_policies/pick_first.c',
@@ -252,7 +253,6 @@
'src/core/json/json_writer.c',
'src/core/surface/api_trace.c',
'src/core/surface/byte_buffer.c',
'src/core/surface/byte_buffer_queue.c',
'src/core/surface/byte_buffer_reader.c',
'src/core/surface/call.c',
'src/core/surface/call_details.c',
@@ -269,6 +269,7 @@
'src/core/surface/server_chttp2.c',
'src/core/surface/server_create.c',
'src/core/surface/version.c',
'src/core/transport/byte_stream.c',
'src/core/transport/chttp2/alpn.c',
'src/core/transport/chttp2/bin_encoder.c',
'src/core/transport/chttp2/frame_data.c',
@@ -277,13 +278,13 @@
'src/core/transport/chttp2/frame_rst_stream.c',
'src/core/transport/chttp2/frame_settings.c',
'src/core/transport/chttp2/frame_window_update.c',
'src/core/transport/chttp2/hpack_encoder.c',
'src/core/transport/chttp2/hpack_parser.c',
'src/core/transport/chttp2/hpack_table.c',
'src/core/transport/chttp2/huffsyms.c',
'src/core/transport/chttp2/incoming_metadata.c',
'src/core/transport/chttp2/parsing.c',
'src/core/transport/chttp2/status_conversion.c',
'src/core/transport/chttp2/stream_encoder.c',
'src/core/transport/chttp2/stream_lists.c',
'src/core/transport/chttp2/stream_map.c',
'src/core/transport/chttp2/timeout_encoding.c',
@@ -292,7 +293,7 @@
'src/core/transport/chttp2_transport.c',
'src/core/transport/connectivity_state.c',
'src/core/transport/metadata.c',
'src/core/transport/stream_op.c',
'src/core/transport/metadata_batch.c',
'src/core/transport/transport.c',
'src/core/transport/transport_op_string.c',
'src/core/census/context.c',

32
build.yaml

@@ -116,6 +116,7 @@ filegroups:
- src/core/channel/http_client_filter.h
- src/core/channel/http_server_filter.h
- src/core/channel/noop_filter.h
- src/core/channel/subchannel_call_holder.h
- src/core/client_config/client_config.h
- src/core/client_config/connector.h
- src/core/client_config/lb_policies/pick_first.h
@@ -182,7 +183,6 @@ filegroups:
- src/core/statistics/census_interface.h
- src/core/statistics/census_rpc_stats.h
- src/core/surface/api_trace.h
- src/core/surface/byte_buffer_queue.h
- src/core/surface/call.h
- src/core/surface/call_test_only.h
- src/core/surface/channel.h
@@ -191,6 +191,7 @@ filegroups:
- src/core/surface/init.h
- src/core/surface/server.h
- src/core/surface/surface_trace.h
- src/core/transport/byte_stream.h
- src/core/transport/chttp2/alpn.h
- src/core/transport/chttp2/bin_encoder.h
- src/core/transport/chttp2/frame.h
@@ -200,6 +201,7 @@ filegroups:
- src/core/transport/chttp2/frame_rst_stream.h
- src/core/transport/chttp2/frame_settings.h
- src/core/transport/chttp2/frame_window_update.h
- src/core/transport/chttp2/hpack_encoder.h
- src/core/transport/chttp2/hpack_parser.h
- src/core/transport/chttp2/hpack_table.h
- src/core/transport/chttp2/http2_errors.h
@@ -207,14 +209,13 @@ filegroups:
- src/core/transport/chttp2/incoming_metadata.h
- src/core/transport/chttp2/internal.h
- src/core/transport/chttp2/status_conversion.h
- src/core/transport/chttp2/stream_encoder.h
- src/core/transport/chttp2/stream_map.h
- src/core/transport/chttp2/timeout_encoding.h
- src/core/transport/chttp2/varint.h
- src/core/transport/chttp2_transport.h
- src/core/transport/connectivity_state.h
- src/core/transport/metadata.h
- src/core/transport/stream_op.h
- src/core/transport/metadata_batch.h
- src/core/transport/transport.h
- src/core/transport/transport_impl.h
src:
@@ -229,6 +230,7 @@ filegroups:
- src/core/channel/http_client_filter.c
- src/core/channel/http_server_filter.c
- src/core/channel/noop_filter.c
- src/core/channel/subchannel_call_holder.c
- src/core/client_config/client_config.c
- src/core/client_config/connector.c
- src/core/client_config/lb_policies/pick_first.c
@@ -298,7 +300,6 @@ filegroups:
- src/core/json/json_writer.c
- src/core/surface/api_trace.c
- src/core/surface/byte_buffer.c
- src/core/surface/byte_buffer_queue.c
- src/core/surface/byte_buffer_reader.c
- src/core/surface/call.c
- src/core/surface/call_details.c
@@ -315,6 +316,7 @@ filegroups:
- src/core/surface/server_chttp2.c
- src/core/surface/server_create.c
- src/core/surface/version.c
- src/core/transport/byte_stream.c
- src/core/transport/chttp2/alpn.c
- src/core/transport/chttp2/bin_encoder.c
- src/core/transport/chttp2/frame_data.c
@@ -323,13 +325,13 @@ filegroups:
- src/core/transport/chttp2/frame_rst_stream.c
- src/core/transport/chttp2/frame_settings.c
- src/core/transport/chttp2/frame_window_update.c
- src/core/transport/chttp2/hpack_encoder.c
- src/core/transport/chttp2/hpack_parser.c
- src/core/transport/chttp2/hpack_table.c
- src/core/transport/chttp2/huffsyms.c
- src/core/transport/chttp2/incoming_metadata.c
- src/core/transport/chttp2/parsing.c
- src/core/transport/chttp2/status_conversion.c
- src/core/transport/chttp2/stream_encoder.c
- src/core/transport/chttp2/stream_lists.c
- src/core/transport/chttp2/stream_map.c
- src/core/transport/chttp2/timeout_encoding.c
@@ -338,7 +340,7 @@ filegroups:
- src/core/transport/chttp2_transport.c
- src/core/transport/connectivity_state.c
- src/core/transport/metadata.c
- src/core/transport/stream_op.c
- src/core/transport/metadata_batch.c
- src/core/transport/transport.c
- src/core/transport/transport_op_string.c
- name: grpc_test_util_base
@@ -811,21 +813,21 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: chttp2_status_conversion_test
- name: chttp2_hpack_encoder_test
build: test
language: c
src:
- test/core/transport/chttp2/status_conversion_test.c
- test/core/transport/chttp2/hpack_encoder_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: chttp2_stream_encoder_test
- name: chttp2_status_conversion_test
build: test
language: c
src:
- test/core/transport/chttp2/stream_encoder_test.c
- test/core/transport/chttp2/status_conversion_test.c
deps:
- grpc_test_util
- grpc
@@ -1229,16 +1231,6 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: grpc_stream_op_test
build: test
language: c
src:
- test/core/transport/stream_op_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: grpc_verify_jwt
build: tool
language: c

21
gRPC.podspec

@@ -164,6 +164,7 @@ Pod::Spec.new do |s|
'src/core/channel/http_client_filter.h',
'src/core/channel/http_server_filter.h',
'src/core/channel/noop_filter.h',
'src/core/channel/subchannel_call_holder.h',
'src/core/client_config/client_config.h',
'src/core/client_config/connector.h',
'src/core/client_config/lb_policies/pick_first.h',
@@ -230,7 +231,6 @@ Pod::Spec.new do |s|
'src/core/statistics/census_interface.h',
'src/core/statistics/census_rpc_stats.h',
'src/core/surface/api_trace.h',
'src/core/surface/byte_buffer_queue.h',
'src/core/surface/call.h',
'src/core/surface/call_test_only.h',
'src/core/surface/channel.h',
@@ -239,6 +239,7 @@ Pod::Spec.new do |s|
'src/core/surface/init.h',
'src/core/surface/server.h',
'src/core/surface/surface_trace.h',
'src/core/transport/byte_stream.h',
'src/core/transport/chttp2/alpn.h',
'src/core/transport/chttp2/bin_encoder.h',
'src/core/transport/chttp2/frame.h',
@@ -248,6 +249,7 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/frame_rst_stream.h',
'src/core/transport/chttp2/frame_settings.h',
'src/core/transport/chttp2/frame_window_update.h',
'src/core/transport/chttp2/hpack_encoder.h',
'src/core/transport/chttp2/hpack_parser.h',
'src/core/transport/chttp2/hpack_table.h',
'src/core/transport/chttp2/http2_errors.h',
@@ -255,14 +257,13 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/incoming_metadata.h',
'src/core/transport/chttp2/internal.h',
'src/core/transport/chttp2/status_conversion.h',
'src/core/transport/chttp2/stream_encoder.h',
'src/core/transport/chttp2/stream_map.h',
'src/core/transport/chttp2/timeout_encoding.h',
'src/core/transport/chttp2/varint.h',
'src/core/transport/chttp2_transport.h',
'src/core/transport/connectivity_state.h',
'src/core/transport/metadata.h',
'src/core/transport/stream_op.h',
'src/core/transport/metadata_batch.h',
'src/core/transport/transport.h',
'src/core/transport/transport_impl.h',
'src/core/census/aggregation.h',
@@ -307,6 +308,7 @@ Pod::Spec.new do |s|
'src/core/channel/http_client_filter.c',
'src/core/channel/http_server_filter.c',
'src/core/channel/noop_filter.c',
'src/core/channel/subchannel_call_holder.c',
'src/core/client_config/client_config.c',
'src/core/client_config/connector.c',
'src/core/client_config/lb_policies/pick_first.c',
@@ -376,7 +378,6 @@ Pod::Spec.new do |s|
'src/core/json/json_writer.c',
'src/core/surface/api_trace.c',
'src/core/surface/byte_buffer.c',
'src/core/surface/byte_buffer_queue.c',
'src/core/surface/byte_buffer_reader.c',
'src/core/surface/call.c',
'src/core/surface/call_details.c',
@@ -393,6 +394,7 @@ Pod::Spec.new do |s|
'src/core/surface/server_chttp2.c',
'src/core/surface/server_create.c',
'src/core/surface/version.c',
'src/core/transport/byte_stream.c',
'src/core/transport/chttp2/alpn.c',
'src/core/transport/chttp2/bin_encoder.c',
'src/core/transport/chttp2/frame_data.c',
@@ -401,13 +403,13 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/frame_rst_stream.c',
'src/core/transport/chttp2/frame_settings.c',
'src/core/transport/chttp2/frame_window_update.c',
'src/core/transport/chttp2/hpack_encoder.c',
'src/core/transport/chttp2/hpack_parser.c',
'src/core/transport/chttp2/hpack_table.c',
'src/core/transport/chttp2/huffsyms.c',
'src/core/transport/chttp2/incoming_metadata.c',
'src/core/transport/chttp2/parsing.c',
'src/core/transport/chttp2/status_conversion.c',
'src/core/transport/chttp2/stream_encoder.c',
'src/core/transport/chttp2/stream_lists.c',
'src/core/transport/chttp2/stream_map.c',
'src/core/transport/chttp2/timeout_encoding.c',
@@ -416,7 +418,7 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2_transport.c',
'src/core/transport/connectivity_state.c',
'src/core/transport/metadata.c',
'src/core/transport/stream_op.c',
'src/core/transport/metadata_batch.c',
'src/core/transport/transport.c',
'src/core/transport/transport_op_string.c',
'src/core/census/context.c',
@@ -458,6 +460,7 @@ Pod::Spec.new do |s|
'src/core/channel/http_client_filter.h',
'src/core/channel/http_server_filter.h',
'src/core/channel/noop_filter.h',
'src/core/channel/subchannel_call_holder.h',
'src/core/client_config/client_config.h',
'src/core/client_config/connector.h',
'src/core/client_config/lb_policies/pick_first.h',
@@ -524,7 +527,6 @@ Pod::Spec.new do |s|
'src/core/statistics/census_interface.h',
'src/core/statistics/census_rpc_stats.h',
'src/core/surface/api_trace.h',
'src/core/surface/byte_buffer_queue.h',
'src/core/surface/call.h',
'src/core/surface/call_test_only.h',
'src/core/surface/channel.h',
@@ -533,6 +535,7 @@ Pod::Spec.new do |s|
'src/core/surface/init.h',
'src/core/surface/server.h',
'src/core/surface/surface_trace.h',
'src/core/transport/byte_stream.h',
'src/core/transport/chttp2/alpn.h',
'src/core/transport/chttp2/bin_encoder.h',
'src/core/transport/chttp2/frame.h',
@@ -542,6 +545,7 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/frame_rst_stream.h',
'src/core/transport/chttp2/frame_settings.h',
'src/core/transport/chttp2/frame_window_update.h',
'src/core/transport/chttp2/hpack_encoder.h',
'src/core/transport/chttp2/hpack_parser.h',
'src/core/transport/chttp2/hpack_table.h',
'src/core/transport/chttp2/http2_errors.h',
@@ -549,14 +553,13 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/incoming_metadata.h',
'src/core/transport/chttp2/internal.h',
'src/core/transport/chttp2/status_conversion.h',
'src/core/transport/chttp2/stream_encoder.h',
'src/core/transport/chttp2/stream_map.h',
'src/core/transport/chttp2/timeout_encoding.h',
'src/core/transport/chttp2/varint.h',
'src/core/transport/chttp2_transport.h',
'src/core/transport/connectivity_state.h',
'src/core/transport/metadata.h',
'src/core/transport/stream_op.h',
'src/core/transport/metadata_batch.h',
'src/core/transport/transport.h',
'src/core/transport/transport_impl.h',
'src/core/census/aggregation.h',

2
include/grpc++/alarm.h

@@ -43,7 +43,7 @@
namespace grpc {
/// A thin wrapper around \a grpc_alarm (see \a src/core/surface/alarm.h).
class Alarm: public GrpcLibrary {
class Alarm : public GrpcLibrary {
public:
/// Create a completion queue alarm instance associated to \a cq.
///

2
include/grpc/support/port_platform.h

@@ -181,9 +181,9 @@
#ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#define GPR_FORBID_UNREACHABLE_CODE
#define GPR_MSG_IOVLEN_TYPE int
#if TARGET_OS_IPHONE
#define GPR_FORBID_UNREACHABLE_CODE
#define GPR_PLATFORM_STRING "ios"
#define GPR_CPU_IPHONE 1
#define GPR_PTHREAD_TLS 1

5
include/grpc/support/slice_buffer.h

@@ -89,6 +89,11 @@ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst);
/* remove n bytes from the end of a slice buffer */
void gpr_slice_buffer_trim_end(gpr_slice_buffer *src, size_t n,
gpr_slice_buffer *garbage);
/* move the first n bytes of src into dst */
void gpr_slice_buffer_move_first(gpr_slice_buffer *src, size_t n,
gpr_slice_buffer *dst);
/* take the first slice in the slice buffer */
gpr_slice gpr_slice_buffer_take_first(gpr_slice_buffer *src);
#ifdef __cplusplus
}
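
The two helpers declared above split a slice buffer without copying payload bytes: take_first detaches and returns the first slice, move_first transfers exactly n bytes into another buffer. A minimal usage sketch; the function name split_frame, the buffer roles, and the header handling are illustrative assumptions, not part of this diff:

#include <grpc/support/slice_buffer.h>

/* Hypothetical framing step: detach the first slice (say, a frame
   header), then move body_length bytes of payload into a per-stream
   buffer. Neither helper copies the underlying bytes. */
static void split_frame(gpr_slice_buffer *incoming,
                        gpr_slice_buffer *stream_frames,
                        size_t body_length) {
  gpr_slice header = gpr_slice_buffer_take_first(incoming);
  gpr_slice_buffer_move_first(incoming, body_length, stream_frames);
  /* the caller owns the slice returned by take_first */
  gpr_slice_unref(header);
}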

69
src/core/census/grpc_filter.c

@@ -53,28 +53,24 @@ typedef struct call_data {
int error;
/* recv callback */
grpc_stream_op_buffer *recv_ops;
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *on_done_recv;
grpc_closure finish_recv;
} call_data;
typedef struct channel_data {
grpc_mdstr *path_str; /* pointer to meta data str with key == ":path" */
} channel_data;
static void extract_and_annotate_method_tag(grpc_stream_op_buffer *sopb,
static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
call_data *calld,
channel_data *chand) {
grpc_linked_mdelem *m;
size_t i;
for (i = 0; i < sopb->nops; i++) {
grpc_stream_op *op = &sopb->ops[i];
if (op->type != GRPC_OP_METADATA) continue;
for (m = op->data.metadata.list.head; m != NULL; m = m->next) {
if (m->md->key == chand->path_str) {
gpr_log(GPR_DEBUG, "%s",
(const char *)GPR_SLICE_START_PTR(m->md->value->slice));
/* Add method tag here */
}
for (m = md->list.head; m != NULL; m = m->next) {
if (m->md->key == chand->path_str) {
gpr_log(GPR_DEBUG, "%s",
(const char *)GPR_SLICE_START_PTR(m->md->value->slice));
/* Add method tag here */
}
}
}
@@ -83,8 +79,8 @@ static void client_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (op->send_ops) {
extract_and_annotate_method_tag(op->send_ops, calld, chand);
if (op->send_initial_metadata) {
extract_and_annotate_method_tag(op->send_initial_metadata, calld, chand);
}
}
@@ -101,7 +97,7 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (success) {
extract_and_annotate_method_tag(calld->recv_ops, calld, chand);
extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
}
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
@@ -109,11 +105,11 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
if (op->recv_ops) {
if (op->recv_initial_metadata) {
/* substitute our callback for the op callback */
calld->recv_ops = op->recv_ops;
calld->on_done_recv = op->on_done_recv;
op->on_done_recv = calld->on_done_recv;
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->on_done_recv = op->on_complete;
op->on_complete = &calld->finish_recv;
}
}
@@ -128,13 +124,11 @@ static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
static void client_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
if (initial_op) client_mutate_op(elem, initial_op);
}
static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
@@ -146,15 +140,13 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static void server_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
/* TODO(hongyu): call census_tracing_start_op here. */
grpc_closure_init(d->on_done_recv, server_on_done_recv, elem);
if (initial_op) server_mutate_op(elem, initial_op);
grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
}
static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
@@ -165,12 +157,11 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
}
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(chand != NULL);
chand->path_str = grpc_mdstr_from_string(mdctx, ":path");
chand->path_str = grpc_mdstr_from_string(args->metadata_context, ":path");
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
@@ -183,15 +174,13 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
const grpc_channel_filter grpc_client_census_filter = {
client_start_transport_op, grpc_channel_next_op,
sizeof(call_data), client_init_call_elem,
client_destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "census-client"};
client_start_transport_op, grpc_channel_next_op, sizeof(call_data),
client_init_call_elem, grpc_call_stack_ignore_set_pollset,
client_destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, grpc_call_next_get_peer, "census-client"};
const grpc_channel_filter grpc_server_census_filter = {
server_start_transport_op, grpc_channel_next_op,
sizeof(call_data), server_init_call_elem,
server_destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "census-server"};
server_start_transport_op, grpc_channel_next_op, sizeof(call_data),
server_init_call_elem, grpc_call_stack_ignore_set_pollset,
server_destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, grpc_call_next_get_peer, "census-server"};
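
The substance of this filter change: instead of scanning a grpc_stream_op_buffer for GRPC_OP_METADATA entries, the census filter now receives a grpc_metadata_batch and walks its linked list directly. A sketch of the iteration idiom used by extract_and_annotate_method_tag above; logging every value is illustrative (the real filter matches only the :path key), and it assumes, as the filter itself does, that the value slice prints safely with %s:

#include <grpc/support/log.h>
#include "src/core/transport/metadata_batch.h"

/* Walk a metadata batch: grpc_linked_mdelem nodes form a list rooted
   at batch->list.head. */
static void log_metadata_values(grpc_metadata_batch *batch) {
  grpc_linked_mdelem *m;
  for (m = batch->list.head; m != NULL; m = m->next) {
    gpr_log(GPR_DEBUG, "%s",
            (const char *)GPR_SLICE_START_PTR(m->md->value->slice));
  }
}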

50
src/core/channel/channel_stack.c

@@ -104,13 +104,14 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
const grpc_channel_filter **filters,
size_t filter_count, grpc_channel *master,
const grpc_channel_args *args,
const grpc_channel_args *channel_args,
grpc_mdctx *metadata_context,
grpc_channel_stack *stack) {
size_t call_size =
ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
grpc_channel_element *elems;
grpc_channel_element_args args;
char *user_data;
size_t i;
@@ -122,11 +123,14 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
/* init per-filter data */
for (i = 0; i < filter_count; i++) {
args.master = master;
args.channel_args = channel_args;
args.metadata_context = metadata_context;
args.is_first = i == 0;
args.is_last = i == (filter_count - 1);
elems[i].filter = filters[i];
elems[i].channel_data = user_data;
elems[i].filter->init_channel_elem(exec_ctx, &elems[i], master, args,
metadata_context, i == 0,
i == (filter_count - 1));
elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
}
@@ -151,33 +155,63 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
}
void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
grpc_channel_stack *channel_stack,
grpc_channel_stack *channel_stack, int initial_refs,
grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context,
const void *transport_server_data,
grpc_transport_stream_op *initial_op,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
size_t count = channel_stack->count;
grpc_call_element *call_elems;
char *user_data;
size_t i;
call_stack->count = count;
gpr_ref_init(&call_stack->refcount.refs, initial_refs);
grpc_closure_init(&call_stack->refcount.destroy, destroy, destroy_arg);
call_elems = CALL_ELEMS_FROM_STACK(call_stack);
user_data = ((char *)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
for (i = 0; i < count; i++) {
args.refcount = &call_stack->refcount;
args.server_transport_data = transport_server_data;
args.context = context;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i],
transport_server_data, initial_op);
call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i], &args);
user_data +=
ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
}
void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_call_stack *call_stack,
grpc_pollset *pollset) {
size_t count = call_stack->count;
grpc_call_element *call_elems;
char *user_data;
size_t i;
call_elems = CALL_ELEMS_FROM_STACK(call_stack);
user_data = ((char *)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
for (i = 0; i < count; i++) {
call_elems[i].filter->set_pollset(exec_ctx, &call_elems[i], pollset);
user_data +=
ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
}
void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_pollset *pollset) {}
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack) {
grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
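
grpc_call_stack_init now threads a refcount through every call stack: initial_refs seeds call_stack->refcount, and destroy/destroy_arg form the closure run when the last ref drops. A hedged sketch of a caller under the new contract; start_call and destroy_call are hypothetical names, not functions from this PR:

#include "src/core/channel/channel_stack.h"

/* Hypothetical callback run once the final stack ref is released. */
static void destroy_call(grpc_exec_ctx *exec_ctx, void *arg, int success) {}

static void start_call(grpc_exec_ctx *exec_ctx,
                       grpc_channel_stack *channel_stack,
                       grpc_call_stack *call_stack,
                       grpc_call_context_element *context,
                       grpc_pollset *pollset) {
  /* one initial ref, owned by the surface call object;
     transport_server_data is NULL on the client side */
  grpc_call_stack_init(exec_ctx, channel_stack, 1, destroy_call, call_stack,
                       context, NULL, call_stack);
  /* per channel_stack.h: the pollset must be set before the first op */
  grpc_call_stack_set_pollset(exec_ctx, call_stack, pollset);
}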

58
src/core/channel/channel_stack.h

@@ -51,6 +51,20 @@
typedef struct grpc_channel_element grpc_channel_element;
typedef struct grpc_call_element grpc_call_element;
typedef struct {
grpc_channel *master;
const grpc_channel_args *channel_args;
grpc_mdctx *metadata_context;
int is_first;
int is_last;
} grpc_channel_element_args;
typedef struct {
grpc_stream_refcount *refcount;
const void *server_transport_data;
grpc_call_context_element *context;
} grpc_call_element_args;
/* Channel filters specify:
1. the amount of memory needed in the channel & call (via the sizeof_XXX
members)
@@ -84,8 +98,9 @@ typedef struct {
transport and is on the server. Most filters want to ignore this
argument. */
void (*init_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op);
grpc_call_element_args *args);
void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset);
/* Destroy per call data.
The filter does not need to do any chaining */
void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
@@ -99,9 +114,7 @@ typedef struct {
useful for asserting correct configuration by upper layer code.
The filter does not need to do any chaining */
void (*init_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_channel *master, const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last);
grpc_channel_element_args *args);
/* Destroy per channel data.
The filter does not need to do any chaining */
void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
@@ -141,7 +154,14 @@ typedef struct {
/* A call stack tracks a set of related filters for one call, and guarantees
they live within a single malloc() allocation */
typedef struct { size_t count; } grpc_call_stack;
typedef struct {
/* shared refcount for this call stack.
MUST be the first element: the underlying code calls destroy
with the address of the refcount, but higher layers prefer to think
about the address of the call stack itself. */
grpc_stream_refcount refcount;
size_t count;
} grpc_call_stack;
/* Get a channel element given a channel stack and its index */
grpc_channel_element *grpc_channel_stack_element(grpc_channel_stack *stack,
@@ -170,13 +190,35 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
expected to be NULL on a client, or an opaque transport owned pointer on the
server. */
void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
grpc_channel_stack *channel_stack,
grpc_channel_stack *channel_stack, int initial_refs,
grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context,
const void *transport_server_data,
grpc_transport_stream_op *initial_op,
grpc_call_stack *call_stack);
/* Set a pollset for a call stack: must occur before the first op is started */
void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_call_stack *call_stack,
grpc_pollset *pollset);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define grpc_call_stack_ref(call_stack, reason) \
grpc_stream_ref(&(call_stack)->refcount, reason)
#define grpc_call_stack_unref(exec_ctx, call_stack, reason) \
grpc_stream_unref(exec_ctx, &(call_stack)->refcount, reason)
#else
#define grpc_call_stack_ref(call_stack) grpc_stream_ref(&(call_stack)->refcount)
#define grpc_call_stack_unref(exec_ctx, call_stack) \
grpc_stream_unref(exec_ctx, &(call_stack)->refcount)
#endif
/* Destroy a call stack */
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack);
/* Ignore set pollset - used by filters to implement the set_pollset method
if they don't care about pollsets at all. Does nothing. */
void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_pollset *pollset);
/* Call the next operation in a call stack */
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op *op);
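
Taken together, the new element-args structs and the set_pollset slot give every filter the shape below. A pass-through sketch under the revised vtable; the filter name and empty bodies are illustrative, and the member order follows the census filter literals earlier in this diff:

#include "src/core/channel/channel_stack.h"

static void nop_init_call_elem(grpc_exec_ctx *exec_ctx,
                               grpc_call_element *elem,
                               grpc_call_element_args *args) {}
static void nop_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                  grpc_call_element *elem) {}
static void nop_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                  grpc_channel_element *elem,
                                  grpc_channel_element_args *args) {}
static void nop_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_channel_element *elem) {}

/* Filters that ignore pollsets plug in the provided
   grpc_call_stack_ignore_set_pollset no-op. */
static const grpc_channel_filter illustrative_passthrough_filter = {
    grpc_call_next_op, grpc_channel_next_op, 0 /* sizeof_call_data */,
    nop_init_call_elem, grpc_call_stack_ignore_set_pollset,
    nop_destroy_call_elem, 0 /* sizeof_channel_data */,
    nop_init_channel_elem, nop_destroy_channel_elem,
    grpc_call_next_get_peer, "illustrative-passthrough"};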

494
src/core/channel/client_channel.c

@@ -43,6 +43,7 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/subchannel_call_holder.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
@@ -51,7 +52,7 @@
/* Client channel implementation */
typedef struct call_data call_data;
typedef grpc_subchannel_call_holder call_data;
typedef struct client_channel_channel_data {
/** metadata context for this channel */
@@ -98,360 +99,22 @@ typedef struct {
grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
typedef enum {
CALL_CREATED,
CALL_WAITING_FOR_SEND,
CALL_WAITING_FOR_CONFIG,
CALL_WAITING_FOR_PICK,
CALL_WAITING_FOR_CALL,
CALL_ACTIVE,
CALL_CANCELLED
} call_state;
struct call_data {
/* owning element */
grpc_call_element *elem;
gpr_mu mu_state;
call_state state;
gpr_timespec deadline;
grpc_subchannel *picked_channel;
grpc_closure async_setup_task;
grpc_transport_stream_op waiting_op;
/* our child call stack */
grpc_subchannel_call *subchannel_call;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
};
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
grpc_transport_stream_op *new_op)
GRPC_MUST_USE_RESULT;
static void handle_op_after_cancellation(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (op->send_ops) {
grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
}
if (op->recv_ops) {
char status[GPR_LTOA_MIN_BUFSIZE];
grpc_metadata_batch mdb;
gpr_ltoa(GRPC_STATUS_CANCELLED, status);
calld->status.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-status", status);
calld->details.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-message", "Cancelled");
calld->status.prev = calld->details.next = NULL;
calld->status.next = &calld->details;
calld->details.prev = &calld->status;
mdb.list.head = &calld->status;
mdb.list.tail = &calld->details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
}
if (op->on_consumed) {
op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
}
}
typedef struct {
grpc_closure closure;
grpc_call_element *elem;
} waiting_call;
static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation);
static void continue_with_pick(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
waiting_call *wc = arg;
call_data *calld = wc->elem->call_data;
perform_transport_stream_op(exec_ctx, wc->elem, &calld->waiting_op, 1);
gpr_free(wc);
}
static void add_to_lb_policy_wait_queue_locked_state_config(
grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
waiting_call *wc = gpr_malloc(sizeof(*wc));
grpc_closure_init(&wc->closure, continue_with_pick, wc);
wc->elem = elem;
grpc_closure_list_add(&chand->waiting_for_config_closures, &wc->closure, 1);
}
static int is_empty(void *p, int len) {
char *ptr = p;
int i;
for (i = 0; i < len; i++) {
if (ptr[i] != 0) return 0;
}
return 1;
}
static void started_call_locked(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
grpc_transport_stream_op op;
int have_waiting;
if (calld->state == CALL_CANCELLED && calld->subchannel_call != NULL) {
memset(&op, 0, sizeof(op));
op.cancel_with_status = GRPC_STATUS_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, &op);
} else if (calld->state == CALL_WAITING_FOR_CALL) {
have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
if (calld->subchannel_call != NULL) {
calld->state = CALL_ACTIVE;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
&calld->waiting_op);
}
} else {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
}
}
} else {
GPR_ASSERT(calld->state == CALL_CANCELLED);
gpr_mu_unlock(&calld->mu_state);
}
}
static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
gpr_mu_lock(&calld->mu_state);
started_call_locked(exec_ctx, arg, iomgr_success);
}
static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
grpc_pollset *pollset;
grpc_subchannel_call_create_status call_creation_status;
GPR_TIMER_BEGIN("picked_target", 0);
if (calld->picked_channel == NULL) {
/* treat this like a cancellation */
calld->waiting_op.cancel_with_status = GRPC_STATUS_UNAVAILABLE;
perform_transport_stream_op(exec_ctx, calld->elem, &calld->waiting_op, 1);
} else {
gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_CANCELLED) {
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
} else {
GPR_ASSERT(calld->state == CALL_WAITING_FOR_PICK);
calld->state = CALL_WAITING_FOR_CALL;
pollset = calld->waiting_op.bind_pollset;
grpc_closure_init(&calld->async_setup_task, started_call, calld);
call_creation_status = grpc_subchannel_create_call(
exec_ctx, calld->picked_channel, pollset, &calld->subchannel_call,
&calld->async_setup_task);
if (call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
started_call_locked(exec_ctx, calld, iomgr_success);
} else {
gpr_mu_unlock(&calld->mu_state);
}
}
}
GPR_TIMER_END("picked_target", 0);
}
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
grpc_transport_stream_op *new_op) {
call_data *calld = elem->call_data;
grpc_closure *consumed_op = NULL;
grpc_transport_stream_op *waiting_op = &calld->waiting_op;
GPR_ASSERT((waiting_op->send_ops != NULL) + (new_op->send_ops != NULL) <= 1);
GPR_ASSERT((waiting_op->recv_ops != NULL) + (new_op->recv_ops != NULL) <= 1);
if (new_op->send_ops != NULL) {
waiting_op->send_ops = new_op->send_ops;
waiting_op->is_last_send = new_op->is_last_send;
waiting_op->on_done_send = new_op->on_done_send;
}
if (new_op->recv_ops != NULL) {
waiting_op->recv_ops = new_op->recv_ops;
waiting_op->recv_state = new_op->recv_state;
waiting_op->on_done_recv = new_op->on_done_recv;
}
if (new_op->on_consumed != NULL) {
if (waiting_op->on_consumed != NULL) {
consumed_op = waiting_op->on_consumed;
}
waiting_op->on_consumed = new_op->on_consumed;
}
if (new_op->cancel_with_status != GRPC_STATUS_OK) {
waiting_op->cancel_with_status = new_op->cancel_with_status;
}
return consumed_op;
}
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
char *result;
gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_ACTIVE) {
subchannel_call = calld->subchannel_call;
GRPC_SUBCHANNEL_CALL_REF(subchannel_call, "get_peer");
gpr_mu_unlock(&calld->mu_state);
result = grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "get_peer");
return result;
} else {
gpr_mu_unlock(&calld->mu_state);
return grpc_channel_get_target(chand->master);
}
}
static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
grpc_lb_policy *lb_policy;
grpc_transport_stream_op op2;
GPR_TIMER_BEGIN("perform_transport_stream_op", 0);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
gpr_mu_lock(&calld->mu_state);
switch (calld->state) {
case CALL_ACTIVE:
GPR_ASSERT(!continuation);
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
grpc_subchannel_call_process_op(exec_ctx, subchannel_call, op);
break;
case CALL_CANCELLED:
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
break;
case CALL_WAITING_FOR_SEND:
GPR_ASSERT(!continuation);
grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
if (!calld->waiting_op.send_ops &&
calld->waiting_op.cancel_with_status == GRPC_STATUS_OK) {
gpr_mu_unlock(&calld->mu_state);
break;
}
*op = calld->waiting_op;
memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
continuation = 1;
/* fall through */
case CALL_WAITING_FOR_CONFIG:
case CALL_WAITING_FOR_PICK:
case CALL_WAITING_FOR_CALL:
if (!continuation) {
if (op->cancel_with_status != GRPC_STATUS_OK) {
calld->state = CALL_CANCELLED;
op2 = calld->waiting_op;
memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
if (op->on_consumed) {
calld->waiting_op.on_consumed = op->on_consumed;
op->on_consumed = NULL;
} else if (op2.on_consumed) {
calld->waiting_op.on_consumed = op2.on_consumed;
op2.on_consumed = NULL;
}
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
handle_op_after_cancellation(exec_ctx, elem, &op2);
} else {
grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
gpr_mu_unlock(&calld->mu_state);
}
break;
}
/* fall through */
case CALL_CREATED:
if (op->cancel_with_status != GRPC_STATUS_OK) {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
} else {
calld->waiting_op = *op;
if (op->send_ops == NULL) {
/* need to have some send ops before we can select the
lb target */
calld->state = CALL_WAITING_FOR_SEND;
gpr_mu_unlock(&calld->mu_state);
} else {
gpr_mu_lock(&chand->mu_config);
lb_policy = chand->lb_policy;
if (lb_policy) {
grpc_transport_stream_op *waiting_op = &calld->waiting_op;
grpc_pollset *bind_pollset = waiting_op->bind_pollset;
grpc_metadata_batch *initial_metadata =
&waiting_op->send_ops->ops[0].data.metadata;
GRPC_LB_POLICY_REF(lb_policy, "pick");
gpr_mu_unlock(&chand->mu_config);
calld->state = CALL_WAITING_FOR_PICK;
GPR_ASSERT(waiting_op->bind_pollset);
GPR_ASSERT(waiting_op->send_ops);
GPR_ASSERT(waiting_op->send_ops->nops >= 1);
GPR_ASSERT(waiting_op->send_ops->ops[0].type == GRPC_OP_METADATA);
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->async_setup_task, picked_target, calld);
grpc_lb_policy_pick(exec_ctx, lb_policy, bind_pollset,
initial_metadata, &calld->picked_channel,
&calld->async_setup_task);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick");
} else if (chand->resolver != NULL) {
calld->state = CALL_WAITING_FOR_CONFIG;
add_to_lb_policy_wait_queue_locked_state_config(elem);
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
chand->started_resolving = 1;
grpc_resolver_next(exec_ctx, chand->resolver,
&chand->incoming_configuration,
&chand->on_config_changed);
}
gpr_mu_unlock(&chand->mu_config);
gpr_mu_unlock(&calld->mu_state);
} else {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&chand->mu_config);
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
}
}
}
break;
}
GPR_TIMER_END("perform_transport_stream_op", 0);
return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data,
chand->master);
}
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
perform_transport_stream_op(exec_ctx, elem, op, 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
}
static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
@@ -593,11 +256,9 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
op->connectivity_state = NULL;
}
if (!is_empty(op, sizeof(*op))) {
lb_policy = chand->lb_policy;
if (lb_policy) {
GRPC_LB_POLICY_REF(lb_policy, "broadcast");
}
lb_policy = chand->lb_policy;
if (lb_policy) {
GRPC_LB_POLICY_REF(lb_policy, "broadcast");
}
if (op->disconnect && chand->resolver != NULL) {
@@ -624,67 +285,110 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
}
}
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
typedef struct {
grpc_metadata_batch *initial_metadata;
grpc_subchannel **subchannel;
grpc_closure *on_ready;
grpc_call_element *elem;
grpc_closure closure;
} continue_picking_args;
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **subchannel,
grpc_closure *on_ready);
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, int success) {
continue_picking_args *cpa = arg;
if (!success) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
} else if (cpa->subchannel == NULL) {
/* cancelled, do nothing */
} else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->subchannel, cpa->on_ready)) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 1);
}
gpr_free(cpa);
}
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **subchannel,
grpc_closure *on_ready) {
grpc_call_element *elem = elemp;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
continue_picking_args *cpa;
grpc_closure *closure;
/* TODO(ctiller): is there something useful we can do here? */
GPR_ASSERT(initial_op == NULL);
GPR_ASSERT(subchannel);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GPR_ASSERT(server_transport_data == NULL);
gpr_mu_init(&calld->mu_state);
calld->elem = elem;
calld->state = CALL_CREATED;
calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
gpr_mu_lock(&chand->mu_config);
if (initial_metadata == NULL) {
if (chand->lb_policy != NULL) {
grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy, subchannel);
}
for (closure = chand->waiting_for_config_closures.head; closure != NULL;
closure = grpc_closure_next(closure)) {
cpa = closure->cb_arg;
if (cpa->subchannel == subchannel) {
cpa->subchannel = NULL;
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
}
}
gpr_mu_unlock(&chand->mu_config);
return 1;
}
if (chand->lb_policy != NULL) {
int r = grpc_lb_policy_pick(exec_ctx, chand->lb_policy, calld->pollset,
initial_metadata, subchannel, on_ready);
gpr_mu_unlock(&chand->mu_config);
return r;
}
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = 1;
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
grpc_resolver_next(exec_ctx, chand->resolver,
&chand->incoming_configuration,
&chand->on_config_changed);
}
cpa = gpr_malloc(sizeof(*cpa));
cpa->initial_metadata = initial_metadata;
cpa->subchannel = subchannel;
cpa->on_ready = on_ready;
cpa->elem = elem;
grpc_closure_init(&cpa->closure, continue_picking, cpa);
grpc_closure_list_add(&chand->waiting_for_config_closures, &cpa->closure, 1);
gpr_mu_unlock(&chand->mu_config);
return 0;
}
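/* A minimal caller sketch (illustrative only; start_pick_sketch is not part
   of this change): cc_pick_subchannel follows an async-if-needed contract,
   returning nonzero when the pick completed synchronously and zero when
   on_ready will be scheduled later by continue_picking. */
static void start_pick_sketch(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                              grpc_metadata_batch *md, grpc_subchannel **sc,
                              grpc_closure *on_ready) {
  if (cc_pick_subchannel(exec_ctx, elem, md, sc, on_ready)) {
    /* completed synchronously: *sc is set and on_ready will not run */
  } else {
    /* pending: on_ready runs later, with success indicating whether a
       subchannel was obtained */
  }
}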
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {
grpc_subchannel_call_holder_init(elem->call_data, cc_pick_subchannel, elem);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call;
/* if the call got activated, we need to destroy the child stack also, and
remove it from the in-flight requests tracked by the child_entry we
picked */
gpr_mu_lock(&calld->mu_state);
switch (calld->state) {
case CALL_ACTIVE:
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "client_channel");
break;
case CALL_CREATED:
case CALL_CANCELLED:
gpr_mu_unlock(&calld->mu_state);
break;
case CALL_WAITING_FOR_PICK:
case CALL_WAITING_FOR_CONFIG:
case CALL_WAITING_FOR_CALL:
case CALL_WAITING_FOR_SEND:
GPR_UNREACHABLE_CODE(return );
}
grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
GPR_ASSERT(is_last);
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
gpr_mu_init(&chand->mu_config);
chand->mdctx = metadata_context;
chand->master = master;
chand->mdctx = args->metadata_context;
chand->master = args->master;
grpc_pollset_set_init(&chand->pollset_set);
grpc_closure_init(&chand->on_config_changed, cc_on_config_changed, chand);
@@ -709,10 +413,16 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
gpr_mu_destroy(&chand->mu_config);
}
static void cc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset) {
call_data *calld = elem->call_data;
calld->pollset = pollset;
}
const grpc_channel_filter grpc_client_channel_filter = {
cc_start_transport_stream_op, cc_start_transport_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, cc_get_peer, "client-channel",
init_call_elem, cc_set_pollset, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, cc_get_peer, "client-channel",
};
void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,

@@ -39,6 +39,7 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/client_channel.h"
#include "src/core/channel/compress_filter.h"
#include "src/core/channel/subchannel_call_holder.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
@@ -52,8 +53,6 @@
/** Microchannel (uchannel) implementation: a lightweight channel without any
* load-balancing mechanisms, meant for communication from within the core. */
typedef struct call_data call_data;
typedef struct client_uchannel_channel_data {
/** metadata context for this channel */
grpc_mdctx *mdctx;
@@ -80,85 +79,7 @@ typedef struct client_uchannel_channel_data {
gpr_mu mu_state;
} channel_data;
typedef enum {
CALL_CREATED,
CALL_WAITING_FOR_SEND,
CALL_WAITING_FOR_CALL,
CALL_ACTIVE,
CALL_CANCELLED
} call_state;
struct call_data {
/* owning element */
grpc_call_element *elem;
gpr_mu mu_state;
call_state state;
gpr_timespec deadline;
grpc_closure async_setup_task;
grpc_transport_stream_op waiting_op;
/* our child call stack */
grpc_subchannel_call *subchannel_call;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
};
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
grpc_transport_stream_op *new_op)
GRPC_MUST_USE_RESULT;
static void handle_op_after_cancellation(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (op->send_ops) {
grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
}
if (op->recv_ops) {
char status[GPR_LTOA_MIN_BUFSIZE];
grpc_metadata_batch mdb;
gpr_ltoa(GRPC_STATUS_CANCELLED, status);
calld->status.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-status", status);
calld->details.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-message", "Cancelled");
calld->status.prev = calld->details.next = NULL;
calld->status.next = &calld->details;
calld->details.prev = &calld->status;
mdb.list.head = &calld->status;
mdb.list.tail = &calld->details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
}
if (op->on_consumed) {
op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
}
}
typedef struct {
grpc_closure closure;
grpc_call_element *elem;
} waiting_call;
static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation);
static int is_empty(void *p, int len) {
char *ptr = p;
int i;
for (i = 0; i < len; i++) {
if (ptr[i] != 0) return 0;
}
return 1;
}
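/* Note: is_empty reports "no pending work" only when every byte is zero, so
   waiting ops must be reset with memset (as done throughout this file);
   plain struct assignment could leave nonzero padding bytes behind. */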
typedef grpc_subchannel_call_holder call_data;
static void monitor_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
@@ -171,201 +92,17 @@ static void monitor_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
&chand->connectivity_cb);
}
static void started_call_locked(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
grpc_transport_stream_op op;
int have_waiting;
if (calld->state == CALL_CANCELLED && iomgr_success == 0) {
have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
}
} else if (calld->state == CALL_CANCELLED && calld->subchannel_call != NULL) {
memset(&op, 0, sizeof(op));
op.cancel_with_status = GRPC_STATUS_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, &op);
} else if (calld->state == CALL_WAITING_FOR_CALL) {
have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
if (calld->subchannel_call != NULL) {
calld->state = CALL_ACTIVE;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
&calld->waiting_op);
}
} else {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
}
}
} else {
GPR_ASSERT(calld->state == CALL_CANCELLED);
gpr_mu_unlock(&calld->mu_state);
have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
if (have_waiting) {
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
}
}
}
static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
gpr_mu_lock(&calld->mu_state);
started_call_locked(exec_ctx, arg, iomgr_success);
}
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
grpc_transport_stream_op *new_op) {
call_data *calld = elem->call_data;
grpc_closure *consumed_op = NULL;
grpc_transport_stream_op *waiting_op = &calld->waiting_op;
GPR_ASSERT((waiting_op->send_ops != NULL) + (new_op->send_ops != NULL) <= 1);
GPR_ASSERT((waiting_op->recv_ops != NULL) + (new_op->recv_ops != NULL) <= 1);
if (new_op->send_ops != NULL) {
waiting_op->send_ops = new_op->send_ops;
waiting_op->is_last_send = new_op->is_last_send;
waiting_op->on_done_send = new_op->on_done_send;
}
if (new_op->recv_ops != NULL) {
waiting_op->recv_ops = new_op->recv_ops;
waiting_op->recv_state = new_op->recv_state;
waiting_op->on_done_recv = new_op->on_done_recv;
}
if (new_op->on_consumed != NULL) {
if (waiting_op->on_consumed != NULL) {
consumed_op = waiting_op->on_consumed;
}
waiting_op->on_consumed = new_op->on_consumed;
}
if (new_op->cancel_with_status != GRPC_STATUS_OK) {
waiting_op->cancel_with_status = new_op->cancel_with_status;
}
return consumed_op;
}
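/* Note: the returned closure, when non-NULL, is the previously queued
   on_consumed callback displaced by the merge; GRPC_MUST_USE_RESULT on the
   declaration above forces callers to schedule it (as the
   grpc_exec_ctx_enqueue call sites in this file do) rather than drop it. */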
static char *cuc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
char *result;
gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_ACTIVE) {
subchannel_call = calld->subchannel_call;
GRPC_SUBCHANNEL_CALL_REF(subchannel_call, "get_peer");
gpr_mu_unlock(&calld->mu_state);
result = grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "get_peer");
return result;
} else {
gpr_mu_unlock(&calld->mu_state);
return grpc_channel_get_target(chand->master);
}
}
static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
grpc_transport_stream_op op2;
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
gpr_mu_lock(&calld->mu_state);
/* make sure the wrapped subchannel has been set (see
* grpc_client_uchannel_set_subchannel) */
GPR_ASSERT(chand->subchannel != NULL);
switch (calld->state) {
case CALL_ACTIVE:
GPR_ASSERT(!continuation);
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
grpc_subchannel_call_process_op(exec_ctx, subchannel_call, op);
break;
case CALL_CANCELLED:
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
break;
case CALL_WAITING_FOR_SEND:
GPR_ASSERT(!continuation);
grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
if (!calld->waiting_op.send_ops &&
calld->waiting_op.cancel_with_status == GRPC_STATUS_OK) {
gpr_mu_unlock(&calld->mu_state);
break;
}
*op = calld->waiting_op;
memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
continuation = 1;
/* fall through */
case CALL_WAITING_FOR_CALL:
if (!continuation) {
if (op->cancel_with_status != GRPC_STATUS_OK) {
calld->state = CALL_CANCELLED;
op2 = calld->waiting_op;
memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
if (op->on_consumed) {
calld->waiting_op.on_consumed = op->on_consumed;
op->on_consumed = NULL;
} else if (op2.on_consumed) {
calld->waiting_op.on_consumed = op2.on_consumed;
op2.on_consumed = NULL;
}
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
handle_op_after_cancellation(exec_ctx, elem, &op2);
grpc_subchannel_cancel_waiting_call(exec_ctx, chand->subchannel, 1);
} else {
grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
gpr_mu_unlock(&calld->mu_state);
}
break;
}
/* fall through */
case CALL_CREATED:
if (op->cancel_with_status != GRPC_STATUS_OK) {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
} else {
calld->waiting_op = *op;
if (op->send_ops == NULL) {
calld->state = CALL_WAITING_FOR_SEND;
gpr_mu_unlock(&calld->mu_state);
} else {
grpc_subchannel_call_create_status call_creation_status;
grpc_pollset *pollset = calld->waiting_op.bind_pollset;
calld->state = CALL_WAITING_FOR_CALL;
grpc_closure_init(&calld->async_setup_task, started_call, calld);
call_creation_status = grpc_subchannel_create_call(
exec_ctx, chand->subchannel, pollset, &calld->subchannel_call,
&calld->async_setup_task);
if (call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
started_call_locked(exec_ctx, calld, 1);
} else {
gpr_mu_unlock(&calld->mu_state);
}
}
}
break;
}
return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data,
chand->master);
}
static void cuc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
perform_transport_stream_op(exec_ctx, elem, op, 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
}
static void cuc_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -392,64 +129,40 @@ static void cuc_start_transport_op(grpc_exec_ctx *exec_ctx,
}
}
static int cuc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **subchannel,
grpc_closure *on_ready) {
channel_data *chand = arg;
GPR_ASSERT(initial_metadata != NULL);
*subchannel = chand->subchannel;
return 1;
}
/* Constructor for call_data */
static void cuc_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(call_data));
/* TODO(ctiller): is there something useful we can do here? */
GPR_ASSERT(initial_op == NULL);
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
GPR_ASSERT(server_transport_data == NULL);
gpr_mu_init(&calld->mu_state);
calld->elem = elem;
calld->state = CALL_CREATED;
calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_call_element_args *args) {
grpc_subchannel_call_holder_init(elem->call_data, cuc_pick_subchannel,
elem->channel_data);
}
/* Destructor for call_data */
static void cuc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call;
/* if the call got activated, we need to destroy the child stack also, and
remove it from the in-flight requests tracked by the child_entry we
picked */
gpr_mu_lock(&calld->mu_state);
switch (calld->state) {
case CALL_ACTIVE:
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "client_uchannel");
break;
case CALL_CREATED:
case CALL_CANCELLED:
gpr_mu_unlock(&calld->mu_state);
break;
case CALL_WAITING_FOR_CALL:
case CALL_WAITING_FOR_SEND:
GPR_UNREACHABLE_CODE(return );
}
grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
}
/* Constructor for channel_data */
static void cuc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
grpc_closure_init(&chand->connectivity_cb, monitor_subchannel, chand);
GPR_ASSERT(is_last);
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
chand->mdctx = metadata_context;
chand->master = master;
chand->mdctx = args->metadata_context;
chand->master = args->master;
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_uchannel");
gpr_mu_init(&chand->mu_state);
@@ -465,17 +178,17 @@ static void cuc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
gpr_mu_destroy(&chand->mu_state);
}
static void cuc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset) {
call_data *calld = elem->call_data;
calld->pollset = pollset;
}
const grpc_channel_filter grpc_client_uchannel_filter = {
cuc_start_transport_stream_op,
cuc_start_transport_op,
sizeof(call_data),
cuc_init_call_elem,
cuc_destroy_call_elem,
sizeof(channel_data),
cuc_init_channel_elem,
cuc_destroy_channel_elem,
cuc_get_peer,
"client-uchannel",
cuc_start_transport_stream_op, cuc_start_transport_op, sizeof(call_data),
cuc_init_call_elem, cuc_set_pollset, cuc_destroy_call_elem,
sizeof(channel_data), cuc_init_channel_elem, cuc_destroy_channel_elem,
cuc_get_peer, "client-uchannel",
};
grpc_connectivity_state grpc_client_uchannel_check_connectivity_state(

@@ -50,13 +50,20 @@ typedef struct call_data {
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
gpr_uint32 remaining_slice_bytes;
/**< Input data to be read, as per BEGIN_MESSAGE */
int written_initial_metadata; /**< Already processed initial md? */
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
/** If true, contents of \a compression_algorithm are authoritative */
int has_compression_algorithm;
grpc_transport_stream_op send_op;
gpr_uint32 send_length;
gpr_uint32 send_flags;
gpr_slice incoming_slice;
grpc_slice_buffer_stream replacement_stream;
grpc_closure *post_send;
grpc_closure send_done;
grpc_closure got_slice;
} call_data;
typedef struct channel_data {
@@ -76,24 +83,6 @@ typedef struct channel_data {
grpc_compression_options compression_options;
} channel_data;
/** Compress \a slices in place using \a algorithm. Returns 1 if compression did
* actually happen, 0 otherwise (for example if the compressed output size was
* larger than the raw input).
*/
static int compress_send_sb(grpc_compression_algorithm algorithm,
gpr_slice_buffer *slices) {
int did_compress;
gpr_slice_buffer tmp;
gpr_slice_buffer_init(&tmp);
did_compress = grpc_msg_compress(algorithm, slices, &tmp);
if (did_compress) {
gpr_slice_buffer_swap(slices, &tmp);
}
gpr_slice_buffer_destroy(&tmp);
return did_compress;
}
/** For each \a md element from the incoming metadata, filter out the entry for
* "grpc-encoding", using its value to populate the call data's
* compression_algorithm field. */
@@ -127,7 +116,9 @@ static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
return md;
}
static int skip_compression(channel_data *channeld, call_data *calld) {
static int skip_compression(grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
if (calld->has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
return 1;
@@ -138,169 +129,127 @@ static int skip_compression(channel_data *channeld, call_data *calld) {
return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
}
/** Assembles a new grpc_stream_op_buffer with the compressed slices, modifying
* the associated GRPC_OP_BEGIN_MESSAGE accordingly (new compressed length,
* flags indicating compression is in effect) and replaces \a send_ops with it.
* */
static void finish_compressed_sopb(grpc_stream_op_buffer *send_ops,
grpc_call_element *elem) {
size_t i;
/** Filter initial metadata */
static void process_send_initial_metadata(
grpc_call_element *elem, grpc_metadata_batch *initial_metadata) {
call_data *calld = elem->call_data;
int new_slices_added = 0; /* GPR_FALSE */
grpc_metadata_batch metadata;
grpc_stream_op_buffer new_send_ops;
grpc_sopb_init(&new_send_ops);
for (i = 0; i < send_ops->nops; i++) {
grpc_stream_op *sop = &send_ops->ops[i];
switch (sop->type) {
case GRPC_OP_BEGIN_MESSAGE:
GPR_ASSERT(calld->slices.length <= GPR_UINT32_MAX);
grpc_sopb_add_begin_message(
&new_send_ops, (gpr_uint32)calld->slices.length,
sop->data.begin_message.flags | GRPC_WRITE_INTERNAL_COMPRESS);
break;
case GRPC_OP_SLICE:
/* Once we reach the slices section of the original buffer, simply add
* all the new (compressed) slices. We obviously want to do this only
* once, hence the "new_slices_added" guard. */
if (!new_slices_added) {
size_t j;
for (j = 0; j < calld->slices.count; ++j) {
grpc_sopb_add_slice(&new_send_ops,
gpr_slice_ref(calld->slices.slices[j]));
}
new_slices_added = 1; /* GPR_TRUE */
}
break;
case GRPC_OP_METADATA:
/* move the metadata to the new buffer. */
grpc_metadata_batch_move(&metadata, &sop->data.metadata);
grpc_sopb_add_metadata(&new_send_ops, metadata);
break;
case GRPC_NO_OP:
break;
}
channel_data *channeld = elem->channel_data;
/* Parse incoming request for compression. If any, it'll be available
* at calld->compression_algorithm */
grpc_metadata_batch_filter(initial_metadata, compression_md_filter, elem);
if (!calld->has_compression_algorithm) {
/* If no algorithm was found in the metadata and we aren't
* exceptionally skipping compression, fall back to the channel
* default */
calld->compression_algorithm = channeld->default_compression_algorithm;
calld->has_compression_algorithm = 1; /* GPR_TRUE */
}
grpc_sopb_swap(send_ops, &new_send_ops);
grpc_sopb_destroy(&new_send_ops);
/* hint compression algorithm */
grpc_metadata_batch_add_tail(
initial_metadata, &calld->compression_algorithm_storage,
GRPC_MDELEM_REF(
channeld
->mdelem_compression_algorithms[calld->compression_algorithm]));
/* convey supported compression algorithms */
grpc_metadata_batch_add_tail(
initial_metadata, &calld->accept_encoding_storage,
GRPC_MDELEM_REF(channeld->mdelem_accept_encoding));
}
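/* Illustrative outcome (example values assumed): for a channel whose
   default algorithm is gzip and a call with no per-call override, the
   outgoing initial metadata gains
       grpc-encoding: gzip
       grpc-accept-encoding: deflate,gzip
   where the accept-encoding list is whatever set of non-identity algorithm
   names init_channel_elem assembles below. */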
/** Filter's "main" function, called for any incoming grpc_transport_stream_op
* instance that holds a non-zero number of send operations, accessible to this
* function in \a send_ops. */
static void process_send_ops(grpc_call_element *elem,
grpc_stream_op_buffer *send_ops) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
size_t i;
int did_compress = 0;
static void continue_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem);
/* In streaming calls, we need to reset the previously accumulated slices */
static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, int success) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
gpr_slice_buffer_reset_and_unref(&calld->slices);
for (i = 0; i < send_ops->nops; ++i) {
grpc_stream_op *sop = &send_ops->ops[i];
switch (sop->type) {
case GRPC_OP_BEGIN_MESSAGE:
/* buffer up slices until we've processed all the expected ones (as
* given by GRPC_OP_BEGIN_MESSAGE) */
calld->remaining_slice_bytes = sop->data.begin_message.length;
if (sop->data.begin_message.flags & GRPC_WRITE_NO_COMPRESS) {
calld->has_compression_algorithm = 1; /* GPR_TRUE */
calld->compression_algorithm = GRPC_COMPRESS_NONE;
}
break;
case GRPC_OP_METADATA:
if (!calld->written_initial_metadata) {
/* Parse incoming request for compression. If any, it'll be available
* at calld->compression_algorithm */
grpc_metadata_batch_filter(&(sop->data.metadata),
compression_md_filter, elem);
if (!calld->has_compression_algorithm) {
/* If no algorithm was found in the metadata and we aren't
* exceptionally skipping compression, fall back to the channel
* default */
calld->compression_algorithm =
channeld->default_compression_algorithm;
calld->has_compression_algorithm = 1; /* GPR_TRUE */
}
/* hint compression algorithm */
grpc_metadata_batch_add_tail(
&(sop->data.metadata), &calld->compression_algorithm_storage,
GRPC_MDELEM_REF(channeld->mdelem_compression_algorithms
[calld->compression_algorithm]));
/* convey supported compression algorithms */
grpc_metadata_batch_add_tail(
&(sop->data.metadata), &calld->accept_encoding_storage,
GRPC_MDELEM_REF(channeld->mdelem_accept_encoding));
calld->written_initial_metadata = 1; /* GPR_TRUE */
}
break;
case GRPC_OP_SLICE:
if (skip_compression(channeld, calld)) continue;
GPR_ASSERT(calld->remaining_slice_bytes > 0);
/* Increase input ref count, gpr_slice_buffer_add takes ownership. */
gpr_slice_buffer_add(&calld->slices, gpr_slice_ref(sop->data.slice));
GPR_ASSERT(GPR_SLICE_LENGTH(sop->data.slice) <=
calld->remaining_slice_bytes);
calld->remaining_slice_bytes -=
(gpr_uint32)GPR_SLICE_LENGTH(sop->data.slice);
if (calld->remaining_slice_bytes == 0) {
did_compress =
compress_send_sb(calld->compression_algorithm, &calld->slices);
}
break;
case GRPC_NO_OP:
break;
}
}
calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, success);
}
/* Modify the send_ops stream_op_buffer depending on whether compression was
* carried out */
static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
int did_compress;
gpr_slice_buffer tmp;
gpr_slice_buffer_init(&tmp);
did_compress =
grpc_msg_compress(calld->compression_algorithm, &calld->slices, &tmp);
if (did_compress) {
finish_compressed_sopb(send_ops, elem);
gpr_slice_buffer_swap(&calld->slices, &tmp);
calld->send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
gpr_slice_buffer_destroy(&tmp);
grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
calld->send_flags);
calld->send_op.send_message = &calld->replacement_stream.base;
calld->post_send = calld->send_op.on_complete;
calld->send_op.on_complete = &calld->send_done;
grpc_call_next_op(exec_ctx, elem, &calld->send_op);
}
static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, int success) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
finish_send_message(exec_ctx, elem);
} else {
continue_send_message(exec_ctx, elem);
}
}
static void continue_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
while (grpc_byte_stream_next(exec_ctx, calld->send_op.send_message,
&calld->incoming_slice, ~(size_t)0,
&calld->got_slice)) {
gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
finish_send_message(exec_ctx, elem);
break;
}
}
}
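/* Sketch of the pull contract assumed by the loop above (drain_sketch is
   illustrative, not part of this change): grpc_byte_stream_next returns
   nonzero when a slice is available immediately, and zero when the supplied
   closure will run once data arrives. */
static void drain_sketch(grpc_exec_ctx *exec_ctx, grpc_byte_stream *stream,
                         gpr_slice *slice, grpc_closure *on_ready,
                         gpr_slice_buffer *out, size_t total_length) {
  while (grpc_byte_stream_next(exec_ctx, stream, slice, ~(size_t)0,
                               on_ready)) {
    gpr_slice_buffer_add(out, *slice); /* slice produced synchronously */
    if (out->length == total_length) {
      return; /* whole message buffered; ready to compress/forward */
    }
  }
  /* zero return: on_ready fires later and resumes the same loop */
}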
/* Called either:
- in response to an API call (or similar) from above, to send something
- in response to a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
GPR_TIMER_BEGIN("compress_start_transport_stream_op", 0);
if (op->send_ops && op->send_ops->nops > 0) {
process_send_ops(elem, op->send_ops);
if (op->send_initial_metadata) {
process_send_initial_metadata(elem, op->send_initial_metadata);
}
if (op->send_message != NULL && !skip_compression(elem) &&
0 == (op->send_message->flags & GRPC_WRITE_NO_COMPRESS)) {
calld->send_op = *op;
calld->send_length = op->send_message->length;
calld->send_flags = op->send_message->flags;
continue_send_message(exec_ctx, elem);
} else {
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
}
GPR_TIMER_END("compress_start_transport_stream_op", 0);
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
gpr_slice_buffer_init(&calld->slices);
calld->has_compression_algorithm = 0;
calld->written_initial_metadata = 0; /* GPR_FALSE */
if (initial_op) {
if (initial_op->send_ops && initial_op->send_ops->nops > 0) {
process_send_ops(elem, initial_op->send_ops);
}
}
grpc_closure_init(&calld->got_slice, got_slice, elem);
grpc_closure_init(&calld->send_done, send_done, elem);
}
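/* Note: got_slice and send_done are initialized once per call so that the
   asynchronous byte-stream path and the post-send slice-buffer reset have
   stable closure storage for the call's lifetime. */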
/* Destructor for call_data */
@@ -313,9 +262,8 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *channeld = elem->channel_data;
grpc_compression_algorithm algo_idx;
const char *supported_algorithms_names[GRPC_COMPRESS_ALGORITHMS_COUNT - 1];
@@ -325,24 +273,25 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_compression_options_init(&channeld->compression_options);
channeld->compression_options.enabled_algorithms_bitset =
(gpr_uint32)grpc_channel_args_compression_algorithm_get_states(args);
(gpr_uint32)grpc_channel_args_compression_algorithm_get_states(
args->channel_args);
channeld->default_compression_algorithm =
grpc_channel_args_get_compression_algorithm(args);
grpc_channel_args_get_compression_algorithm(args->channel_args);
/* Make sure the default isn't disabled. */
GPR_ASSERT(grpc_compression_options_is_algorithm_enabled(
&channeld->compression_options, channeld->default_compression_algorithm));
channeld->compression_options.default_compression_algorithm =
channeld->default_compression_algorithm;
channeld->mdstr_request_compression_algorithm_key =
grpc_mdstr_from_string(mdctx, GRPC_COMPRESS_REQUEST_ALGORITHM_KEY);
channeld->mdstr_request_compression_algorithm_key = grpc_mdstr_from_string(
args->metadata_context, GRPC_COMPRESS_REQUEST_ALGORITHM_KEY);
channeld->mdstr_outgoing_compression_algorithm_key =
grpc_mdstr_from_string(mdctx, "grpc-encoding");
grpc_mdstr_from_string(args->metadata_context, "grpc-encoding");
channeld->mdstr_compression_capabilities_key =
grpc_mdstr_from_string(mdctx, "grpc-accept-encoding");
grpc_mdstr_from_string(args->metadata_context, "grpc-accept-encoding");
for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
char *algorithm_name;
@@ -354,9 +303,9 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(grpc_compression_algorithm_name(algo_idx, &algorithm_name) != 0);
channeld->mdelem_compression_algorithms[algo_idx] =
grpc_mdelem_from_metadata_strings(
mdctx,
args->metadata_context,
GRPC_MDSTR_REF(channeld->mdstr_outgoing_compression_algorithm_key),
grpc_mdstr_from_string(mdctx, algorithm_name));
grpc_mdstr_from_string(args->metadata_context, algorithm_name));
if (algo_idx > 0) {
supported_algorithms_names[supported_algorithms_idx++] = algorithm_name;
}
@@ -369,11 +318,12 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
&accept_encoding_str_len);
channeld->mdelem_accept_encoding = grpc_mdelem_from_metadata_strings(
mdctx, GRPC_MDSTR_REF(channeld->mdstr_compression_capabilities_key),
grpc_mdstr_from_string(mdctx, accept_encoding_str));
args->metadata_context,
GRPC_MDSTR_REF(channeld->mdstr_compression_capabilities_key),
grpc_mdstr_from_string(args->metadata_context, accept_encoding_str));
gpr_free(accept_encoding_str);
GPR_ASSERT(!is_last);
GPR_ASSERT(!args->is_last);
}
/* Destructor for channel data */
@@ -393,5 +343,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
const grpc_channel_filter grpc_compress_filter = {
compress_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, grpc_call_next_get_peer, "compress"};
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "compress"};

@@ -83,8 +83,7 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
int r;
@@ -92,10 +91,18 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
r = grpc_transport_init_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
server_transport_data, initial_op);
args->refcount, args->server_transport_data);
GPR_ASSERT(r == 0);
}
static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_transport_set_pollset(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollset);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
@@ -108,11 +115,10 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(is_last);
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
cd->transport = NULL;
}
@@ -132,8 +138,8 @@ static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
const grpc_channel_filter grpc_connected_channel_filter = {
con_start_transport_stream_op, con_start_transport_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, con_get_peer, "connected",
init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, con_get_peer, "connected",
};
void grpc_connected_channel_bind_transport(grpc_channel_stack *channel_stack,
@@ -154,3 +160,8 @@ void grpc_connected_channel_bind_transport(grpc_channel_stack *channel_stack,
channel. */
channel_stack->call_stack_size += grpc_transport_stream_size(transport);
}
grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
call_data *calld = elem->call_data;
return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}

@@ -46,4 +46,6 @@ extern const grpc_channel_filter grpc_connected_channel_filter;
void grpc_connected_channel_bind_transport(grpc_channel_stack* channel_stack,
grpc_transport* transport);
grpc_stream* grpc_connected_channel_get_stream(grpc_call_element* elem);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CONNECTED_CHANNEL_H */

@@ -45,10 +45,8 @@ typedef struct call_data {
grpc_linked_mdelem te_trailers;
grpc_linked_mdelem content_type;
grpc_linked_mdelem user_agent;
int sent_initial_metadata;
int got_initial_metadata;
grpc_stream_op_buffer *recv_ops;
grpc_metadata_batch *recv_initial_metadata;
/** Closure to call when finished with the hc_on_recv hook */
grpc_closure *on_done_recv;
@@ -91,18 +89,11 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
size_t i;
size_t nops = calld->recv_ops->nops;
grpc_stream_op *ops = calld->recv_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
client_recv_filter_args a;
if (op->type != GRPC_OP_METADATA) continue;
calld->got_initial_metadata = 1;
a.elem = elem;
a.exec_ctx = exec_ctx;
grpc_metadata_batch_filter(&op->data.metadata, client_recv_filter, &a);
}
client_recv_filter_args a;
a.elem = elem;
a.exec_ctx = exec_ctx;
grpc_metadata_batch_filter(calld->recv_initial_metadata, client_recv_filter,
&a);
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
@@ -123,40 +114,29 @@ static void hc_mutate_op(grpc_call_element *elem,
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
size_t i;
if (op->send_ops && !calld->sent_initial_metadata) {
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *stream_op = &ops[i];
if (stream_op->type != GRPC_OP_METADATA) continue;
calld->sent_initial_metadata = 1;
grpc_metadata_batch_filter(&stream_op->data.metadata, client_strip_filter,
elem);
/* Send : prefixed headers, which have to be before any application
layer headers. */
grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->method,
GRPC_MDELEM_REF(channeld->method));
grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->scheme,
GRPC_MDELEM_REF(channeld->scheme));
grpc_metadata_batch_add_tail(&stream_op->data.metadata,
&calld->te_trailers,
GRPC_MDELEM_REF(channeld->te_trailers));
grpc_metadata_batch_add_tail(&stream_op->data.metadata,
&calld->content_type,
GRPC_MDELEM_REF(channeld->content_type));
grpc_metadata_batch_add_tail(&stream_op->data.metadata,
&calld->user_agent,
GRPC_MDELEM_REF(channeld->user_agent));
break;
}
if (op->send_initial_metadata != NULL) {
grpc_metadata_batch_filter(op->send_initial_metadata, client_strip_filter,
elem);
/* Send :-prefixed pseudo-headers, which have to come before any
application-layer headers. */
grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->method,
GRPC_MDELEM_REF(channeld->method));
grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->scheme,
GRPC_MDELEM_REF(channeld->scheme));
grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->te_trailers,
GRPC_MDELEM_REF(channeld->te_trailers));
grpc_metadata_batch_add_tail(op->send_initial_metadata,
&calld->content_type,
GRPC_MDELEM_REF(channeld->content_type));
grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->user_agent,
GRPC_MDELEM_REF(channeld->user_agent));
}
if (op->recv_ops && !calld->got_initial_metadata) {
if (op->recv_initial_metadata != NULL) {
/* substitute our callback for the higher callback */
calld->recv_ops = op->recv_ops;
calld->on_done_recv = op->on_done_recv;
op->on_done_recv = &calld->hc_on_recv;
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->on_done_recv = op->on_complete;
op->on_complete = &calld->hc_on_recv;
}
}
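/* Placement note (assuming add_head prepends and add_tail appends): the two
   pseudo-headers end up ahead of all application metadata, with :scheme
   first since it is head-inserted last, while te, content-type and
   user-agent follow the application metadata at the tail. */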
@@ -172,14 +152,10 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
calld->sent_initial_metadata = 0;
calld->got_initial_metadata = 0;
calld->on_done_recv = NULL;
grpc_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
if (initial_op) hc_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
@@ -250,28 +226,31 @@ static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *channel_args,
grpc_mdctx *mdctx, int is_first, int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!is_last);
GPR_ASSERT(!args->is_last);
/* initialize members */
channeld->te_trailers = grpc_mdelem_from_strings(mdctx, "te", "trailers");
channeld->method = grpc_mdelem_from_strings(mdctx, ":method", "POST");
channeld->scheme = grpc_mdelem_from_strings(mdctx, ":scheme",
scheme_from_args(channel_args));
channeld->content_type =
grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
channeld->status = grpc_mdelem_from_strings(mdctx, ":status", "200");
channeld->te_trailers =
grpc_mdelem_from_strings(args->metadata_context, "te", "trailers");
channeld->method =
grpc_mdelem_from_strings(args->metadata_context, ":method", "POST");
channeld->scheme = grpc_mdelem_from_strings(
args->metadata_context, ":scheme", scheme_from_args(args->channel_args));
channeld->content_type = grpc_mdelem_from_strings(
args->metadata_context, "content-type", "application/grpc");
channeld->status =
grpc_mdelem_from_strings(args->metadata_context, ":status", "200");
channeld->user_agent = grpc_mdelem_from_metadata_strings(
mdctx, grpc_mdstr_from_string(mdctx, "user-agent"),
user_agent_from_args(mdctx, channel_args));
args->metadata_context,
grpc_mdstr_from_string(args->metadata_context, "user-agent"),
user_agent_from_args(args->metadata_context, args->channel_args));
}
/* Destructor for channel data */
@@ -290,6 +269,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
const grpc_channel_filter grpc_http_client_filter = {
hc_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"http-client"};
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "http-client"};

@@ -39,7 +39,6 @@
#include "src/core/profiling/timers.h"
typedef struct call_data {
gpr_uint8 got_initial_metadata;
gpr_uint8 seen_path;
gpr_uint8 seen_post;
gpr_uint8 sent_status;
@@ -49,7 +48,7 @@ typedef struct call_data {
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
grpc_stream_op_buffer *recv_ops;
grpc_metadata_batch *recv_initial_metadata;
/** Closure to call when finished with the hs_on_recv hook */
grpc_closure *on_done_recv;
/** Receive closures are chained: we inject this closure as the on_done_recv
@@ -154,43 +153,35 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (success) {
size_t i;
size_t nops = calld->recv_ops->nops;
grpc_stream_op *ops = calld->recv_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
server_filter_args a;
if (op->type != GRPC_OP_METADATA) continue;
calld->got_initial_metadata = 1;
a.elem = elem;
a.exec_ctx = exec_ctx;
grpc_metadata_batch_filter(&op->data.metadata, server_filter, &a);
/* Have we seen the required http2 transport headers?
(:method, :scheme, content-type, with :path and :authority covered
at the channel level right now) */
if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
calld->seen_path && calld->seen_authority) {
/* do nothing */
} else {
if (!calld->seen_path) {
gpr_log(GPR_ERROR, "Missing :path header");
}
if (!calld->seen_authority) {
gpr_log(GPR_ERROR, "Missing :authority header");
}
if (!calld->seen_post) {
gpr_log(GPR_ERROR, "Missing :method header");
}
if (!calld->seen_scheme) {
gpr_log(GPR_ERROR, "Missing :scheme header");
}
if (!calld->seen_te_trailers) {
gpr_log(GPR_ERROR, "Missing te trailers header");
}
/* Error this call out */
success = 0;
grpc_call_element_send_cancel(exec_ctx, elem);
server_filter_args a;
a.elem = elem;
a.exec_ctx = exec_ctx;
grpc_metadata_batch_filter(calld->recv_initial_metadata, server_filter, &a);
/* Have we seen the required http2 transport headers?
(:method, :scheme, te, plus :path and :authority, all of which are
checked immediately below) */
if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
calld->seen_path && calld->seen_authority) {
/* do nothing */
} else {
if (!calld->seen_path) {
gpr_log(GPR_ERROR, "Missing :path header");
}
if (!calld->seen_authority) {
gpr_log(GPR_ERROR, "Missing :authority header");
}
if (!calld->seen_post) {
gpr_log(GPR_ERROR, "Missing :method header");
}
if (!calld->seen_scheme) {
gpr_log(GPR_ERROR, "Missing :scheme header");
}
if (!calld->seen_te_trailers) {
gpr_log(GPR_ERROR, "Missing te trailers header");
}
/* Error this call out */
success = 0;
grpc_call_element_send_cancel(exec_ctx, elem);
}
}
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
@@ -201,29 +192,21 @@ static void hs_mutate_op(grpc_call_element *elem,
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
size_t i;
if (op->send_ops && !calld->sent_status) {
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *stream_op = &ops[i];
if (stream_op->type != GRPC_OP_METADATA) continue;
calld->sent_status = 1;
grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->status,
GRPC_MDELEM_REF(channeld->status_ok));
grpc_metadata_batch_add_tail(&stream_op->data.metadata,
&calld->content_type,
GRPC_MDELEM_REF(channeld->content_type));
break;
}
if (op->send_initial_metadata != NULL && !calld->sent_status) {
calld->sent_status = 1;
grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->status,
GRPC_MDELEM_REF(channeld->status_ok));
grpc_metadata_batch_add_tail(op->send_initial_metadata,
&calld->content_type,
GRPC_MDELEM_REF(channeld->content_type));
}
if (op->recv_ops && !calld->got_initial_metadata) {
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
calld->recv_ops = op->recv_ops;
calld->on_done_recv = op->on_done_recv;
op->on_done_recv = &calld->hs_on_recv;
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->on_done_recv = op->on_complete;
op->on_complete = &calld->hs_on_recv;
}
}
@@ -239,14 +222,12 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
if (initial_op) hs_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
@@ -255,34 +236,39 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
GPR_ASSERT(!args->is_last);
/* initialize members */
channeld->te_trailers = grpc_mdelem_from_strings(mdctx, "te", "trailers");
channeld->status_ok = grpc_mdelem_from_strings(mdctx, ":status", "200");
channeld->te_trailers =
grpc_mdelem_from_strings(args->metadata_context, "te", "trailers");
channeld->status_ok =
grpc_mdelem_from_strings(args->metadata_context, ":status", "200");
channeld->status_not_found =
grpc_mdelem_from_strings(mdctx, ":status", "404");
channeld->method_post = grpc_mdelem_from_strings(mdctx, ":method", "POST");
channeld->http_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "http");
channeld->https_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "https");
channeld->grpc_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "grpc");
channeld->path_key = grpc_mdstr_from_string(mdctx, ":path");
channeld->authority_key = grpc_mdstr_from_string(mdctx, ":authority");
channeld->host_key = grpc_mdstr_from_string(mdctx, "host");
channeld->content_type =
grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
grpc_mdelem_from_strings(args->metadata_context, ":status", "404");
channeld->method_post =
grpc_mdelem_from_strings(args->metadata_context, ":method", "POST");
channeld->http_scheme =
grpc_mdelem_from_strings(args->metadata_context, ":scheme", "http");
channeld->https_scheme =
grpc_mdelem_from_strings(args->metadata_context, ":scheme", "https");
channeld->grpc_scheme =
grpc_mdelem_from_strings(args->metadata_context, ":scheme", "grpc");
channeld->path_key = grpc_mdstr_from_string(args->metadata_context, ":path");
channeld->authority_key =
grpc_mdstr_from_string(args->metadata_context, ":authority");
channeld->host_key = grpc_mdstr_from_string(args->metadata_context, "host");
channeld->content_type = grpc_mdelem_from_strings(
args->metadata_context, "content-type", "application/grpc");
channeld->mdctx = mdctx;
channeld->mdctx = args->metadata_context;
}
/* Destructor for channel data */
@@ -306,6 +292,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
const grpc_channel_filter grpc_http_server_filter = {
hs_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"http-server"};
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "http-server"};

@@ -73,16 +73,13 @@ static void noop_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
/* initialize members */
calld->unused = channeld->unused;
if (initial_op) noop_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
@@ -91,17 +88,15 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
/* The last filter tends to be implemented differently to
handle the case that there's no 'next' filter to call on the down
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
GPR_ASSERT(!args->is_last);
/* initialize members */
channeld->unused = 0;
@@ -118,5 +113,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
const grpc_channel_filter grpc_no_op_filter = {
noop_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, grpc_call_next_get_peer, "no-op"};
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "no-op"};

@@ -0,0 +1,283 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/channel/subchannel_call_holder.h"
#include <grpc/support/alloc.h>
#include "src/core/profiling/timers.h"
#define GET_CALL(holder) \
((grpc_subchannel_call *)(gpr_atm_acq_load(&(holder)->subchannel_call)))
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
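/* GET_CALL performs an acquire-load of the atomically published call
   pointer; CANCELLED_CALL is a sentinel (the never-dereferenced value 1)
   that distinguishes "cancelled before creation" from both NULL (no call
   yet) and a real subchannel call. */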
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *holder,
int success);
static void call_ready(grpc_exec_ctx *exec_ctx, void *holder, int success);
static void retry_ops(grpc_exec_ctx *exec_ctx, void *retry_ops_args,
int success);
static void add_waiting_locked(grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op);
static void fail_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
void grpc_subchannel_call_holder_init(
grpc_subchannel_call_holder *holder,
grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
void *pick_subchannel_arg) {
gpr_atm_rel_store(&holder->subchannel_call, 0);
holder->pick_subchannel = pick_subchannel;
holder->pick_subchannel_arg = pick_subchannel_arg;
gpr_mu_init(&holder->mu);
holder->subchannel = NULL;
holder->waiting_ops = NULL;
holder->waiting_ops_count = 0;
holder->waiting_ops_capacity = 0;
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
}
void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder) {
grpc_subchannel_call *call = GET_CALL(holder);
if (call != NULL && call != CANCELLED_CALL) {
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "holder");
}
GPR_ASSERT(holder->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
gpr_mu_destroy(&holder->mu);
GPR_ASSERT(holder->waiting_ops_count == 0);
gpr_free(holder->waiting_ops);
}
void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op) {
/* try to (atomically) get the call */
grpc_subchannel_call *call = GET_CALL(holder);
GPR_TIMER_BEGIN("grpc_subchannel_call_holder_perform_op", 0);
if (call == CANCELLED_CALL) {
grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
if (call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, call, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
/* we failed; lock and figure out what to do */
gpr_mu_lock(&holder->mu);
retry:
/* need to recheck that another thread hasn't set the call */
call = GET_CALL(holder);
if (call == CANCELLED_CALL) {
gpr_mu_unlock(&holder->mu);
grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
if (call != NULL) {
gpr_mu_unlock(&holder->mu);
grpc_subchannel_call_process_op(exec_ctx, call, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
/* if this is a cancellation, then we can raise our cancelled flag */
if (op->cancel_with_status != GRPC_STATUS_OK) {
if (!gpr_atm_rel_cas(&holder->subchannel_call, 0, 1)) {
goto retry;
} else {
switch (holder->creation_phase) {
case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
fail_locked(exec_ctx, holder);
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_CREATING_CALL:
grpc_subchannel_cancel_create_call(exec_ctx, holder->subchannel,
&holder->subchannel_call);
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
&holder->subchannel, NULL);
break;
}
gpr_mu_unlock(&holder->mu);
grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
}
/* if we don't have a subchannel, try to get one */
if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
holder->subchannel == NULL && op->send_initial_metadata != NULL) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
grpc_closure_init(&holder->next_step, subchannel_ready, holder);
if (holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg,
op->send_initial_metadata, &holder->subchannel,
&holder->next_step)) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
}
}
/* if we've got a subchannel, then let's ask it to create a call */
if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
holder->subchannel != NULL) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_CREATING_CALL;
grpc_closure_init(&holder->next_step, call_ready, holder);
if (grpc_subchannel_create_call(exec_ctx, holder->subchannel,
holder->pollset, &holder->subchannel_call,
&holder->next_step)) {
/* got one immediately - continue the op (and any waiting ops) */
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
retry_waiting_locked(exec_ctx, holder);
goto retry;
}
}
/* nothing to be done but wait */
add_waiting_locked(holder, op);
gpr_mu_unlock(&holder->mu);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
}
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
grpc_subchannel_call_holder *holder = arg;
grpc_subchannel_call *call;
gpr_mu_lock(&holder->mu);
GPR_ASSERT(holder->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
call = GET_CALL(holder);
GPR_ASSERT(call == NULL || call == CANCELLED_CALL);
if (holder->subchannel == NULL) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
fail_locked(exec_ctx, holder);
} else {
grpc_closure_init(&holder->next_step, call_ready, holder);
if (grpc_subchannel_create_call(exec_ctx, holder->subchannel,
holder->pollset, &holder->subchannel_call,
&holder->next_step)) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
/* got one immediately - continue the op (and any waiting ops) */
retry_waiting_locked(exec_ctx, holder);
}
}
gpr_mu_unlock(&holder->mu);
}
static void call_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
grpc_subchannel_call_holder *holder = arg;
GPR_TIMER_BEGIN("call_ready", 0);
gpr_mu_lock(&holder->mu);
GPR_ASSERT(holder->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_CREATING_CALL);
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (GET_CALL(holder) != NULL) {
retry_waiting_locked(exec_ctx, holder);
} else {
fail_locked(exec_ctx, holder);
}
gpr_mu_unlock(&holder->mu);
GPR_TIMER_END("call_ready", 0);
}
typedef struct {
grpc_transport_stream_op *ops;
size_t nops;
grpc_subchannel_call *call;
} retry_ops_args;
static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder) {
retry_ops_args *a = gpr_malloc(sizeof(*a));
a->ops = holder->waiting_ops;
a->nops = holder->waiting_ops_count;
a->call = GET_CALL(holder);
if (a->call == CANCELLED_CALL) {
gpr_free(a);
fail_locked(exec_ctx, holder);
return;
}
holder->waiting_ops = NULL;
holder->waiting_ops_count = 0;
holder->waiting_ops_capacity = 0;
GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(retry_ops, a), 1);
}
static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, int success) {
retry_ops_args *a = args;
size_t i;
for (i = 0; i < a->nops; i++) {
grpc_subchannel_call_process_op(exec_ctx, a->call, &a->ops[i]);
}
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
gpr_free(a->ops);
gpr_free(a);
}
static void add_waiting_locked(grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("add_waiting_locked", 0);
if (holder->waiting_ops_count == holder->waiting_ops_capacity) {
holder->waiting_ops_capacity = GPR_MAX(3, 2 * holder->waiting_ops_capacity);
holder->waiting_ops =
gpr_realloc(holder->waiting_ops, holder->waiting_ops_capacity *
sizeof(*holder->waiting_ops));
}
holder->waiting_ops[holder->waiting_ops_count++] = *op;
GPR_TIMER_END("add_waiting_locked", 0);
}
static void fail_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder) {
size_t i;
for (i = 0; i < holder->waiting_ops_count; i++) {
grpc_exec_ctx_enqueue(exec_ctx, holder->waiting_ops[i].on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, holder->waiting_ops[i].recv_message_ready,
0);
}
holder->waiting_ops_count = 0;
}
char *grpc_subchannel_call_holder_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_channel *master) {
grpc_subchannel_call *subchannel_call = GET_CALL(holder);
if (subchannel_call) {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
} else {
return grpc_channel_get_target(master);
}
}
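
The holder code above goes through two helpers that this hunk does not show: GET_CALL, which atomically loads the call pointer, and CANCELLED_CALL, the sentinel it compares against. A minimal sketch of plausible definitions, assuming the encoding documented in the header below (0 = no call yet, 1 = cancelled, anything else = a live pointer); the real definitions live elsewhere in subchannel_call_holder.c and are not part of this diff:

/* Sketch only -- assumes the atom encodes 0 = no call, 1 = cancelled,
   otherwise a grpc_subchannel_call pointer. */
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
#define GET_CALL(holder) \
  ((grpc_subchannel_call *)gpr_atm_acq_load(&(holder)->subchannel_call))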

@ -0,0 +1,98 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_CHANNEL_SUBCHANNEL_CALL_HOLDER_H
#define GRPC_INTERNAL_CORE_CHANNEL_SUBCHANNEL_CALL_HOLDER_H
#include "src/core/client_config/subchannel.h"
/** Pick a subchannel for a grpc_subchannel_call_holder.
Returns 1 if a subchannel is available immediately (in which case on_ready
must not be called), or 0 otherwise (in which case on_ready will be
called once the subchannel becomes available) */
typedef int (*grpc_subchannel_call_holder_pick_subchannel)(
grpc_exec_ctx *exec_ctx, void *arg, grpc_metadata_batch *initial_metadata,
grpc_subchannel **subchannel, grpc_closure *on_ready);
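The contract above (return 1 for a synchronous pick, 0 for an asynchronous one) can be satisfied by a trivial callback. A hedged sketch of an always-immediate pick, roughly what a uchannel-style filter might do; fixed_pick_arg and fixed_pick are illustrative names, not part of this diff:

/* Hypothetical pick callback that always resolves immediately. */
typedef struct { grpc_subchannel *subchannel; } fixed_pick_arg;

static int fixed_pick(grpc_exec_ctx *exec_ctx, void *arg,
                      grpc_metadata_batch *initial_metadata,
                      grpc_subchannel **subchannel, grpc_closure *on_ready) {
  fixed_pick_arg *p = arg;
  *subchannel = p->subchannel; /* available now... */
  return 1;                    /* ...so on_ready must not be called */
}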
typedef enum {
GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL,
GRPC_SUBCHANNEL_CALL_HOLDER_CREATING_CALL
} grpc_subchannel_call_holder_creation_phase;
/** Wrapper holding a pointer to a grpc_subchannel_call, plus the
associated machinery needed to create that pointer.
Queues stream ops until a call object is ready, waits for initial
metadata before trying to create the call object, and handles
cancellation gracefully.
Both the channel and uchannel filters use this as their call_data. */
typedef struct grpc_subchannel_call_holder {
/** either 0 for no call, 1 for cancelled, or a pointer to a
grpc_subchannel_call */
gpr_atm subchannel_call;
/** Helper function to choose the subchannel on which to create
the call object. Channel filter delegates to the load
balancing policy (once it's ready); uchannel returns
immediately */
grpc_subchannel_call_holder_pick_subchannel pick_subchannel;
void *pick_subchannel_arg;
gpr_mu mu;
grpc_subchannel_call_holder_creation_phase creation_phase;
grpc_subchannel *subchannel;
grpc_pollset *pollset;
grpc_transport_stream_op *waiting_ops;
size_t waiting_ops_count;
size_t waiting_ops_capacity;
grpc_closure next_step;
} grpc_subchannel_call_holder;
void grpc_subchannel_call_holder_init(
grpc_subchannel_call_holder *holder,
grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
void *pick_subchannel_arg);
void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op);
char *grpc_subchannel_call_holder_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_channel *master);
#endif
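
Putting the API together, a filter built on this holder would wire its call_data hooks roughly as follows. This is a sketch under the assumption of a fixed_pick callback like the one sketched above, not a copy of client_channel.c:

/* Sketch of how a channel filter might drive the holder. */
static void my_init_call(grpc_subchannel_call_holder *holder,
                         fixed_pick_arg *pick_arg) {
  grpc_subchannel_call_holder_init(holder, fixed_pick, pick_arg);
  /* callers also set holder->pollset before any op that creates the call */
}

static void my_start_op(grpc_exec_ctx *exec_ctx,
                        grpc_subchannel_call_holder *holder,
                        grpc_transport_stream_op *op) {
  /* queues the op until a subchannel call exists, creating one on demand */
  grpc_subchannel_call_holder_perform_op(exec_ctx, holder, op);
}

static void my_destroy_call(grpc_exec_ctx *exec_ctx,
                            grpc_subchannel_call_holder *holder) {
  grpc_subchannel_call_holder_destroy(exec_ctx, holder);
}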

@ -130,6 +130,30 @@ void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
}
static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_subchannel **target) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
grpc_subchannel_del_interested_party(
exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
}
pp = next;
}
gpr_mu_unlock(&p->mu);
}
static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
p->started_picking = 1;
p->checking_subchannel = 0;
@ -149,16 +173,16 @@ void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_mu_unlock(&p->mu);
}
void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete) {
int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata, grpc_subchannel **target,
grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
if (p->selected) {
gpr_mu_unlock(&p->mu);
*target = p->selected;
grpc_exec_ctx_enqueue(exec_ctx, on_complete, 1);
return 1;
} else {
if (!p->started_picking) {
start_picking(exec_ctx, p);
@ -172,6 +196,7 @@ void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pp->on_complete = on_complete;
p->pending_picks = pp;
gpr_mu_unlock(&p->mu);
return 0;
}
}
@ -365,8 +390,8 @@ void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_destroy, pf_shutdown, pf_pick, pf_exit_idle, pf_broadcast,
pf_check_connectivity, pf_notify_on_state_change};
pf_destroy, pf_shutdown, pf_pick, pf_cancel_pick, pf_exit_idle,
pf_broadcast, pf_check_connectivity, pf_notify_on_state_change};
static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}

@ -264,6 +264,33 @@ void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_mu_unlock(&p->mu);
}
static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_subchannel **target) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
size_t i;
gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
for (i = 0; i < p->num_subchannels; i++) {
grpc_subchannel_add_interested_party(exec_ctx, p->subchannels[i],
pp->pollset);
}
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
}
pp = next;
}
gpr_mu_unlock(&p->mu);
}
static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
@ -286,9 +313,9 @@ void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_mu_unlock(&p->mu);
}
void rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete) {
int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata, grpc_subchannel **target,
grpc_closure *on_complete) {
size_t i;
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
@ -303,7 +330,7 @@ void rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
on_complete->cb(exec_ctx, on_complete->cb_arg, 1);
return 1;
} else {
if (!p->started_picking) {
start_picking(exec_ctx, p);
@ -319,6 +346,7 @@ void rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pp->on_complete = on_complete;
p->pending_picks = pp;
gpr_mu_unlock(&p->mu);
return 0;
}
}
@ -487,8 +515,8 @@ static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
}
static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_destroy, rr_shutdown, rr_pick, rr_exit_idle, rr_broadcast,
rr_check_connectivity, rr_notify_on_state_change};
rr_destroy, rr_shutdown, rr_pick, rr_cancel_pick, rr_exit_idle,
rr_broadcast, rr_check_connectivity, rr_notify_on_state_change};
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}

@ -68,12 +68,17 @@ void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
policy->vtable->shutdown(exec_ctx, policy);
}
void grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete) {
policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata, target,
on_complete);
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete) {
return policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata,
target, on_complete);
}
void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_subchannel **target) {
policy->vtable->cancel_pick(exec_ctx, policy, target);
}
void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,

@ -56,9 +56,11 @@ struct grpc_lb_policy_vtable {
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** implement grpc_lb_policy_pick */
void (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete);
int (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete);
void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_subchannel **target);
/** try to enter a READY connectivity state */
void (*exit_idle)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
@ -106,10 +108,13 @@ void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
target for this rpc, and 'return' it by calling \a on_complete after setting
\a target.
Picking can be asynchronous. Any IO should be done under \a pollset. */
void grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete);
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete);
void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_subchannel **target);
void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_transport_op *op);
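
With pick now returning int, a caller must handle both completion paths itself; on a synchronous pick the policy never invokes the closure. A sketch of the calling convention (start_pick and pick_done are hypothetical names):

/* Sketch: honoring the sync/async contract of the reworked pick API. */
static void start_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                       grpc_pollset *pollset,
                       grpc_metadata_batch *initial_metadata,
                       grpc_subchannel **target, grpc_closure *pick_done) {
  if (grpc_lb_policy_pick(exec_ctx, policy, pollset, initial_metadata, target,
                          pick_done)) {
    /* returned 1: *target is set now and pick_done will NOT be invoked by
       the policy, so run the continuation ourselves */
    grpc_exec_ctx_enqueue(exec_ctx, pick_done, 1);
  }
  /* returned 0: pick_done fires later; grpc_lb_policy_cancel_pick(exec_ctx,
     policy, target) can abandon the pending pick */
}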

@ -41,8 +41,10 @@
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/iomgr/timer.h"
#include "src/core/transport/connectivity_state.h"
#include "src/core/profiling/timers.h"
#include "src/core/surface/channel.h"
#include "src/core/transport/connectivity_state.h"
#include "src/core/transport/connectivity_state.h"
#define GRPC_SUBCHANNEL_MIN_CONNECT_TIMEOUT_SECONDS 20
#define GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS 1
@ -69,7 +71,7 @@ typedef struct waiting_for_connect {
struct waiting_for_connect *next;
grpc_closure *notify;
grpc_pollset *pollset;
grpc_subchannel_call **target;
gpr_atm *target;
grpc_subchannel *subchannel;
grpc_closure continuation;
} waiting_for_connect;
@ -137,14 +139,16 @@ struct grpc_subchannel {
struct grpc_subchannel_call {
connection *connection;
gpr_refcount refs;
};
#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1))
#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack *)((con) + 1))
#define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \
(((grpc_subchannel_call *)(callstack)) - 1)
static grpc_subchannel_call *create_call(grpc_exec_ctx *exec_ctx,
connection *con);
connection *con,
grpc_pollset *pollset);
static void connectivity_state_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel *c,
const char *reason);
@ -163,7 +167,7 @@ static grpc_subchannel *connection_unref_locked(
connection *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) GRPC_MUST_USE_RESULT;
static void subchannel_destroy(grpc_exec_ctx *exec_ctx, grpc_subchannel *c);
#ifdef GRPC_SUBCHANNEL_REFCOUNT_DEBUG
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define SUBCHANNEL_REF_LOCKED(p, r) \
subchannel_ref_locked((p), __FILE__, __LINE__, (r))
#define SUBCHANNEL_UNREF_LOCKED(p, r) \
@ -173,6 +177,7 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, grpc_subchannel *c);
#define CONNECTION_UNREF_LOCKED(cl, p, r) \
connection_unref_locked((cl), (p), __FILE__, __LINE__, (r))
#define REF_PASS_ARGS , file, line, reason
#define REF_PASS_REASON , reason
#define REF_LOG(name, p) \
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "%s: %p ref %d -> %d %s", \
(name), (p), (p)->refs, (p)->refs + 1, reason)
@ -185,6 +190,7 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, grpc_subchannel *c);
#define CONNECTION_REF_LOCKED(p, r) connection_ref_locked((p))
#define CONNECTION_UNREF_LOCKED(cl, p, r) connection_unref_locked((cl), (p))
#define REF_PASS_ARGS
#define REF_PASS_REASON
#define REF_LOG(name, p) \
do { \
} while (0)
@ -312,9 +318,9 @@ grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
return c;
}
void grpc_subchannel_cancel_waiting_call(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
int iomgr_success) {
static void cancel_waiting_calls(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
int iomgr_success) {
waiting_for_connect *w4c;
gpr_mu_lock(&subchannel->mu);
w4c = subchannel->waiting;
@ -335,6 +341,37 @@ void grpc_subchannel_cancel_waiting_call(grpc_exec_ctx *exec_ctx,
}
}
void grpc_subchannel_cancel_create_call(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
gpr_atm *target) {
waiting_for_connect *w4c;
int unref_count = 0;
gpr_mu_lock(&subchannel->mu);
w4c = subchannel->waiting;
subchannel->waiting = NULL;
while (w4c != NULL) {
waiting_for_connect *next = w4c->next;
if (w4c->target == target) {
grpc_subchannel_del_interested_party(exec_ctx, w4c->subchannel,
w4c->pollset);
grpc_exec_ctx_enqueue(exec_ctx, w4c->notify, 0);
unref_count++;
gpr_free(w4c);
} else {
w4c->next = subchannel->waiting;
subchannel->waiting = w4c;
}
w4c = next;
}
gpr_mu_unlock(&subchannel->mu);
while (unref_count-- > 0) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannel, "waiting_for_connect");
}
}
static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
grpc_connect_in_args args;
@ -358,29 +395,35 @@ static void start_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
static void continue_creating_call(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
grpc_subchannel_call_create_status call_creation_status;
int call_creation_finished_ok;
waiting_for_connect *w4c = arg;
grpc_subchannel_del_interested_party(exec_ctx, w4c->subchannel, w4c->pollset);
call_creation_status = grpc_subchannel_create_call(
call_creation_finished_ok = grpc_subchannel_create_call(
exec_ctx, w4c->subchannel, w4c->pollset, w4c->target, w4c->notify);
GPR_ASSERT(call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY);
GPR_ASSERT(call_creation_finished_ok == 1);
w4c->notify->cb(exec_ctx, w4c->notify->cb_arg, iomgr_success);
GRPC_SUBCHANNEL_UNREF(exec_ctx, w4c->subchannel, "waiting_for_connect");
gpr_free(w4c);
}
grpc_subchannel_call_create_status grpc_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_subchannel *c, grpc_pollset *pollset,
grpc_subchannel_call **target, grpc_closure *notify) {
int grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
grpc_pollset *pollset, gpr_atm *target,
grpc_closure *notify) {
connection *con;
grpc_subchannel_call *call;
GPR_TIMER_BEGIN("grpc_subchannel_create_call", 0);
gpr_mu_lock(&c->mu);
if (c->active != NULL) {
con = c->active;
CONNECTION_REF_LOCKED(con, "call");
gpr_mu_unlock(&c->mu);
*target = create_call(exec_ctx, con);
return GRPC_SUBCHANNEL_CALL_CREATE_READY;
call = create_call(exec_ctx, con, pollset);
if (!gpr_atm_rel_cas(target, 0, (gpr_atm)(gpr_uintptr)call)) {
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "failed to set");
}
GPR_TIMER_END("grpc_subchannel_create_call", 0);
return 1;
} else {
waiting_for_connect *w4c = gpr_malloc(sizeof(*w4c));
w4c->next = c->waiting;
@ -405,7 +448,8 @@ grpc_subchannel_call_create_status grpc_subchannel_create_call(
} else {
gpr_mu_unlock(&c->mu);
}
return GRPC_SUBCHANNEL_CALL_CREATE_PENDING;
GPR_TIMER_END("grpc_subchannel_create_call", 0);
return 0;
}
}
@ -653,10 +697,25 @@ static double generate_uniform_random_number(grpc_subchannel *c) {
/* Update backoff_delta and next_attempt in subchannel */
static void update_reconnect_parameters(grpc_subchannel *c) {
size_t i;
gpr_int32 backoff_delta_millis, jitter;
gpr_int32 max_backoff_millis =
GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000;
double jitter_range;
if (c->args) {
for (i = 0; i < c->args->num_args; i++) {
if (0 == strcmp(c->args->args[i].key,
"grpc.testing.fixed_reconnect_backoff")) {
GPR_ASSERT(c->args->args[i].type == GRPC_ARG_INTEGER);
c->next_attempt = gpr_time_add(
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(c->args->args[i].value.integer, GPR_TIMESPAN));
return;
}
}
}
backoff_delta_millis =
(gpr_int32)(gpr_time_to_millis(c->backoff_delta) *
GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER);
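The grpc.testing.fixed_reconnect_backoff escape hatch added above is keyed off an ordinary integer channel arg, so a test can pin the backoff without touching the multiplier/jitter math. A sketch, assuming a 500 ms backoff:

/* Sketch: pinning reconnect backoff to 500ms for a test channel. */
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
arg.key = "grpc.testing.fixed_reconnect_backoff";
arg.value.integer = 500;
grpc_channel_args args = {1, &arg};
/* pass &args wherever the subchannel's channel args originate, e.g.
   grpc_insecure_channel_create(target_address, &args, NULL); */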
@ -687,7 +746,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, int iomgr_success) {
update_reconnect_parameters(c);
continue_connect(exec_ctx, c);
} else {
grpc_subchannel_cancel_waiting_call(exec_ctx, c, iomgr_success);
cancel_waiting_calls(exec_ctx, c, iomgr_success);
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->master, "connecting");
GRPC_SUBCHANNEL_UNREF(exec_ctx, c, "connecting");
}
@ -747,26 +806,40 @@ static void connectivity_state_changed_locked(grpc_exec_ctx *exec_ctx,
* grpc_subchannel_call implementation
*/
static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
int success) {
grpc_subchannel_call *c = call;
gpr_mu *mu = &c->connection->subchannel->mu;
grpc_subchannel *destroy;
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c));
gpr_mu_lock(mu);
destroy = CONNECTION_UNREF_LOCKED(exec_ctx, c->connection, "call");
gpr_mu_unlock(mu);
gpr_free(c);
if (destroy != NULL) {
subchannel_destroy(exec_ctx, destroy);
}
GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
}
void grpc_subchannel_call_ref(grpc_subchannel_call *c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_ref(&c->refs);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
grpc_call_stack_ref(SUBCHANNEL_CALL_TO_CALL_STACK(c), reason);
#else
grpc_call_stack_ref(SUBCHANNEL_CALL_TO_CALL_STACK(c));
#endif
}
void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
if (gpr_unref(&c->refs)) {
gpr_mu *mu = &c->connection->subchannel->mu;
grpc_subchannel *destroy;
grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c));
gpr_mu_lock(mu);
destroy = CONNECTION_UNREF_LOCKED(exec_ctx, c->connection, "call");
gpr_mu_unlock(mu);
gpr_free(c);
if (destroy != NULL) {
subchannel_destroy(exec_ctx, destroy);
}
}
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
grpc_call_stack_unref(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), reason);
#else
grpc_call_stack_unref(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c));
#endif
}
char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
@ -785,14 +858,16 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
}
static grpc_subchannel_call *create_call(grpc_exec_ctx *exec_ctx,
connection *con) {
connection *con,
grpc_pollset *pollset) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
grpc_subchannel_call *call =
gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(call);
call->connection = con;
gpr_ref_init(&call->refs, 1);
grpc_call_stack_init(exec_ctx, chanstk, NULL, NULL, callstk);
grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, call,
NULL, NULL, callstk);
grpc_call_stack_set_pollset(exec_ctx, callstk, pollset);
return call;
}
@ -803,3 +878,8 @@ grpc_mdctx *grpc_subchannel_get_mdctx(grpc_subchannel *subchannel) {
grpc_channel *grpc_subchannel_get_master(grpc_subchannel *subchannel) {
return subchannel->master;
}
grpc_call_stack *grpc_subchannel_call_get_call_stack(
grpc_subchannel_call *subchannel_call) {
return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
}

@ -44,7 +44,7 @@ typedef struct grpc_subchannel grpc_subchannel;
typedef struct grpc_subchannel_call grpc_subchannel_call;
typedef struct grpc_subchannel_args grpc_subchannel_args;
#ifdef GRPC_SUBCHANNEL_REFCOUNT_DEBUG
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_SUBCHANNEL_REF(p, r) \
grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_UNREF(cl, p, r) \
@ -75,27 +75,22 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
typedef enum {
GRPC_SUBCHANNEL_CALL_CREATE_READY,
GRPC_SUBCHANNEL_CALL_CREATE_PENDING
} grpc_subchannel_call_create_status;
/** construct a subchannel call (possibly asynchronously).
*
* If the returned status is \a GRPC_SUBCHANNEL_CALL_CREATE_READY, the call will
* return immediately and \a target will point to a connected \a subchannel_call
* instance. Note that \a notify will \em not be invoked in this case.
* Otherwise, if the returned status is GRPC_SUBCHANNEL_CALL_CREATE_PENDING, the
* subchannel call will be created asynchronously, invoking the \a notify
* callback upon completion. */
grpc_subchannel_call_create_status grpc_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_subchannel *subchannel, grpc_pollset *pollset,
grpc_subchannel_call **target, grpc_closure *notify);
* If the returned status is 1, the call will return immediately and \a target
* will point to a connected \a subchannel_call instance. Note that \a notify
* will \em not be invoked in this case.
* Otherwise, if the returned status is 0, the subchannel call will be created
* asynchronously, invoking the \a notify callback upon completion. */
int grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
grpc_pollset *pollset, gpr_atm *target,
grpc_closure *notify);
/** cancel \a call in the waiting state. */
void grpc_subchannel_cancel_waiting_call(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
int iomgr_success);
void grpc_subchannel_cancel_create_call(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
gpr_atm *target);
/** process a transport level op */
void grpc_subchannel_process_transport_op(grpc_exec_ctx *exec_ctx,
@ -138,6 +133,9 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call);
grpc_call_stack *grpc_subchannel_call_get_call_stack(
grpc_subchannel_call *subchannel_call);
struct grpc_subchannel_args {
/** Channel filters for this channel - wrapped factories will likely
want to mutate this */

@ -39,18 +39,17 @@ void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
closure->cb_arg = cb_arg;
closure->next = NULL;
closure->final_data = 0;
}
void grpc_closure_list_add(grpc_closure_list *closure_list,
grpc_closure *closure, int success) {
if (closure == NULL) return;
closure->next = NULL;
closure->success = success;
closure->final_data = (success != 0);
if (closure_list->head == NULL) {
closure_list->head = closure;
} else {
closure_list->tail->next = closure;
closure_list->tail->final_data |= (gpr_uintptr)closure;
}
closure_list->tail = closure;
}
@ -66,22 +65,12 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
if (dst->head == NULL) {
*dst = *src;
} else {
dst->tail->next = src->head;
dst->tail->final_data |= (gpr_uintptr)src->head;
dst->tail = src->tail;
}
src->head = src->tail = NULL;
}
grpc_closure *grpc_closure_list_pop(grpc_closure_list *list) {
grpc_closure *head;
if (list->head == NULL) {
return NULL;
}
head = list->head;
list->head = list->head->next;
return head;
}
typedef struct {
grpc_iomgr_cb_func cb;
void *cb_arg;
@ -103,3 +92,7 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
return &wc->wrapper;
}
grpc_closure *grpc_closure_next(grpc_closure *closure) {
return (grpc_closure *)(closure->final_data & ~(gpr_uintptr)1);
}

@ -34,7 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H
#define GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H
#include <stddef.h>
#include <grpc/support/port_platform.h>
struct grpc_closure;
typedef struct grpc_closure grpc_closure;
@ -64,13 +64,10 @@ struct grpc_closure {
/** Arguments to be passed to "cb". */
void *cb_arg;
/** Internal. A boolean indication to "cb" on the state of the iomgr.
* For instance, closures created during a shutdown would have this field set
* to false. */
int success;
/**< Internal. Do not touch */
struct grpc_closure *next;
/** Once enqueued, contains in the lower bit the success of the closure,
and in the upper bits the pointer to the next closure in the list.
Before enqueuing for execution, this is usable for scratch data. */
gpr_uintptr final_data;
};
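
Because a grpc_closure is word-aligned, the low bit of a pointer to one is always zero, which is what lets final_data carry the success flag and the next pointer in a single word. In the list code the two halves are written at different times (success when the closure is added, next when a successor is appended), but the combined encoding is equivalent to this sketch:

/* Sketch of the tagging scheme: low bit = success, high bits = next. */
static void pack(grpc_closure *c, grpc_closure *next, int success) {
  c->final_data = (gpr_uintptr)next | (gpr_uintptr)(success != 0);
}
static grpc_closure *unpack_next(grpc_closure *c) {
  return (grpc_closure *)(c->final_data & ~(gpr_uintptr)1);
}
static int unpack_success(grpc_closure *c) {
  return (int)(c->final_data & 1);
}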
/** Initializes \a closure with \a cb and \a cb_arg. */
@ -91,10 +88,10 @@ void grpc_closure_list_add(grpc_closure_list *list, grpc_closure *closure,
/** append all closures from \a src to \a dst and empty \a src. */
void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
/** pop (return and remove) the head closure from \a list. */
grpc_closure *grpc_closure_list_pop(grpc_closure_list *list);
/** return whether \a list is empty. */
int grpc_closure_list_empty(grpc_closure_list list);
/** return the next pointer for a queued closure list */
grpc_closure *grpc_closure_next(grpc_closure *closure);
#endif /* GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H */

@ -44,10 +44,11 @@ int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next;
int success = (int)(c->final_data & 1);
grpc_closure *next = (grpc_closure *)(c->final_data & ~(gpr_uintptr)1);
did_something++;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
c->cb(exec_ctx, c->cb_arg, c->success);
c->cb(exec_ctx, c->cb_arg, success);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
}

@ -63,8 +63,6 @@ void grpc_executor_init() {
/* thread body */
static void closure_exec_thread_func(void *ignored) {
grpc_closure *closure;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (1) {
gpr_mu_lock(&g_executor.mu);
@ -72,16 +70,16 @@ static void closure_exec_thread_func(void *ignored) {
gpr_mu_unlock(&g_executor.mu);
break;
}
closure = grpc_closure_list_pop(&g_executor.closures);
if (closure == NULL) {
if (grpc_closure_list_empty(g_executor.closures)) {
/* no more work, time to die */
GPR_ASSERT(g_executor.busy == 1);
g_executor.busy = 0;
gpr_mu_unlock(&g_executor.mu);
break;
} else {
grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures);
}
gpr_mu_unlock(&g_executor.mu);
closure->cb(&exec_ctx, closure->cb_arg, closure->success);
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
@ -125,7 +123,6 @@ void grpc_executor_enqueue(grpc_closure *closure, int success) {
void grpc_executor_shutdown() {
int pending_join;
grpc_closure *closure;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&g_executor.mu);
@ -136,9 +133,7 @@ void grpc_executor_shutdown() {
* list below because we aren't accepting new work */
/* Execute pending callbacks, some may be performing cleanups */
while ((closure = grpc_closure_list_pop(&g_executor.closures)) != NULL) {
closure->cb(&exec_ctx, closure->cb_arg, closure->success);
}
grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
if (pending_join) {

@ -55,8 +55,13 @@
#endif
void grpc_pollset_init(grpc_pollset *pollset);
/* Begin shutting down the pollset, and call closure when done.
* GRPC_POLLSET_MU(pollset) must be held */
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure);
/** Reset the pollset to its initial state (perhaps with some cached objects);
* must have been previously shut down */
void grpc_pollset_reset(grpc_pollset *pollset);
void grpc_pollset_destroy(grpc_pollset *pollset);
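grpc_pollset_reset makes it possible to recycle a pollset's storage (and, on posix, its cached wakeup fds) instead of destroying and re-initializing it. A hedged sketch of the intended sequence; on_shutdown_done is a hypothetical closure:

/* Sketch: recycle a pollset instead of destroy + init. */
static void recycle_pollset(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                            grpc_closure *on_shutdown_done) {
  gpr_mu_lock(GRPC_POLLSET_MU(pollset));
  grpc_pollset_shutdown(exec_ctx, pollset, on_shutdown_done);
  gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
  /* once on_shutdown_done has run, the storage can be reused:
       grpc_pollset_reset(pollset);   -- back to a usable state */
}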
/* Do some work on a pollset.

@ -47,21 +47,13 @@
#include "src/core/support/block_annotate.h"
#include "src/core/profiling/timers.h"
typedef struct wakeup_fd_hdl {
grpc_wakeup_fd wakeup_fd;
struct wakeup_fd_hdl *next;
} wakeup_fd_hdl;
typedef struct {
grpc_pollset *pollset;
grpc_fd *fd;
grpc_closure closure;
} delayed_add;
typedef struct {
int epoll_fd;
wakeup_fd_hdl *free_wakeup_fds;
} pollset_hdr;
typedef struct { int epoll_fd; } pollset_hdr;
static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
@ -174,7 +166,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);
pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
pfds[0].events = POLLIN;
pfds[0].revents = 0;
pfds[1].fd = h->epoll_fd;
@ -197,7 +189,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
/* do nothing */
} else {
if (pfds[0].revents) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
}
if (pfds[1].revents) {
do {

@ -124,7 +124,7 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
pfds[0].events = POLLIN;
pfds[0].revents = 0;
pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
pfds[1].events = POLLIN;
pfds[1].revents = 0;
for (i = 0; i < h->fd_count; i++) {
@ -174,7 +174,7 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
if (pfds[1].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
}
for (i = 2; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {

@ -111,7 +111,7 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
for (specific_worker = p->root_worker.next;
specific_worker != &p->root_worker;
specific_worker = specific_worker->next) {
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
p->kicked_without_pollers = 1;
GPR_TIMER_END("grpc_pollset_kick_ext.broadcast", 0);
@ -122,14 +122,14 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
specific_worker->reevaluate_polling_on_wakeup = 1;
}
specific_worker->kicked_specifically = 1;
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
} else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
GPR_TIMER_MARK("kick_yoself", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = 1;
}
specific_worker->kicked_specifically = 1;
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
} else if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
@ -151,7 +151,7 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
if (specific_worker != NULL) {
GPR_TIMER_MARK("finally_kick", 0);
push_back_worker(p, specific_worker);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
} else {
GPR_TIMER_MARK("kicked_no_pollers", 0);
@ -177,9 +177,9 @@ void grpc_pollset_global_init(void) {
void grpc_pollset_global_shutdown(void) {
grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
grpc_wakeup_fd_global_destroy();
gpr_tls_destroy(&g_current_thread_poller);
gpr_tls_destroy(&g_current_thread_worker);
grpc_wakeup_fd_global_destroy();
}
void grpc_kick_poller(void) { grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); }
@ -195,6 +195,34 @@ void grpc_pollset_init(grpc_pollset *pollset) {
pollset->shutting_down = 0;
pollset->called_shutdown = 0;
pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL;
pollset->local_wakeup_cache = NULL;
pollset->kicked_without_pollers = 0;
become_basic_pollset(pollset, NULL);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {
GPR_ASSERT(pollset->in_flight_cbs == 0);
GPR_ASSERT(!grpc_pollset_has_workers(pollset));
GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
pollset->vtable->destroy(pollset);
gpr_mu_destroy(&pollset->mu);
while (pollset->local_wakeup_cache) {
grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
gpr_free(pollset->local_wakeup_cache);
pollset->local_wakeup_cache = next;
}
}
void grpc_pollset_reset(grpc_pollset *pollset) {
GPR_ASSERT(pollset->shutting_down);
GPR_ASSERT(pollset->in_flight_cbs == 0);
GPR_ASSERT(!grpc_pollset_has_workers(pollset));
GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
pollset->vtable->destroy(pollset);
pollset->shutting_down = 0;
pollset->called_shutdown = 0;
pollset->kicked_without_pollers = 0;
become_basic_pollset(pollset, NULL);
}
@ -244,13 +272,19 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* this must happen before we (potentially) drop pollset->mu */
worker->next = worker->prev = NULL;
worker->reevaluate_polling_on_wakeup = 0;
if (pollset->local_wakeup_cache != NULL) {
worker->wakeup_fd = pollset->local_wakeup_cache;
pollset->local_wakeup_cache = worker->wakeup_fd->next;
} else {
worker->wakeup_fd = gpr_malloc(sizeof(*worker->wakeup_fd));
grpc_wakeup_fd_init(&worker->wakeup_fd->fd);
}
worker->kicked_specifically = 0;
/* TODO(ctiller): pool these */
grpc_wakeup_fd_init(&worker->wakeup_fd);
/* If there's work waiting for the pollset to be idle, and the
pollset is idle, then do that work */
if (!grpc_pollset_has_workers(pollset) &&
!grpc_closure_list_empty(pollset->idle_jobs)) {
GPR_TIMER_MARK("grpc_pollset_work.idle_jobs", 0);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
goto done;
}
@ -259,16 +293,19 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
May update deadline to ensure timely wakeups.
TODO(ctiller): can this work be localized? */
if (grpc_timer_check(exec_ctx, now, &deadline)) {
GPR_TIMER_MARK("grpc_pollset_work.alarm_triggered", 0);
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
}
/* If we're shutting down then we don't execute any extended work */
if (pollset->shutting_down) {
GPR_TIMER_MARK("grpc_pollset_work.shutting_down", 0);
goto done;
}
/* Give do_promote priority so we don't starve it out */
if (pollset->in_flight_cbs) {
GPR_TIMER_MARK("grpc_pollset_work.in_flight_cbs", 0);
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
@ -293,6 +330,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
locked = 0;
gpr_tls_set(&g_current_thread_poller, 0);
} else {
GPR_TIMER_MARK("grpc_pollset_work.kicked_without_pollers", 0);
pollset->kicked_without_pollers = 0;
}
/* Finished execution - start cleaning up.
@ -323,7 +361,10 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
remove_worker(pollset, worker);
gpr_tls_set(&g_current_thread_worker, 0);
}
grpc_wakeup_fd_destroy(&worker->wakeup_fd);
/* release wakeup fd to the local pool */
worker->wakeup_fd->next = pollset->local_wakeup_cache;
pollset->local_wakeup_cache = worker->wakeup_fd;
/* check shutdown conditions */
if (pollset->shutting_down) {
if (grpc_pollset_has_workers(pollset)) {
grpc_pollset_kick(pollset, NULL);
@ -338,8 +379,8 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
} else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
@ -349,35 +390,20 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
int call_shutdown = 0;
gpr_mu_lock(&pollset->mu);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
!grpc_pollset_has_workers(pollset)) {
pollset->called_shutdown = 1;
call_shutdown = 1;
}
pollset->shutdown_done = closure;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!grpc_pollset_has_workers(pollset)) {
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
}
pollset->shutdown_done = closure;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
gpr_mu_unlock(&pollset->mu);
if (call_shutdown) {
if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
!grpc_pollset_has_workers(pollset)) {
pollset->called_shutdown = 1;
finish_shutdown(exec_ctx, pollset);
}
}
void grpc_pollset_destroy(grpc_pollset *pollset) {
GPR_ASSERT(pollset->shutting_down);
GPR_ASSERT(pollset->in_flight_cbs == 0);
GPR_ASSERT(!grpc_pollset_has_workers(pollset));
pollset->vtable->destroy(pollset);
gpr_mu_destroy(&pollset->mu);
}
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now) {
gpr_timespec timeout;
@ -557,7 +583,7 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
pfd[0].events = POLLIN;
pfd[0].revents = 0;
pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
pfd[1].events = POLLIN;
pfd[1].revents = 0;
nfds = 2;
@ -599,7 +625,7 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
if (pfd[1].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
}
if (nfds > 2) {
grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,

@ -48,8 +48,13 @@ typedef struct grpc_pollset_vtable grpc_pollset_vtable;
use the struct tag */
struct grpc_fd;
typedef struct grpc_cached_wakeup_fd {
grpc_wakeup_fd fd;
struct grpc_cached_wakeup_fd *next;
} grpc_cached_wakeup_fd;
typedef struct grpc_pollset_worker {
grpc_wakeup_fd wakeup_fd;
grpc_cached_wakeup_fd *wakeup_fd;
int reevaluate_polling_on_wakeup;
int kicked_specifically;
struct grpc_pollset_worker *next;
@ -74,6 +79,8 @@ typedef struct grpc_pollset {
int fd;
void *ptr;
} data;
/* Local cache of eventfds for workers */
grpc_cached_wakeup_fd *local_wakeup_cache;
} grpc_pollset;
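
local_wakeup_cache is an intrusive freelist: it spares each grpc_pollset_work call an eventfd create/destroy pair. The acquire/release idiom used in pollset_posix.c, extracted as a sketch (pollset->mu held in both helpers):

/* Sketch of the freelist idiom used in grpc_pollset_work. */
static grpc_cached_wakeup_fd *acquire_wakeup_fd(grpc_pollset *pollset) {
  grpc_cached_wakeup_fd *w = pollset->local_wakeup_cache;
  if (w != NULL) {
    pollset->local_wakeup_cache = w->next; /* reuse a cached eventfd */
  } else {
    w = gpr_malloc(sizeof(*w));
    grpc_wakeup_fd_init(&w->fd); /* first use: create a fresh one */
  }
  return w;
}

static void release_wakeup_fd(grpc_pollset *pollset,
                              grpc_cached_wakeup_fd *w) {
  w->next = pollset->local_wakeup_cache; /* push back for the next worker */
  pollset->local_wakeup_cache = w;
}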
struct grpc_pollset_vtable {

@ -35,6 +35,7 @@
#ifdef GPR_WINSOCK_SOCKET
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include "src/core/iomgr/timer_internal.h"
@ -112,7 +113,6 @@ void grpc_pollset_init(grpc_pollset *pollset) {
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
gpr_mu_lock(&grpc_polling_mu);
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
@ -120,11 +120,19 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
} else {
pollset->on_shutdown = closure;
}
gpr_mu_unlock(&grpc_polling_mu);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {}
void grpc_pollset_reset(grpc_pollset *pollset) {
GPR_ASSERT(pollset->shutting_down);
GPR_ASSERT(!has_workers(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET));
pollset->shutting_down = 0;
pollset->is_iocp_worker = 0;
pollset->kicked_without_pollers = 0;
pollset->on_shutdown = NULL;
}
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker, gpr_timespec now,
gpr_timespec deadline) {

@ -129,8 +129,6 @@ static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, int success) {
void grpc_workqueue_push(grpc_workqueue *workqueue, grpc_closure *closure,
int success) {
closure->success = success;
closure->next = NULL;
gpr_mu_lock(&workqueue->mu);
if (grpc_closure_list_empty(workqueue->closure_list)) {
grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);

@ -50,12 +50,12 @@ typedef struct gpr_timer_entry {
gpr_timespec tm;
const char *tagstr;
const char *file;
int line;
short line;
char type;
gpr_uint8 important;
} gpr_timer_entry;
#define MAX_COUNT (1024 * 1024 / sizeof(gpr_timer_entry))
#define MAX_COUNT (5 * 1024 * 1024 / sizeof(gpr_timer_entry))
static __thread gpr_timer_entry g_log[MAX_COUNT];
static __thread int g_count;
@ -102,7 +102,7 @@ static void gpr_timers_log_add(const char *tagstr, marker_type type,
entry->tagstr = tagstr;
entry->type = type;
entry->file = file;
entry->line = line;
entry->line = (short)line;
entry->important = important != 0;
}

@ -59,8 +59,6 @@ typedef struct {
progress */
grpc_pollset *pollset;
grpc_transport_stream_op op;
size_t op_md_idx;
int sent_initial_metadata;
gpr_uint8 security_context_set;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
char *service_url;
@ -108,9 +106,8 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
return;
}
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
GPR_ASSERT(op->send_ops && op->send_ops->nops > calld->op_md_idx &&
op->send_ops->ops[calld->op_md_idx].type == GRPC_OP_METADATA);
mdb = &op->send_ops->ops[calld->op_md_idx].data.metadata;
GPR_ASSERT(op->send_initial_metadata != NULL);
mdb = op->send_initial_metadata;
for (i = 0; i < num_md; i++) {
grpc_metadata_batch_add_tail(
mdb, &calld->md_links[i],
@ -205,7 +202,6 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_linked_mdelem *l;
size_t i;
grpc_client_security_context *sec_ctx = NULL;
if (calld->security_context_set == 0 &&
@ -224,53 +220,41 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
chand->security_connector->base.auth_context, "client_auth_filter");
}
if (op->bind_pollset != NULL) {
calld->pollset = op->bind_pollset;
}
if (op->send_ops != NULL && !calld->sent_initial_metadata) {
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *sop = &ops[i];
if (sop->type != GRPC_OP_METADATA) continue;
calld->op_md_idx = i;
calld->sent_initial_metadata = 1;
for (l = sop->data.metadata.list.head; l != NULL; l = l->next) {
grpc_mdelem *md = l->md;
/* Pointer comparison is OK for md_elems created from the same context.
*/
if (md->key == chand->authority_string) {
if (calld->host != NULL) GRPC_MDSTR_UNREF(calld->host);
calld->host = GRPC_MDSTR_REF(md->value);
} else if (md->key == chand->path_string) {
if (calld->method != NULL) GRPC_MDSTR_UNREF(calld->method);
calld->method = GRPC_MDSTR_REF(md->value);
}
if (op->send_initial_metadata != NULL) {
for (l = op->send_initial_metadata->list.head; l != NULL; l = l->next) {
grpc_mdelem *md = l->md;
/* Pointer comparison is OK for md_elems created from the same context.
*/
if (md->key == chand->authority_string) {
if (calld->host != NULL) GRPC_MDSTR_UNREF(calld->host);
calld->host = GRPC_MDSTR_REF(md->value);
} else if (md->key == chand->path_string) {
if (calld->method != NULL) GRPC_MDSTR_UNREF(calld->method);
calld->method = GRPC_MDSTR_REF(md->value);
}
if (calld->host != NULL) {
grpc_security_status status;
const char *call_host = grpc_mdstr_as_c_string(calld->host);
calld->op = *op; /* Copy op (originates from the caller's stack). */
status = grpc_channel_security_connector_check_call_host(
exec_ctx, chand->security_connector, call_host, on_host_checked,
elem);
if (status != GRPC_SECURITY_OK) {
if (status == GRPC_SECURITY_ERROR) {
char *error_msg;
gpr_asprintf(&error_msg,
"Invalid host %s set in :authority metadata.",
call_host);
bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT,
error_msg);
gpr_free(error_msg);
}
return; /* early exit */
}
if (calld->host != NULL) {
grpc_security_status status;
const char *call_host = grpc_mdstr_as_c_string(calld->host);
calld->op = *op; /* Copy op (originates from the caller's stack). */
status = grpc_channel_security_connector_check_call_host(
exec_ctx, chand->security_connector, call_host, on_host_checked,
elem);
if (status != GRPC_SECURITY_OK) {
if (status == GRPC_SECURITY_ERROR) {
char *error_msg;
gpr_asprintf(&error_msg,
"Invalid host %s set in :authority metadata.",
call_host);
bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT,
error_msg);
gpr_free(error_msg);
}
return; /* early exit */
}
send_security_metadata(exec_ctx, elem, op);
return; /* early exit */
}
send_security_metadata(exec_ctx, elem, op);
return; /* early exit */
}
/* pass control down the stack */
@ -279,11 +263,15 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(*calld));
GPR_ASSERT(!initial_op || !initial_op->send_ops);
}
static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset) {
call_data *calld = elem->call_data;
calld->pollset = pollset;
}
/* Destructor for call_data */
@ -302,18 +290,17 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
grpc_security_connector *sc = grpc_find_security_connector_in_args(args);
grpc_channel_element *elem,
grpc_channel_element_args *args) {
grpc_security_connector *sc =
grpc_find_security_connector_in_args(args->channel_args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!is_last);
GPR_ASSERT(!args->is_last);
GPR_ASSERT(sc != NULL);
/* initialize members */
@ -321,7 +308,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->security_connector =
(grpc_channel_security_connector *)GRPC_SECURITY_CONNECTOR_REF(
sc, "client_auth_filter");
chand->md_ctx = metadata_context;
chand->md_ctx = args->metadata_context;
chand->authority_string = grpc_mdstr_from_string(chand->md_ctx, ":authority");
chand->path_string = grpc_mdstr_from_string(chand->md_ctx, ":path");
chand->error_msg_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-message");
@ -352,6 +339,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
const grpc_channel_filter grpc_client_auth_filter = {
auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"client-auth"};

@ -207,8 +207,7 @@ grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *p) {
return arg;
}
grpc_server_credentials *grpc_server_credentials_from_arg(
const grpc_arg *arg) {
grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg) {
if (strcmp(arg->key, GRPC_SERVER_CREDENTIALS_ARG) != 0) return NULL;
if (arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,

@ -34,7 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H
#define GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H
#include "src/core/transport/stream_op.h"
#include "src/core/transport/metadata_batch.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/sync.h>

@ -320,8 +320,7 @@ grpc_arg grpc_auth_context_to_arg(grpc_auth_context *p) {
return arg;
}
grpc_auth_context *grpc_auth_context_from_arg(
const grpc_arg *arg) {
grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg) {
if (strcmp(arg->key, GRPC_AUTH_CONTEXT_ARG) != 0) return NULL;
if (arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
@ -336,8 +335,7 @@ grpc_auth_context *grpc_find_auth_context_in_args(
size_t i;
if (args == NULL) return NULL;
for (i = 0; i < args->num_args; i++) {
grpc_auth_context *p =
grpc_auth_context_from_arg(&args->args[i]);
grpc_auth_context *p = grpc_auth_context_from_arg(&args->args[i]);
if (p != NULL) return p;
}
return NULL;

@ -41,8 +41,7 @@
#include <grpc/support/log.h>
typedef struct call_data {
gpr_uint8 got_client_metadata;
grpc_stream_op_buffer *recv_ops;
grpc_metadata_batch *recv_initial_metadata;
/* Closure to call when finished with the auth_on_recv hook. */
grpc_closure *on_done_recv;
/* Receive closures are chained: we inject this closure as the on_done_recv
@ -53,7 +52,6 @@ typedef struct call_data {
grpc_metadata_array md;
const grpc_metadata *consumed_md;
size_t num_consumed_md;
grpc_stream_op *md_op;
grpc_auth_context *auth_context;
} call_data;
@ -128,20 +126,28 @@ static void on_md_processing_done(
if (status == GRPC_STATUS_OK) {
calld->consumed_md = consumed_md;
calld->num_consumed_md = num_consumed_md;
grpc_metadata_batch_filter(&calld->md_op->data.metadata, remove_consumed_md,
grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md,
elem);
grpc_metadata_array_destroy(&calld->md);
calld->on_done_recv->cb(&exec_ctx, calld->on_done_recv->cb_arg, 1);
} else {
gpr_slice message;
grpc_transport_stream_op close_op;
memset(&close_op, 0, sizeof(close_op));
grpc_metadata_array_destroy(&calld->md);
error_details = error_details != NULL
? error_details
: "Authentication metadata processing failed.";
message = gpr_slice_from_copied_string(error_details);
grpc_sopb_reset(calld->recv_ops);
grpc_transport_stream_op_add_close(&calld->transport_op, status, &message);
grpc_call_next_op(&exec_ctx, elem, &calld->transport_op);
calld->transport_op.send_initial_metadata = NULL;
if (calld->transport_op.send_message != NULL) {
grpc_byte_stream_destroy(calld->transport_op.send_message);
calld->transport_op.send_message = NULL;
}
calld->transport_op.send_trailing_metadata = NULL;
grpc_transport_stream_op_add_close(&close_op, status, &message);
grpc_call_next_op(&exec_ctx, elem, &close_op);
calld->on_done_recv->cb(&exec_ctx, calld->on_done_recv->cb_arg, 0);
}
grpc_exec_ctx_finish(&exec_ctx);
@ -153,16 +159,8 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (success) {
size_t i;
size_t nops = calld->recv_ops->nops;
grpc_stream_op *ops = calld->recv_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
if (op->type != GRPC_OP_METADATA || calld->got_client_metadata) continue;
calld->got_client_metadata = 1;
if (chand->creds->processor.process == NULL) continue;
calld->md_op = op;
calld->md = metadata_batch_to_md_array(&op->data.metadata);
if (chand->creds->processor.process != NULL) {
calld->md = metadata_batch_to_md_array(calld->recv_initial_metadata);
chand->creds->processor.process(
chand->creds->processor.state, calld->auth_context,
calld->md.metadata, calld->md.count, on_md_processing_done, elem);
@ -176,11 +174,11 @@ static void set_recv_ops_md_callbacks(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
if (op->recv_ops && !calld->got_client_metadata) {
if (op->recv_initial_metadata != NULL) {
/* substitute our callback for the higher callback */
calld->recv_ops = op->recv_ops;
calld->on_done_recv = op->on_done_recv;
op->on_done_recv = &calld->auth_on_recv;
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->on_done_recv = op->on_complete;
op->on_complete = &calld->auth_on_recv;
calld->transport_op = *op;
}
}
@ -199,8 +197,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
@ -210,47 +207,39 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem);
GPR_ASSERT(initial_op && initial_op->context != NULL &&
initial_op->context[GRPC_CONTEXT_SECURITY].value == NULL);
/* Create a security context for the call and reference the auth context from
the channel. */
if (initial_op->context[GRPC_CONTEXT_SECURITY].value != NULL) {
initial_op->context[GRPC_CONTEXT_SECURITY].destroy(
initial_op->context[GRPC_CONTEXT_SECURITY].value);
if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) {
args->context[GRPC_CONTEXT_SECURITY].destroy(
args->context[GRPC_CONTEXT_SECURITY].value);
}
server_ctx = grpc_server_security_context_create();
server_ctx->auth_context =
grpc_auth_context_create(chand->auth_context);
server_ctx->auth_context->pollset = initial_op->bind_pollset;
initial_op->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
initial_op->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_server_security_context_destroy;
server_ctx->auth_context = grpc_auth_context_create(chand->auth_context);
calld->auth_context = server_ctx->auth_context;
/* Set the metadata callbacks. */
set_recv_ops_md_callbacks(elem, initial_op);
args->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
args->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_server_security_context_destroy;
}
static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset) {}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_auth_context *auth_context = grpc_find_auth_context_in_args(args);
grpc_server_credentials *creds = grpc_find_server_credentials_in_args(args);
grpc_channel_element *elem,
grpc_channel_element_args *args) {
grpc_auth_context *auth_context =
grpc_find_auth_context_in_args(args->channel_args);
grpc_server_credentials *creds =
grpc_find_server_credentials_in_args(args->channel_args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
GPR_ASSERT(!args->is_last);
GPR_ASSERT(auth_context != NULL);
GPR_ASSERT(creds != NULL);
@ -258,7 +247,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter");
chand->creds = grpc_server_credentials_ref(creds);
chand->mdctx = mdctx;
chand->mdctx = args->metadata_context;
}
/* Destructor for channel data */
@ -272,6 +261,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
const grpc_channel_filter grpc_server_auth_filter = {
auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"server-auth"};

@ -94,8 +94,7 @@ static void setup_transport(grpc_exec_ctx *exec_ctx, void *statep,
grpc_channel_args *args_copy;
grpc_arg args_to_add[2];
args_to_add[0] = grpc_server_credentials_to_arg(state->creds);
args_to_add[1] =
grpc_auth_context_to_arg(state->sc->auth_context);
args_to_add[1] = grpc_auth_context_to_arg(state->sc->auth_context);
args_copy = grpc_channel_args_copy_and_add(
grpc_server_get_channel_args(state->server), args_to_add,
GPR_ARRAY_SIZE(args_to_add));

@ -31,6 +31,7 @@
*
*/
#include <grpc/support/port_platform.h>
#include <grpc/support/slice_buffer.h>
#include <string.h>
@ -208,6 +209,44 @@ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst) {
src->length = 0;
}
void gpr_slice_buffer_move_first(gpr_slice_buffer *src, size_t n,
gpr_slice_buffer *dst) {
size_t src_idx;
size_t output_len = dst->length + n;
size_t new_input_len = src->length - n;
GPR_ASSERT(src->length >= n);
if (src->length == n) {
gpr_slice_buffer_move_into(src, dst);
return;
}
src_idx = 0;
while (src_idx < src->capacity) {
gpr_slice slice = src->slices[src_idx];
size_t slice_len = GPR_SLICE_LENGTH(slice);
if (n > slice_len) {
gpr_slice_buffer_add(dst, slice);
n -= slice_len;
src_idx++;
} else if (n == slice_len) {
gpr_slice_buffer_add(dst, slice);
src_idx++;
break;
} else { /* n < slice_len */
src->slices[src_idx] = gpr_slice_split_tail(&slice, n);
GPR_ASSERT(GPR_SLICE_LENGTH(slice) == n);
GPR_ASSERT(GPR_SLICE_LENGTH(src->slices[src_idx]) == slice_len - n);
gpr_slice_buffer_add(dst, slice);
break;
}
}
GPR_ASSERT(dst->length == output_len);
memmove(src->slices, src->slices + src_idx,
sizeof(gpr_slice) * (src->count - src_idx));
src->count -= src_idx;
src->length = new_input_len;
GPR_ASSERT(src->count > 0);
}
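A minimal usage sketch of the new mover (not from this patch; frame_some and both buffer names are hypothetical, and the n == 0 guard just skips the degenerate case):
#include <grpc/support/slice_buffer.h>
/* Move up to 100 queued bytes from `pending` into `outgoing` for framing,
   leaving the remainder queued. Assumes both buffers were set up with
   gpr_slice_buffer_init. */
static void frame_some(gpr_slice_buffer *pending, gpr_slice_buffer *outgoing) {
  size_t n = pending->length < 100 ? pending->length : 100;
  if (n == 0) return; /* nothing queued */
  gpr_slice_buffer_move_first(pending, n, outgoing);
  /* pending->length has shrunk by n; outgoing->length has grown by n */
}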
void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n,
gpr_slice_buffer *garbage) {
GPR_ASSERT(n <= sb->length);
@ -231,3 +270,13 @@ void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n,
}
}
}
gpr_slice gpr_slice_buffer_take_first(gpr_slice_buffer *sb) {
gpr_slice slice;
GPR_ASSERT(sb->count > 0);
slice = sb->slices[0];
memmove(&sb->slices[0], &sb->slices[1], (sb->count - 1) * sizeof(gpr_slice));
sb->count--;
sb->length -= GPR_SLICE_LENGTH(slice);
return slice;
}
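And a matching sketch for the new take_first (drain is a hypothetical helper; each popped slice is caller-owned and must be unref'd):
#include <grpc/support/slice_buffer.h>
static void drain(gpr_slice_buffer *sb) {
  while (sb->count > 0) {
    gpr_slice s = gpr_slice_buffer_take_first(sb);
    /* ... consume GPR_SLICE_START_PTR(s) .. GPR_SLICE_LENGTH(s) bytes ... */
    gpr_slice_unref(s);
  }
}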

@ -59,8 +59,11 @@ void gpr_mu_unlock(gpr_mu* mu) {
}
int gpr_mu_trylock(gpr_mu* mu) {
int err = pthread_mutex_trylock(mu);
int err;
GPR_TIMER_BEGIN("gpr_mu_trylock", 0);
err = pthread_mutex_trylock(mu);
GPR_ASSERT(err == 0 || err == EBUSY);
GPR_TIMER_END("gpr_mu_trylock", 0);
return err == 0;
}
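For context, a hedged sketch of the call pattern the new timer brackets measure: opportunistically take the lock, and fall back instead of blocking when contended (try_flush and its mutex are hypothetical):
#include <grpc/support/sync.h>
static void try_flush(gpr_mu *mu) {
  if (gpr_mu_trylock(mu)) {
    /* uncontended: do the protected work */
    gpr_mu_unlock(mu);
  } else {
    /* lock busy: skip this round rather than block */
  }
}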

@ -1,97 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/surface/byte_buffer_queue.h"
#include <grpc/support/alloc.h>
#include <grpc/support/useful.h>
static void bba_destroy(grpc_bbq_array *array, size_t start_pos) {
size_t i;
for (i = start_pos; i < array->count; i++) {
grpc_byte_buffer_destroy(array->data[i]);
}
gpr_free(array->data);
}
/* Append an operation to an array, expanding as needed */
static void bba_push(grpc_bbq_array *a, grpc_byte_buffer *buffer) {
if (a->count == a->capacity) {
a->capacity = GPR_MAX(a->capacity * 2, 8);
a->data = gpr_realloc(a->data, sizeof(grpc_byte_buffer *) * a->capacity);
}
a->data[a->count++] = buffer;
}
void grpc_bbq_destroy(grpc_byte_buffer_queue *q) {
bba_destroy(&q->filling, 0);
bba_destroy(&q->draining, q->drain_pos);
}
int grpc_bbq_empty(grpc_byte_buffer_queue *q) {
return (q->drain_pos == q->draining.count && q->filling.count == 0);
}
void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *buffer) {
q->bytes += grpc_byte_buffer_length(buffer);
bba_push(&q->filling, buffer);
}
void grpc_bbq_flush(grpc_byte_buffer_queue *q) {
grpc_byte_buffer *bb;
while ((bb = grpc_bbq_pop(q))) {
grpc_byte_buffer_destroy(bb);
}
}
size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q) { return q->bytes; }
grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q) {
grpc_bbq_array temp_array;
grpc_byte_buffer *out;
if (q->drain_pos == q->draining.count) {
if (q->filling.count == 0) {
return NULL;
}
q->draining.count = 0;
q->drain_pos = 0;
/* swap arrays */
temp_array = q->filling;
q->filling = q->draining;
q->draining = temp_array;
}
out = q->draining.data[q->drain_pos++];
q->bytes -= grpc_byte_buffer_length(out);
return out;
}
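The deleted queue's pop is worth noting: it gets amortized O(1) FIFO behavior from two plain arrays by swapping them whenever the drain side empties. A standalone sketch of the same pattern with int payloads (int_array/int_queue are hypothetical, not gRPC API):
#include <stddef.h>
typedef struct { int *data; size_t count; size_t capacity; } int_array;
typedef struct { size_t drain_pos; int_array filling; int_array draining; } int_queue;
/* Pop in amortized O(1): when the drain array runs dry, reset it and swap it
   with the fill array, exactly as grpc_bbq_pop did above. */
static int int_queue_pop(int_queue *q, int *out) {
  int_array tmp;
  if (q->drain_pos == q->draining.count) {
    if (q->filling.count == 0) return 0; /* queue empty */
    q->draining.count = 0;
    q->drain_pos = 0;
    tmp = q->filling; /* swap arrays */
    q->filling = q->draining;
    q->draining = tmp;
  }
  *out = q->draining.data[q->drain_pos++];
  return 1;
}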

@ -1,62 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H
#define GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H
#include <grpc/byte_buffer.h>
/* TODO(ctiller): inline an element or two into this struct to avoid per-call
allocations */
typedef struct {
grpc_byte_buffer **data;
size_t count;
size_t capacity;
} grpc_bbq_array;
/* should be initialized by zeroing memory */
typedef struct {
size_t drain_pos;
grpc_bbq_array filling;
grpc_bbq_array draining;
size_t bytes;
} grpc_byte_buffer_queue;
void grpc_bbq_destroy(grpc_byte_buffer_queue *q);
grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q);
void grpc_bbq_flush(grpc_byte_buffer_queue *q);
int grpc_bbq_empty(grpc_byte_buffer_queue *q);
void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *bb);
size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q);
#endif /* GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H */

File diff suppressed because it is too large

@ -44,51 +44,6 @@
extern "C" {
#endif
/* Primitive operation types - grpc_op's get rewritten into these */
typedef enum {
GRPC_IOREQ_RECV_INITIAL_METADATA,
GRPC_IOREQ_RECV_MESSAGE,
GRPC_IOREQ_RECV_TRAILING_METADATA,
GRPC_IOREQ_RECV_STATUS,
GRPC_IOREQ_RECV_STATUS_DETAILS,
GRPC_IOREQ_RECV_CLOSE,
GRPC_IOREQ_SEND_INITIAL_METADATA,
GRPC_IOREQ_SEND_MESSAGE,
GRPC_IOREQ_SEND_TRAILING_METADATA,
GRPC_IOREQ_SEND_STATUS,
GRPC_IOREQ_SEND_CLOSE,
GRPC_IOREQ_OP_COUNT
} grpc_ioreq_op;
typedef union {
grpc_metadata_array *recv_metadata;
grpc_byte_buffer **recv_message;
struct {
void (*set_value)(grpc_status_code status, void *user_data);
void *user_data;
} recv_status;
struct {
char **details;
size_t *details_capacity;
} recv_status_details;
struct {
size_t count;
grpc_metadata *metadata;
} send_metadata;
grpc_byte_buffer *send_message;
struct {
grpc_status_code code;
grpc_mdstr *details;
} send_status;
} grpc_ioreq_data;
typedef struct {
grpc_ioreq_op op;
gpr_uint32 flags;
/**< A copy of the write flags from grpc_op */
grpc_ioreq_data data;
} grpc_ioreq;
typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *user_data);
@ -105,7 +60,7 @@ void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);
grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call);
#ifdef GRPC_CALL_REF_COUNT_DEBUG
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_call_internal_ref(grpc_call *call, const char *reason);
void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call,
const char *reason);
@ -121,12 +76,14 @@ void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call);
grpc_call_internal_unref(exec_ctx, call)
#endif
grpc_call_error grpc_call_start_ioreq_and_call_back(
grpc_exec_ctx *exec_ctx, grpc_call *call, const grpc_ioreq *reqs,
size_t nreqs, grpc_ioreq_completion_func on_complete, void *user_data);
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call);
grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
grpc_call *call,
const grpc_op *ops,
size_t nops,
grpc_closure *closure);
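A hedged sketch of driving this new closure-based batch API from inside the stack (start_recv_md is hypothetical; accept_stream in server.c below uses exactly this shape):
#include <string.h>
#include <grpc/support/log.h>
#include "src/core/surface/call.h"
/* Start a RECV_INITIAL_METADATA batch whose completion runs a closure on the
   exec_ctx rather than posting to a completion queue. */
static void start_recv_md(grpc_exec_ctx *exec_ctx, grpc_call *call,
                          grpc_metadata_array *md, grpc_closure *done) {
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_RECV_INITIAL_METADATA;
  op.data.recv_initial_metadata = md;
  GPR_ASSERT(GRPC_CALL_OK ==
             grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1, done));
}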
/* Given the top call_element, get the call object. */
grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element);
@ -157,16 +114,6 @@ void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
#define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \
if (grpc_api_trace) grpc_call_log_batch(sev, call, ops, nops, tag)
#define GRPC_SERVER_LOG_REQUEST_CALL(sev, server, call, details, \
initial_metadata, cq_bound_to_call, \
cq_for_notifications, tag) \
if (grpc_api_trace) \
grpc_server_log_request_call(sev, server, call, details, initial_metadata, \
cq_bound_to_call, cq_for_notifications, tag)
#define GRPC_SERVER_LOG_SHUTDOWN(sev, server, cq, tag) \
if (grpc_api_trace) grpc_server_log_shutdown(sev, server, cq, tag)
gpr_uint8 grpc_call_is_client(grpc_call *call);
#ifdef __cplusplus

@ -110,9 +110,6 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
void *tag) {
char *tmp;
size_t i;
gpr_log(file, line, severity,
"grpc_call_start_batch(call=%p, ops=%p, nops=%d, tag=%p)", call, ops,
nops, tag);
for (i = 0; i < nops; i++) {
tmp = grpc_op_string(&ops[i]);
gpr_log(file, line, severity, "ops[%d]: %s", i, tmp);

@ -57,7 +57,6 @@ gpr_uint32 grpc_call_test_only_get_message_flags(grpc_call *call);
* To be indexed by grpc_compression_algorithm enum values. */
gpr_uint32 grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call);
#ifdef __cplusplus
}
#endif

@ -71,9 +71,29 @@ struct grpc_completion_queue {
int is_server_cq;
int num_pluckers;
plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
grpc_closure pollset_destroy_done;
grpc_closure pollset_shutdown_done;
grpc_completion_queue *next_free;
};
static gpr_mu g_freelist_mu;
grpc_completion_queue *g_freelist;
static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cc,
int success);
void grpc_cq_global_init(void) { gpr_mu_init(&g_freelist_mu); }
void grpc_cq_global_shutdown(void) {
gpr_mu_destroy(&g_freelist_mu);
while (g_freelist) {
grpc_completion_queue *next = g_freelist->next_free;
grpc_pollset_destroy(&g_freelist->pollset);
gpr_free(g_freelist);
g_freelist = next;
}
}
struct grpc_cq_alarm {
grpc_timer alarm;
grpc_cq_completion completion;
@ -83,22 +103,41 @@ struct grpc_cq_alarm {
void *tag;
};
static void on_pollset_destroy_done(grpc_exec_ctx *exec_ctx, void *cc,
int success);
grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
GRPC_API_TRACE("grpc_completion_queue_create(reserved=%p)", 1, (reserved));
grpc_completion_queue *cc;
GPR_ASSERT(!reserved);
memset(cc, 0, sizeof(*cc));
GPR_TIMER_BEGIN("grpc_completion_queue_create", 0);
GRPC_API_TRACE("grpc_completion_queue_create(reserved=%p)", 1, (reserved));
gpr_mu_lock(&g_freelist_mu);
if (g_freelist == NULL) {
gpr_mu_unlock(&g_freelist_mu);
cc = gpr_malloc(sizeof(grpc_completion_queue));
grpc_pollset_init(&cc->pollset);
} else {
cc = g_freelist;
g_freelist = g_freelist->next_free;
gpr_mu_unlock(&g_freelist_mu);
/* pollset already initialized */
}
/* Initial ref is dropped by grpc_completion_queue_shutdown */
gpr_ref_init(&cc->pending_events, 1);
/* One for destroy(), one for pollset_shutdown */
gpr_ref_init(&cc->owning_refs, 2);
grpc_pollset_init(&cc->pollset);
cc->completed_tail = &cc->completed_head;
cc->completed_head.next = (gpr_uintptr)cc->completed_tail;
grpc_closure_init(&cc->pollset_destroy_done, on_pollset_destroy_done, cc);
cc->shutdown = 0;
cc->shutdown_called = 0;
cc->is_server_cq = 0;
cc->num_pluckers = 0;
grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc);
GPR_TIMER_END("grpc_completion_queue_create", 0);
return cc;
}
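grpc_completion_queue_create now recycles queues through a mutex-guarded global freelist, so the relatively expensive pollset init is paid once per queue rather than once per create. The pattern in isolation, as a sketch with a hypothetical node type:
#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>
typedef struct node { struct node *next_free; /* ... payload ... */ } node;
static gpr_mu g_mu; /* assumed initialized at startup */
static node *g_free = NULL;
static node *node_create(void) {
  node *n;
  gpr_mu_lock(&g_mu);
  if (g_free == NULL) {
    gpr_mu_unlock(&g_mu);
    n = gpr_malloc(sizeof(*n)); /* cold path: real allocation */
  } else {
    n = g_free; /* hot path: recycle */
    g_free = g_free->next_free;
    gpr_mu_unlock(&g_mu);
  }
  return n;
}
static void node_release(node *n) {
  gpr_mu_lock(&g_mu);
  n->next_free = g_free; /* push back onto the freelist instead of freeing */
  g_free = n;
  gpr_mu_unlock(&g_mu);
}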
@ -113,8 +152,8 @@ void grpc_cq_internal_ref(grpc_completion_queue *cc) {
gpr_ref(&cc->owning_refs);
}
static void on_pollset_destroy_done(grpc_exec_ctx *exec_ctx, void *arg,
int success) {
static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
int success) {
grpc_completion_queue *cc = arg;
GRPC_CQ_INTERNAL_UNREF(cc, "pollset_destroy");
}
@ -129,8 +168,11 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
#endif
if (gpr_unref(&cc->owning_refs)) {
GPR_ASSERT(cc->completed_head.next == (gpr_uintptr)&cc->completed_head);
grpc_pollset_destroy(&cc->pollset);
gpr_free(cc);
grpc_pollset_reset(&cc->pollset);
gpr_mu_lock(&g_freelist_mu);
cc->next_free = g_freelist;
g_freelist = cc;
gpr_mu_unlock(&g_freelist_mu);
}
}
@ -185,8 +227,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GPR_ASSERT(!cc->shutdown);
GPR_ASSERT(cc->shutdown_called);
cc->shutdown = 1;
grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_destroy_done);
}
GPR_TIMER_END("grpc_cq_end_op", 0);
@ -365,29 +407,31 @@ done:
to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0);
GRPC_API_TRACE("grpc_completion_queue_shutdown(cc=%p)", 1, (cc));
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
if (cc->shutdown_called) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
return;
}
cc->shutdown_called = 1;
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
if (gpr_unref(&cc->pending_events)) {
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
GPR_ASSERT(!cc->shutdown);
cc->shutdown = 1;
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_shutdown(&exec_ctx, &cc->pollset, &cc->pollset_destroy_done);
grpc_pollset_shutdown(&exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
}
void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
GRPC_API_TRACE("grpc_completion_queue_destroy(cc=%p)", 1, (cc));
GPR_TIMER_BEGIN("grpc_completion_queue_destroy", 0);
grpc_completion_queue_shutdown(cc);
GRPC_CQ_INTERNAL_UNREF(cc, "destroy");
GPR_TIMER_END("grpc_completion_queue_destroy", 0);
}
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {

@ -83,4 +83,7 @@ grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);
void grpc_cq_mark_server_cq(grpc_completion_queue *cc);
int grpc_cq_is_server_cq(grpc_completion_queue *cc);
void grpc_cq_global_init(void);
void grpc_cq_global_shutdown(void);
#endif /* GRPC_INTERNAL_CORE_SURFACE_COMPLETION_QUEUE_H */

@ -52,6 +52,7 @@
#include "src/core/profiling/timers.h"
#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
#include "src/core/surface/completion_queue.h"
#include "src/core/surface/init.h"
#include "src/core/surface/surface_trace.h"
#include "src/core/transport/chttp2_transport.h"
@ -118,6 +119,7 @@ void grpc_init(void) {
}
}
gpr_timers_global_init();
grpc_cq_global_init();
for (i = 0; i < g_number_of_plugins; i++) {
if (g_all_of_the_plugins[i].init != NULL) {
g_all_of_the_plugins[i].init();
@ -133,8 +135,9 @@ void grpc_shutdown(void) {
GRPC_API_TRACE("grpc_shutdown(void)", 0, ());
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
grpc_iomgr_shutdown();
grpc_executor_shutdown();
grpc_cq_global_shutdown();
grpc_iomgr_shutdown();
census_shutdown();
gpr_timers_global_destroy();
grpc_tracer_shutdown();

@ -55,38 +55,33 @@ typedef struct {
const char *error_message;
} channel_data;
static void fill_metadata(grpc_call_element *elem, grpc_metadata_batch *mdb) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_strings(chand->mdctx, "grpc-status", tmp);
calld->details.md = grpc_mdelem_from_strings(chand->mdctx, "grpc-message",
chand->error_message);
calld->status.prev = calld->details.next = NULL;
calld->status.next = &calld->details;
calld->details.prev = &calld->status;
mdb->list.head = &calld->status;
mdb->list.tail = &calld->details;
mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
static void lame_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
if (op->send_ops != NULL) {
grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
}
if (op->recv_ops != NULL) {
char tmp[GPR_LTOA_MIN_BUFSIZE];
grpc_metadata_batch mdb;
gpr_ltoa(chand->error_code, tmp);
calld->status.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-status", tmp);
calld->details.md = grpc_mdelem_from_strings(chand->mdctx, "grpc-message",
chand->error_message);
calld->status.prev = calld->details.next = NULL;
calld->status.next = &calld->details;
calld->details.prev = &calld->status;
mdb.list.head = &calld->status;
mdb.list.tail = &calld->details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
}
if (op->on_consumed != NULL) {
op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
if (op->recv_initial_metadata != NULL) {
fill_metadata(elem, op->recv_initial_metadata);
} else if (op->recv_trailing_metadata != NULL) {
fill_metadata(elem, op->recv_trailing_metadata);
}
grpc_exec_ctx_enqueue(exec_ctx, op->on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->recv_message_ready, 0);
}
static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
@ -109,25 +104,19 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
}
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *transport_server_data,
grpc_transport_stream_op *initial_op) {
if (initial_op) {
grpc_transport_stream_op_finish_with_failure(exec_ctx, initial_op);
}
}
grpc_call_element_args *args) {}
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {}
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(is_first);
GPR_ASSERT(is_last);
chand->mdctx = mdctx;
chand->master = master;
GPR_ASSERT(args->is_first);
GPR_ASSERT(args->is_last);
chand->mdctx = args->metadata_context;
chand->master = args->master;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
@ -135,8 +124,9 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
static const grpc_channel_filter lame_filter = {
lame_start_transport_stream_op, lame_start_transport_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, lame_get_peer, "lame-client",
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
lame_get_peer, "lame-client",
};
#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))

@ -84,18 +84,18 @@ typedef struct requested_call {
grpc_completion_queue *cq_for_notification;
grpc_call **call;
grpc_cq_completion completion;
grpc_metadata_array *initial_metadata;
union {
struct {
grpc_call_details *details;
grpc_metadata_array *initial_metadata;
} batch;
struct {
registered_method *registered_method;
gpr_timespec *deadline;
grpc_metadata_array *initial_metadata;
grpc_byte_buffer **optional_payload;
} registered;
} data;
grpc_closure publish;
} requested_call;
typedef struct channel_registered_method {
@ -150,16 +150,16 @@ struct call_data {
grpc_mdstr *path;
grpc_mdstr *host;
gpr_timespec deadline;
int got_initial_metadata;
grpc_completion_queue *cq_new;
grpc_stream_op_buffer *recv_ops;
grpc_stream_state *recv_state;
grpc_closure *on_done_recv;
grpc_metadata_batch *recv_initial_metadata;
grpc_metadata_array initial_metadata;
grpc_closure server_on_recv;
grpc_closure got_initial_metadata;
grpc_closure server_on_recv_initial_metadata;
grpc_closure kill_zombie_closure;
grpc_closure *on_done_recv_initial_metadata;
call_data *pending_next;
};
@ -396,7 +396,6 @@ static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
int success) {
channel_data *chand = cd;
grpc_server *server = chand->server;
gpr_log(GPR_DEBUG, "finish_destroy_channel: %p", chand->channel);
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
server_unref(exec_ctx, server);
}
@ -571,79 +570,35 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
return md;
}
static void server_on_recv(grpc_exec_ctx *exec_ctx, void *ptr, int success) {
static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
int success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
gpr_timespec op_deadline;
if (success && !calld->got_initial_metadata) {
size_t i;
size_t nops = calld->recv_ops->nops;
grpc_stream_op *ops = calld->recv_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
if (op->type != GRPC_OP_METADATA) continue;
grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
op_deadline = op->data.metadata.deadline;
if (0 !=
gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) {
calld->deadline = op->data.metadata.deadline;
}
if (calld->host && calld->path) {
calld->got_initial_metadata = 1;
start_new_rpc(exec_ctx, elem);
}
break;
}
grpc_metadata_batch_filter(calld->recv_initial_metadata, server_filter, elem);
op_deadline = calld->recv_initial_metadata->deadline;
if (0 != gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) {
calld->deadline = op_deadline;
}
switch (*calld->recv_state) {
case GRPC_STREAM_OPEN:
break;
case GRPC_STREAM_SEND_CLOSED:
break;
case GRPC_STREAM_RECV_CLOSED:
gpr_mu_lock(&calld->mu_state);
if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
} else {
gpr_mu_unlock(&calld->mu_state);
}
break;
case GRPC_STREAM_CLOSED:
gpr_mu_lock(&calld->mu_state);
if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
} else if (calld->state == PENDING) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
/* zombied call will be destroyed when it's removed from the pending
queue... later */
} else {
gpr_mu_unlock(&calld->mu_state);
}
break;
if (calld->host && calld->path) {
/* do nothing */
} else {
success = 0;
}
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
calld->on_done_recv_initial_metadata->cb(
exec_ctx, calld->on_done_recv_initial_metadata->cb_arg, success);
}
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
if (op->recv_ops) {
/* substitute our callback for the higher callback */
calld->recv_ops = op->recv_ops;
calld->recv_state = op->recv_state;
calld->on_done_recv = op->on_done_recv;
op->on_done_recv = &calld->server_on_recv;
if (op->recv_initial_metadata != NULL) {
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->on_done_recv_initial_metadata = op->on_complete;
op->on_complete = &calld->server_on_recv_initial_metadata;
}
}
@ -655,12 +610,48 @@ static void server_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_next_op(exec_ctx, elem, op);
}
static void accept_stream(void *cd, grpc_transport *transport,
static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
int success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
if (success) {
start_new_rpc(exec_ctx, elem);
} else {
gpr_mu_lock(&calld->mu_state);
if (calld->state == NOT_STARTED) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
grpc_exec_ctx_enqueue(exec_ctx, &calld->kill_zombie_closure, 1);
} else if (calld->state == PENDING) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
/* zombied call will be destroyed when it's removed from the pending
queue... later */
} else {
gpr_mu_unlock(&calld->mu_state);
}
}
}
static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
grpc_transport *transport,
const void *transport_server_data) {
channel_data *chand = cd;
/* create a call */
grpc_call_create(chand->channel, NULL, 0, NULL, transport_server_data, NULL,
0, gpr_inf_future(GPR_CLOCK_MONOTONIC));
grpc_call *call =
grpc_call_create(chand->channel, NULL, 0, NULL, transport_server_data,
NULL, 0, gpr_inf_future(GPR_CLOCK_MONOTONIC));
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
call_data *calld = elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
op.data.recv_initial_metadata = &calld->initial_metadata;
grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem);
grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
&calld->got_initial_metadata);
}
static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
@ -685,8 +676,7 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
}
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
memset(calld, 0, sizeof(call_data));
@ -694,11 +684,10 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
calld->call = grpc_call_from_top_element(elem);
gpr_mu_init(&calld->mu_state);
grpc_closure_init(&calld->server_on_recv, server_on_recv, elem);
grpc_closure_init(&calld->server_on_recv_initial_metadata,
server_on_recv_initial_metadata, elem);
server_ref(chand->server);
if (initial_op) server_mutate_op(elem, initial_op);
}
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
@ -714,6 +703,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
if (calld->path) {
GRPC_MDSTR_UNREF(calld->path);
}
grpc_metadata_array_destroy(&calld->initial_metadata);
gpr_mu_destroy(&calld->mu_state);
@ -721,17 +711,16 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
}
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(is_first);
GPR_ASSERT(!is_last);
GPR_ASSERT(args->is_first);
GPR_ASSERT(!args->is_last);
chand->server = NULL;
chand->channel = NULL;
chand->path_key = grpc_mdstr_from_string(metadata_context, ":path");
chand->authority_key = grpc_mdstr_from_string(metadata_context, ":authority");
chand->path_key = grpc_mdstr_from_string(args->metadata_context, ":path");
chand->authority_key =
grpc_mdstr_from_string(args->metadata_context, ":authority");
chand->next = chand->prev = chand;
chand->registered_methods = NULL;
chand->connectivity_state = GRPC_CHANNEL_IDLE;
@ -769,8 +758,9 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
static const grpc_channel_filter server_surface_filter = {
server_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, grpc_call_next_get_peer, "server",
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "server",
};
void grpc_server_register_completion_queue(grpc_server *server,
@ -1022,8 +1012,6 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
(server, cq, tag));
GRPC_SERVER_LOG_SHUTDOWN(GPR_INFO, server, cq, tag);
/* lock, and gather up some stuff to do */
gpr_mu_lock(&server->mu_global);
grpc_cq_begin_op(cq);
@ -1187,12 +1175,9 @@ grpc_call_error grpc_server_request_call(
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
"cq_bound_to_call=%p, cq_for_notification=%p, tag%p)",
"cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
7, (server, call, details, initial_metadata, cq_bound_to_call,
cq_for_notification, tag));
GRPC_SERVER_LOG_REQUEST_CALL(GPR_INFO, server, call, details,
initial_metadata, cq_bound_to_call,
cq_for_notification, tag);
if (!grpc_cq_is_server_cq(cq_for_notification)) {
gpr_free(rc);
error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
@ -1207,7 +1192,7 @@ grpc_call_error grpc_server_request_call(
rc->cq_for_notification = cq_for_notification;
rc->call = call;
rc->data.batch.details = details;
rc->data.batch.initial_metadata = initial_metadata;
rc->initial_metadata = initial_metadata;
error = queue_call_request(&exec_ctx, server, rc);
done:
grpc_exec_ctx_finish(&exec_ctx);
@ -1244,7 +1229,7 @@ grpc_call_error grpc_server_request_registered_call(
rc->call = call;
rc->data.registered.registered_method = rm;
rc->data.registered.deadline = deadline;
rc->data.registered.initial_metadata = initial_metadata;
rc->initial_metadata = initial_metadata;
rc->data.registered.optional_payload = optional_payload;
error = queue_call_request(&exec_ctx, server, rc);
done:
@ -1253,12 +1238,7 @@ done:
}
static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *tag);
static void publish_was_not_set(grpc_exec_ctx *exec_ctx, grpc_call *call,
int success, void *tag) {
abort();
}
void *user_data, int success);
static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
gpr_slice slice = value->slice;
@ -1273,9 +1253,10 @@ static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
call_data *calld, requested_call *rc) {
grpc_ioreq_completion_func publish = publish_was_not_set;
grpc_ioreq req[2];
grpc_ioreq *r = req;
grpc_op ops[1];
grpc_op *op = ops;
memset(ops, 0, sizeof(ops));
/* called once initial metadata has been read by the call, but BEFORE
the ioreq to fetch it out of the call has been executed.
@ -1284,8 +1265,10 @@ static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
an ioreq op, that should complete immediately. */
grpc_call_set_completion_queue(exec_ctx, calld->call, rc->cq_bound_to_call);
grpc_closure_init(&rc->publish, publish_registered_or_batch, rc);
*rc->call = calld->call;
calld->cq_new = rc->cq_for_notification;
GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, calld->initial_metadata);
switch (rc->type) {
case BATCH_CALL:
GPR_ASSERT(calld->host != NULL);
@ -1295,31 +1278,22 @@ static void begin_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
cpstr(&rc->data.batch.details->method,
&rc->data.batch.details->method_capacity, calld->path);
rc->data.batch.details->deadline = calld->deadline;
r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
r->data.recv_metadata = rc->data.batch.initial_metadata;
r->flags = 0;
r++;
publish = publish_registered_or_batch;
break;
case REGISTERED_CALL:
*rc->data.registered.deadline = calld->deadline;
r->op = GRPC_IOREQ_RECV_INITIAL_METADATA;
r->data.recv_metadata = rc->data.registered.initial_metadata;
r->flags = 0;
r++;
if (rc->data.registered.optional_payload) {
r->op = GRPC_IOREQ_RECV_MESSAGE;
r->data.recv_message = rc->data.registered.optional_payload;
r->flags = 0;
r++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = rc->data.registered.optional_payload;
op++;
}
publish = publish_registered_or_batch;
break;
default:
GPR_UNREACHABLE_CODE(return );
}
GRPC_CALL_INTERNAL_REF(calld->call, "server");
grpc_call_start_ioreq_and_call_back(exec_ctx, calld->call, req,
(size_t)(r - req), publish, rc);
grpc_call_start_batch_and_execute(exec_ctx, calld->call, ops,
(size_t)(op - ops), &rc->publish);
}
static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
@ -1342,25 +1316,19 @@ static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
requested_call *rc) {
*rc->call = NULL;
switch (rc->type) {
case BATCH_CALL:
rc->data.batch.initial_metadata->count = 0;
break;
case REGISTERED_CALL:
rc->data.registered.initial_metadata->count = 0;
break;
}
rc->initial_metadata->count = 0;
server_ref(server);
grpc_cq_end_op(exec_ctx, rc->cq_for_notification, rc->tag, 0,
done_request_event, rc, &rc->completion);
}
static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *prc) {
static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx, void *prc,
int success) {
requested_call *rc = prc;
grpc_call *call = *rc->call;
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
requested_call *rc = prc;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
server_ref(chand->server);

@ -0,0 +1,76 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/transport/byte_stream.h"
#include <stdlib.h>
#include <grpc/support/log.h>
int grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *byte_stream, gpr_slice *slice,
size_t max_size_hint, grpc_closure *on_complete) {
return byte_stream->next(exec_ctx, byte_stream, slice, max_size_hint,
on_complete);
}
void grpc_byte_stream_destroy(grpc_byte_stream *byte_stream) {
byte_stream->destroy(byte_stream);
}
/* slice_buffer_stream */
static int slice_buffer_stream_next(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *byte_stream,
gpr_slice *slice, size_t max_size_hint,
grpc_closure *on_complete) {
grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
GPR_ASSERT(stream->cursor < stream->backing_buffer->count);
*slice = gpr_slice_ref(stream->backing_buffer->slices[stream->cursor]);
stream->cursor++;
return 1;
}
static void slice_buffer_stream_destroy(grpc_byte_stream *byte_stream) {}
void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
gpr_slice_buffer *slice_buffer,
gpr_uint32 flags) {
GPR_ASSERT(slice_buffer->length <= GPR_UINT32_MAX);
stream->base.length = (gpr_uint32)slice_buffer->length;
stream->base.flags = flags;
stream->base.next = slice_buffer_stream_next;
stream->base.destroy = slice_buffer_stream_destroy;
stream->backing_buffer = slice_buffer;
stream->cursor = 0;
}
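A usage sketch for the slice-buffer-backed stream (read_all is hypothetical; since the backing buffer is already in memory, next always returns 1 and the NULL on_complete is never scheduled):
#include <grpc/support/log.h>
#include "src/core/transport/byte_stream.h"
static void read_all(grpc_exec_ctx *exec_ctx, gpr_slice_buffer *buf) {
  grpc_slice_buffer_stream stream;
  gpr_slice slice;
  grpc_slice_buffer_stream_init(&stream, buf, 0 /* flags */);
  while (stream.cursor < buf->count) {
    GPR_ASSERT(grpc_byte_stream_next(exec_ctx, &stream.base, &slice,
                                     ~(size_t)0, NULL));
    /* ... consume the slice ... */
    gpr_slice_unref(slice); /* returned slices are owned by the caller */
  }
  grpc_byte_stream_destroy(&stream.base);
}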

@ -0,0 +1,88 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_TRANSPORT_BYTE_STREAM_H
#define GRPC_INTERNAL_CORE_TRANSPORT_BYTE_STREAM_H
#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/slice_buffer.h>
/** Internal bit flag for grpc_begin_message's \a flags signaling the use of
* compression for the message */
#define GRPC_WRITE_INTERNAL_COMPRESS (0x80000000u)
/** Mask of all valid internal flags. */
#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
struct grpc_byte_stream;
typedef struct grpc_byte_stream grpc_byte_stream;
struct grpc_byte_stream {
gpr_uint32 length;
gpr_uint32 flags;
int (*next)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream,
gpr_slice *slice, size_t max_size_hint,
grpc_closure *on_complete);
void (*destroy)(grpc_byte_stream *byte_stream);
};
/* returns 1 if a slice is available immediately (in which case
 * on_complete will not be called), 0 if a slice will be available
 * asynchronously (in which case on_complete is scheduled once it is).
 *
 * max_size_hint may be set as a hint as to the maximum number
 * of bytes that would be acceptable to read.
 *
 * once a slice is returned into *slice, it is owned by the caller.
 */
int grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *byte_stream, gpr_slice *slice,
size_t max_size_hint, grpc_closure *on_complete);
void grpc_byte_stream_destroy(grpc_byte_stream *byte_stream);
/* grpc_byte_stream that wraps a slice buffer */
typedef struct grpc_slice_buffer_stream {
grpc_byte_stream base;
gpr_slice_buffer *backing_buffer;
size_t cursor;
} grpc_slice_buffer_stream;
void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
gpr_slice_buffer *slice_buffer,
gpr_uint32 flags);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_BYTE_STREAM_H */

@ -45,12 +45,20 @@
grpc_chttp2_parse_error grpc_chttp2_data_parser_init(
grpc_chttp2_data_parser *parser) {
parser->state = GRPC_CHTTP2_DATA_FH_0;
grpc_sopb_init(&parser->incoming_sopb);
parser->parsing_frame = NULL;
return GRPC_CHTTP2_PARSE_OK;
}
void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser *parser) {
grpc_sopb_destroy(&parser->incoming_sopb);
void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser) {
grpc_byte_stream *bs;
if (parser->parsing_frame) {
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, parser->parsing_frame);
}
while (
(bs = grpc_chttp2_incoming_frame_queue_pop(&parser->incoming_frames))) {
grpc_byte_stream_destroy(bs);
}
}
grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
@ -69,6 +77,62 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
return GRPC_CHTTP2_PARSE_OK;
}
void grpc_chttp2_incoming_frame_queue_merge(
grpc_chttp2_incoming_frame_queue *head_dst,
grpc_chttp2_incoming_frame_queue *tail_src) {
if (tail_src->head == NULL) {
return;
}
if (head_dst->head == NULL) {
*head_dst = *tail_src;
memset(tail_src, 0, sizeof(*tail_src));
return;
}
head_dst->tail->next_message = tail_src->head;
head_dst->tail = tail_src->tail;
memset(tail_src, 0, sizeof(*tail_src));
}
grpc_byte_stream *grpc_chttp2_incoming_frame_queue_pop(
grpc_chttp2_incoming_frame_queue *q) {
grpc_byte_stream *out;
if (q->head == NULL) {
return NULL;
}
out = &q->head->base;
if (q->head == q->tail) {
memset(q, 0, sizeof(*q));
} else {
q->head = q->head->next_message;
}
return out;
}
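The queue is a singly linked list threaded through next_message with head/tail pointers. The push side lives elsewhere in the transport; a hypothetical helper that preserves the invariants merge and pop rely on would look like:
static void frame_queue_push(grpc_chttp2_incoming_frame_queue *q,
                             grpc_chttp2_incoming_byte_stream *bs) {
  bs->next_message = NULL;
  if (q->head == NULL) {
    q->head = q->tail = bs; /* empty queue: bs is both ends */
  } else {
    q->tail->next_message = bs;
    q->tail = bs;
  }
}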
void grpc_chttp2_encode_data(gpr_uint32 id, gpr_slice_buffer *inbuf,
gpr_uint32 write_bytes, int is_eof,
gpr_slice_buffer *outbuf) {
gpr_slice hdr;
gpr_uint8 *p;
hdr = gpr_slice_malloc(9);
p = GPR_SLICE_START_PTR(hdr);
GPR_ASSERT(write_bytes < 16777216); /* length must fit in 24 bits */
*p++ = (gpr_uint8)(write_bytes >> 16);
*p++ = (gpr_uint8)(write_bytes >> 8);
*p++ = (gpr_uint8)(write_bytes);
*p++ = GRPC_CHTTP2_FRAME_DATA;
*p++ = is_eof ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0;
*p++ = (gpr_uint8)(id >> 24);
*p++ = (gpr_uint8)(id >> 16);
*p++ = (gpr_uint8)(id >> 8);
*p++ = (gpr_uint8)(id);
gpr_slice_buffer_add(outbuf, hdr);
gpr_slice_buffer_move_first(inbuf, write_bytes, outbuf);
}
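The nine header bytes written above follow the HTTP/2 frame layout: 24-bit payload length, 8-bit type, 8-bit flags, and a 31-bit stream id whose high bit is reserved. A matching decoder sketch (parse_frame_header is a hypothetical helper, the byte-for-byte inverse of the encoder):
#include <grpc/support/port_platform.h>
static void parse_frame_header(const gpr_uint8 *p, gpr_uint32 *length,
                               gpr_uint8 *type, gpr_uint8 *flags,
                               gpr_uint32 *stream_id) {
  *length = ((gpr_uint32)p[0] << 16) | ((gpr_uint32)p[1] << 8) | p[2];
  *type = p[3];
  *flags = p[4];
  /* the high bit of the stream id is reserved and must be ignored */
  *stream_id = ((gpr_uint32)(p[5] & 0x7f) << 24) | ((gpr_uint32)p[6] << 16) |
               ((gpr_uint32)p[7] << 8) | p[8];
}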
grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_transport_parsing *transport_parsing,
@ -77,7 +141,8 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
gpr_uint8 *cur = beg;
grpc_chttp2_data_parser *p = parser;
gpr_uint32 message_flags = 0;
gpr_uint32 message_flags;
grpc_chttp2_incoming_byte_stream *incoming_byte_stream;
if (is_last && p->is_last_frame) {
stream_parsing->received_close = 1;
@ -132,11 +197,14 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
p->frame_size |= ((gpr_uint32)*cur);
p->state = GRPC_CHTTP2_DATA_FRAME;
++cur;
message_flags = 0;
if (p->is_frame_compressed) {
message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
grpc_sopb_add_begin_message(&p->incoming_sopb, p->frame_size,
message_flags);
p->parsing_frame = incoming_byte_stream =
grpc_chttp2_incoming_byte_stream_create(
exec_ctx, transport_parsing, stream_parsing, p->frame_size,
message_flags, &p->incoming_frames);
/* fallthrough */
case GRPC_CHTTP2_DATA_FRAME:
if (cur == end) {
@ -147,20 +215,25 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
if ((gpr_uint32)(end - cur) == p->frame_size) {
grpc_sopb_add_slice(
&p->incoming_sopb,
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame);
p->parsing_frame = NULL;
p->state = GRPC_CHTTP2_DATA_FH_0;
return GRPC_CHTTP2_PARSE_OK;
} else if ((gpr_uint32)(end - cur) > p->frame_size) {
grpc_sopb_add_slice(&p->incoming_sopb,
gpr_slice_sub(slice, (size_t)(cur - beg),
(size_t)(cur + p->frame_size - beg)));
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg),
(size_t)(cur + p->frame_size - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame);
p->parsing_frame = NULL;
cur += p->frame_size;
goto fh_0; /* loop */
} else {
grpc_sopb_add_slice(
&p->incoming_sopb,
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
GPR_ASSERT((size_t)(end - cur) <= p->frame_size);
p->frame_size -= (gpr_uint32)(end - cur);

@ -39,7 +39,7 @@
#include "src/core/iomgr/exec_ctx.h"
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
#include "src/core/transport/stream_op.h"
#include "src/core/transport/byte_stream.h"
#include "src/core/transport/chttp2/frame.h"
typedef enum {
@ -51,6 +51,14 @@ typedef enum {
GRPC_CHTTP2_DATA_FRAME
} grpc_chttp2_stream_state;
typedef struct grpc_chttp2_incoming_byte_stream
grpc_chttp2_incoming_byte_stream;
typedef struct grpc_chttp2_incoming_frame_queue {
grpc_chttp2_incoming_byte_stream *head;
grpc_chttp2_incoming_byte_stream *tail;
} grpc_chttp2_incoming_frame_queue;
typedef struct {
grpc_chttp2_stream_state state;
gpr_uint8 is_last_frame;
@ -58,14 +66,22 @@ typedef struct {
gpr_uint32 frame_size;
int is_frame_compressed;
grpc_stream_op_buffer incoming_sopb;
grpc_chttp2_incoming_frame_queue incoming_frames;
grpc_chttp2_incoming_byte_stream *parsing_frame;
} grpc_chttp2_data_parser;
void grpc_chttp2_incoming_frame_queue_merge(
grpc_chttp2_incoming_frame_queue *head_dst,
grpc_chttp2_incoming_frame_queue *tail_src);
grpc_byte_stream *grpc_chttp2_incoming_frame_queue_pop(
grpc_chttp2_incoming_frame_queue *q);
/* initialize per-stream state for data frame parsing */
grpc_chttp2_parse_error grpc_chttp2_data_parser_init(
grpc_chttp2_data_parser *parser);
void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser *parser);
void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser);
/* start processing a new data frame */
grpc_chttp2_parse_error grpc_chttp2_data_parser_begin_frame(
@ -81,4 +97,8 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
/* create a slice with an empty data frame and is_last set */
gpr_slice grpc_chttp2_data_frame_create_empty_close(gpr_uint32 id);
void grpc_chttp2_encode_data(gpr_uint32 id, gpr_slice_buffer *inbuf,
gpr_uint32 write_bytes, int is_eof,
gpr_slice_buffer *outbuf);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_DATA_H */

@ -89,7 +89,8 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
}
if (p->byte == 4) {
if (p->amount == 0 || (p->amount & 0x80000000u)) {
gpr_uint32 received_update = p->amount;
if (received_update == 0 || (received_update & 0x80000000u)) {
gpr_log(GPR_ERROR, "invalid window update bytes: %d", p->amount);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
@ -97,17 +98,15 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
if (transport_parsing->incoming_stream_id != 0) {
if (stream_parsing != NULL) {
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("update", transport_parsing,
stream_parsing, outgoing_window_update,
p->amount);
stream_parsing->outgoing_window_update += p->amount;
GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", transport_parsing,
stream_parsing, outgoing_window,
received_update);
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
}
} else {
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT("update", transport_parsing,
outgoing_window_update, p->amount);
transport_parsing->outgoing_window_update += p->amount;
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", transport_parsing,
outgoing_window, received_update);
}
}
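The new FLOW_CREDIT macros credit the outgoing window by the received increment. A hedged sketch of the underlying arithmetic; the zero/reserved-bit rejection mirrors this hunk, while the 2^31 - 1 ceiling is the spec's requirement rather than something shown here (apply_window_credit is hypothetical):
#include <grpc/support/port_platform.h>
static int apply_window_credit(gpr_int64 *outgoing_window,
                               gpr_uint32 received_update) {
  if (received_update == 0 || (received_update & 0x80000000u)) {
    return 0; /* protocol error: zero or reserved-bit increment */
  }
  *outgoing_window += received_update;
  if (*outgoing_window > 0x7fffffff) return 0; /* window overflow */
  return 1;
}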

@ -31,7 +31,7 @@
*
*/
#include "src/core/transport/chttp2/stream_encoder.h"
#include "src/core/transport/chttp2/hpack_encoder.h"
#include <assert.h>
#include <string.h>
@ -54,18 +54,13 @@
/* don't consider adding anything bigger than this to the hpack table */
#define MAX_DECODER_SPACE_USAGE 512
/* what kind of frame are we encoding? */
typedef enum { HEADER, DATA, NONE } frame_type;
typedef struct {
frame_type cur_frame_type;
int is_first_frame;
/* number of bytes in 'output' when we started the frame - used to calculate
frame length */
size_t output_length_at_start_of_frame;
/* index (in output) of the header for the current frame */
size_t header_idx;
/* was the last frame emitted a header? (if yes, we'll need a CONTINUATION) */
gpr_uint8 last_was_header;
/* have we seen a regular (non-colon-prefixed) header yet? */
gpr_uint8 seen_regular_header;
/* output stream id */
@ -92,58 +87,35 @@ static void fill_header(gpr_uint8 *p, gpr_uint8 type, gpr_uint32 id, size_t len,
static void finish_frame(framer_state *st, int is_header_boundary,
int is_last_in_stream) {
gpr_uint8 type = 0xff;
switch (st->cur_frame_type) {
case HEADER:
type = st->last_was_header ? GRPC_CHTTP2_FRAME_CONTINUATION
: GRPC_CHTTP2_FRAME_HEADER;
st->last_was_header = 1;
break;
case DATA:
type = GRPC_CHTTP2_FRAME_DATA;
st->last_was_header = 0;
is_header_boundary = 0;
break;
case NONE:
return;
}
type = st->is_first_frame ? GRPC_CHTTP2_FRAME_HEADER
: GRPC_CHTTP2_FRAME_CONTINUATION;
fill_header(
GPR_SLICE_START_PTR(st->output->slices[st->header_idx]), type,
st->stream_id, st->output->length - st->output_length_at_start_of_frame,
(gpr_uint8)(
(is_last_in_stream ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0) |
(is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0)));
st->cur_frame_type = NONE;
st->is_first_frame = 0;
}
/* begin a new frame: reserve off header space, remember how many bytes we'd
output before beginning */
static void begin_frame(framer_state *st, frame_type type) {
GPR_ASSERT(type != NONE);
GPR_ASSERT(st->cur_frame_type == NONE);
st->cur_frame_type = type;
static void begin_frame(framer_state *st) {
st->header_idx =
gpr_slice_buffer_add_indexed(st->output, gpr_slice_malloc(9));
st->output_length_at_start_of_frame = st->output->length;
}
static void begin_new_frame(framer_state *st, frame_type type) {
finish_frame(st, 1, 0);
st->last_was_header = 0;
begin_frame(st, type);
}
/* make sure that the current frame is of the type desired, and has sufficient
space to add at least about_to_add bytes -- finishes the current frame if
needed */
static void ensure_frame_type(framer_state *st, frame_type type,
size_t need_bytes) {
if (st->cur_frame_type == type &&
st->output->length - st->output_length_at_start_of_frame + need_bytes <=
GRPC_CHTTP2_MAX_PAYLOAD_LENGTH) {
static void ensure_space(framer_state *st, size_t need_bytes) {
if (st->output->length - st->output_length_at_start_of_frame + need_bytes <=
GRPC_CHTTP2_MAX_PAYLOAD_LENGTH) {
return;
}
finish_frame(st, type != HEADER, 0);
begin_frame(st, type);
finish_frame(st, 0, 0);
begin_frame(st);
}
/* increment a filter count, halve all counts if one element reaches max */
@ -165,31 +137,30 @@ static void add_header_data(framer_state *st, gpr_slice slice) {
size_t len = GPR_SLICE_LENGTH(slice);
size_t remaining;
if (len == 0) return;
ensure_frame_type(st, HEADER, 1);
remaining = GRPC_CHTTP2_MAX_PAYLOAD_LENGTH +
st->output_length_at_start_of_frame - st->output->length;
if (len <= remaining) {
gpr_slice_buffer_add(st->output, slice);
} else {
gpr_slice_buffer_add(st->output, gpr_slice_split_head(&slice, remaining));
finish_frame(st, 0, 0);
begin_frame(st);
add_header_data(st, slice);
}
}
static gpr_uint8 *add_tiny_header_data(framer_state *st, size_t len) {
ensure_frame_type(st, HEADER, len);
ensure_space(st, len);
return gpr_slice_buffer_tiny_add(st->output, len);
}
/* add an element to the decoder table: returns metadata element to unref */
static grpc_mdelem *add_elem(grpc_chttp2_hpack_compressor *c,
grpc_mdelem *elem) {
/* add an element to the decoder table */
static void add_elem(grpc_chttp2_hpack_compressor *c, grpc_mdelem *elem) {
gpr_uint32 key_hash = elem->key->hash;
gpr_uint32 elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
gpr_uint32 new_index = c->tail_remote_index + c->table_elems + 1;
size_t elem_size = 32 + GPR_SLICE_LENGTH(elem->key->slice) +
GPR_SLICE_LENGTH(elem->value->slice);
grpc_mdelem *elem_to_unref;
GPR_ASSERT(elem_size < 65536);
@ -220,31 +191,27 @@ static grpc_mdelem *add_elem(grpc_chttp2_hpack_compressor *c,
if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == elem) {
/* already there: update with new index */
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
elem_to_unref = elem;
} else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem) {
/* already there (cuckoo): update with new index */
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
elem_to_unref = elem;
} else if (c->entries_elems[HASH_FRAGMENT_2(elem_hash)] == NULL) {
/* not there, but a free element: add */
c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = elem;
c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
elem_to_unref = NULL;
} else if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == NULL) {
/* not there (cuckoo), but a free element: add */
c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = elem;
c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
elem_to_unref = NULL;
} else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] <
c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) {
/* not there: replace oldest */
elem_to_unref = c->entries_elems[HASH_FRAGMENT_2(elem_hash)];
c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = elem;
GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_2(elem_hash)]);
c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
} else {
/* not there: replace oldest */
elem_to_unref = c->entries_elems[HASH_FRAGMENT_3(elem_hash)];
c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = elem;
GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_3(elem_hash)]);
c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
}
@ -270,8 +237,6 @@ static grpc_mdelem *add_elem(grpc_chttp2_hpack_compressor *c,
c->entries_keys[HASH_FRAGMENT_3(key_hash)] = GRPC_MDSTR_REF(elem->key);
c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
}
return elem_to_unref;
}
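The entries_elems pairs consulted above form a two-way cuckoo-style cache: each element hashes to two candidate slots, and when both are occupied the entry with the older (smaller) index is evicted. The HASH_FRAGMENT macros are defined near the top of this file; a plausible reconstruction (hypothetical, byte-sized fragments of the 32-bit hash indexing a 256-slot table):
#define HASH_FRAGMENT_2(x) (((x) >> 8) & 255)  /* first candidate slot */
#define HASH_FRAGMENT_3(x) (((x) >> 16) & 255) /* second candidate slot */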
static void emit_indexed(grpc_chttp2_hpack_compressor *c, gpr_uint32 elem_index,
@ -370,9 +335,9 @@ static gpr_uint32 dynidx(grpc_chttp2_hpack_compressor *c,
c->table_elems - elem_index;
}
/* encode an mdelem; returns metadata element to unref */
static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
grpc_mdelem *elem, framer_state *st) {
/* encode an mdelem */
static void hpack_enc(grpc_chttp2_hpack_compressor *c, grpc_mdelem *elem,
framer_state *st) {
gpr_uint32 key_hash = elem->key->hash;
gpr_uint32 elem_hash = GRPC_MDSTR_KV_HASH(key_hash, elem->value->hash);
size_t decoder_space_usage;
@ -397,7 +362,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
/* HIT: complete element (first cuckoo hash) */
emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
st);
return elem;
return;
}
if (c->entries_elems[HASH_FRAGMENT_3(elem_hash)] == elem &&
@ -405,7 +370,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
/* HIT: complete element (second cuckoo hash) */
emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
st);
return elem;
return;
}
/* should this elem be in the table? */
@ -423,12 +388,13 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
/* HIT: key (first cuckoo hash) */
if (should_add_elem) {
emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
return add_elem(c, elem);
add_elem(c, elem);
return;
} else {
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
return elem;
return;
}
GPR_UNREACHABLE_CODE(return NULL);
GPR_UNREACHABLE_CODE(return );
}
indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)];
@ -437,24 +403,26 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
/* HIT: key (second cuckoo hash) */
if (should_add_elem) {
emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
return add_elem(c, elem);
add_elem(c, elem);
return;
} else {
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
return elem;
return;
}
GPR_UNREACHABLE_CODE(return NULL);
GPR_UNREACHABLE_CODE(return );
}
/* no elem, no key in the table... fall back to literal emission */
if (should_add_elem) {
emit_lithdr_incidx_v(c, elem, st);
return add_elem(c, elem);
add_elem(c, elem);
return;
} else {
emit_lithdr_noidx_v(c, elem, st);
return elem;
return;
}
GPR_UNREACHABLE_CODE(return NULL);
GPR_UNREACHABLE_CODE(return );
}
#define STRLEN_LIT(x) (sizeof(x) - 1)
@ -469,8 +437,8 @@ static void deadline_enc(grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
mdelem = grpc_mdelem_from_metadata_strings(
c->mdctx, GRPC_MDSTR_REF(c->timeout_key_str),
grpc_mdstr_from_string(c->mdctx, timeout_str));
mdelem = hpack_enc(c, mdelem, st);
if (mdelem) GRPC_MDELEM_UNREF(mdelem);
hpack_enc(c, mdelem, st);
GRPC_MDELEM_UNREF(mdelem);
}
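deadline_enc converts the time remaining until the deadline into the grpc-timeout wire value before HPACK-encoding it. A sketch of the conversion step, assuming the helpers from timeout_encoding.h keep their usual shapes:
/* the wire value is an integer count plus one unit character,
   e.g. "5S" = 5 seconds, "100m" = 100 milliseconds, "1M" = 1 minute */
char timeout_str[GRPC_CHTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
grpc_chttp2_encode_timeout(
    gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);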
gpr_slice grpc_chttp2_data_frame_create_empty_close(gpr_uint32 id) {
@ -495,169 +463,34 @@ void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor *c) {
GRPC_MDSTR_UNREF(c->timeout_key_str);
}
gpr_uint32 grpc_chttp2_preencode(grpc_stream_op *inops, size_t *inops_count,
gpr_uint32 max_flow_controlled_bytes,
grpc_stream_op_buffer *outops) {
gpr_slice slice;
grpc_stream_op *op;
gpr_uint32 max_take_size;
gpr_uint32 flow_controlled_bytes_taken = 0;
gpr_uint32 curop = 0;
gpr_uint8 *p;
gpr_uint8 compressed_flag_set = 0;
while (curop < *inops_count) {
GPR_ASSERT(flow_controlled_bytes_taken <= max_flow_controlled_bytes);
op = &inops[curop];
switch (op->type) {
case GRPC_NO_OP:
/* skip */
curop++;
break;
case GRPC_OP_METADATA:
grpc_metadata_batch_assert_ok(&op->data.metadata);
/* these just get copied as they don't impact the number of flow
controlled bytes */
grpc_sopb_append(outops, op, 1);
curop++;
break;
case GRPC_OP_BEGIN_MESSAGE:
/* begin op: for now we just convert the op to a slice and fall
through - this lets us reuse the slice framing code below */
compressed_flag_set =
(op->data.begin_message.flags & GRPC_WRITE_INTERNAL_COMPRESS) != 0;
slice = gpr_slice_malloc(5);
p = GPR_SLICE_START_PTR(slice);
p[0] = compressed_flag_set;
p[1] = (gpr_uint8)(op->data.begin_message.length >> 24);
p[2] = (gpr_uint8)(op->data.begin_message.length >> 16);
p[3] = (gpr_uint8)(op->data.begin_message.length >> 8);
p[4] = (gpr_uint8)(op->data.begin_message.length);
op->type = GRPC_OP_SLICE;
op->data.slice = slice;
/* fallthrough */
case GRPC_OP_SLICE:
slice = op->data.slice;
if (!GPR_SLICE_LENGTH(slice)) {
/* skip zero length slices */
gpr_slice_unref(slice);
curop++;
break;
}
max_take_size = max_flow_controlled_bytes - flow_controlled_bytes_taken;
if (max_take_size == 0) {
goto exit_loop;
}
if (GPR_SLICE_LENGTH(slice) > max_take_size) {
slice = gpr_slice_split_head(&op->data.slice, max_take_size);
grpc_sopb_add_slice(outops, slice);
} else {
/* consume this op immediately */
grpc_sopb_append(outops, op, 1);
curop++;
}
flow_controlled_bytes_taken += (gpr_uint32)GPR_SLICE_LENGTH(slice);
break;
}
}
exit_loop:
*inops_count -= curop;
memmove(inops, inops + curop, *inops_count * sizeof(grpc_stream_op));
for (curop = 0; curop < *inops_count; curop++) {
if (inops[curop].type == GRPC_OP_METADATA) {
grpc_metadata_batch_assert_ok(&inops[curop].data.metadata);
}
}
return flow_controlled_bytes_taken;
}
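The removed preencode path framed each message with the 5-byte gRPC prefix built inline above: one compressed-flag octet followed by the message length in big-endian order. A worked example for a 1024-byte uncompressed message:
gpr_uint8 prefix[5];
gpr_uint32 length = 1024;              /* 0x00000400 */
prefix[0] = 0;                         /* GRPC_WRITE_INTERNAL_COMPRESS unset */
prefix[1] = (gpr_uint8)(length >> 24); /* 0x00 */
prefix[2] = (gpr_uint8)(length >> 16); /* 0x00 */
prefix[3] = (gpr_uint8)(length >> 8);  /* 0x04 */
prefix[4] = (gpr_uint8)(length);       /* 0x00, so the wire prefix is
                                          00 00 00 04 00 */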
void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
gpr_uint32 stream_id,
grpc_chttp2_hpack_compressor *compressor,
gpr_slice_buffer *output) {
void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor *c,
gpr_uint32 stream_id,
grpc_metadata_batch *metadata, int is_eof,
gpr_slice_buffer *outbuf) {
framer_state st;
gpr_slice slice;
grpc_stream_op *op;
size_t max_take_size;
gpr_uint32 curop = 0;
gpr_uint32 unref_op;
grpc_linked_mdelem *l;
int need_unref = 0;
gpr_timespec deadline;
GPR_ASSERT(stream_id != 0);
st.cur_frame_type = NONE;
st.last_was_header = 0;
st.seen_regular_header = 0;
st.stream_id = stream_id;
st.output = output;
while (curop < ops_count) {
op = &ops[curop];
switch (op->type) {
case GRPC_NO_OP:
case GRPC_OP_BEGIN_MESSAGE:
gpr_log(
GPR_ERROR,
"These stream ops should be filtered out by grpc_chttp2_preencode");
abort();
case GRPC_OP_METADATA:
/* Encode a metadata batch; store the returned values, representing
a metadata element that needs to be unreffed back into the metadata
slot. THIS MAY NOT BE THE SAME ELEMENT (if a decoder table slot got
updated). After this loop, we'll do a batch unref of elements. */
begin_new_frame(&st, HEADER);
need_unref |= op->data.metadata.garbage.head != NULL;
grpc_metadata_batch_assert_ok(&op->data.metadata);
for (l = op->data.metadata.list.head; l; l = l->next) {
l->md = hpack_enc(compressor, l->md, &st);
need_unref |= l->md != NULL;
}
deadline = op->data.metadata.deadline;
if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
deadline_enc(compressor, deadline, &st);
}
curop++;
break;
case GRPC_OP_SLICE:
slice = op->data.slice;
if (st.cur_frame_type == DATA &&
st.output->length - st.output_length_at_start_of_frame ==
GRPC_CHTTP2_MAX_PAYLOAD_LENGTH) {
finish_frame(&st, 0, 0);
}
ensure_frame_type(&st, DATA, 1);
max_take_size = GRPC_CHTTP2_MAX_PAYLOAD_LENGTH +
st.output_length_at_start_of_frame - st.output->length;
if (GPR_SLICE_LENGTH(slice) > max_take_size) {
slice = gpr_slice_split_head(&op->data.slice, max_take_size);
} else {
/* consume this op immediately */
curop++;
}
gpr_slice_buffer_add(output, slice);
break;
}
st.output = outbuf;
st.is_first_frame = 1;
/* Encode the metadata batch directly. hpack_enc (via add_elem) now takes and
releases its own references on decoder-table entries, so there is no
element-unref bookkeeping left to do after this loop. */
begin_frame(&st);
grpc_metadata_batch_assert_ok(metadata);
for (l = metadata->list.head; l; l = l->next) {
hpack_enc(c, l->md, &st);
}
if (eof && st.cur_frame_type == NONE) {
begin_frame(&st, DATA);
}
finish_frame(&st, 1, eof);
if (need_unref) {
for (unref_op = 0; unref_op < curop; unref_op++) {
op = &ops[unref_op];
if (op->type != GRPC_OP_METADATA) continue;
for (l = op->data.metadata.list.head; l; l = l->next) {
if (l->md) GRPC_MDELEM_UNREF(l->md);
}
for (l = op->data.metadata.garbage.head; l; l = l->next) {
GRPC_MDELEM_UNREF(l->md);
}
}
deadline = metadata->deadline;
if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
deadline_enc(c, deadline, &st);
}
finish_frame(&st, 1, is_eof);
}
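A hedged sketch of a call site for the new entry point, using the writing-side fields declared later in this change (the exact plumbing belongs to chttp2's writing path):
/* serialize a stream's initial metadata into the transport's output
   buffer; is_eof is 0 because DATA frames will follow */
grpc_chttp2_encode_header(&transport_writing->hpack_compressor,
                          stream_writing->id,
                          stream_writing->send_initial_metadata, 0,
                          &transport_writing->outbuf);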

@ -31,12 +31,12 @@
*
*/
#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_ENCODER_H
#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_ENCODER_H
#ifndef GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_ENCODER_H
#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_ENCODER_H
#include "src/core/transport/chttp2/frame.h"
#include "src/core/transport/metadata.h"
#include "src/core/transport/stream_op.h"
#include "src/core/transport/metadata_batch.h"
#include <grpc/support/port_platform.h>
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
@ -78,16 +78,8 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c,
grpc_mdctx *mdctx);
void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor *c);
/* select stream ops to be encoded, moving them from inops to outops, and
moving subsequent ops in inops forward in the queue */
gpr_uint32 grpc_chttp2_preencode(grpc_stream_op *inops, size_t *inops_count,
gpr_uint32 max_flow_controlled_bytes,
grpc_stream_op_buffer *outops);
void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor *c, gpr_uint32 id,
grpc_metadata_batch *metadata, int is_eof,
gpr_slice_buffer *outbuf);
/* encode stream ops to output */
void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
gpr_uint32 stream_id,
grpc_chttp2_hpack_compressor *compressor,
gpr_slice_buffer *output);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_STREAM_ENCODER_H */
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_HPACK_ENCODER_H */

@ -38,13 +38,15 @@
#include <string.h>
#include <assert.h>
#include "src/core/transport/chttp2/bin_encoder.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/useful.h>
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#include "src/core/transport/chttp2/bin_encoder.h"
typedef enum {
NOT_BINARY,
B64_BYTE0,
@ -1379,20 +1381,23 @@ grpc_chttp2_parse_error grpc_chttp2_header_parser_parse(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
grpc_chttp2_hpack_parser *parser = hpack_parser;
GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
if (!grpc_chttp2_hpack_parser_parse(parser, GPR_SLICE_START_PTR(slice),
GPR_SLICE_END_PTR(slice))) {
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
if (is_last) {
if (parser->is_boundary && parser->state != parse_begin) {
gpr_log(GPR_ERROR,
"end of header frame not aligned with a hpack record boundary");
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
if (parser->is_boundary) {
grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
&stream_parsing->incoming_metadata,
&stream_parsing->data_parser.incoming_sopb);
stream_parsing
->got_metadata_on_parse[stream_parsing->header_frames_received] = 1;
stream_parsing->header_frames_received++;
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
}
@ -1404,5 +1409,6 @@ grpc_chttp2_parse_error grpc_chttp2_header_parser_parse(
parser->is_boundary = 0xde;
parser->is_eof = 0xde;
}
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_PARSE_OK;
}

@ -48,14 +48,27 @@ void grpc_chttp2_incoming_metadata_buffer_init(
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_chttp2_incoming_metadata_buffer *buffer) {
size_t i;
if (!buffer->published) {
for (i = 0; i < buffer->count; i++) {
GRPC_MDELEM_UNREF(buffer->elems[i].md);
}
}
gpr_free(buffer->elems);
}
void grpc_chttp2_incoming_metadata_buffer_reset(
grpc_chttp2_incoming_metadata_buffer *buffer) {
size_t i;
GPR_ASSERT(!buffer->published);
for (i = 0; i < buffer->count; i++) {
GRPC_MDELEM_UNREF(buffer->elems[i].md);
}
gpr_free(buffer->elems);
buffer->count = 0;
}
void grpc_chttp2_incoming_metadata_buffer_add(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem *elem) {
GPR_ASSERT(!buffer->published);
if (buffer->capacity == buffer->count) {
buffer->capacity = GPR_MAX(8, 2 * buffer->capacity);
buffer->elems =
@ -66,117 +79,36 @@ void grpc_chttp2_incoming_metadata_buffer_add(
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) {
GPR_ASSERT(!buffer->published);
buffer->deadline = deadline;
}
void grpc_chttp2_incoming_metadata_live_op_buffer_end(
grpc_chttp2_incoming_metadata_live_op_buffer *buffer) {
gpr_free(buffer->elems);
buffer->elems = NULL;
}
void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb) {
grpc_metadata_batch b;
b.list.head = NULL;
/* Store away the last element of the list, so that in patch_metadata_ops
we can reconstitute the list.
We can't do list building here as later incoming metadata may reallocate
the underlying array. */
b.list.tail = (void *)(gpr_intptr)buffer->count;
b.garbage.head = b.garbage.tail = NULL;
b.deadline = buffer->deadline;
buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(sopb, b);
}
void grpc_chttp2_incoming_metadata_buffer_swap(
grpc_chttp2_incoming_metadata_buffer *a,
grpc_chttp2_incoming_metadata_buffer *b) {
GPR_ASSERT(!a->published);
GPR_ASSERT(!b->published);
GPR_SWAP(grpc_chttp2_incoming_metadata_buffer, *a, *b);
}
void grpc_incoming_metadata_buffer_move_to_referencing_sopb(
grpc_chttp2_incoming_metadata_buffer *src,
grpc_chttp2_incoming_metadata_buffer *dst, grpc_stream_op_buffer *sopb) {
size_t delta;
size_t i;
dst->deadline = gpr_time_min(src->deadline, dst->deadline);
if (src->count == 0) {
return;
}
if (dst->count == 0) {
grpc_chttp2_incoming_metadata_buffer_swap(src, dst);
return;
}
delta = dst->count;
if (dst->capacity < src->count + dst->count) {
dst->capacity = GPR_MAX(dst->capacity * 2, src->count + dst->count);
dst->elems = gpr_realloc(dst->elems, dst->capacity * sizeof(*dst->elems));
}
memcpy(dst->elems + dst->count, src->elems, src->count * sizeof(*src->elems));
dst->count += src->count;
for (i = 0; i < sopb->nops; i++) {
if (sopb->ops[i].type != GRPC_OP_METADATA) continue;
sopb->ops[i].data.metadata.list.tail =
(void *)(delta + (gpr_uintptr)sopb->ops[i].data.metadata.list.tail);
}
src->count = 0;
}
void grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb,
grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer) {
grpc_stream_op *ops = sopb->ops;
size_t nops = sopb->nops;
size_t i;
size_t j;
size_t mdidx = 0;
size_t last_mdidx;
int found_metadata = 0;
/* rework the array of metadata into a linked list, making use
of the breadcrumbs we left in metadata batches during
add_metadata_batch */
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
if (op->type != GRPC_OP_METADATA) continue;
found_metadata = 1;
/* we left a breadcrumb indicating where the end of this list is,
and since we add sequentially, we know from the end of the last
segment where this segment begins */
last_mdidx = (size_t)(gpr_intptr)(op->data.metadata.list.tail);
GPR_ASSERT(last_mdidx > mdidx);
GPR_ASSERT(last_mdidx <= buffer->count);
/* turn the array into a doubly linked list */
op->data.metadata.list.head = &buffer->elems[mdidx];
op->data.metadata.list.tail = &buffer->elems[last_mdidx - 1];
for (j = mdidx + 1; j < last_mdidx; j++) {
buffer->elems[j].prev = &buffer->elems[j - 1];
buffer->elems[j - 1].next = &buffer->elems[j];
void grpc_chttp2_incoming_metadata_buffer_publish(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch) {
GPR_ASSERT(!buffer->published);
buffer->published = 1;
if (buffer->count > 0) {
size_t i;
for (i = 1; i < buffer->count; i++) {
buffer->elems[i].prev = &buffer->elems[i - 1];
}
buffer->elems[mdidx].prev = NULL;
buffer->elems[last_mdidx - 1].next = NULL;
/* track where we're up to */
mdidx = last_mdidx;
}
if (found_metadata) {
live_op_buffer->elems = buffer->elems;
if (mdidx != buffer->count) {
/* we have a partially read metadata batch still in incoming_metadata */
size_t new_count = buffer->count - mdidx;
size_t copy_bytes = sizeof(*buffer->elems) * new_count;
GPR_ASSERT(mdidx < buffer->count);
buffer->elems = gpr_malloc(copy_bytes);
memcpy(buffer->elems, live_op_buffer->elems + mdidx, copy_bytes);
buffer->count = buffer->capacity = new_count;
} else {
buffer->elems = NULL;
buffer->count = 0;
buffer->capacity = 0;
for (i = 0; i < buffer->count - 1; i++) {
buffer->elems[i].next = &buffer->elems[i + 1];
}
buffer->elems[0].prev = NULL;
buffer->elems[buffer->count - 1].next = NULL;
batch->list.head = &buffer->elems[0];
batch->list.tail = &buffer->elems[buffer->count - 1];
} else {
batch->list.head = batch->list.tail = NULL;
}
batch->deadline = buffer->deadline;
}
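Usage is intentionally one-shot: elements accumulate via _add while parsing, and _publish later threads the flat array into a doubly linked grpc_metadata_batch without copying. A minimal sketch (some_mdelem is a hypothetical placeholder):
grpc_chttp2_incoming_metadata_buffer buf;
grpc_metadata_batch batch;
grpc_chttp2_incoming_metadata_buffer_init(&buf);
grpc_chttp2_incoming_metadata_buffer_add(&buf, some_mdelem);
grpc_chttp2_incoming_metadata_buffer_publish(&buf, &batch);
/* batch.list now points into buf.elems, so buf must outlive batch;
   publish may run at most once per buffer (the GPR_ASSERT guards this) */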

@ -41,12 +41,9 @@ typedef struct {
size_t count;
size_t capacity;
gpr_timespec deadline;
int published;
} grpc_chttp2_incoming_metadata_buffer;
typedef struct {
grpc_linked_mdelem *elems;
} grpc_chttp2_incoming_metadata_live_op_buffer;
/** assumes everything initially zeroed */
void grpc_chttp2_incoming_metadata_buffer_init(
grpc_chttp2_incoming_metadata_buffer *buffer);
@ -54,27 +51,12 @@ void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_chttp2_incoming_metadata_buffer *buffer);
void grpc_chttp2_incoming_metadata_buffer_reset(
grpc_chttp2_incoming_metadata_buffer *buffer);
void grpc_chttp2_incoming_metadata_buffer_publish(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch);
void grpc_chttp2_incoming_metadata_buffer_add(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem *elem);
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline);
/** extend sopb with a metadata batch; this must be post-processed by
grpc_chttp2_incoming_metadata_buffer_postprocess_sopb before being handed
out of the transport */
void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb);
void grpc_incoming_metadata_buffer_move_to_referencing_sopb(
grpc_chttp2_incoming_metadata_buffer *src,
grpc_chttp2_incoming_metadata_buffer *dst, grpc_stream_op_buffer *sopb);
void grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_stream_op_buffer *sopb,
grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer);
void grpc_chttp2_incoming_metadata_live_op_buffer_end(
grpc_chttp2_incoming_metadata_live_op_buffer *live_op_buffer);
#endif /* GRPC_INTERNAL_CORE_CHTTP2_INCOMING_METADATA_H */

@ -34,6 +34,8 @@
#ifndef GRPC_INTERNAL_CORE_CHTTP2_INTERNAL_H
#define GRPC_INTERNAL_CORE_CHTTP2_INTERNAL_H
#include <assert.h>
#include "src/core/iomgr/endpoint.h"
#include "src/core/transport/chttp2/frame.h"
#include "src/core/transport/chttp2/frame_data.h"
@ -42,9 +44,9 @@
#include "src/core/transport/chttp2/frame_rst_stream.h"
#include "src/core/transport/chttp2/frame_settings.h"
#include "src/core/transport/chttp2/frame_window_update.h"
#include "src/core/transport/chttp2/hpack_encoder.h"
#include "src/core/transport/chttp2/hpack_parser.h"
#include "src/core/transport/chttp2/incoming_metadata.h"
#include "src/core/transport/chttp2/stream_encoder.h"
#include "src/core/transport/chttp2/stream_map.h"
#include "src/core/transport/connectivity_state.h"
#include "src/core/transport/transport_impl.h"
@ -56,14 +58,14 @@ typedef struct grpc_chttp2_stream grpc_chttp2_stream;
happen to them... this enum labels each list */
typedef enum {
GRPC_CHTTP2_LIST_ALL_STREAMS,
GRPC_CHTTP2_LIST_READ_WRITE_STATE_CHANGED,
GRPC_CHTTP2_LIST_CHECK_READ_OPS,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE,
GRPC_CHTTP2_LIST_WRITABLE,
GRPC_CHTTP2_LIST_WRITING,
GRPC_CHTTP2_LIST_WRITTEN,
GRPC_CHTTP2_LIST_PARSING_SEEN,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
GRPC_CHTTP2_LIST_CANCELLED_WAITING_FOR_WRITING,
GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
@ -113,22 +115,6 @@ typedef enum {
GRPC_DTS_FRAME
} grpc_chttp2_deframe_transport_state;
typedef enum {
GRPC_WRITE_STATE_OPEN,
GRPC_WRITE_STATE_QUEUED_CLOSE,
GRPC_WRITE_STATE_SENT_CLOSE
} grpc_chttp2_write_state;
/* flags that can be or'd into stream_global::writing_now */
#define GRPC_CHTTP2_WRITING_DATA 1
#define GRPC_CHTTP2_WRITING_WINDOW 2
typedef enum {
GRPC_DONT_SEND_CLOSED = 0,
GRPC_SEND_CLOSED,
GRPC_SEND_CLOSED_WITH_RST_STREAM
} grpc_chttp2_send_closed;
typedef struct {
grpc_chttp2_stream *head;
grpc_chttp2_stream *tail;
@ -160,14 +146,28 @@ typedef struct grpc_chttp2_outstanding_ping {
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
/* forward declared in frame_data.h */
struct grpc_chttp2_incoming_byte_stream {
grpc_byte_stream base;
gpr_refcount refs;
struct grpc_chttp2_incoming_byte_stream *next_message;
grpc_chttp2_transport *transport;
grpc_chttp2_stream *stream;
int is_tail;
gpr_slice_buffer slices;
grpc_closure *on_next;
gpr_slice *next;
};
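grpc_chttp2_incoming_byte_stream embeds grpc_byte_stream so received message bodies can be pulled slice by slice instead of being buffered as stream ops. A consumption sketch, assuming the byte_stream.h interface added in this change exposes grpc_byte_stream_next returning nonzero on synchronous completion:
static void read_one_slice(grpc_exec_ctx *exec_ctx, grpc_byte_stream *bs,
                           gpr_slice *slice, grpc_closure *on_complete) {
  if (grpc_byte_stream_next(exec_ctx, bs, slice, ~(size_t)0, on_complete)) {
    /* a slice was produced synchronously; consume *slice here */
  }
  /* otherwise on_complete is scheduled once a slice becomes available */
}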
typedef struct {
/** data to write on the next write */
gpr_slice_buffer qbuf;
/** window available for us to send to peer */
gpr_int64 outgoing_window;
/** window available for peer to send to us - updated after parse */
gpr_uint32 incoming_window;
/** window available to announce to peer */
gpr_int64 announce_incoming_window;
/** how much window would we like to have for incoming_window */
gpr_uint32 connection_window_target;
@ -209,6 +209,7 @@ typedef struct {
gpr_slice_buffer outbuf;
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
gpr_int64 outgoing_window;
/** is this a client? */
gpr_uint8 is_client;
/** callback for when writing is done */
@ -233,6 +234,7 @@ struct grpc_chttp2_transport_parsing {
gpr_slice_buffer qbuf;
/* metadata object cache */
grpc_mdstr *str_grpc_timeout;
grpc_mdelem *elem_grpc_status_ok;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
/** simple one shot parsers */
@ -246,8 +248,7 @@ struct grpc_chttp2_transport_parsing {
grpc_chttp2_goaway_parser goaway_parser;
/** window available for peer to send to us */
gpr_uint32 incoming_window;
gpr_uint32 incoming_window_delta;
gpr_int64 incoming_window;
/** next stream id available at the time of beginning parsing */
gpr_uint32 next_stream_id;
@ -278,7 +279,7 @@ struct grpc_chttp2_transport_parsing {
gpr_uint32 goaway_last_stream_index;
gpr_slice goaway_text;
gpr_int64 outgoing_window_update;
gpr_int64 outgoing_window;
/** pings awaiting responses */
grpc_chttp2_outstanding_ping pings;
@ -345,8 +346,8 @@ struct grpc_chttp2_transport {
struct {
/* accept stream callback */
void (*accept_stream)(void *user_data, grpc_transport *transport,
const void *server_data);
void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *accept_stream_user_data;
/** connectivity tracking */
@ -358,9 +359,6 @@ typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
gpr_uint32 id;
grpc_closure *send_done_closure;
grpc_closure *recv_done_closure;
/** window available for us to send to peer */
gpr_int64 outgoing_window;
/** The number of bytes the upper layers have offered to receive.
@ -371,54 +369,66 @@ typedef struct {
not yet announced to HTTP2 flow control.
As the upper layers offer to read more bytes, this value increases.
As we advertise incoming flow control window, this value decreases. */
gpr_uint32 unannounced_incoming_window;
/** The number of bytes of HTTP2 flow control we have advertised.
As we advertise incoming flow control window, this value increases.
As bytes are read, this value decreases.
Updated after parse. */
gpr_uint32 incoming_window;
/** stream ops the transport user would like to send */
grpc_stream_op_buffer *outgoing_sopb;
gpr_uint32 unannounced_incoming_window_for_parse;
gpr_uint32 unannounced_incoming_window_for_writing;
/** things the upper layers would like to send */
grpc_metadata_batch *send_initial_metadata;
grpc_closure *send_initial_metadata_finished;
grpc_byte_stream *send_message;
grpc_closure *send_message_finished;
grpc_metadata_batch *send_trailing_metadata;
grpc_closure *send_trailing_metadata_finished;
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *recv_initial_metadata_finished;
grpc_byte_stream **recv_message;
grpc_closure *recv_message_ready;
grpc_metadata_batch *recv_trailing_metadata;
grpc_closure *recv_trailing_metadata_finished;
/** when the application requests writes be closed, the write_closed is
'queued'; when the close is flow controlled into the send path, we are
'sending' it; when the write has been performed it is 'sent' */
grpc_chttp2_write_state write_state;
/** is this stream closed (boolean) */
gpr_uint8 write_closed;
/** is this stream reading half-closed (boolean) */
gpr_uint8 read_closed;
/** has this stream been cancelled? (boolean) */
gpr_uint8 cancelled;
grpc_status_code cancelled_status;
/** have we told the upper layer that this stream is cancelled? */
gpr_uint8 published_cancelled;
/** is this stream finished closing (and reportably closed) */
gpr_uint8 finished_close;
/** is this stream in the stream map? (boolean) */
gpr_uint8 in_stream_map;
/** bitmask of GRPC_CHTTP2_WRITING_xxx above */
gpr_uint8 writing_now;
/** has anything been written to this stream? */
gpr_uint8 written_anything;
/** stream state already published to the upper layer */
grpc_stream_state published_state;
/** address to publish next stream state to */
grpc_stream_state *publish_state;
/** pointer to sop buffer to fill in with new stream ops */
grpc_stream_op_buffer *publish_sopb;
grpc_stream_op_buffer incoming_sopb;
/** has this stream seen an error? if 1, then pending incoming frames
can be thrown away */
gpr_uint8 seen_error;
/** incoming metadata */
grpc_chttp2_incoming_metadata_buffer incoming_metadata;
grpc_chttp2_incoming_metadata_live_op_buffer outstanding_metadata;
gpr_uint8 published_initial_metadata;
gpr_uint8 published_trailing_metadata;
gpr_uint8 faked_trailing_metadata;
grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
grpc_chttp2_incoming_frame_queue incoming_frames;
} grpc_chttp2_stream_global;
typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
gpr_uint32 id;
/** sops that have passed flow control to be written */
grpc_stream_op_buffer sopb;
/** how strongly should we indicate closure with the next write */
grpc_chttp2_send_closed send_closed;
gpr_uint8 fetching;
gpr_uint8 sent_initial_metadata;
gpr_uint8 sent_message;
gpr_uint8 sent_trailing_metadata;
gpr_uint8 read_closed;
/** send this initial metadata */
grpc_metadata_batch *send_initial_metadata;
grpc_byte_stream *send_message;
grpc_metadata_batch *send_trailing_metadata;
gpr_int64 outgoing_window;
/** how much window should we announce? */
gpr_uint32 announce_window;
gpr_slice_buffer flow_controlled_buffer;
gpr_slice fetching_slice;
size_t stream_fetched;
grpc_closure finished_fetch;
} grpc_chttp2_stream_writing;
struct grpc_chttp2_stream_parsing {
@ -428,22 +438,29 @@ struct grpc_chttp2_stream_parsing {
gpr_uint8 received_close;
/** saw a rst_stream */
gpr_uint8 saw_rst_stream;
/** incoming_window has been reduced by this much during parsing */
gpr_uint32 incoming_window_delta;
/** how many header frames have we received? */
gpr_uint8 header_frames_received;
/** which metadata did we get (on this parse) */
gpr_uint8 got_metadata_on_parse[2];
/** should we raise the seen_error flag on the stream's global state */
gpr_uint8 seen_error;
/** window available for peer to send to us */
gpr_uint32 incoming_window;
gpr_int64 incoming_window;
/** parsing state for data frames */
grpc_chttp2_data_parser data_parser;
/** reason given to rst_stream */
gpr_uint32 rst_stream_reason;
/* amount of window given */
gpr_uint64 outgoing_window_update;
/** amount of window given */
gpr_int64 outgoing_window;
/** number of bytes received - reset at end of parse thread execution */
gpr_int64 received_bytes;
/** incoming metadata */
grpc_chttp2_incoming_metadata_buffer incoming_metadata;
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
};
struct grpc_chttp2_stream {
grpc_stream_refcount *refcount;
grpc_chttp2_stream_global global;
grpc_chttp2_stream_writing writing;
grpc_chttp2_stream_parsing parsing;
@ -504,21 +521,10 @@ void grpc_chttp2_list_remove_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_add_incoming_window_updated(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_incoming_window_updated(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_remove_incoming_window_updated(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_list_add_writing_stream(
/* returns 1 if stream added, 0 if it was already present */
int grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
grpc_chttp2_stream_writing *stream_writing) GRPC_MUST_USE_RESULT;
int grpc_chttp2_list_have_writing_streams(
grpc_chttp2_transport_writing *transport_writing);
int grpc_chttp2_list_pop_writing_stream(
@ -550,31 +556,44 @@ int grpc_chttp2_list_pop_waiting_for_concurrency(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_closed_waiting_for_parsing(
void grpc_chttp2_list_add_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_cancelled_waiting_for_writing(
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing);
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_cancelled_waiting_for_writing(
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing);
void grpc_chttp2_list_add_read_write_state_changed(
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_read_write_state_changed(
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id);
grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
grpc_chttp2_transport_parsing *transport_parsing, gpr_uint32 id);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
gpr_uint32 id);
void grpc_chttp2_add_incoming_goaway(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
@ -592,7 +611,10 @@ void grpc_chttp2_for_all_streams(
grpc_chttp2_stream_global *stream_global));
void grpc_chttp2_parsing_become_skip_parser(
grpc_chttp2_transport_parsing *transport_parsing);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
grpc_closure **pclosure, int success);
#define GRPC_CHTTP2_CLIENT_CONNECT_STRING "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
@ -607,26 +629,122 @@ extern int grpc_flowctl_trace;
else \
stmt
#define GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(reason, transport, context, var, \
delta) \
if (!(grpc_flowctl_trace)) { \
} else { \
grpc_chttp2_flowctl_trace(__FILE__, __LINE__, reason, #context, #var, \
transport->is_client, context->id, \
(gpr_int64)(context->var), (gpr_int64)(delta)); \
}
#define GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(reason, context, var, delta) \
if (!(grpc_flowctl_trace)) { \
} else { \
grpc_chttp2_flowctl_trace(__FILE__, __LINE__, reason, #context, #var, \
context->is_client, 0, \
(gpr_int64)(context->var), (gpr_int64)(delta)); \
}
void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason,
const char *context, const char *var,
int is_client, gpr_uint32 stream_id,
gpr_int64 current_value, gpr_int64 delta);
typedef enum {
GRPC_CHTTP2_FLOWCTL_MOVE,
GRPC_CHTTP2_FLOWCTL_CREDIT,
GRPC_CHTTP2_FLOWCTL_DEBIT
} grpc_chttp2_flowctl_op;
#define GRPC_CHTTP2_FLOW_MOVE_COMMON(phase, transport, id1, id2, dst_context, \
dst_var, src_context, src_var) \
do { \
assert(id1 == id2); \
if (grpc_flowctl_trace) { \
grpc_chttp2_flowctl_trace( \
__FILE__, __LINE__, phase, GRPC_CHTTP2_FLOWCTL_MOVE, #dst_context, \
#dst_var, #src_context, #src_var, transport->is_client, id1, \
dst_context->dst_var, src_context->src_var); \
} \
dst_context->dst_var += src_context->src_var; \
src_context->src_var = 0; \
} while (0)
#define GRPC_CHTTP2_FLOW_MOVE_STREAM(phase, transport, dst_context, dst_var, \
src_context, src_var) \
GRPC_CHTTP2_FLOW_MOVE_COMMON(phase, transport, dst_context->id, \
src_context->id, dst_context, dst_var, \
src_context, src_var)
#define GRPC_CHTTP2_FLOW_MOVE_TRANSPORT(phase, dst_context, dst_var, \
src_context, src_var) \
GRPC_CHTTP2_FLOW_MOVE_COMMON(phase, dst_context, 0, 0, dst_context, dst_var, \
src_context, src_var)
#define GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, transport, id, dst_context, \
dst_var, amount) \
do { \
if (grpc_flowctl_trace) { \
grpc_chttp2_flowctl_trace(__FILE__, __LINE__, phase, \
GRPC_CHTTP2_FLOWCTL_CREDIT, #dst_context, \
#dst_var, NULL, #amount, transport->is_client, \
id, dst_context->dst_var, amount); \
} \
dst_context->dst_var += amount; \
} while (0)
#define GRPC_CHTTP2_FLOW_CREDIT_STREAM(phase, transport, dst_context, dst_var, \
amount) \
GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, transport, dst_context->id, \
dst_context, dst_var, amount)
#define GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT(phase, dst_context, dst_var, amount) \
GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, dst_context, 0, dst_context, dst_var, \
amount)
#define GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, transport, id, dst_context, \
dst_var, amount) \
do { \
if (grpc_flowctl_trace) { \
grpc_chttp2_flowctl_trace(__FILE__, __LINE__, phase, \
GRPC_CHTTP2_FLOWCTL_DEBIT, #dst_context, \
#dst_var, NULL, #amount, transport->is_client, \
id, dst_context->dst_var, amount); \
} \
dst_context->dst_var -= amount; \
} while (0)
#define GRPC_CHTTP2_FLOW_DEBIT_STREAM(phase, transport, dst_context, dst_var, \
amount) \
GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, transport, dst_context->id, \
dst_context, dst_var, amount)
#define GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT(phase, dst_context, dst_var, amount) \
GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, dst_context, 0, dst_context, dst_var, \
amount)
void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
grpc_chttp2_flowctl_op op, const char *context1,
const char *var1, const char *context2,
const char *var2, int is_client,
gpr_uint32 stream_id, gpr_int64 val1,
gpr_int64 val2);
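To make the macro plumbing concrete: with tracing disabled, a stream-level debit reduces to a plain subtraction. For example, the call used by update_incoming_window in parsing.c expands to roughly:
/* GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", transport_parsing, stream_parsing,
                                 incoming_window, incoming_frame_size)
   with grpc_flowctl_trace == 0 becomes: */
do {
  stream_parsing->incoming_window -= incoming_frame_size;
} while (0);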
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream,
grpc_status_code status, gpr_slice *details);
void grpc_chttp2_mark_stream_closed(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, int close_reads,
int close_writes);
void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global, reason)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
const char *reason);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global,
const char *reason);
#else
#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
grpc_chttp2_stream_ref(stream_global)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream_global)
void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global);
void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_global *stream_global);
#endif
grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_uint32 frame_size,
gpr_uint32 flags, grpc_chttp2_incoming_frame_queue *add_to_queue);
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
gpr_slice slice);
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs);
#endif

@ -42,22 +42,28 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing);
static int init_frame_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *transport_parsing);
static int init_header_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing, int is_continuation);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
int is_continuation);
static int init_data_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_rst_stream_parser(
grpc_chttp2_transport_parsing *transport_parsing);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_settings_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_window_update_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing);
static int init_ping_parser(grpc_chttp2_transport_parsing *transport_parsing);
static int init_goaway_parser(grpc_chttp2_transport_parsing *transport_parsing);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
static int init_ping_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *transport_parsing);
static int init_goaway_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *transport_parsing);
static int init_skip_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing, int is_header);
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
int is_header);
static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *transport_parsing,
@ -74,23 +80,11 @@ void grpc_chttp2_prepare_to_read(
transport_parsing->next_stream_id = transport_global->next_stream_id;
/* update the parsing view of incoming window */
if (transport_parsing->incoming_window != transport_global->incoming_window) {
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
"parse", transport_parsing, incoming_window,
(gpr_int64)transport_global->incoming_window -
(gpr_int64)transport_parsing->incoming_window);
transport_parsing->incoming_window = transport_global->incoming_window;
}
while (grpc_chttp2_list_pop_incoming_window_updated(
while (grpc_chttp2_list_pop_unannounced_incoming_window_available(
transport_global, transport_parsing, &stream_global, &stream_parsing)) {
stream_parsing->id = stream_global->id;
if (stream_parsing->incoming_window != stream_global->incoming_window) {
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"parse", transport_parsing, stream_parsing, incoming_window,
(gpr_int64)stream_global->incoming_window -
(gpr_int64)stream_parsing->incoming_window);
stream_parsing->incoming_window = stream_global->incoming_window;
}
GRPC_CHTTP2_FLOW_MOVE_STREAM("parse", transport_parsing, stream_parsing,
incoming_window, stream_global,
unannounced_incoming_window_for_parse);
}
GPR_TIMER_END("grpc_chttp2_prepare_to_read", 0);
@ -101,6 +95,8 @@ void grpc_chttp2_publish_reads(
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_parsing *stream_parsing;
int was_zero;
int is_zero;
/* transport_parsing->last_incoming_stream_id is used as
last-stream-id when
@ -144,98 +140,102 @@ void grpc_chttp2_publish_reads(
}
/* propagate flow control tokens to global state */
if (transport_parsing->outgoing_window_update) {
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
"parsed", transport_global, outgoing_window,
transport_parsing->outgoing_window_update);
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
"parsed", transport_parsing, outgoing_window_update,
-(gpr_int64)transport_parsing->outgoing_window_update);
transport_global->outgoing_window +=
transport_parsing->outgoing_window_update;
transport_parsing->outgoing_window_update = 0;
}
if (transport_parsing->incoming_window_delta) {
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
"parsed", transport_global, incoming_window,
-(gpr_int64)transport_parsing->incoming_window_delta);
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
"parsed", transport_parsing, incoming_window_delta,
-(gpr_int64)transport_parsing->incoming_window_delta);
transport_global->incoming_window -=
transport_parsing->incoming_window_delta;
transport_parsing->incoming_window_delta = 0;
was_zero = transport_global->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("parsed", transport_global, outgoing_window,
transport_parsing, outgoing_window);
is_zero = transport_global->outgoing_window <= 0;
if (was_zero && !is_zero) {
while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
&stream_global)) {
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
}
}
if (transport_parsing->incoming_window <
transport_global->connection_window_target * 3 / 4) {
gpr_int64 announce_bytes = transport_global->connection_window_target -
transport_parsing->incoming_window;
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_global,
announce_incoming_window, announce_bytes);
GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_parsing,
incoming_window, announce_bytes);
}
/* for each stream that saw an update, fixup global state */
while (grpc_chttp2_list_pop_parsing_seen_stream(
transport_global, transport_parsing, &stream_global, &stream_parsing)) {
/* update incoming flow control window */
if (stream_parsing->incoming_window_delta) {
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"parsed", transport_parsing, stream_global, incoming_window,
-(gpr_int64)stream_parsing->incoming_window_delta);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"parsed", transport_parsing, stream_parsing, incoming_window_delta,
-(gpr_int64)stream_parsing->incoming_window_delta);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"parsed", transport_parsing, stream_global, max_recv_bytes,
-(gpr_int64)stream_parsing->incoming_window_delta);
stream_global->incoming_window -= stream_parsing->incoming_window_delta;
GPR_ASSERT(stream_global->max_recv_bytes >=
stream_parsing->incoming_window_delta);
stream_global->max_recv_bytes -= stream_parsing->incoming_window_delta;
stream_parsing->incoming_window_delta = 0;
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
if (stream_parsing->seen_error) {
stream_global->seen_error = 1;
grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
/* update outgoing flow control window */
if (stream_parsing->outgoing_window_update) {
int was_zero = stream_global->outgoing_window <= 0;
int is_zero;
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("parsed", transport_parsing,
stream_global, outgoing_window,
stream_parsing->outgoing_window_update);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"parsed", transport_parsing, stream_parsing, outgoing_window_update,
-(gpr_int64)stream_parsing->outgoing_window_update);
GPR_ASSERT(stream_parsing->outgoing_window_update <= GPR_UINT32_MAX);
stream_global->outgoing_window +=
(gpr_uint32)stream_parsing->outgoing_window_update;
stream_parsing->outgoing_window_update = 0;
is_zero = stream_global->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
}
was_zero = stream_global->outgoing_window <= 0;
GRPC_CHTTP2_FLOW_MOVE_STREAM("parsed", transport_global, stream_global,
outgoing_window, stream_parsing,
outgoing_window);
is_zero = stream_global->outgoing_window <= 0;
if (was_zero && !is_zero) {
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
}
/* updating closed status */
if (stream_parsing->received_close) {
stream_global->read_closed = 1;
grpc_chttp2_list_add_read_write_state_changed(transport_global,
stream_global);
stream_global->max_recv_bytes -= (gpr_uint32)GPR_MIN(
stream_global->max_recv_bytes, stream_parsing->received_bytes);
stream_parsing->received_bytes = 0;
/* publish incoming stream ops */
if (stream_global->incoming_frames.tail != NULL) {
stream_global->incoming_frames.tail->is_tail = 0;
}
if (stream_parsing->data_parser.incoming_frames.head != NULL) {
grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
grpc_chttp2_incoming_frame_queue_merge(
&stream_global->incoming_frames,
&stream_parsing->data_parser.incoming_frames);
if (stream_global->incoming_frames.tail != NULL) {
stream_global->incoming_frames.tail->is_tail = 1;
}
if (!stream_global->published_initial_metadata &&
stream_parsing->got_metadata_on_parse[0]) {
stream_parsing->got_metadata_on_parse[0] = 0;
stream_global->published_initial_metadata = 1;
GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
stream_parsing->metadata_buffer[0],
stream_global->received_initial_metadata);
grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
if (!stream_global->published_trailing_metadata &&
stream_parsing->got_metadata_on_parse[1]) {
stream_parsing->got_metadata_on_parse[1] = 0;
stream_global->published_trailing_metadata = 1;
GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
stream_parsing->metadata_buffer[1],
stream_global->received_trailing_metadata);
grpc_chttp2_list_add_check_read_ops(transport_global, stream_global);
}
if (stream_parsing->saw_rst_stream) {
stream_global->cancelled = 1;
stream_global->cancelled_status = grpc_chttp2_http2_error_to_grpc_status(
(grpc_chttp2_error_code)stream_parsing->rst_stream_reason);
if (stream_parsing->rst_stream_reason == GRPC_CHTTP2_NO_ERROR) {
stream_global->published_cancelled = 1;
if (stream_parsing->rst_stream_reason != GRPC_CHTTP2_NO_ERROR) {
grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
(grpc_chttp2_error_code)stream_parsing->rst_stream_reason);
char *status_details;
gpr_slice slice_details;
gpr_asprintf(&status_details, "Received RST_STREAM err=%d",
stream_parsing->rst_stream_reason);
slice_details = gpr_slice_from_copied_string(status_details);
gpr_free(status_details);
grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
status_code, &slice_details);
}
grpc_chttp2_list_add_read_write_state_changed(transport_global,
stream_global);
grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
1, 1);
}
/* publish incoming stream ops */
if (stream_parsing->data_parser.incoming_sopb.nops > 0) {
grpc_incoming_metadata_buffer_move_to_referencing_sopb(
&stream_parsing->incoming_metadata, &stream_global->incoming_metadata,
&stream_parsing->data_parser.incoming_sopb);
grpc_sopb_move_to(&stream_parsing->data_parser.incoming_sopb,
&stream_global->incoming_sopb);
grpc_chttp2_list_add_read_write_state_changed(transport_global,
stream_global);
if (stream_parsing->received_close) {
grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
1, 0);
}
}
}
@ -363,7 +363,7 @@ int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(cur < end);
transport_parsing->incoming_stream_id |= ((gpr_uint32)*cur);
transport_parsing->deframe_state = GRPC_DTS_FRAME;
if (!init_frame_parser(transport_parsing)) {
if (!init_frame_parser(exec_ctx, transport_parsing)) {
return 0;
}
if (transport_parsing->incoming_stream_id) {
@ -428,7 +428,8 @@ int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
GPR_UNREACHABLE_CODE(return 0);
}
static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing) {
static int init_frame_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *transport_parsing) {
if (transport_parsing->expect_continuation_stream_id != 0) {
if (transport_parsing->incoming_frame_type !=
GRPC_CHTTP2_FRAME_CONTINUATION) {
@ -445,30 +446,30 @@ static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing) {
transport_parsing->incoming_stream_id);
return 0;
}
return init_header_frame_parser(transport_parsing, 1);
return init_header_frame_parser(exec_ctx, transport_parsing, 1);
}
switch (transport_parsing->incoming_frame_type) {
case GRPC_CHTTP2_FRAME_DATA:
return init_data_frame_parser(transport_parsing);
return init_data_frame_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_HEADER:
return init_header_frame_parser(transport_parsing, 0);
return init_header_frame_parser(exec_ctx, transport_parsing, 0);
case GRPC_CHTTP2_FRAME_CONTINUATION:
gpr_log(GPR_ERROR, "Unexpected CONTINUATION frame");
return 0;
case GRPC_CHTTP2_FRAME_RST_STREAM:
return init_rst_stream_parser(transport_parsing);
return init_rst_stream_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_SETTINGS:
return init_settings_frame_parser(transport_parsing);
return init_settings_frame_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_WINDOW_UPDATE:
return init_window_update_frame_parser(transport_parsing);
return init_window_update_frame_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_PING:
return init_ping_parser(transport_parsing);
return init_ping_parser(exec_ctx, transport_parsing);
case GRPC_CHTTP2_FRAME_GOAWAY:
return init_goaway_parser(transport_parsing);
return init_goaway_parser(exec_ctx, transport_parsing);
default:
gpr_log(GPR_ERROR, "Unknown frame type %02x",
transport_parsing->incoming_frame_type);
return init_skip_frame_parser(transport_parsing, 0);
return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
}
}
@ -482,7 +483,8 @@ static grpc_chttp2_parse_error skip_parser(
static void skip_header(void *tp, grpc_mdelem *md) { GRPC_MDELEM_UNREF(md); }
static int init_skip_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing, int is_header) {
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
int is_header) {
if (is_header) {
gpr_uint8 is_eoh = transport_parsing->expect_continuation_stream_id != 0;
transport_parsing->parser = grpc_chttp2_header_parser_parse;
@ -499,65 +501,51 @@ static int init_skip_frame_parser(
}
void grpc_chttp2_parsing_become_skip_parser(
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
init_skip_frame_parser(
transport_parsing,
exec_ctx, transport_parsing,
transport_parsing->parser == grpc_chttp2_header_parser_parse);
}
static grpc_chttp2_parse_error update_incoming_window(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing) {
if (transport_parsing->incoming_frame_size >
transport_parsing->incoming_window) {
gpr_uint32 incoming_frame_size = transport_parsing->incoming_frame_size;
if (incoming_frame_size > transport_parsing->incoming_window) {
gpr_log(GPR_ERROR, "frame of size %d overflows incoming window of %d",
transport_parsing->incoming_frame_size,
transport_parsing->incoming_window);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
if (transport_parsing->incoming_frame_size >
stream_parsing->incoming_window) {
if (incoming_frame_size > stream_parsing->incoming_window) {
gpr_log(GPR_ERROR, "frame of size %d overflows incoming window of %d",
transport_parsing->incoming_frame_size,
stream_parsing->incoming_window);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
"data", transport_parsing, incoming_window,
-(gpr_int64)transport_parsing->incoming_frame_size);
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT("data", transport_parsing,
incoming_window_delta,
transport_parsing->incoming_frame_size);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"data", transport_parsing, stream_parsing, incoming_window,
-(gpr_int64)transport_parsing->incoming_frame_size);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("data", transport_parsing, stream_parsing,
incoming_window_delta,
transport_parsing->incoming_frame_size);
transport_parsing->incoming_window -= transport_parsing->incoming_frame_size;
transport_parsing->incoming_window_delta +=
transport_parsing->incoming_frame_size;
stream_parsing->incoming_window -= transport_parsing->incoming_frame_size;
stream_parsing->incoming_window_delta +=
transport_parsing->incoming_frame_size;
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", transport_parsing, incoming_window,
incoming_frame_size);
GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", transport_parsing, stream_parsing,
incoming_window, incoming_frame_size);
stream_parsing->received_bytes += incoming_frame_size;
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
return GRPC_CHTTP2_PARSE_OK;
}
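A condensed sketch of the two-level check update_incoming_window performs (the standalone types here are hypothetical; the real code debits through the GRPC_CHTTP2_FLOW_DEBIT_* macros above): a DATA frame must fit inside both the connection-level window and the stream-level window, and on success both are debited by the frame size.
typedef struct {
  gpr_uint32 transport_window; /* connection-level incoming window */
  gpr_uint32 stream_window;    /* per-stream incoming window */
} flow_windows;
static int debit_incoming(flow_windows *w, gpr_uint32 frame_size) {
  if (frame_size > w->transport_window) return 0; /* connection error */
  if (frame_size > w->stream_window) return 0;    /* also a connection error */
  w->transport_window -= frame_size;
  w->stream_window -= frame_size;
  return 1; /* frame accepted; windows shrink until WINDOW_UPDATE refills them */
}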
static int init_data_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
grpc_chttp2_stream_parsing *stream_parsing =
grpc_chttp2_parsing_lookup_stream(transport_parsing,
transport_parsing->incoming_stream_id);
grpc_chttp2_parse_error err = GRPC_CHTTP2_PARSE_OK;
if (!stream_parsing || stream_parsing->received_close)
return init_skip_frame_parser(transport_parsing, 0);
return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
if (err == GRPC_CHTTP2_PARSE_OK) {
err = update_incoming_window(transport_parsing, stream_parsing);
err = update_incoming_window(exec_ctx, transport_parsing, stream_parsing);
}
if (err == GRPC_CHTTP2_PARSE_OK) {
err = grpc_chttp2_data_parser_begin_frame(
@ -577,7 +565,7 @@ static int init_data_frame_parser(
&transport_parsing->qbuf,
grpc_chttp2_rst_stream_create(transport_parsing->incoming_stream_id,
GRPC_CHTTP2_PROTOCOL_ERROR));
return init_skip_frame_parser(transport_parsing, 0);
return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
case GRPC_CHTTP2_CONNECTION_ERROR:
return 0;
}
@ -586,11 +574,13 @@ static int init_data_frame_parser(
static void free_timeout(void *p) { gpr_free(p); }
static void on_header(void *tp, grpc_mdelem *md) {
static void on_initial_header(void *tp, grpc_mdelem *md) {
grpc_chttp2_transport_parsing *transport_parsing = tp;
grpc_chttp2_stream_parsing *stream_parsing =
transport_parsing->incoming_stream;
GPR_TIMER_BEGIN("on_initial_header", 0);
GPR_ASSERT(stream_parsing);
GRPC_CHTTP2_IF_TRACING(gpr_log(
@ -598,6 +588,12 @@ static void on_header(void *tp, grpc_mdelem *md) {
transport_parsing->is_client ? "CLI" : "SVR",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
if (md->key == transport_parsing->elem_grpc_status_ok->key &&
md != transport_parsing->elem_grpc_status_ok) {
/* TODO(ctiller): check for a status like " 0" */
stream_parsing->seen_error = 1;
}
if (md->key == transport_parsing->str_grpc_timeout) {
gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
if (!cached_timeout) {
@ -612,24 +608,57 @@ static void on_header(void *tp, grpc_mdelem *md) {
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
grpc_chttp2_incoming_metadata_buffer_set_deadline(
&stream_parsing->incoming_metadata,
&stream_parsing->metadata_buffer[0],
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), *cached_timeout));
GRPC_MDELEM_UNREF(md);
} else {
grpc_chttp2_incoming_metadata_buffer_add(&stream_parsing->incoming_metadata,
md);
grpc_chttp2_incoming_metadata_buffer_add(
&stream_parsing->metadata_buffer[0], md);
}
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
GPR_TIMER_END("on_initial_header", 0);
}
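The grpc-timeout branch above uses mdelem user data as a parse-once cache. A sketch of the idiom, assuming a hypothetical parse_grpc_timeout helper (the actual decode call sits outside this hunk):
gpr_timespec *cached = grpc_mdelem_get_user_data(md, free_timeout);
if (cached == NULL) {
  cached = gpr_malloc(sizeof(*cached));
  parse_grpc_timeout(grpc_mdstr_as_c_string(md->value), cached); /* hypothetical */
  grpc_mdelem_set_user_data(md, free_timeout, cached);
}
/* interned mdelems are shared, so any later stream carrying the same
   timeout string finds the parsed gpr_timespec already attached */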
static void on_trailing_header(void *tp, grpc_mdelem *md) {
grpc_chttp2_transport_parsing *transport_parsing = tp;
grpc_chttp2_stream_parsing *stream_parsing =
transport_parsing->incoming_stream;
GPR_TIMER_BEGIN("on_trailing_header", 0);
GPR_ASSERT(stream_parsing);
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", stream_parsing->id,
transport_parsing->is_client ? "CLI" : "SVR",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
if (md->key == transport_parsing->elem_grpc_status_ok->key &&
md != transport_parsing->elem_grpc_status_ok) {
/* TODO(ctiller): check for a status like " 0" */
stream_parsing->seen_error = 1;
}
grpc_chttp2_incoming_metadata_buffer_add(&stream_parsing->metadata_buffer[1],
md);
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
GPR_TIMER_END("on_trailing_header", 0);
}
static int init_header_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing, int is_continuation) {
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
int is_continuation) {
gpr_uint8 is_eoh = (transport_parsing->incoming_frame_flags &
GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
int via_accept = 0;
grpc_chttp2_stream_parsing *stream_parsing;
/* TODO(ctiller): when to increment header_frames_received? */
if (is_eoh) {
transport_parsing->expect_continuation_stream_id = 0;
} else {
@ -649,7 +678,7 @@ static int init_header_frame_parser(
if (is_continuation) {
gpr_log(GPR_ERROR,
"grpc_chttp2_stream disbanded before CONTINUATION received");
return init_skip_frame_parser(transport_parsing, 1);
return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
if (transport_parsing->is_client) {
if ((transport_parsing->incoming_stream_id & 1) &&
@ -660,7 +689,7 @@ static int init_header_frame_parser(
gpr_log(GPR_ERROR,
"ignoring new grpc_chttp2_stream creation on client");
}
return init_skip_frame_parser(transport_parsing, 1);
return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
} else if (transport_parsing->last_incoming_stream_id >
transport_parsing->incoming_stream_id) {
gpr_log(GPR_ERROR,
@ -669,19 +698,19 @@ static int init_header_frame_parser(
"id=%d, new grpc_chttp2_stream id=%d",
transport_parsing->last_incoming_stream_id,
transport_parsing->incoming_stream_id);
return init_skip_frame_parser(transport_parsing, 1);
return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
} else if ((transport_parsing->incoming_stream_id & 1) == 0) {
gpr_log(GPR_ERROR,
"ignoring grpc_chttp2_stream with non-client generated index %d",
transport_parsing->incoming_stream_id);
return init_skip_frame_parser(transport_parsing, 1);
return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
stream_parsing = transport_parsing->incoming_stream =
grpc_chttp2_parsing_accept_stream(
transport_parsing, transport_parsing->incoming_stream_id);
exec_ctx, transport_parsing, transport_parsing->incoming_stream_id);
if (stream_parsing == NULL) {
gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted");
return init_skip_frame_parser(transport_parsing, 1);
return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
via_accept = 1;
} else {
@ -691,11 +720,21 @@ static int init_header_frame_parser(
if (stream_parsing->received_close) {
gpr_log(GPR_ERROR, "skipping already closed grpc_chttp2_stream header");
transport_parsing->incoming_stream = NULL;
return init_skip_frame_parser(transport_parsing, 1);
return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
transport_parsing->parser = grpc_chttp2_header_parser_parse;
transport_parsing->parser_data = &transport_parsing->hpack_parser;
transport_parsing->hpack_parser.on_header = on_header;
switch (stream_parsing->header_frames_received) {
case 0:
transport_parsing->hpack_parser.on_header = on_initial_header;
break;
case 1:
transport_parsing->hpack_parser.on_header = on_trailing_header;
break;
case 2:
gpr_log(GPR_ERROR, "too many header frames received");
return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
}
transport_parsing->hpack_parser.on_header_user_data = transport_parsing;
transport_parsing->hpack_parser.is_boundary = is_eoh;
transport_parsing->hpack_parser.is_eof =
@ -708,7 +747,7 @@ static int init_header_frame_parser(
}
static int init_window_update_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_window_update_parser_begin_frame(
&transport_parsing->simple.window_update,
transport_parsing->incoming_frame_size,
@ -722,7 +761,8 @@ static int init_window_update_frame_parser(
return ok;
}
static int init_ping_parser(grpc_chttp2_transport_parsing *transport_parsing) {
static int init_ping_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_ping_parser_begin_frame(
&transport_parsing->simple.ping,
transport_parsing->incoming_frame_size,
@ -733,7 +773,7 @@ static int init_ping_parser(grpc_chttp2_transport_parsing *transport_parsing) {
}
static int init_rst_stream_parser(
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_rst_stream_parser_begin_frame(
&transport_parsing->simple.rst_stream,
transport_parsing->incoming_frame_size,
@ -741,7 +781,7 @@ static int init_rst_stream_parser(
transport_parsing->incoming_stream = grpc_chttp2_parsing_lookup_stream(
transport_parsing, transport_parsing->incoming_stream_id);
if (!transport_parsing->incoming_stream) {
return init_skip_frame_parser(transport_parsing, 0);
return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
}
transport_parsing->parser = grpc_chttp2_rst_stream_parser_parse;
transport_parsing->parser_data = &transport_parsing->simple.rst_stream;
@ -749,7 +789,7 @@ static int init_rst_stream_parser(
}
static int init_goaway_parser(
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_goaway_parser_begin_frame(
&transport_parsing->goaway_parser,
transport_parsing->incoming_frame_size,
@ -760,7 +800,7 @@ static int init_goaway_parser(
}
static int init_settings_frame_parser(
grpc_chttp2_transport_parsing *transport_parsing) {
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
int ok;
if (transport_parsing->incoming_stream_id != 0) {
@ -806,7 +846,7 @@ static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
}
return 1;
case GRPC_CHTTP2_STREAM_ERROR:
grpc_chttp2_parsing_become_skip_parser(transport_parsing);
grpc_chttp2_parsing_become_skip_parser(exec_ctx, transport_parsing);
if (stream_parsing) {
stream_parsing->saw_rst_stream = 1;
stream_parsing->rst_stream_reason = GRPC_CHTTP2_PROTOCOL_ERROR;

@ -142,12 +142,13 @@ static void stream_list_add_tail(grpc_chttp2_transport *t,
s->included[id] = 1;
}
static void stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_chttp2_stream_list_id id) {
static int stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_chttp2_stream_list_id id) {
if (s->included[id]) {
return;
return 0;
}
stream_list_add_tail(t, s, id);
return 1;
}
/* wrappers for specializations */
@ -192,12 +193,12 @@ void grpc_chttp2_list_remove_writable_stream(
GRPC_CHTTP2_LIST_WRITABLE);
}
void grpc_chttp2_list_add_writing_stream(
int grpc_chttp2_list_add_writing_stream(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITING);
return stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_WRITING);
}
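Returning int from stream_list_add lets callers act only on the transition into a list. The pattern as it appears in the writing path later in this diff: a stream reference is taken exactly once per list membership, and the matching GRPC_CHTTP2_STREAM_UNREF in grpc_chttp2_cleanup_writing releases it.
if (grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing)) {
  /* newly added to the writing list: take the ref that cleanup will drop */
  GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
}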
int grpc_chttp2_list_have_writing_streams(
@ -241,6 +242,40 @@ int grpc_chttp2_list_pop_written_stream(
return r;
}
void grpc_chttp2_list_add_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
void grpc_chttp2_list_remove_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(
TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
}
int grpc_chttp2_list_pop_unannounced_incoming_window_available(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r =
stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
}
return r;
}
void grpc_chttp2_list_add_parsing_seen_stream(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing) {
@ -284,91 +319,60 @@ int grpc_chttp2_list_pop_waiting_for_concurrency(
return r;
}
void grpc_chttp2_list_add_closed_waiting_for_parsing(
void grpc_chttp2_list_add_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
}
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
int grpc_chttp2_list_pop_check_read_ops(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
GRPC_CHTTP2_LIST_CHECK_READ_OPS);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_cancelled_waiting_for_writing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_CANCELLED_WAITING_FOR_WRITING);
void grpc_chttp2_list_add_stalled_by_transport(
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_writing *stream_writing) {
stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
STREAM_FROM_WRITING(stream_writing),
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
int grpc_chttp2_list_pop_cancelled_waiting_for_writing(
int grpc_chttp2_list_pop_stalled_by_transport(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_CANCELLED_WAITING_FOR_WRITING);
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
if (r != 0) {
*stream_global = &stream->global;
}
return r;
}
void grpc_chttp2_list_add_incoming_window_updated(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED);
}
int grpc_chttp2_list_pop_incoming_window_updated(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_parsing **stream_parsing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED);
if (r != 0) {
*stream_global = &stream->global;
*stream_parsing = &stream->parsing;
}
return r;
}
void grpc_chttp2_list_remove_incoming_window_updated(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_INCOMING_WINDOW_UPDATED);
}
void grpc_chttp2_list_add_read_write_state_changed(
void grpc_chttp2_list_add_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_READ_WRITE_STATE_CHANGED);
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
}
int grpc_chttp2_list_pop_read_write_state_changed(
int grpc_chttp2_list_pop_closed_waiting_for_parsing(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_READ_WRITE_STATE_CHANGED);
GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
if (r != 0) {
*stream_global = &stream->global;
}

@ -40,15 +40,16 @@
#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing);
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing);
int grpc_chttp2_unlocking_check_writes(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_writing *stream_writing;
grpc_chttp2_stream_global *first_reinserted_stream = NULL;
gpr_uint32 window_delta;
GPR_TIMER_BEGIN("grpc_chttp2_unlocking_check_writes", 0);
/* simple writes are queued to qbuf, and flushed here */
gpr_slice_buffer_swap(&transport_global->qbuf, &transport_writing->outbuf);
@ -67,98 +68,103 @@ int grpc_chttp2_unlocking_check_writes(
transport_global->sent_local_settings = 1;
}
GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
transport_global, outgoing_window);
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (grpc_chttp2_list_pop_writable_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
if (stream_global == first_reinserted_stream) {
/* prevent infinite loop */
grpc_chttp2_list_add_first_writable_stream(transport_global,
stream_global);
break;
}
gpr_uint8 sent_initial_metadata;
stream_writing->id = stream_global->id;
stream_writing->send_closed = GRPC_DONT_SEND_CLOSED;
if (stream_global->outgoing_sopb) {
window_delta = grpc_chttp2_preencode(
stream_global->outgoing_sopb->ops,
&stream_global->outgoing_sopb->nops,
(gpr_uint32)GPR_MIN(GPR_MIN(transport_global->outgoing_window,
stream_global->outgoing_window),
GPR_UINT32_MAX),
&stream_writing->sopb);
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT(
"write", transport_global, outgoing_window, -(gpr_int64)window_delta);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
outgoing_window,
-(gpr_int64)window_delta);
transport_global->outgoing_window -= window_delta;
stream_global->outgoing_window -= window_delta;
if (stream_global->write_state == GRPC_WRITE_STATE_QUEUED_CLOSE &&
stream_global->outgoing_sopb->nops == 0) {
if (!transport_global->is_client && !stream_global->read_closed) {
stream_writing->send_closed = GRPC_SEND_CLOSED_WITH_RST_STREAM;
stream_writing->read_closed = stream_global->read_closed;
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_writing, stream_writing,
outgoing_window, stream_global,
outgoing_window);
sent_initial_metadata = stream_writing->sent_initial_metadata;
if (!sent_initial_metadata && stream_global->send_initial_metadata) {
stream_writing->send_initial_metadata =
stream_global->send_initial_metadata;
stream_global->send_initial_metadata = NULL;
if (grpc_chttp2_list_add_writing_stream(transport_writing,
stream_writing)) {
GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
}
sent_initial_metadata = 1;
}
if (sent_initial_metadata) {
if (stream_global->send_message != NULL) {
gpr_slice hdr = gpr_slice_malloc(5);
gpr_uint8 *p = GPR_SLICE_START_PTR(hdr);
gpr_uint32 len = stream_global->send_message->length;
GPR_ASSERT(stream_writing->send_message == NULL);
p[0] = (stream_global->send_message->flags &
GRPC_WRITE_INTERNAL_COMPRESS) != 0;
p[1] = (gpr_uint8)(len >> 24);
p[2] = (gpr_uint8)(len >> 16);
p[3] = (gpr_uint8)(len >> 8);
p[4] = (gpr_uint8)(len);
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer, hdr);
if (stream_global->send_message->length > 0) {
stream_writing->send_message = stream_global->send_message;
} else {
stream_writing->send_closed = GRPC_SEND_CLOSED;
stream_writing->send_message = NULL;
}
stream_writing->stream_fetched = 0;
stream_global->send_message = NULL;
}
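/* The five bytes assembled above form the standard gRPC length-prefixed
   message header: one compressed-flag byte followed by the payload
   length as a big-endian 32-bit integer. Worked example (illustrative):
   an uncompressed 1234-byte message, 1234 = 0x000004D2, is framed as
     p[0] = 0x00;  p[1] = 0x00;  p[2] = 0x00;  p[3] = 0x04;  p[4] = 0xD2;
   so the peer reads exactly 1234 payload bytes after the header. */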
if (stream_global->outgoing_window > 0 &&
stream_global->outgoing_sopb->nops != 0) {
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
if (first_reinserted_stream == NULL &&
transport_global->outgoing_window == 0) {
first_reinserted_stream = stream_global;
if ((stream_writing->send_message != NULL ||
stream_writing->flow_controlled_buffer.length > 0) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
if (grpc_chttp2_list_add_writing_stream(transport_writing,
stream_writing)) {
GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
}
} else {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
}
}
if (stream_global->send_trailing_metadata) {
stream_writing->send_trailing_metadata =
stream_global->send_trailing_metadata;
stream_global->send_trailing_metadata = NULL;
if (grpc_chttp2_list_add_writing_stream(transport_writing,
stream_writing)) {
GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
}
}
}
if (!stream_global->read_closed &&
stream_global->unannounced_incoming_window > 0) {
GPR_ASSERT(stream_writing->announce_window == 0);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"write", transport_writing, stream_writing, announce_window,
stream_global->unannounced_incoming_window);
stream_writing->announce_window =
stream_global->unannounced_incoming_window;
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"write", transport_global, stream_global, incoming_window,
stream_global->unannounced_incoming_window);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"write", transport_global, stream_global, unannounced_incoming_window,
-(gpr_int64)stream_global->unannounced_incoming_window);
stream_global->incoming_window +=
stream_global->unannounced_incoming_window;
stream_global->unannounced_incoming_window = 0;
grpc_chttp2_list_add_incoming_window_updated(transport_global,
stream_global);
stream_global->writing_now |= GRPC_CHTTP2_WRITING_WINDOW;
}
if (stream_writing->sopb.nops > 0 ||
stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
stream_global->writing_now |= GRPC_CHTTP2_WRITING_DATA;
}
if (stream_global->writing_now != 0) {
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
stream_global->unannounced_incoming_window_for_writing > 1024) {
GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_global, stream_writing,
announce_window, stream_global,
unannounced_incoming_window_for_writing);
if (grpc_chttp2_list_add_writing_stream(transport_writing,
stream_writing)) {
GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
}
}
}
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
if (transport_global->incoming_window <
transport_global->connection_window_target * 3 / 4) {
window_delta = transport_global->connection_window_target -
transport_global->incoming_window;
if (transport_global->announce_incoming_window > 0) {
gpr_uint32 announced = (gpr_uint32)GPR_MIN(
transport_global->announce_incoming_window, GPR_UINT32_MAX);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_global,
announce_incoming_window, announced);
gpr_slice_buffer_add(&transport_writing->outbuf,
grpc_chttp2_window_update_create(0, window_delta));
GRPC_CHTTP2_FLOWCTL_TRACE_TRANSPORT("write", transport_global,
incoming_window, window_delta);
transport_global->incoming_window += window_delta;
grpc_chttp2_window_update_create(0, announced));
}
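/* Worked example of the 3/4 heuristic referenced above (the pre-change
   path in this hunk): with connection_window_target = 65535, an update
   is written once incoming_window drops below 49151 (3/4 of the
   target), and window_delta = 65535 - incoming_window restores the
   window to the full target with a single WINDOW_UPDATE frame. */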
GPR_TIMER_END("grpc_chttp2_unlocking_check_writes", 0);
return transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing);
}
@ -169,50 +175,146 @@ void grpc_chttp2_perform_writes(
GPR_ASSERT(transport_writing->outbuf.count > 0 ||
grpc_chttp2_list_have_writing_streams(transport_writing));
finalize_outbuf(transport_writing);
finalize_outbuf(exec_ctx, transport_writing);
GPR_ASSERT(transport_writing->outbuf.count > 0);
GPR_ASSERT(endpoint);
grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
&transport_writing->done_cb);
if (transport_writing->outbuf.count > 0) {
grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
&transport_writing->done_cb);
} else {
grpc_exec_ctx_enqueue(exec_ctx, &transport_writing->done_cb, 1);
}
}
static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
GPR_TIMER_BEGIN("finalize_outbuf", 0);
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
if (stream_writing->sopb.nops > 0 ||
stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops,
stream_writing->send_closed != GRPC_DONT_SEND_CLOSED,
stream_writing->id,
&transport_writing->hpack_compressor,
&transport_writing->outbuf);
stream_writing->sopb.nops = 0;
gpr_uint32 max_outgoing =
(gpr_uint32)GPR_MIN(GRPC_CHTTP2_MAX_PAYLOAD_LENGTH,
GPR_MIN(stream_writing->outgoing_window,
transport_writing->outgoing_window));
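/* Illustrative sizing: with GRPC_CHTTP2_MAX_PAYLOAD_LENGTH at the
   HTTP/2 default of 16384 bytes, a 10000-byte stream window and a
   70000-byte transport window, max_outgoing = 10000; whichever of the
   three limits is tightest binds the frames emitted below. */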
/* send initial metadata if it's available */
if (stream_writing->send_initial_metadata != NULL) {
grpc_chttp2_encode_header(
&transport_writing->hpack_compressor, stream_writing->id,
stream_writing->send_initial_metadata, 0, &transport_writing->outbuf);
stream_writing->send_initial_metadata = NULL;
stream_writing->sent_initial_metadata = 1;
}
if (stream_writing->announce_window > 0) {
/* send any window updates */
if (stream_writing->announce_window > 0 &&
stream_writing->send_initial_metadata == NULL) {
gpr_uint32 announce = stream_writing->announce_window;
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(stream_writing->id,
stream_writing->announce_window));
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"write", transport_writing, stream_writing, announce_window,
-(gpr_int64)stream_writing->announce_window);
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing, stream_writing,
announce_window, announce);
stream_writing->announce_window = 0;
}
if (stream_writing->send_closed == GRPC_SEND_CLOSED_WITH_RST_STREAM) {
gpr_slice_buffer_add(&transport_writing->outbuf,
grpc_chttp2_rst_stream_create(stream_writing->id,
GRPC_CHTTP2_NO_ERROR));
/* fetch any body bytes */
while (!stream_writing->fetching && stream_writing->send_message &&
stream_writing->flow_controlled_buffer.length < max_outgoing &&
stream_writing->stream_fetched <
stream_writing->send_message->length) {
if (grpc_byte_stream_next(exec_ctx, stream_writing->send_message,
&stream_writing->fetching_slice, max_outgoing,
&stream_writing->finished_fetch)) {
stream_writing->stream_fetched +=
GPR_SLICE_LENGTH(stream_writing->fetching_slice);
if (stream_writing->stream_fetched ==
stream_writing->send_message->length) {
stream_writing->send_message = NULL;
}
gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer,
stream_writing->fetching_slice);
} else {
stream_writing->fetching = 1;
}
}
/* send any body bytes */
if (stream_writing->flow_controlled_buffer.length > 0) {
if (max_outgoing > 0) {
gpr_uint32 send_bytes = (gpr_uint32)GPR_MIN(
max_outgoing, stream_writing->flow_controlled_buffer.length);
int is_last_data_frame =
stream_writing->send_message == NULL &&
send_bytes == stream_writing->flow_controlled_buffer.length;
int is_last_frame = is_last_data_frame &&
stream_writing->send_trailing_metadata != NULL &&
grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata);
grpc_chttp2_encode_data(
stream_writing->id, &stream_writing->flow_controlled_buffer,
send_bytes, is_last_frame, &transport_writing->outbuf);
GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing,
stream_writing, outgoing_window,
send_bytes);
GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_writing,
outgoing_window, send_bytes);
if (is_last_frame) {
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
if (is_last_data_frame) {
GPR_ASSERT(stream_writing->send_message == NULL);
stream_writing->sent_message = 1;
}
} else if (transport_writing->outgoing_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
}
/* send trailing metadata if it's available and we're ready for it */
if (stream_writing->send_message == NULL &&
stream_writing->flow_controlled_buffer.length == 0 &&
stream_writing->send_trailing_metadata != NULL) {
if (grpc_metadata_batch_is_empty(
stream_writing->send_trailing_metadata)) {
grpc_chttp2_encode_data(stream_writing->id,
&stream_writing->flow_controlled_buffer, 0, 1,
&transport_writing->outbuf);
} else {
grpc_chttp2_encode_header(&transport_writing->hpack_compressor,
stream_writing->id,
stream_writing->send_trailing_metadata, 1,
&transport_writing->outbuf);
}
if (!transport_writing->is_client && !stream_writing->read_closed) {
gpr_slice_buffer_add(&transport_writing->outbuf,
grpc_chttp2_rst_stream_create(
stream_writing->id, GRPC_CHTTP2_NO_ERROR));
}
stream_writing->send_trailing_metadata = NULL;
stream_writing->sent_trailing_metadata = 1;
}
/* if there's more to write, then loop, otherwise prepare to finish the
* write */
if ((stream_writing->flow_controlled_buffer.length > 0 ||
(stream_writing->send_message && !stream_writing->fetching)) &&
stream_writing->outgoing_window > 0) {
if (transport_writing->outgoing_window > 0) {
if (grpc_chttp2_list_add_writing_stream(transport_writing,
stream_writing)) {
/* do nothing - already reffed */
}
} else {
grpc_chttp2_list_add_stalled_by_transport(transport_writing,
stream_writing);
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
} else {
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
GPR_TIMER_END("finalize_outbuf", 0);
}
void grpc_chttp2_cleanup_writing(
@ -223,24 +325,26 @@ void grpc_chttp2_cleanup_writing(
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
GPR_ASSERT(stream_global->writing_now != 0);
if (stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
stream_global->write_state = GRPC_WRITE_STATE_SENT_CLOSE;
if (!transport_global->is_client) {
stream_global->read_closed = 1;
}
if (stream_writing->sent_trailing_metadata) {
grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
!transport_global->is_client, 1);
}
if (stream_global->writing_now & GRPC_CHTTP2_WRITING_DATA) {
if (stream_global->outgoing_sopb != NULL &&
stream_global->outgoing_sopb->nops == 0) {
GPR_ASSERT(stream_global->write_state != GRPC_WRITE_STATE_QUEUED_CLOSE);
stream_global->outgoing_sopb = NULL;
grpc_exec_ctx_enqueue(exec_ctx, stream_global->send_done_closure, 1);
}
if (stream_writing->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, &stream_global->send_initial_metadata_finished, 1);
}
if (stream_writing->sent_message) {
GPR_ASSERT(stream_writing->send_message == NULL);
GPR_ASSERT(stream_global->send_message_finished);
grpc_chttp2_complete_closure_step(
exec_ctx, &stream_global->send_message_finished, 1);
stream_writing->sent_message = 0;
}
if (stream_writing->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, &stream_global->send_trailing_metadata_finished, 1);
}
stream_global->writing_now = 0;
grpc_chttp2_list_add_read_write_state_changed(transport_global,
stream_global);
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
}
gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
}

File diff suppressed because it is too large

@ -41,9 +41,10 @@
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include "src/core/profiling/timers.h"
#include "src/core/support/murmur_hash.h"
#include "src/core/transport/chttp2/bin_encoder.h"
#include <grpc/support/time.h>
#define INITIAL_STRTAB_CAPACITY 4
#define INITIAL_MDTAB_CAPACITY 4
@ -232,24 +233,32 @@ static void metadata_context_destroy_locked(grpc_mdctx *ctx) {
}
void grpc_mdctx_ref(grpc_mdctx *ctx) {
GPR_TIMER_BEGIN("grpc_mdctx_ref", 0);
lock(ctx);
GPR_ASSERT(ctx->refs > 0);
ctx->refs++;
unlock(ctx);
GPR_TIMER_END("grpc_mdctx_ref", 0);
}
void grpc_mdctx_unref(grpc_mdctx *ctx) {
GPR_TIMER_BEGIN("grpc_mdctx_unref", 0);
lock(ctx);
GPR_ASSERT(ctx->refs > 0);
ctx->refs--;
unlock(ctx);
GPR_TIMER_END("grpc_mdctx_unref", 0);
}
static void grow_strtab(grpc_mdctx *ctx) {
size_t capacity = ctx->strtab_capacity * 2;
size_t i;
internal_string **strtab = gpr_malloc(sizeof(internal_string *) * capacity);
internal_string **strtab;
internal_string *s, *next;
GPR_TIMER_BEGIN("grow_strtab", 0);
strtab = gpr_malloc(sizeof(internal_string *) * capacity);
memset(strtab, 0, sizeof(internal_string *) * capacity);
for (i = 0; i < ctx->strtab_capacity; i++) {
@ -263,12 +272,15 @@ static void grow_strtab(grpc_mdctx *ctx) {
gpr_free(ctx->strtab);
ctx->strtab = strtab;
ctx->strtab_capacity = capacity;
GPR_TIMER_END("grow_strtab", 0);
}
static void internal_destroy_string(internal_string *is) {
internal_string **prev_next;
internal_string *cur;
grpc_mdctx *ctx = is->context;
GPR_TIMER_BEGIN("internal_destroy_string", 0);
if (is->has_base64_and_huffman_encoded) {
gpr_slice_unref(is->base64_and_huffman);
}
@ -279,6 +291,7 @@ static void internal_destroy_string(internal_string *is) {
*prev_next = cur->bucket_next;
ctx->strtab_count--;
gpr_free(is);
GPR_TIMER_END("internal_destroy_string", 0);
}
static void internal_string_ref(internal_string *s DEBUG_ARGS) {
@ -304,18 +317,22 @@ static void slice_ref(void *p) {
internal_string *is =
(internal_string *)((char *)p - offsetof(internal_string, refcount));
grpc_mdctx *ctx = is->context;
GPR_TIMER_BEGIN("slice_ref", 0);
lock(ctx);
INTERNAL_STRING_REF(is);
unlock(ctx);
GPR_TIMER_END("slice_ref", 0);
}
static void slice_unref(void *p) {
internal_string *is =
(internal_string *)((char *)p - offsetof(internal_string, refcount));
grpc_mdctx *ctx = is->context;
GPR_TIMER_BEGIN("slice_unref", 0);
lock(ctx);
INTERNAL_STRING_UNREF(is);
unlock(ctx);
GPR_TIMER_END("slice_unref", 0);
}
grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str) {
@ -334,6 +351,7 @@ grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
gpr_uint32 hash = gpr_murmur_hash3(buf, length, ctx->hash_seed);
internal_string *s;
GPR_TIMER_BEGIN("grpc_mdstr_from_buffer", 0);
lock(ctx);
/* search for an existing string */
@ -342,6 +360,7 @@ grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
INTERNAL_STRING_REF(s);
unlock(ctx);
GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
return (grpc_mdstr *)s;
}
}
@ -382,6 +401,7 @@ grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
}
unlock(ctx);
GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
return (grpc_mdstr *)s;
}
@ -391,6 +411,7 @@ static void gc_mdtab(grpc_mdctx *ctx) {
internal_metadata **prev_next;
internal_metadata *md, *next;
GPR_TIMER_BEGIN("gc_mdtab", 0);
for (i = 0; i < ctx->mdtab_capacity; i++) {
prev_next = &ctx->mdtab[i];
for (md = ctx->mdtab[i]; md; md = next) {
@ -412,17 +433,19 @@ static void gc_mdtab(grpc_mdctx *ctx) {
}
}
}
GPR_ASSERT(ctx->mdtab_free == 0);
GPR_TIMER_END("gc_mdtab", 0);
}
static void grow_mdtab(grpc_mdctx *ctx) {
size_t capacity = ctx->mdtab_capacity * 2;
size_t i;
internal_metadata **mdtab =
gpr_malloc(sizeof(internal_metadata *) * capacity);
internal_metadata **mdtab;
internal_metadata *md, *next;
gpr_uint32 hash;
GPR_TIMER_BEGIN("grow_mdtab", 0);
mdtab = gpr_malloc(sizeof(internal_metadata *) * capacity);
memset(mdtab, 0, sizeof(internal_metadata *) * capacity);
for (i = 0; i < ctx->mdtab_capacity; i++) {
@ -437,6 +460,8 @@ static void grow_mdtab(grpc_mdctx *ctx) {
gpr_free(ctx->mdtab);
ctx->mdtab = mdtab;
ctx->mdtab_capacity = capacity;
GPR_TIMER_END("grow_mdtab", 0);
}
static void rehash_mdtab(grpc_mdctx *ctx) {
@ -458,6 +483,8 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
GPR_ASSERT(key->context == ctx);
GPR_ASSERT(value->context == ctx);
GPR_TIMER_BEGIN("grpc_mdelem_from_metadata_strings", 0);
lock(ctx);
/* search for an existing pair */
@ -467,6 +494,7 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
INTERNAL_STRING_UNREF(key);
INTERNAL_STRING_UNREF(value);
unlock(ctx);
GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
return (grpc_mdelem *)md;
}
}
@ -496,6 +524,8 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
unlock(ctx);
GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
return (grpc_mdelem *)md;
}
@ -542,6 +572,7 @@ grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
internal_metadata *md = (internal_metadata *)gmd;
if (!md) return;
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM UNREF:%p:%d->%d: '%s' = '%s'", md,
@ -552,12 +583,14 @@ void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
#endif
if (2 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
grpc_mdctx *ctx = md->context;
GPR_TIMER_BEGIN("grpc_mdelem_unref.to_zero", 0);
lock(ctx);
if (1 == gpr_atm_no_barrier_load(&md->refcnt)) {
ctx->mdtab_free++;
gpr_atm_no_barrier_store(&md->refcnt, 0);
}
unlock(ctx);
GPR_TIMER_END("grpc_mdelem_unref.to_zero", 0);
}
}
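/* A reading of the refcount protocol above, as far as these hunks show:
   a stored count of 1 means "held only by the mdtab", so when an
   external unref drops the count from 2 toward 1 the element is parked
   in the free pool under the ctx lock (mdtab_free++, count forced to
   0), and gc_mdtab later sweeps those zero-count entries. */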

@ -31,7 +31,7 @@
*
*/
#include "src/core/transport/stream_op.h"
#include "src/core/transport/metadata_batch.h"
#include <string.h>
@ -40,143 +40,6 @@
#include "src/core/profiling/timers.h"
/* Exponential growth function: Given x, return a larger x.
Currently we grow by 1.5 times upon reallocation. */
#define GROW(x) (3 * (x) / 2)
void grpc_sopb_init(grpc_stream_op_buffer *sopb) {
sopb->ops = sopb->inlined_ops;
sopb->nops = 0;
sopb->capacity = GRPC_SOPB_INLINE_ELEMENTS;
}
void grpc_sopb_destroy(grpc_stream_op_buffer *sopb) {
grpc_stream_ops_unref_owned_objects(sopb->ops, sopb->nops);
if (sopb->ops != sopb->inlined_ops) gpr_free(sopb->ops);
}
void grpc_sopb_reset(grpc_stream_op_buffer *sopb) {
grpc_stream_ops_unref_owned_objects(sopb->ops, sopb->nops);
sopb->nops = 0;
}
void grpc_sopb_swap(grpc_stream_op_buffer *a, grpc_stream_op_buffer *b) {
GPR_SWAP(size_t, a->nops, b->nops);
GPR_SWAP(size_t, a->capacity, b->capacity);
if (a->ops == a->inlined_ops) {
if (b->ops == b->inlined_ops) {
/* swap contents of inlined buffer */
grpc_stream_op temp[GRPC_SOPB_INLINE_ELEMENTS];
memcpy(temp, a->ops, b->nops * sizeof(grpc_stream_op));
memcpy(a->ops, b->ops, a->nops * sizeof(grpc_stream_op));
memcpy(b->ops, temp, b->nops * sizeof(grpc_stream_op));
} else {
/* a is inlined, b is not - copy a inlined into b, fix pointers */
a->ops = b->ops;
b->ops = b->inlined_ops;
memcpy(b->ops, a->inlined_ops, b->nops * sizeof(grpc_stream_op));
}
} else if (b->ops == b->inlined_ops) {
/* b is inlined, a is not - copy b inlined into a, fix pointers */
b->ops = a->ops;
a->ops = a->inlined_ops;
memcpy(a->ops, b->inlined_ops, a->nops * sizeof(grpc_stream_op));
} else {
/* no inlining: easy swap */
GPR_SWAP(grpc_stream_op *, a->ops, b->ops);
}
}
void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops) {
size_t i;
for (i = 0; i < nops; i++) {
switch (ops[i].type) {
case GRPC_OP_SLICE:
gpr_slice_unref(ops[i].data.slice);
break;
case GRPC_OP_METADATA:
grpc_metadata_batch_destroy(&ops[i].data.metadata);
break;
case GRPC_NO_OP:
case GRPC_OP_BEGIN_MESSAGE:
break;
}
}
}
static void expandto(grpc_stream_op_buffer *sopb, size_t new_capacity) {
sopb->capacity = new_capacity;
if (sopb->ops == sopb->inlined_ops) {
sopb->ops = gpr_malloc(sizeof(grpc_stream_op) * new_capacity);
memcpy(sopb->ops, sopb->inlined_ops, sopb->nops * sizeof(grpc_stream_op));
} else {
sopb->ops = gpr_realloc(sopb->ops, sizeof(grpc_stream_op) * new_capacity);
}
}
static grpc_stream_op *add(grpc_stream_op_buffer *sopb) {
grpc_stream_op *out;
GPR_ASSERT(sopb->nops <= sopb->capacity);
if (sopb->nops == sopb->capacity) {
expandto(sopb, GROW(sopb->capacity));
}
out = sopb->ops + sopb->nops;
sopb->nops++;
return out;
}
void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb) {
add(sopb)->type = GRPC_NO_OP;
}
void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
gpr_uint32 flags) {
grpc_stream_op *op = add(sopb);
op->type = GRPC_OP_BEGIN_MESSAGE;
op->data.begin_message.length = length;
op->data.begin_message.flags = flags;
}
void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb,
grpc_metadata_batch b) {
grpc_stream_op *op = add(sopb);
op->type = GRPC_OP_METADATA;
op->data.metadata = b;
}
void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice) {
grpc_stream_op *op = add(sopb);
op->type = GRPC_OP_SLICE;
op->data.slice = slice;
}
void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
size_t nops) {
size_t orig_nops = sopb->nops;
size_t new_nops = orig_nops + nops;
if (new_nops > sopb->capacity) {
expandto(sopb, GPR_MAX(GROW(sopb->capacity), new_nops));
}
memcpy(sopb->ops + orig_nops, ops, sizeof(grpc_stream_op) * nops);
sopb->nops = new_nops;
}
void grpc_sopb_move_to(grpc_stream_op_buffer *src, grpc_stream_op_buffer *dst) {
if (src->nops == 0) {
return;
}
if (dst->nops == 0) {
grpc_sopb_swap(src, dst);
return;
}
grpc_sopb_append(dst, src->ops, src->nops);
src->nops = 0;
}
static void assert_valid_list(grpc_mdelem_list *list) {
#ifndef NDEBUG
grpc_linked_mdelem *l;
@ -200,13 +63,11 @@ static void assert_valid_list(grpc_mdelem_list *list) {
#ifndef NDEBUG
void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
}
#endif /* NDEBUG */
void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
batch->list.head = batch->list.tail = batch->garbage.head =
batch->garbage.tail = NULL;
batch->list.head = batch->list.tail = NULL;
batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
@ -215,9 +76,6 @@ void grpc_metadata_batch_destroy(grpc_metadata_batch *batch) {
for (l = batch->list.head; l; l = l->next) {
GRPC_MDELEM_UNREF(l->md);
}
for (l = batch->garbage.head; l; l = l->next) {
GRPC_MDELEM_UNREF(l->md);
}
}
void grpc_metadata_batch_add_head(grpc_metadata_batch *batch,
@ -283,10 +141,6 @@ void grpc_metadata_batch_merge(grpc_metadata_batch *target,
next = l->next;
link_tail(&target->list, l);
}
for (l = to_add->garbage.head; l; l = next) {
next = l->next;
link_tail(&target->garbage, l);
}
}
void grpc_metadata_batch_move(grpc_metadata_batch *dst,
@ -305,7 +159,6 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
GPR_TIMER_BEGIN("grpc_metadata_batch_filter", 0);
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
for (l = batch->list.head; l; l = next) {
grpc_mdelem *orig = l->md;
grpc_mdelem *filt = filter(user_data, orig);
@ -324,14 +177,28 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
batch->list.tail = l->prev;
}
assert_valid_list(&batch->list);
link_head(&batch->garbage, l);
GRPC_MDELEM_UNREF(l->md);
} else if (filt != orig) {
GRPC_MDELEM_UNREF(orig);
l->md = filt;
}
}
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
GPR_TIMER_END("grpc_metadata_batch_filter", 0);
}
static grpc_mdelem *no_metadata_for_you(void *user_data, grpc_mdelem *elem) {
return NULL;
}
void grpc_metadata_batch_clear(grpc_metadata_batch *batch) {
batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_metadata_batch_filter(batch, no_metadata_for_you, NULL);
}
int grpc_metadata_batch_is_empty(grpc_metadata_batch *batch) {
return batch->list.head == NULL &&
gpr_time_cmp(gpr_inf_future(batch->deadline.clock_type),
batch->deadline) == 0;
}
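/* e.g. a batch fresh from grpc_metadata_batch_init() is empty: list.head
   is NULL and deadline equals gpr_inf_future, so both tests pass. Adding
   an element or setting a grpc-timeout deadline makes it non-empty,
   which is what forces finalize_outbuf (earlier in this diff) to emit a
   HEADERS frame for trailing metadata instead of closing the stream
   with a bare empty DATA frame. */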

@ -40,39 +40,6 @@
#include <grpc/support/time.h>
#include "src/core/transport/metadata.h"
/* this many stream ops are inlined into a sopb before allocating */
#define GRPC_SOPB_INLINE_ELEMENTS 4
/* Operations that can be performed on a stream.
Used by grpc_stream_op. */
typedef enum grpc_stream_op_code {
/* Do nothing code. Useful if rewriting a batch to exclude some operations.
Must be ignored by receivers */
GRPC_NO_OP,
GRPC_OP_METADATA,
/* Begin a message/metadata element/status - as defined by
grpc_message_type. */
GRPC_OP_BEGIN_MESSAGE,
/* Add a slice of data to the current message/metadata element/status.
Must not overflow the forward declared length. */
GRPC_OP_SLICE
} grpc_stream_op_code;
/** Internal bit flag for grpc_begin_message's \a flags signaling the use of
* compression for the message */
#define GRPC_WRITE_INTERNAL_COMPRESS (0x80000000u)
/** Mask of all valid internal flags. */
#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
/* Arguments for GRPC_OP_BEGIN_MESSAGE */
typedef struct grpc_begin_message {
/* How many bytes of data will this message contain */
gpr_uint32 length;
/* Write flags for the message: see grpc.h GRPC_WRITE_* for the public bits,
* GRPC_WRITE_INTERNAL_* for the internal ones. */
gpr_uint32 flags;
} grpc_begin_message;
typedef struct grpc_linked_mdelem {
grpc_mdelem *md;
struct grpc_linked_mdelem *next;
@ -88,10 +55,6 @@ typedef struct grpc_mdelem_list {
typedef struct grpc_metadata_batch {
/** Metadata elements in this batch */
grpc_mdelem_list list;
/** Elements that have been removed from the batch, but have
not yet been unreffed - used to allow collecting garbage
under a single metadata context lock */
grpc_mdelem_list garbage;
/** Used to calculate grpc-timeout at the point of sending,
or gpr_inf_future if this batch does not need to send a
grpc-timeout */
@ -102,6 +65,8 @@ void grpc_metadata_batch_init(grpc_metadata_batch *batch);
void grpc_metadata_batch_destroy(grpc_metadata_batch *batch);
void grpc_metadata_batch_merge(grpc_metadata_batch *target,
grpc_metadata_batch *add);
void grpc_metadata_batch_clear(grpc_metadata_batch *batch);
int grpc_metadata_batch_is_empty(grpc_metadata_batch *batch);
/** Moves the metadata information from \a src to \a dst. Upon return, \a src is
* zeroed. */
@ -159,54 +124,4 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd);
} while (0)
#endif
/* Represents a single operation performed on a stream/transport */
typedef struct grpc_stream_op {
/* the operation to be applied */
enum grpc_stream_op_code type;
/* the arguments to this operation. union fields are named according to the
associated op-code */
union {
grpc_begin_message begin_message;
grpc_metadata_batch metadata;
gpr_slice slice;
} data;
} grpc_stream_op;
/** A stream op buffer is a wrapper around stream operations that is
* dynamically extendable. */
typedef struct grpc_stream_op_buffer {
grpc_stream_op *ops;
size_t nops;
size_t capacity;
grpc_stream_op inlined_ops[GRPC_SOPB_INLINE_ELEMENTS];
} grpc_stream_op_buffer;
/* Initialize a stream op buffer */
void grpc_sopb_init(grpc_stream_op_buffer *sopb);
/* Destroy a stream op buffer */
void grpc_sopb_destroy(grpc_stream_op_buffer *sopb);
/* Reset a sopb to no elements */
void grpc_sopb_reset(grpc_stream_op_buffer *sopb);
/* Swap two sopbs */
void grpc_sopb_swap(grpc_stream_op_buffer *a, grpc_stream_op_buffer *b);
void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops);
/* Append a GRPC_NO_OP to a buffer */
void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb);
/* Append a GRPC_OP_BEGIN to a buffer */
void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
gpr_uint32 flags);
void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb,
grpc_metadata_batch metadata);
/* Append a GRPC_SLICE to a buffer - does not ref/unref the slice */
void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice);
/* Append a buffer to a buffer - does not ref/unref any internal objects */
void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
size_t nops);
void grpc_sopb_move_to(grpc_stream_op_buffer *src, grpc_stream_op_buffer *dst);
char *grpc_sopb_string(grpc_stream_op_buffer *sopb);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_STREAM_OP_H */

@ -33,9 +33,36 @@
#include "src/core/transport/transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include "src/core/transport/transport_impl.h"
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "STREAM %p:%p REF %d->%d %s", refcount,
refcount->destroy.cb_arg, val, val + 1, reason);
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
gpr_ref(&refcount->refs);
}
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "STREAM %p:%p UNREF %d->%d %s", refcount,
refcount->destroy.cb_arg, val, val - 1, reason);
#else
void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
#endif
if (gpr_unref(&refcount->refs)) {
grpc_exec_ctx_enqueue(exec_ctx, &refcount->destroy, 1);
}
}
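A sketch of how an owner might wire this refcount up (the names here are hypothetical; only ref/unref appear in this hunk): the destroy closure points at the cleanup function, and the final grpc_stream_unref enqueues it on the exec_ctx rather than destroying inline.
typedef struct {
  grpc_stream_refcount refs;
  /* ... stream state ... */
} my_stream;
static void destroy_my_stream(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  gpr_free(arg); /* hypothetical cleanup */
}
static void my_stream_init(my_stream *s) {
  gpr_ref_init(&s->refs.refs, 1); /* one ref for the creator */
  grpc_closure_init(&s->refs.destroy, destroy_my_stream, s);
}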
size_t grpc_transport_stream_size(grpc_transport *transport) {
return transport->vtable->sizeof_stream;
}
@ -47,10 +74,10 @@ void grpc_transport_destroy(grpc_exec_ctx *exec_ctx,
int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport, grpc_stream *stream,
const void *server_data,
grpc_transport_stream_op *initial_op) {
return transport->vtable->init_stream(exec_ctx, transport, stream,
server_data, initial_op);
grpc_stream_refcount *refcount,
const void *server_data) {
return transport->vtable->init_stream(exec_ctx, transport, stream, refcount,
server_data);
}
void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
@ -66,6 +93,12 @@ void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
transport->vtable->perform_op(exec_ctx, transport, op);
}
void grpc_transport_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_transport *transport, grpc_stream *stream,
grpc_pollset *pollset) {
transport->vtable->set_pollset(exec_ctx, transport, stream, pollset);
}
void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
grpc_stream *stream) {
@ -79,9 +112,8 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op) {
grpc_exec_ctx_enqueue(exec_ctx, op->on_done_recv, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->on_done_send, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->recv_message_ready, 0);
grpc_exec_ctx_enqueue(exec_ctx, op->on_complete, 0);
}
void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
@ -129,9 +161,9 @@ void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
if (optional_message) {
cmd = gpr_malloc(sizeof(*cmd));
cmd->message = *optional_message;
cmd->then_call = op->on_consumed;
cmd->then_call = op->on_complete;
grpc_closure_init(&cmd->closure, free_message, cmd);
op->on_consumed = &cmd->closure;
op->on_complete = &cmd->closure;
op->optional_close_message = &cmd->message;
}
op->close_with_status = status;

@ -38,7 +38,8 @@
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/pollset_set.h"
#include "src/core/transport/stream_op.h"
#include "src/core/transport/metadata_batch.h"
#include "src/core/transport/byte_stream.h"
#include "src/core/channel/context.h"
/* forward declarations */
@ -49,36 +50,35 @@ typedef struct grpc_transport grpc_transport;
for a stream. */
typedef struct grpc_stream grpc_stream;
/* Represents the send/recv closed state of a stream. */
typedef enum grpc_stream_state {
/* the stream is open for sends and receives */
GRPC_STREAM_OPEN,
/* the stream is closed for sends, but may still receive data */
GRPC_STREAM_SEND_CLOSED,
/* the stream is closed for receives, but may still send data */
GRPC_STREAM_RECV_CLOSED,
/* the stream is closed for both sends and receives */
GRPC_STREAM_CLOSED
} grpc_stream_state;
typedef struct grpc_stream_refcount {
gpr_refcount refs;
grpc_closure destroy;
} grpc_stream_refcount;
/*#define GRPC_STREAM_REFCOUNT_DEBUG*/
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason);
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
const char *reason);
#else
void grpc_stream_ref(grpc_stream_refcount *refcount);
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount);
#endif
/* Transport stream op: a set of operations to perform on a transport
against a single stream */
typedef struct grpc_transport_stream_op {
grpc_closure *on_consumed;
grpc_metadata_batch *send_initial_metadata;
grpc_metadata_batch *send_trailing_metadata;
grpc_stream_op_buffer *send_ops;
int is_last_send;
grpc_closure *on_done_send;
grpc_byte_stream *send_message;
grpc_stream_op_buffer *recv_ops;
grpc_stream_state *recv_state;
/** The number of bytes this peer is currently prepared to receive.
These bytes will be eventually used to replenish per-stream flow control
windows. */
size_t max_recv_bytes;
grpc_closure *on_done_recv;
grpc_metadata_batch *recv_initial_metadata;
grpc_byte_stream **recv_message;
grpc_closure *recv_message_ready;
grpc_metadata_batch *recv_trailing_metadata;
grpc_pollset *bind_pollset;
grpc_closure *on_complete;
/** If != GRPC_STATUS_OK, cancel this stream */
grpc_status_code cancel_with_status;
@ -110,8 +110,8 @@ typedef struct grpc_transport_op {
gpr_slice *goaway_message;
/** set the callback for accepting new streams;
this is a permanent callback, unlike the other one-shot closures */
void (*set_accept_stream)(void *user_data, grpc_transport *transport,
const void *server_data);
void (*set_accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_transport *transport, const void *server_data);
void *set_accept_stream_user_data;
/** add this transport to a pollset */
grpc_pollset *bind_pollset;
@ -136,8 +136,12 @@ size_t grpc_transport_stream_size(grpc_transport *transport);
supplied from the accept_stream callback function */
int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport, grpc_stream *stream,
const void *server_data,
grpc_transport_stream_op *initial_op);
grpc_stream_refcount *refcount,
const void *server_data);
void grpc_transport_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_transport *transport, grpc_stream *stream,
grpc_pollset *pollset);
/* Destroy transport data for a stream.

@ -43,8 +43,12 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_init_stream */
int (*init_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
grpc_stream *stream, const void *server_data,
grpc_transport_stream_op *initial_op);
grpc_stream *stream, grpc_stream_refcount *refcount,
const void *server_data);
/* implementation of grpc_transport_set_pollset */
void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
grpc_stream *stream, grpc_pollset *pollset);
/* implementation of grpc_transport_perform_stream_op */
void (*perform_stream_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,

@ -69,42 +69,6 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
}
}
char *grpc_sopb_string(grpc_stream_op_buffer *sopb) {
char *out;
char *tmp;
size_t i;
gpr_strvec b;
gpr_strvec_init(&b);
for (i = 0; i < sopb->nops; i++) {
grpc_stream_op *op = &sopb->ops[i];
if (i > 0) gpr_strvec_add(&b, gpr_strdup(", "));
switch (op->type) {
case GRPC_NO_OP:
gpr_strvec_add(&b, gpr_strdup("NO_OP"));
break;
case GRPC_OP_BEGIN_MESSAGE:
gpr_asprintf(&tmp, "BEGIN_MESSAGE:%d", op->data.begin_message.length);
gpr_strvec_add(&b, tmp);
break;
case GRPC_OP_SLICE:
gpr_asprintf(&tmp, "SLICE:%d", GPR_SLICE_LENGTH(op->data.slice));
gpr_strvec_add(&b, tmp);
break;
case GRPC_OP_METADATA:
gpr_strvec_add(&b, gpr_strdup("METADATA{"));
put_metadata_list(&b, op->data.metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
break;
}
}
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
return out;
}
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
char *tmp;
char *out;
@ -113,42 +77,52 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
gpr_strvec b;
gpr_strvec_init(&b);
if (op->send_ops) {
if (op->send_initial_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_asprintf(&tmp, "SEND%s:%p", op->is_last_send ? "_LAST" : "",
op->on_done_send);
gpr_strvec_add(&b, tmp);
gpr_strvec_add(&b, gpr_strdup("["));
gpr_strvec_add(&b, grpc_sopb_string(op->send_ops));
gpr_strvec_add(&b, gpr_strdup("]"));
gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));
put_metadata_list(&b, *op->send_initial_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
if (op->recv_ops) {
if (op->send_message != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_asprintf(&tmp, "RECV:%p:max_recv_bytes=%d", op->on_done_recv,
op->max_recv_bytes);
gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d",
op->send_message->flags, op->send_message->length);
gpr_strvec_add(&b, tmp);
}
if (op->bind_pollset) {
if (op->send_trailing_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("BIND"));
gpr_strvec_add(&b, gpr_strdup("SEND_TRAILING_METADATA{"));
put_metadata_list(&b, *op->send_trailing_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
if (op->cancel_with_status != GRPC_STATUS_OK) {
if (op->recv_initial_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_asprintf(&tmp, "CANCEL:%d", op->cancel_with_status);
gpr_strvec_add(&b, tmp);
gpr_strvec_add(&b, gpr_strdup("RECV_INITIAL_METADATA"));
}
if (op->recv_message != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
}
if (op->on_consumed != NULL) {
if (op->recv_trailing_metadata != NULL) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_asprintf(&tmp, "ON_CONSUMED:%p", op->on_consumed);
gpr_strvec_add(&b, gpr_strdup("RECV_TRAILING_METADATA"));
}
if (op->cancel_with_status != GRPC_STATUS_OK) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_asprintf(&tmp, "CANCEL:%d", op->cancel_with_status);
gpr_strvec_add(&b, tmp);
}
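A minimal sketch of an op the printer above would format. The field names come from this diff; the setup, the required headers, and the md/payload variables are assumed:

static void log_op_example(grpc_metadata_batch *md) {
  grpc_byte_stream *payload = NULL;
  grpc_transport_stream_op op;
  char *s;
  memset(&op, 0, sizeof(op)); /* cancel_with_status becomes GRPC_STATUS_OK */
  op.send_initial_metadata = md;      /* prints SEND_INITIAL_METADATA{...} */
  op.recv_message = &payload;         /* prints RECV_MESSAGE */
  op.cancel_with_status = GRPC_STATUS_CANCELLED; /* prints CANCEL:1 */
  s = grpc_transport_stream_op_string(&op);
  gpr_log(GPR_DEBUG, "%s", s);
  gpr_free(s);
}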

@ -38,12 +38,8 @@ namespace grpc {
Alarm::Alarm(CompletionQueue* cq, gpr_timespec deadline, void* tag)
: alarm_(grpc_alarm_create(cq->cq(), deadline, tag)) {}
Alarm::~Alarm() {
grpc_alarm_destroy(alarm_);
}
Alarm::~Alarm() { grpc_alarm_destroy(alarm_); }
void Alarm::Cancel() {
grpc_alarm_cancel(alarm_);
}
void Alarm::Cancel() { grpc_alarm_cancel(alarm_); }
} // namespace grpc
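For reference, the C-core surface this thin wrapper forwards to, as a hedged standalone sketch; the tag value and the destroy-after-tag-consumption ordering are illustrative:

static void alarm_example(grpc_completion_queue *cq) {
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(1, GPR_TIMESPAN));
  grpc_alarm *al = grpc_alarm_create(cq, deadline, (void *)(gpr_intptr)0xa1a1);
  grpc_alarm_cancel(al); /* optional: the tag then surfaces with success == 0 */
  /* ... consume the tag from cq before destroying ... */
  grpc_alarm_destroy(al);
}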

@ -388,6 +388,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
shutdown_ = true;
grpc_server_shutdown_and_notify(server_, cq_.cq(), new ShutdownRequest());
cq_.Shutdown();
lock.unlock();
// Spin, eating requests until the completion queue is completely shut down.
// If the deadline expires then cancel anything that's pending and keep
// spinning forever until the work is actually drained.
@ -403,6 +404,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
SyncRequest::CallData call_data(this, request);
}
}
lock.lock();
// Wait for running callbacks to finish.
while (num_running_cb_ != 0) {

@ -42,22 +42,19 @@
#include "test/core/util/test_config.h"
static void channel_init_func(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
GPR_ASSERT(args->num_args == 1);
GPR_ASSERT(args->args[0].type == GRPC_ARG_INTEGER);
GPR_ASSERT(0 == strcmp(args->args[0].key, "test_key"));
GPR_ASSERT(args->args[0].value.integer == 42);
GPR_ASSERT(is_first);
GPR_ASSERT(is_last);
grpc_channel_element *elem,
grpc_channel_element_args *args) {
GPR_ASSERT(args->channel_args->num_args == 1);
GPR_ASSERT(args->channel_args->args[0].type == GRPC_ARG_INTEGER);
GPR_ASSERT(0 == strcmp(args->channel_args->args[0].key, "test_key"));
GPR_ASSERT(args->channel_args->args[0].value.integer == 42);
GPR_ASSERT(args->is_first);
GPR_ASSERT(args->is_last);
*(int *)(elem->channel_data) = 0;
}
static void call_init_func(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
++*(int *)(elem->channel_data);
*(int *)(elem->call_data) = 0;
}
@ -86,9 +83,9 @@ static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
static void test_create_channel_stack(void) {
const grpc_channel_filter filter = {
call_func, channel_func, sizeof(int), call_init_func, call_destroy_func,
sizeof(int), channel_init_func, channel_destroy_func, get_peer,
"some_test_filter"};
call_func, channel_func, sizeof(int), call_init_func,
grpc_call_stack_ignore_set_pollset, call_destroy_func, sizeof(int),
channel_init_func, channel_destroy_func, get_peer, "some_test_filter"};
const grpc_channel_filter *filters = &filter;
grpc_channel_stack *channel_stack;
grpc_call_stack *call_stack;
@ -119,7 +116,8 @@ static void test_create_channel_stack(void) {
GPR_ASSERT(*channel_data == 0);
call_stack = gpr_malloc(channel_stack->call_stack_size);
grpc_call_stack_init(&exec_ctx, channel_stack, NULL, NULL, call_stack);
grpc_call_stack_init(&exec_ctx, channel_stack, 0, NULL, NULL, NULL, NULL,
call_stack);
GPR_ASSERT(call_stack->count == 1);
call_elem = grpc_call_stack_element(call_stack, 0);
GPR_ASSERT(call_elem->filter == channel_elem->filter);

@ -119,14 +119,15 @@ static void test_spec_destroy(test_spec *spec) {
static void *tag(gpr_intptr t) { return (void *)t; }
static gpr_timespec n_seconds_time(int n) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
static gpr_timespec n_millis_time(int n) {
return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(n, GPR_TIMESPAN));
}
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, n_seconds_time(5), NULL);
ev = grpc_completion_queue_next(cq, n_millis_time(5000), NULL);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
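The millisecond helper generalizes the old seconds-based one; as an illustrative check (not part of the patch):

static void deadline_helpers_agree(void) {
  /* n_millis_time(5000) matches the old five-second helper, up to the
     skew between the two gpr_now() reads */
  gpr_timespec a = n_millis_time(5000);
  gpr_timespec b = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                gpr_time_from_seconds(5, GPR_TIMESPAN));
  GPR_ASSERT(gpr_time_cmp(a, b) <= 0);
}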
@ -134,29 +135,47 @@ static void kill_server(const servers_fixture *f, size_t i) {
gpr_log(GPR_INFO, "KILLING SERVER %d", i);
GPR_ASSERT(f->servers[i] != NULL);
grpc_server_shutdown_and_notify(f->servers[i], f->cq, tag(10000));
GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(10000),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5),
NULL).type == GRPC_OP_COMPLETE);
GPR_ASSERT(
grpc_completion_queue_pluck(f->cq, tag(10000), n_millis_time(5000), NULL)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->servers[i]);
f->servers[i] = NULL;
}
static void revive_server(const servers_fixture *f, size_t i) {
typedef struct request_data {
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
char *details;
size_t details_capacity;
grpc_status_code status;
grpc_call_details *call_details;
} request_data;
static void revive_server(const servers_fixture *f, request_data *rdata,
size_t i) {
int got_port;
gpr_log(GPR_INFO, "RAISE AGAIN SERVER %d", i);
GPR_ASSERT(f->servers[i] == NULL);
gpr_log(GPR_DEBUG, "revive: %s", f->servers_hostports[i]);
f->servers[i] = grpc_server_create(NULL, NULL);
grpc_server_register_completion_queue(f->servers[i], f->cq, NULL);
GPR_ASSERT((got_port = grpc_server_add_insecure_http2_port(
f->servers[i], f->servers_hostports[i])) > 0);
grpc_server_start(f->servers[i]);
GPR_ASSERT(GRPC_CALL_OK ==
grpc_server_request_call(f->servers[i], &f->server_calls[i],
&rdata->call_details[i],
&f->request_metadata_recv[i], f->cq,
f->cq, tag(1000 + (int)i)));
}
static servers_fixture *setup_servers(const char *server_host,
request_data *rdata,
const size_t num_servers) {
servers_fixture *f = gpr_malloc(sizeof(servers_fixture));
int *ports;
int got_port;
size_t i;
f->num_servers = num_servers;
@ -164,23 +183,16 @@ static servers_fixture *setup_servers(const char *server_host,
f->request_metadata_recv =
gpr_malloc(sizeof(grpc_metadata_array) * num_servers);
/* Create servers. */
ports = gpr_malloc(sizeof(int *) * num_servers);
f->servers = gpr_malloc(sizeof(grpc_server *) * num_servers);
f->servers_hostports = gpr_malloc(sizeof(char *) * num_servers);
f->cq = grpc_completion_queue_create(NULL);
for (i = 0; i < num_servers; i++) {
ports[i] = grpc_pick_unused_port_or_die();
gpr_join_host_port(&f->servers_hostports[i], server_host, ports[i]);
f->servers[i] = grpc_server_create(NULL, NULL);
grpc_server_register_completion_queue(f->servers[i], f->cq, NULL);
GPR_ASSERT((got_port = grpc_server_add_insecure_http2_port(
f->servers[i], f->servers_hostports[i])) > 0);
GPR_ASSERT(ports[i] == got_port);
grpc_server_start(f->servers[i]);
grpc_metadata_array_init(&f->request_metadata_recv[i]);
gpr_join_host_port(&f->servers_hostports[i], server_host,
grpc_pick_unused_port_or_die());
f->servers[i] = 0;
revive_server(f, rdata, i);
}
gpr_free(ports);
return f;
}
@ -191,8 +203,8 @@ static void teardown_servers(servers_fixture *f) {
if (f->servers[i] == NULL) continue;
grpc_server_shutdown_and_notify(f->servers[i], f->cq, tag(10000));
GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(10000),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5),
NULL).type == GRPC_OP_COMPLETE);
n_millis_time(5000), NULL)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->servers[i]);
}
grpc_completion_queue_shutdown(f->cq);
@ -203,6 +215,7 @@ static void teardown_servers(servers_fixture *f) {
for (i = 0; i < f->num_servers; i++) {
gpr_free(f->servers_hostports[i]);
grpc_metadata_array_destroy(&f->request_metadata_recv[i]);
}
gpr_free(f->servers_hostports);
@ -211,22 +224,12 @@ static void teardown_servers(servers_fixture *f) {
gpr_free(f);
}
typedef struct request_data {
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
char *details;
size_t details_capacity;
grpc_status_code status;
grpc_call_details *call_details;
} request_data;
/** Returns connection sequence (server indices), which must be freed */
int *perform_request(servers_fixture *f, grpc_channel *client,
request_data *rdata, const test_spec *spec) {
grpc_call *c;
int s_idx;
int *s_valid;
gpr_timespec deadline;
grpc_op ops[6];
grpc_op *op;
int was_cancelled;
@ -234,13 +237,12 @@ int *perform_request(servers_fixture *f, grpc_channel *client,
grpc_event ev;
int read_tag;
int *connection_sequence;
int completed_client;
s_valid = gpr_malloc(sizeof(int) * f->num_servers);
rdata->call_details = gpr_malloc(sizeof(grpc_call_details) * f->num_servers);
connection_sequence = gpr_malloc(sizeof(int) * spec->num_iters);
/* Send a trivial request. */
deadline = n_seconds_time(60);
for (iter_num = 0; iter_num < spec->num_iters; iter_num++) {
cq_verifier *cqv = cq_verifier_create(f->cq);
@ -253,7 +255,7 @@ int *perform_request(servers_fixture *f, grpc_channel *client,
kill_server(f, i);
} else if (spec->revive_at[iter_num][i] != 0) {
/* killing takes precedence */
revive_server(f, i);
revive_server(f, rdata, i);
}
}
@ -267,8 +269,10 @@ int *perform_request(servers_fixture *f, grpc_channel *client,
memset(s_valid, 0, f->num_servers * sizeof(int));
c = grpc_channel_create_call(client, NULL, GRPC_PROPAGATE_DEFAULTS, f->cq,
"/foo", "foo.test.google.fr", deadline, NULL);
"/foo", "foo.test.google.fr", gpr_inf_future(GPR_CLOCK_REALTIME),
NULL);
GPR_ASSERT(c);
completed_client = 0;
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
@ -286,32 +290,23 @@ int *perform_request(servers_fixture *f, grpc_channel *client,
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &rdata->trailing_metadata_recv;
op->data.recv_status_on_client.trailing_metadata =
&rdata->trailing_metadata_recv;
op->data.recv_status_on_client.status = &rdata->status;
op->data.recv_status_on_client.status_details = &rdata->details;
op->data.recv_status_on_client.status_details_capacity = &rdata->details_capacity;
op->data.recv_status_on_client.status_details_capacity =
&rdata->details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL));
/* "listen" on all servers */
for (i = 0; i < f->num_servers; i++) {
grpc_metadata_array_init(&f->request_metadata_recv[i]);
if (f->servers[i] != NULL) {
GPR_ASSERT(GRPC_CALL_OK ==
grpc_server_request_call(f->servers[i], &f->server_calls[i],
&rdata->call_details[i],
&f->request_metadata_recv[i], f->cq,
f->cq, tag(1000 + (int)i)));
}
}
s_idx = -1;
while ((ev = grpc_completion_queue_next(
f->cq, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1), NULL)).type !=
GRPC_QUEUE_TIMEOUT) {
f->cq, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1), NULL))
.type != GRPC_QUEUE_TIMEOUT) {
GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
read_tag = ((int)(gpr_intptr)ev.tag);
gpr_log(GPR_DEBUG, "EVENT: success:%d, type:%d, tag:%d iter:%d",
ev.success, ev.type, read_tag, iter_num);
@ -321,9 +316,16 @@ int *perform_request(servers_fixture *f, grpc_channel *client,
s_idx = read_tag - 1000;
s_valid[s_idx] = 1;
connection_sequence[iter_num] = s_idx;
break;
} else if (read_tag == 1) {
gpr_log(GPR_DEBUG, "client timed out");
GPR_ASSERT(ev.success);
completed_client = 1;
}
}
gpr_log(GPR_DEBUG, "s_idx=%d", s_idx);
if (s_idx >= 0) {
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
@ -348,23 +350,35 @@ int *perform_request(servers_fixture *f, grpc_channel *client,
tag(102), NULL));
cq_expect_completion(cqv, tag(102), 1);
cq_expect_completion(cqv, tag(1), 1);
if (!completed_client) {
cq_expect_completion(cqv, tag(1), 1);
}
cq_verify(cqv);
gpr_log(GPR_DEBUG, "status=%d; %s", rdata->status, rdata->details);
GPR_ASSERT(rdata->status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == strcmp(rdata->details, "xyz"));
GPR_ASSERT(0 == strcmp(rdata->call_details[s_idx].method, "/foo"));
GPR_ASSERT(0 == strcmp(rdata->call_details[s_idx].host, "foo.test.google.fr"));
GPR_ASSERT(0 ==
strcmp(rdata->call_details[s_idx].host, "foo.test.google.fr"));
GPR_ASSERT(was_cancelled == 1);
} else {
}
for (i = 0; i < f->num_servers; i++) {
if (s_valid[i] != 0) {
grpc_call_destroy(f->server_calls[i]);
grpc_call_destroy(f->server_calls[s_idx]);
/* ask for the next request on this server */
GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
f->servers[s_idx], &f->server_calls[s_idx],
&rdata->call_details[s_idx],
&f->request_metadata_recv[s_idx], f->cq,
f->cq, tag(1000 + (int)s_idx)));
} else {
grpc_call_cancel(c, NULL);
if (!completed_client) {
cq_expect_completion(cqv, tag(1), 1);
cq_verify(cqv);
}
grpc_metadata_array_destroy(&f->request_metadata_recv[i]);
}
grpc_metadata_array_destroy(&rdata->initial_metadata_recv);
grpc_metadata_array_destroy(&rdata->trailing_metadata_recv);
@ -378,7 +392,6 @@ int *perform_request(servers_fixture *f, grpc_channel *client,
gpr_free(rdata->details);
}
gpr_free(rdata->call_details);
gpr_free(s_valid);
return connection_sequence;
@ -441,14 +454,26 @@ void run_spec(const test_spec *spec) {
char *servers_hostports_str;
int *actual_connection_sequence;
request_data rdata;
servers_fixture *f = setup_servers("127.0.0.1", spec->num_servers);
servers_fixture *f;
grpc_channel_args args;
grpc_arg arg;
rdata.call_details =
gpr_malloc(sizeof(grpc_call_details) * spec->num_servers);
f = setup_servers("127.0.0.1", &rdata, spec->num_servers);
/* Create client. */
servers_hostports_str = gpr_strjoin_sep((const char **)f->servers_hostports,
f->num_servers, ",", NULL);
gpr_asprintf(&client_hostport, "ipv4:%s?lb_policy=round_robin",
servers_hostports_str);
client = grpc_insecure_channel_create(client_hostport, NULL, NULL);
arg.type = GRPC_ARG_INTEGER;
arg.key = "grpc.testing.fixed_reconnect_backoff";
arg.value.integer = 100;
args.num_args = 1;
args.args = &arg;
client = grpc_insecure_channel_create(client_hostport, &args, NULL);
gpr_log(GPR_INFO, "Testing '%s' with servers=%s client=%s", spec->description,
servers_hostports_str, client_hostport);
@ -460,6 +485,7 @@ void run_spec(const test_spec *spec) {
gpr_free(client_hostport);
gpr_free(servers_hostports_str);
gpr_free(actual_connection_sequence);
gpr_free(rdata.call_details);
grpc_channel_destroy(client);
teardown_servers(f);

@ -146,6 +146,7 @@ void grpc_end2end_proxy_destroy(grpc_end2end_proxy *proxy) {
}
static void unrefpc(proxy_call *pc, const char *reason) {
gpr_log(GPR_DEBUG, "PROXY UNREF %s", reason);
if (gpr_unref(&pc->refs)) {
grpc_call_destroy(pc->c2p);
grpc_call_destroy(pc->p2s);
@ -157,7 +158,10 @@ static void unrefpc(proxy_call *pc, const char *reason) {
}
}
static void refpc(proxy_call *pc, const char *reason) { gpr_ref(&pc->refs); }
static void refpc(proxy_call *pc, const char *reason) {
gpr_log(GPR_DEBUG, "PROXY REF %s", reason);
gpr_ref(&pc->refs);
}
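The pair above is the standard gpr refcount idiom with debug logging added; in miniature (standalone sketch, names invented):

typedef struct {
  gpr_refcount refs;
  /* ... owned resources ... */
} thing;

static thing *thing_create(void) {
  thing *t = gpr_malloc(sizeof(*t));
  gpr_ref_init(&t->refs, 1); /* caller owns the initial ref */
  return t;
}

static void thing_unref(thing *t) {
  if (gpr_unref(&t->refs)) { /* nonzero exactly when the count hits zero */
    gpr_free(t);
  }
}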
static void on_c2p_sent_initial_metadata(void *arg, int success) {
proxy_call *pc = arg;

@ -46,21 +46,21 @@ uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=Fal
# maps fixture name to whether it requires the security library
END2END_FIXTURES = {
'h2_compress': default_unsecure_fixture_options,
'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False),
'h2_full': default_unsecure_fixture_options,
'h2_uchannel': default_unsecure_fixture_options,
'h2_compress': default_unsecure_fixture_options,
'h2_uds': uds_fixture_options,
'h2_uds+poll': uds_fixture_options._replace(platforms=['linux']),
'h2_full+poll': default_unsecure_fixture_options._replace(platforms=['linux']),
'h2_oauth2': default_secure_fixture_options._replace(ci_mac=False),
'h2_proxy': default_unsecure_fixture_options._replace(includes_proxy=True, ci_mac=False),
'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(ci_mac=False),
'h2_sockpair': socketpair_unsecure_fixture_options._replace(ci_mac=False),
'h2_sockpair+trace': socketpair_unsecure_fixture_options,
'h2_ssl': default_secure_fixture_options,
'h2_ssl+poll': default_secure_fixture_options._replace(platforms=['linux']),
'h2_ssl_proxy': default_secure_fixture_options._replace(includes_proxy=True, ci_mac=False),
'h2_oauth2': default_secure_fixture_options._replace(ci_mac=False),
'h2_sockpair': socketpair_unsecure_fixture_options._replace(ci_mac=False),
'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(ci_mac=False),
'h2_sockpair+trace': socketpair_unsecure_fixture_options,
'h2_uchannel': default_unsecure_fixture_options,
'h2_uds+poll': uds_fixture_options._replace(platforms=['linux']),
'h2_uds': uds_fixture_options,
}
TestOptions = collections.namedtuple('TestOptions', 'needs_fullstack needs_dns proxyable flaky secure')
@ -70,38 +70,40 @@ connectivity_test_options = default_test_options._replace(needs_fullstack=True)
# maps test names to options
END2END_TESTS = {
'bad_hostname': default_test_options,
'cancel_after_client_done': default_test_options,
'binary_metadata': default_test_options,
'call_creds': default_test_options._replace(secure=True),
'cancel_after_accept': default_test_options,
'cancel_after_client_done': default_test_options,
'cancel_after_invoke': default_test_options,
'cancel_before_invoke': default_test_options,
'cancel_in_a_vacuum': default_test_options,
'cancel_with_status': default_test_options,
'census_simple_request': default_test_options,
'channel_connectivity': connectivity_test_options._replace(proxyable=False),
'compressed_payload': default_test_options._replace(proxyable=False),
'default_host': default_test_options._replace(needs_fullstack=True, needs_dns=True),
'disappearing_server': connectivity_test_options,
'shutdown_finishes_calls': default_test_options,
'shutdown_finishes_tags': default_test_options,
'empty_batch': default_test_options,
'graceful_server_shutdown': default_test_options,
'high_initial_seqno': default_test_options,
'invoke_large_request': default_test_options,
'large_metadata': default_test_options,
'max_concurrent_streams': default_test_options._replace(proxyable=False),
'max_message_length': default_test_options,
'metadata': default_test_options,
'negative_deadline': default_test_options,
'no_op': default_test_options,
'payload': default_test_options,
'ping_pong_streaming': default_test_options,
'registered_call': default_test_options,
'binary_metadata': default_test_options,
'metadata': default_test_options,
'call_creds': default_test_options._replace(secure=True),
'payload': default_test_options,
'trailing_metadata': default_test_options,
'compressed_payload': default_test_options._replace(proxyable=False),
'request_with_flags': default_test_options._replace(proxyable=False),
'large_metadata': default_test_options,
'request_with_payload': default_test_options,
'server_finishes_request': default_test_options,
'shutdown_finishes_calls': default_test_options,
'shutdown_finishes_tags': default_test_options,
'simple_delayed_request': connectivity_test_options,
'simple_request': default_test_options,
'high_initial_seqno': default_test_options,
'trailing_metadata': default_test_options,
}

@ -0,0 +1,183 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include "src/core/support/string.h"
#include <grpc/byte_buffer.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/end2end/cq_verifier.h"
enum { TIMEOUT = 200000 };
static void *tag(gpr_intptr t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_client(&f, client_args);
config.init_server(&f, server_args);
return f;
}
static gpr_timespec n_seconds_time(int n) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
}
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5),
NULL).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
}
static void simple_request_body(grpc_end2end_test_fixture f, size_t num_ops) {
grpc_call *c;
gpr_timespec deadline = five_seconds_time();
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op *op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_status_code status;
grpc_call_error error;
char *details = NULL;
size_t details_capacity = 0;
gpr_log(GPR_DEBUG, "test with %d ops", num_ops);
c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
"/foo", "foo.test.google.fr:1234", deadline,
NULL);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
op = ops;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
GPR_ASSERT(num_ops <= (size_t)(op - ops));
error = grpc_call_start_batch(c, ops, num_ops, tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
grpc_call_cancel_with_status(c, GRPC_STATUS_UNIMPLEMENTED, "xyz", NULL);
cq_expect_completion(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == strcmp(details, "xyz"));
gpr_free(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_call_destroy(c);
cq_verifier_destroy(cqv);
}
static void test_invoke_simple_request(grpc_end2end_test_config config,
                                       size_t num_ops) {
grpc_end2end_test_fixture f;
f = begin_test(config, "test_invoke_simple_request", NULL, NULL);
simple_request_body(f, num_ops);
end_test(&f);
config.tear_down_data(&f);
}
void grpc_end2end_tests(grpc_end2end_test_config config) {
size_t i;
for (i = 1; i <= 4; i++) {
test_invoke_simple_request(config, i);
}
}

@ -109,6 +109,7 @@ static void test_max_message_length(grpc_end2end_test_config config) {
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *recv_payload;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
@ -183,6 +184,11 @@ static void test_max_message_length(grpc_end2end_test_config config) {
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &recv_payload;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
@ -194,6 +200,7 @@ static void test_max_message_length(grpc_end2end_test_config config) {
GPR_ASSERT(0 == strcmp(call_details.method, "/foo"));
GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234"));
GPR_ASSERT(was_cancelled == 1);
GPR_ASSERT(recv_payload == NULL);
gpr_free(details);
grpc_metadata_array_destroy(&initial_metadata_recv);

@ -0,0 +1,180 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include "src/core/support/string.h"
#include <grpc/byte_buffer.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/end2end/cq_verifier.h"
enum { TIMEOUT = 200000 };
static void *tag(gpr_intptr t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_client(&f, client_args);
config.init_server(&f, server_args);
return f;
}
static gpr_timespec n_seconds_time(int n) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
}
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5),
NULL).type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
}
static void simple_request_body(grpc_end2end_test_fixture f, size_t num_ops) {
grpc_call *c;
gpr_timespec deadline = gpr_inf_past(GPR_CLOCK_REALTIME);
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op *op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_status_code status;
grpc_call_error error;
char *details = NULL;
size_t details_capacity = 0;
gpr_log(GPR_DEBUG, "test with %d ops", num_ops);
c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
"/foo", "foo.test.google.fr:1234", deadline,
NULL);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
op = ops;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
GPR_ASSERT(num_ops <= (size_t)(op - ops));
error = grpc_call_start_batch(c, ops, num_ops, tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
cq_expect_completion(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_DEADLINE_EXCEEDED);
gpr_free(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_call_destroy(c);
cq_verifier_destroy(cqv);
}
static void test_invoke_simple_request(grpc_end2end_test_config config,
                                       size_t num_ops) {
grpc_end2end_test_fixture f;
f = begin_test(config, "test_invoke_simple_request", NULL, NULL);
simple_request_body(f, num_ops);
end_test(&f);
config.tear_down_data(&f);
}
void grpc_end2end_tests(grpc_end2end_test_config config) {
size_t i;
for (i = 1; i <= 4; i++) {
test_invoke_simple_request(config, i);
}
}

@ -41,7 +41,7 @@
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/transport/stream_op.h"
#include "src/core/transport/byte_stream.h"
#include "test/core/end2end/cq_verifier.h"
enum { TIMEOUT = 200000 };

@ -0,0 +1,200 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/transport/chttp2/hpack_encoder.h"
#include <stdio.h>
#include "src/core/support/string.h"
#include "src/core/transport/chttp2/hpack_parser.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "test/core/util/parse_hexstring.h"
#include "test/core/util/slice_splitter.h"
#include "test/core/util/test_config.h"
#define TEST(x) run_test(x, #x)
grpc_mdctx *g_mdctx;
grpc_chttp2_hpack_compressor g_compressor;
int g_failure = 0;
void **to_delete = NULL;
size_t num_to_delete = 0;
size_t cap_to_delete = 0;
/* verify that the output generated by encoding the given header list
   matches the hexstring passed in */
static void verify(size_t window_available, int eof, size_t expect_window_used,
const char *expected, size_t nheaders, ...) {
gpr_slice_buffer output;
gpr_slice merged;
gpr_slice expect = parse_hexstring(expected);
size_t i;
va_list l;
grpc_linked_mdelem *e = gpr_malloc(sizeof(*e) * nheaders);
grpc_metadata_batch b;
grpc_metadata_batch_init(&b);
va_start(l, nheaders);
for (i = 0; i < nheaders; i++) {
char *key = va_arg(l, char *);
char *value = va_arg(l, char *);
if (i) {
e[i - 1].next = &e[i];
e[i].prev = &e[i - 1];
}
e[i].md = grpc_mdelem_from_strings(g_mdctx, key, value);
}
e[0].prev = NULL;
e[nheaders - 1].next = NULL;
va_end(l);
b.list.head = &e[0];
b.list.tail = &e[nheaders - 1];
if (cap_to_delete == num_to_delete) {
cap_to_delete = GPR_MAX(2 * cap_to_delete, 1000);
to_delete = gpr_realloc(to_delete, sizeof(*to_delete) * cap_to_delete);
}
to_delete[num_to_delete++] = e;
gpr_slice_buffer_init(&output);
grpc_chttp2_encode_header(&g_compressor, 0xdeadbeef, &b, eof, &output);
merged = grpc_slice_merge(output.slices, output.count);
gpr_slice_buffer_destroy(&output);
grpc_metadata_batch_destroy(&b);
if (0 != gpr_slice_cmp(merged, expect)) {
char *expect_str = gpr_dump_slice(expect, GPR_DUMP_HEX | GPR_DUMP_ASCII);
char *got_str = gpr_dump_slice(merged, GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_ERROR, "mismatched output for %s", expected);
gpr_log(GPR_ERROR, "EXPECT: %s", expect_str);
gpr_log(GPR_ERROR, "GOT: %s", got_str);
gpr_free(expect_str);
gpr_free(got_str);
g_failure = 1;
}
gpr_slice_unref(merged);
gpr_slice_unref(expect);
}
static void test_basic_headers(void) {
int i;
verify(0, 0, 0, "000005 0104 deadbeef 40 0161 0161", 1, "a", "a");
verify(0, 0, 0, "000001 0104 deadbeef be", 1, "a", "a");
verify(0, 0, 0, "000001 0104 deadbeef be", 1, "a", "a");
verify(0, 0, 0, "000006 0104 deadbeef be 40 0162 0163", 2, "a", "a", "b",
"c");
verify(0, 0, 0, "000002 0104 deadbeef bf be", 2, "a", "a", "b", "c");
verify(0, 0, 0, "000004 0104 deadbeef 7f 00 0164", 1, "a", "d");
/* flush out what's there to make a few values look very popular */
for (i = 0; i < 350; i++) {
verify(0, 0, 0, "000003 0104 deadbeef c0 bf be", 3, "a", "a", "b", "c", "a",
"d");
}
verify(0, 0, 0, "000006 0104 deadbeef c0 00 016b 0176", 2, "a", "a", "k",
"v");
/* this could be 000004 0104 deadbeef 0f 30 0176 also */
verify(0, 0, 0, "000004 0104 deadbeef 0f 2f 0176", 1, "a", "v");
}
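As a reading aid, a worked decode of the first expectation above; the annotation is ours, using standard HTTP/2 framing and HPACK coding:

/* 000005    frame length = 5 bytes
   01        frame type = HEADERS
   04        flags = END_HEADERS
   deadbeef  stream id, as passed to grpc_chttp2_encode_header
   40        literal header field with incremental indexing, new name
   0161      name: length 1, "a"
   0161      value: length 1, "a"
   The second call then emits only "be": an indexed header field whose
   index (0xbe & 0x7f = 62) is the first dynamic-table slot, i.e. the
   ("a", "a") entry that the first call inserted. */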
static void encode_int_to_str(int i, char *p) {
p[0] = (char)('a' + i % 26);
i /= 26;
GPR_ASSERT(i < 26);
p[1] = (char)('a' + i);
p[2] = 0;
}
static void test_decode_table_overflow(void) {
int i;
char key[3], value[3];
char *expect;
for (i = 0; i < 114; i++) {
encode_int_to_str(i, key);
encode_int_to_str(i + 1, value);
if (i + 61 >= 127) {
gpr_asprintf(&expect,
"000009 0104 deadbeef ff%02x 40 02%02x%02x 02%02x%02x",
i + 61 - 127, key[0], key[1], value[0], value[1]);
} else if (i > 0) {
gpr_asprintf(&expect,
"000008 0104 deadbeef %02x 40 02%02x%02x 02%02x%02x",
0x80 + 61 + i, key[0], key[1], value[0], value[1]);
} else {
gpr_asprintf(&expect, "000007 0104 deadbeef 40 02%02x%02x 02%02x%02x",
key[0], key[1], value[0], value[1]);
}
if (i > 0) {
verify(0, 0, 0, expect, 2, "aa", "ba", key, value);
} else {
verify(0, 0, 0, expect, 1, key, value);
}
gpr_free(expect);
}
/* if the above passes, then we must have just knocked this pair out of the
   decoder's dynamic table, and so we'll be forced to re-encode it */
verify(0, 0, 0, "000007 0104 deadbeef 40 026161 026261", 1, "aa", "ba");
}
static void run_test(void (*test)(), const char *name) {
gpr_log(GPR_INFO, "RUN TEST: %s", name);
g_mdctx = grpc_mdctx_create_with_seed(0);
grpc_chttp2_hpack_compressor_init(&g_compressor, g_mdctx);
test();
grpc_chttp2_hpack_compressor_destroy(&g_compressor);
grpc_mdctx_unref(g_mdctx);
}
int main(int argc, char **argv) {
size_t i;
grpc_test_init(argc, argv);
TEST(test_basic_headers);
TEST(test_decode_table_overflow);
for (i = 0; i < num_to_delete; i++) {
gpr_free(to_delete[i]);
}
return g_failure;
}

@ -1,359 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/transport/chttp2/stream_encoder.h"
#include <stdio.h>
#include "src/core/support/string.h"
#include "src/core/transport/chttp2/hpack_parser.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "test/core/util/parse_hexstring.h"
#include "test/core/util/slice_splitter.h"
#include "test/core/util/test_config.h"
#define TEST(x) run_test(x, #x)
grpc_mdctx *g_mdctx;
grpc_chttp2_hpack_compressor g_compressor;
int g_failure = 0;
grpc_stream_op_buffer g_sopb;
void **to_delete = NULL;
size_t num_to_delete = 0;
size_t cap_to_delete = 0;
static gpr_slice create_test_slice(size_t length) {
gpr_slice slice = gpr_slice_malloc(length);
size_t i;
for (i = 0; i < length; i++) {
GPR_SLICE_START_PTR(slice)[i] = (gpr_uint8)i;
}
return slice;
}
/* verify that the output generated by encoding the stream matches the
hexstring passed in */
static void verify_sopb(size_t window_available, int eof,
size_t expect_window_used, const char *expected) {
gpr_slice_buffer output;
grpc_stream_op_buffer encops;
gpr_slice merged;
gpr_slice expect = parse_hexstring(expected);
gpr_slice_buffer_init(&output);
grpc_sopb_init(&encops);
GPR_ASSERT(expect_window_used ==
grpc_chttp2_preencode(g_sopb.ops, &g_sopb.nops,
(gpr_uint32)window_available, &encops));
grpc_chttp2_encode(encops.ops, encops.nops, eof, 0xdeadbeef, &g_compressor,
&output);
encops.nops = 0;
merged = grpc_slice_merge(output.slices, output.count);
gpr_slice_buffer_destroy(&output);
grpc_sopb_destroy(&encops);
if (0 != gpr_slice_cmp(merged, expect)) {
char *expect_str = gpr_dump_slice(expect, GPR_DUMP_HEX | GPR_DUMP_ASCII);
char *got_str = gpr_dump_slice(merged, GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_ERROR, "mismatched output for %s", expected);
gpr_log(GPR_ERROR, "EXPECT: %s", expect_str);
gpr_log(GPR_ERROR, "GOT: %s", got_str);
gpr_free(expect_str);
gpr_free(got_str);
g_failure = 1;
}
gpr_slice_unref(merged);
gpr_slice_unref(expect);
}
static void test_small_data_framing(void) {
grpc_sopb_add_no_op(&g_sopb);
verify_sopb(10, 0, 0, "");
grpc_sopb_add_slice(&g_sopb, create_test_slice(3));
verify_sopb(10, 0, 3, "000003 0000 deadbeef 000102");
grpc_sopb_add_slice(&g_sopb, create_test_slice(4));
verify_sopb(10, 0, 4, "000004 0000 deadbeef 00010203");
grpc_sopb_add_slice(&g_sopb, create_test_slice(3));
grpc_sopb_add_slice(&g_sopb, create_test_slice(4));
verify_sopb(10, 0, 7, "000007 0000 deadbeef 000102 00010203");
grpc_sopb_add_slice(&g_sopb, create_test_slice(0));
grpc_sopb_add_slice(&g_sopb, create_test_slice(0));
grpc_sopb_add_slice(&g_sopb, create_test_slice(0));
grpc_sopb_add_slice(&g_sopb, create_test_slice(0));
grpc_sopb_add_slice(&g_sopb, create_test_slice(3));
verify_sopb(10, 0, 3, "000003 0000 deadbeef 000102");
verify_sopb(10, 1, 0, "000000 0001 deadbeef");
grpc_sopb_add_begin_message(&g_sopb, 255, 0);
verify_sopb(10, 0, 5, "000005 0000 deadbeef 00000000ff");
}
static void add_sopb_headers(size_t n, ...) {
size_t i;
grpc_metadata_batch b;
va_list l;
grpc_linked_mdelem *e = gpr_malloc(sizeof(*e) * n);
grpc_metadata_batch_init(&b);
va_start(l, n);
for (i = 0; i < n; i++) {
char *key = va_arg(l, char *);
char *value = va_arg(l, char *);
if (i) {
e[i - 1].next = &e[i];
e[i].prev = &e[i - 1];
}
e[i].md = grpc_mdelem_from_strings(g_mdctx, key, value);
}
e[0].prev = NULL;
e[n - 1].next = NULL;
va_end(l);
b.list.head = &e[0];
b.list.tail = &e[n - 1];
if (cap_to_delete == num_to_delete) {
cap_to_delete = GPR_MAX(2 * cap_to_delete, 1000);
to_delete = gpr_realloc(to_delete, sizeof(*to_delete) * cap_to_delete);
}
to_delete[num_to_delete++] = e;
grpc_sopb_add_metadata(&g_sopb, b);
}
static void test_basic_headers(void) {
int i;
add_sopb_headers(1, "a", "a");
verify_sopb(0, 0, 0, "000005 0104 deadbeef 40 0161 0161");
add_sopb_headers(1, "a", "a");
verify_sopb(0, 0, 0, "000001 0104 deadbeef be");
add_sopb_headers(1, "a", "a");
verify_sopb(0, 0, 0, "000001 0104 deadbeef be");
add_sopb_headers(2, "a", "a", "b", "c");
verify_sopb(0, 0, 0, "000006 0104 deadbeef be 40 0162 0163");
add_sopb_headers(2, "a", "a", "b", "c");
verify_sopb(0, 0, 0, "000002 0104 deadbeef bf be");
add_sopb_headers(1, "a", "d");
verify_sopb(0, 0, 0, "000004 0104 deadbeef 7f 00 0164");
/* flush out what's there to make a few values look very popular */
for (i = 0; i < 350; i++) {
add_sopb_headers(3, "a", "a", "b", "c", "a", "d");
verify_sopb(0, 0, 0, "000003 0104 deadbeef c0 bf be");
}
add_sopb_headers(2, "a", "a", "k", "v");
verify_sopb(0, 0, 0, "000006 0104 deadbeef c0 00 016b 0176");
add_sopb_headers(1, "a", "v");
/* this could be 000004 0104 deadbeef 0f 30 0176 also */
verify_sopb(0, 0, 0, "000004 0104 deadbeef 0f 2f 0176");
}
static void encode_int_to_str(int i, char *p) {
p[0] = (char)('a' + i % 26);
i /= 26;
GPR_ASSERT(i < 26);
p[1] = (char)('a' + i);
p[2] = 0;
}
static void test_decode_table_overflow(void) {
int i;
char key[3], value[3];
char *expect;
for (i = 0; i < 114; i++) {
if (i > 0) {
add_sopb_headers(1, "aa", "ba");
}
encode_int_to_str(i, key);
encode_int_to_str(i + 1, value);
if (i + 61 >= 127) {
gpr_asprintf(&expect,
"000002 0104 deadbeef ff%02x 000007 0104 deadbeef 40 "
"02%02x%02x 02%02x%02x",
i + 61 - 127, key[0], key[1], value[0], value[1]);
} else if (i > 0) {
gpr_asprintf(&expect,
"000001 0104 deadbeef %02x 000007 0104 deadbeef 40 "
"02%02x%02x 02%02x%02x",
0x80 + 61 + i, key[0], key[1], value[0], value[1]);
} else {
gpr_asprintf(&expect, "000007 0104 deadbeef 40 02%02x%02x 02%02x%02x",
key[0], key[1], value[0], value[1]);
}
add_sopb_headers(1, key, value);
verify_sopb(0, 0, 0, expect);
gpr_free(expect);
}
/* if the above passes, then we must have just knocked this pair out of the
decoder stack, and so we'll be forced to re-encode it */
add_sopb_headers(1, "aa", "ba");
verify_sopb(0, 0, 0, "000007 0104 deadbeef 40 026161 026261");
}
static void randstr(char *p, int bufsz) {
int i;
int len = 1 + rand() % bufsz;
for (i = 0; i < len; i++) {
p[i] = (char)('a' + rand() % 26);
}
p[len] = 0;
}
typedef struct {
char key[300];
char value[300];
int got_hdr;
} test_decode_random_header_state;
static void chk_hdr(void *p, grpc_mdelem *el) {
test_decode_random_header_state *st = p;
GPR_ASSERT(0 == gpr_slice_str_cmp(el->key->slice, st->key));
GPR_ASSERT(0 == gpr_slice_str_cmp(el->value->slice, st->value));
st->got_hdr = 1;
GRPC_MDELEM_UNREF(el);
}
static void test_decode_random_headers_inner(int max_len) {
int i;
test_decode_random_header_state st;
gpr_slice_buffer output;
gpr_slice merged;
grpc_stream_op_buffer encops;
grpc_chttp2_hpack_parser parser;
grpc_chttp2_hpack_parser_init(&parser, g_mdctx);
grpc_sopb_init(&encops);
gpr_log(GPR_INFO, "max_len = %d", max_len);
for (i = 0; i < 10000; i++) {
randstr(st.key, max_len);
randstr(st.value, max_len);
add_sopb_headers(1, st.key, st.value);
gpr_slice_buffer_init(&output);
GPR_ASSERT(0 ==
grpc_chttp2_preencode(g_sopb.ops, &g_sopb.nops, 0, &encops));
grpc_chttp2_encode(encops.ops, encops.nops, 0, 0xdeadbeef, &g_compressor,
&output);
encops.nops = 0;
merged = grpc_slice_merge(output.slices, output.count);
gpr_slice_buffer_destroy(&output);
st.got_hdr = 0;
parser.on_header = chk_hdr;
parser.on_header_user_data = &st;
grpc_chttp2_hpack_parser_parse(&parser, GPR_SLICE_START_PTR(merged) + 9,
GPR_SLICE_END_PTR(merged));
GPR_ASSERT(st.got_hdr);
gpr_slice_unref(merged);
}
grpc_chttp2_hpack_parser_destroy(&parser);
grpc_sopb_destroy(&encops);
}
#define DECL_TEST_DECODE_RANDOM_HEADERS(n) \
static void test_decode_random_headers_##n(void) { \
test_decode_random_headers_inner(n); \
} \
int keeps_formatting_correct_##n
DECL_TEST_DECODE_RANDOM_HEADERS(1);
DECL_TEST_DECODE_RANDOM_HEADERS(2);
DECL_TEST_DECODE_RANDOM_HEADERS(3);
DECL_TEST_DECODE_RANDOM_HEADERS(5);
DECL_TEST_DECODE_RANDOM_HEADERS(8);
DECL_TEST_DECODE_RANDOM_HEADERS(13);
DECL_TEST_DECODE_RANDOM_HEADERS(21);
DECL_TEST_DECODE_RANDOM_HEADERS(34);
DECL_TEST_DECODE_RANDOM_HEADERS(55);
DECL_TEST_DECODE_RANDOM_HEADERS(89);
DECL_TEST_DECODE_RANDOM_HEADERS(144);
static void run_test(void (*test)(), const char *name) {
gpr_log(GPR_INFO, "RUN TEST: %s", name);
g_mdctx = grpc_mdctx_create_with_seed(0);
grpc_chttp2_hpack_compressor_init(&g_compressor, g_mdctx);
grpc_sopb_init(&g_sopb);
test();
grpc_chttp2_hpack_compressor_destroy(&g_compressor);
grpc_mdctx_unref(g_mdctx);
grpc_sopb_destroy(&g_sopb);
}
int main(int argc, char **argv) {
size_t i;
grpc_test_init(argc, argv);
TEST(test_small_data_framing);
TEST(test_basic_headers);
TEST(test_decode_table_overflow);
TEST(test_decode_random_headers_1);
TEST(test_decode_random_headers_2);
TEST(test_decode_random_headers_3);
TEST(test_decode_random_headers_5);
TEST(test_decode_random_headers_8);
TEST(test_decode_random_headers_13);
TEST(test_decode_random_headers_21);
TEST(test_decode_random_headers_34);
TEST(test_decode_random_headers_55);
TEST(test_decode_random_headers_89);
TEST(test_decode_random_headers_144);
for (i = 0; i < num_to_delete; i++) {
gpr_free(to_delete[i]);
}
return g_failure;
}

@ -1,116 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/transport/stream_op.h"
#include <string.h>
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"
static void assert_slices_equal(gpr_slice a, gpr_slice b) {
GPR_ASSERT(a.refcount == b.refcount);
if (a.refcount) {
GPR_ASSERT(a.data.refcounted.bytes == b.data.refcounted.bytes);
GPR_ASSERT(a.data.refcounted.length == b.data.refcounted.length);
} else {
GPR_ASSERT(a.data.inlined.length == b.data.inlined.length);
GPR_ASSERT(0 == memcmp(a.data.inlined.bytes, b.data.inlined.bytes,
a.data.inlined.length));
}
}
int main(int argc, char **argv) {
/* some basic test data */
gpr_slice test_slice_1 = gpr_slice_malloc(1);
gpr_slice test_slice_2 = gpr_slice_malloc(2);
gpr_slice test_slice_3 = gpr_slice_malloc(3);
gpr_slice test_slice_4 = gpr_slice_malloc(4);
unsigned i;
grpc_stream_op_buffer buf;
grpc_stream_op_buffer buf2;
grpc_test_init(argc, argv);
/* initialize one of our buffers */
grpc_sopb_init(&buf);
/* it should start out empty */
GPR_ASSERT(buf.nops == 0);
/* add some data to the buffer */
grpc_sopb_add_begin_message(&buf, 1, 2);
grpc_sopb_add_slice(&buf, test_slice_1);
grpc_sopb_add_slice(&buf, test_slice_2);
grpc_sopb_add_slice(&buf, test_slice_3);
grpc_sopb_add_slice(&buf, test_slice_4);
grpc_sopb_add_no_op(&buf);
/* verify that the data went in ok */
GPR_ASSERT(buf.nops == 6);
GPR_ASSERT(buf.ops[0].type == GRPC_OP_BEGIN_MESSAGE);
GPR_ASSERT(buf.ops[0].data.begin_message.length == 1);
GPR_ASSERT(buf.ops[0].data.begin_message.flags == 2);
GPR_ASSERT(buf.ops[1].type == GRPC_OP_SLICE);
assert_slices_equal(buf.ops[1].data.slice, test_slice_1);
GPR_ASSERT(buf.ops[2].type == GRPC_OP_SLICE);
assert_slices_equal(buf.ops[2].data.slice, test_slice_2);
GPR_ASSERT(buf.ops[3].type == GRPC_OP_SLICE);
assert_slices_equal(buf.ops[3].data.slice, test_slice_3);
GPR_ASSERT(buf.ops[4].type == GRPC_OP_SLICE);
assert_slices_equal(buf.ops[4].data.slice, test_slice_4);
GPR_ASSERT(buf.ops[5].type == GRPC_NO_OP);
/* initialize the second buffer */
grpc_sopb_init(&buf2);
/* add a no-op, and then the original buffer */
grpc_sopb_add_no_op(&buf2);
grpc_sopb_append(&buf2, buf.ops, buf.nops);
/* should be one element bigger than the original */
GPR_ASSERT(buf2.nops == buf.nops + 1);
GPR_ASSERT(buf2.ops[0].type == GRPC_NO_OP);
/* and the tail should be the same */
for (i = 0; i < buf.nops; i++) {
GPR_ASSERT(buf2.ops[i + 1].type == buf.ops[i].type);
}
/* destroy the buffers */
grpc_sopb_destroy(&buf);
grpc_sopb_destroy(&buf2);
gpr_slice_unref(test_slice_1);
gpr_slice_unref(test_slice_2);
gpr_slice_unref(test_slice_3);
gpr_slice_unref(test_slice_4);
return 0;
}

@ -1188,14 +1188,14 @@ TEST_P(SecureEnd2endTest, ClientAuthContext) {
}
INSTANTIATE_TEST_CASE_P(End2end, End2endTest,
::testing::Values(TestScenario(false, true),
TestScenario(false, false)));
::testing::Values(TestScenario(false, false),
TestScenario(false, true)));
INSTANTIATE_TEST_CASE_P(ProxyEnd2end, ProxyEnd2endTest,
::testing::Values(TestScenario(true, true),
TestScenario(true, false),
::testing::Values(TestScenario(false, false),
TestScenario(false, true),
TestScenario(false, false)));
TestScenario(true, false),
TestScenario(true, true)));
INSTANTIATE_TEST_CASE_P(SecureEnd2end, SecureEnd2endTest,
::testing::Values(TestScenario(false, true)));

@ -46,7 +46,7 @@
#include <grpc++/client_context.h>
#include <grpc++/security/credentials.h>
#include "src/core/transport/stream_op.h"
#include "src/core/transport/byte_stream.h"
#include "test/cpp/interop/client_helper.h"
#include "test/proto/test.grpc.pb.h"
#include "test/proto/empty.grpc.pb.h"

@ -791,6 +791,7 @@ src/core/channel/context.h \
src/core/channel/http_client_filter.h \
src/core/channel/http_server_filter.h \
src/core/channel/noop_filter.h \
src/core/channel/subchannel_call_holder.h \
src/core/client_config/client_config.h \
src/core/client_config/connector.h \
src/core/client_config/lb_policies/pick_first.h \
@ -857,7 +858,6 @@ src/core/json/json_writer.h \
src/core/statistics/census_interface.h \
src/core/statistics/census_rpc_stats.h \
src/core/surface/api_trace.h \
src/core/surface/byte_buffer_queue.h \
src/core/surface/call.h \
src/core/surface/call_test_only.h \
src/core/surface/channel.h \
@ -866,6 +866,7 @@ src/core/surface/event_string.h \
src/core/surface/init.h \
src/core/surface/server.h \
src/core/surface/surface_trace.h \
src/core/transport/byte_stream.h \
src/core/transport/chttp2/alpn.h \
src/core/transport/chttp2/bin_encoder.h \
src/core/transport/chttp2/frame.h \
@ -875,6 +876,7 @@ src/core/transport/chttp2/frame_ping.h \
src/core/transport/chttp2/frame_rst_stream.h \
src/core/transport/chttp2/frame_settings.h \
src/core/transport/chttp2/frame_window_update.h \
src/core/transport/chttp2/hpack_encoder.h \
src/core/transport/chttp2/hpack_parser.h \
src/core/transport/chttp2/hpack_table.h \
src/core/transport/chttp2/http2_errors.h \
@ -882,14 +884,13 @@ src/core/transport/chttp2/huffsyms.h \
src/core/transport/chttp2/incoming_metadata.h \
src/core/transport/chttp2/internal.h \
src/core/transport/chttp2/status_conversion.h \
src/core/transport/chttp2/stream_encoder.h \
src/core/transport/chttp2/stream_map.h \
src/core/transport/chttp2/timeout_encoding.h \
src/core/transport/chttp2/varint.h \
src/core/transport/chttp2_transport.h \
src/core/transport/connectivity_state.h \
src/core/transport/metadata.h \
src/core/transport/stream_op.h \
src/core/transport/metadata_batch.h \
src/core/transport/transport.h \
src/core/transport/transport_impl.h \
src/core/census/aggregation.h \
@ -927,6 +928,7 @@ src/core/channel/connected_channel.c \
src/core/channel/http_client_filter.c \
src/core/channel/http_server_filter.c \
src/core/channel/noop_filter.c \
src/core/channel/subchannel_call_holder.c \
src/core/client_config/client_config.c \
src/core/client_config/connector.c \
src/core/client_config/lb_policies/pick_first.c \
@ -996,7 +998,6 @@ src/core/json/json_string.c \
src/core/json/json_writer.c \
src/core/surface/api_trace.c \
src/core/surface/byte_buffer.c \
src/core/surface/byte_buffer_queue.c \
src/core/surface/byte_buffer_reader.c \
src/core/surface/call.c \
src/core/surface/call_details.c \
@ -1013,6 +1014,7 @@ src/core/surface/server.c \
src/core/surface/server_chttp2.c \
src/core/surface/server_create.c \
src/core/surface/version.c \
src/core/transport/byte_stream.c \
src/core/transport/chttp2/alpn.c \
src/core/transport/chttp2/bin_encoder.c \
src/core/transport/chttp2/frame_data.c \
@@ -1021,13 +1023,13 @@ src/core/transport/chttp2/frame_ping.c \
src/core/transport/chttp2/frame_rst_stream.c \
src/core/transport/chttp2/frame_settings.c \
src/core/transport/chttp2/frame_window_update.c \
src/core/transport/chttp2/hpack_encoder.c \
src/core/transport/chttp2/hpack_parser.c \
src/core/transport/chttp2/hpack_table.c \
src/core/transport/chttp2/huffsyms.c \
src/core/transport/chttp2/incoming_metadata.c \
src/core/transport/chttp2/parsing.c \
src/core/transport/chttp2/status_conversion.c \
src/core/transport/chttp2/stream_encoder.c \
src/core/transport/chttp2/stream_lists.c \
src/core/transport/chttp2/stream_map.c \
src/core/transport/chttp2/timeout_encoding.c \
@@ -1036,7 +1038,7 @@ src/core/transport/chttp2/writing.c \
src/core/transport/chttp2_transport.c \
src/core/transport/connectivity_state.c \
src/core/transport/metadata.c \
src/core/transport/stream_op.c \
src/core/transport/metadata_batch.c \
src/core/transport/transport.c \
src/core/transport/transport_op_string.c \
src/core/census/context.c \

@@ -147,9 +147,9 @@ class CLanguage(object):
self.platform = platform_string()
self.test_lang = test_lang
def test_specs(self, config, travis):
def test_specs(self, config, args):
out = []
binaries = get_c_tests(travis, self.test_lang)
binaries = get_c_tests(args.travis, self.test_lang)
for target in binaries:
if config.build_config in target['exclude_configs']:
continue
@@ -160,11 +160,16 @@ class CLanguage(object):
binary = 'bins/%s/%s' % (config.build_config, target['name'])
if os.path.isfile(binary):
out.append(config.job_spec([binary], [binary]))
else:
elif args.regex == '.*' or platform_string() == 'windows':
print '\nWARNING: binary not found, skipping', binary
return sorted(out)
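The new elif above keeps the "binary not found" warning out of regex-filtered runs: when --regex narrows the build, unmatched binaries are intentionally absent, so the warning only fires for full runs ('.*') or on Windows, where regex-minimized builds are not used. A standalone sketch of that logic, with a hypothetical collect_specs helper and invented sample data:

    import os

    def collect_specs(binaries, regex, platform):
        out = []
        for binary in binaries:
            if os.path.isfile(binary):
                out.append(binary)
            elif regex == '.*' or platform == 'windows':
                # warn only when the binary was actually expected to be built
                print '\nWARNING: binary not found, skipping', binary
        return sorted(out)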
def make_targets(self):
def make_targets(self, test_regex):
if platform_string() != 'windows' and test_regex != '.*':
# use the regex to minimize the number of things to build
return [target['name']
for target in get_c_tests(False, self.test_lang)
if re.search(test_regex, target['name'])]
if platform_string() == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
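On non-Windows platforms, a narrowing regex now selects individual test targets instead of the blanket buildtests_* target, so make only builds what will actually run. A minimal sketch of the filtering step, with an invented target list:

    import re

    targets = [{'name': 'end2end_test'}, {'name': 'hpack_parser_test'},
               {'name': 'channel_create_test'}]
    test_regex = 'hpack'
    print [t['name'] for t in targets if re.search(test_regex, t['name'])]
    # ['hpack_parser_test']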
@@ -196,7 +201,7 @@ class CLanguage(object):
class NodeLanguage(object):
def test_specs(self, config, travis):
def test_specs(self, config, args):
return [config.job_spec(['tools/run_tests/run_node.sh'], None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
@@ -204,7 +209,7 @@ class NodeLanguage(object):
# Default to 1 week cache expiration
return [['tools/run_tests/pre_build_node.sh']]
def make_targets(self):
def make_targets(self, test_regex):
return []
def build_steps(self):
@@ -225,14 +230,14 @@ class NodeLanguage(object):
class PhpLanguage(object):
def test_specs(self, config, travis):
def test_specs(self, config, args):
return [config.job_spec(['src/php/bin/run_tests.sh'], None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
def make_targets(self, test_regex):
return ['static_c', 'shared_c']
def build_steps(self):
@@ -257,7 +262,7 @@ class PythonLanguage(object):
self._build_python_versions = ['2.7']
self._has_python_versions = []
def test_specs(self, config, travis):
def test_specs(self, config, args):
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
environment['PYVER'] = '2.7'
return [config.job_spec(
@@ -271,7 +276,7 @@ class PythonLanguage(object):
def pre_build_steps(self):
return []
def make_targets(self):
def make_targets(self, test_regex):
return ['static_c', 'grpc_python_plugin', 'shared_c']
def build_steps(self):
@@ -303,14 +308,14 @@ class PythonLanguage(object):
class RubyLanguage(object):
def test_specs(self, config, travis):
def test_specs(self, config, args):
return [config.job_spec(['tools/run_tests/run_ruby.sh'], None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return [['tools/run_tests/pre_build_ruby.sh']]
def make_targets(self):
def make_targets(self, test_regex):
return ['static_c']
def build_steps(self):
@@ -333,7 +338,7 @@ class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def test_specs(self, config, travis):
def test_specs(self, config, args):
assemblies = ['Grpc.Core.Tests',
'Grpc.Examples.Tests',
'Grpc.HealthCheck.Tests',
@@ -361,7 +366,7 @@ class CSharpLanguage(object):
else:
return [['tools/run_tests/pre_build_csharp.sh']]
def make_targets(self):
def make_targets(self, test_regex):
# For Windows, this target doesn't really build anything,
# everything is built by the buildall script later.
if self.platform == 'windows':
@@ -390,14 +395,14 @@ class CSharpLanguage(object):
class ObjCLanguage(object):
def test_specs(self, config, travis):
def test_specs(self, config, args):
return [config.job_spec(['src/objective-c/tests/run_tests.sh'], None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
def make_targets(self, test_regex):
return ['grpc_objective_c_plugin', 'interop_server']
def build_steps(self):
@@ -418,14 +423,14 @@ class ObjCLanguage(object):
class Sanity(object):
def test_specs(self, config, travis):
def test_specs(self, config, args):
return [config.job_spec(['tools/run_tests/run_sanity.sh'], None),
config.job_spec(['tools/run_tests/check_sources_and_headers.py'], None)]
def pre_build_steps(self):
return []
def make_targets(self):
def make_targets(self, test_regex):
return ['run_dep_checks']
def build_steps(self):
@@ -446,13 +451,13 @@ class Sanity(object):
class Build(object):
def test_specs(self, config, travis):
def test_specs(self, config, args):
return []
def pre_build_steps(self):
return []
def make_targets(self):
def make_targets(self, test_regex):
return ['static']
def build_steps(self):
@@ -662,7 +667,7 @@ make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(makefile, set()).union(
set(l.make_targets()))
set(l.make_targets(args.regex)))
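Each language now contributes its regex-filtered targets to a per-makefile set, so targets shared between languages are built once. A reduced sketch of the aggregation, with invented (makefile, targets) pairs:

    make_targets = {}
    for makefile, targets in [('Makefile', ['static_c', 'shared_c']),
                              ('Makefile', ['static_c', 'grpc_python_plugin'])]:
        make_targets[makefile] = make_targets.get(makefile, set()).union(set(targets))
    print make_targets  # one deduplicated set per makefile; set order may vary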
build_steps = list(set(
jobset.JobSpec(cmdline, environ={'CONFIG': cfg}, flake_retries=5)
@@ -836,12 +841,12 @@ def _calculate_num_runs_failures(list_of_results):
return num_runs, num_failures
def _build_and_run(
check_cancelled, newline_on_success, travis, cache, xml_report=None):
check_cancelled, newline_on_success, cache, xml_report=None):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, _ = jobset.run(
build_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=travis)
newline_on_success=newline_on_success, travis=args.travis)
if num_failures:
return 1
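_build_and_run no longer takes travis as a parameter; the helpers it calls read the module-level args namespace directly, as the travis=args.travis call sites below show. A sketch of that pattern reduced to its shape (names and structure are illustrative, not the script's real layout):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--travis', action='store_true')
    args = parser.parse_args([])

    def run(newline_on_success, travis):
        print 'travis mode:', travis

    def _build_and_run(newline_on_success):
        # read the flag from the parsed global args instead of a parameter
        run(newline_on_success=newline_on_success, travis=args.travis)

    _build_and_run(newline_on_success=True)  # prints: travis mode: False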
@@ -857,11 +862,11 @@ def _build_and_run(
spec
for config in run_configs
for language in languages
for spec in language.test_specs(config, args.travis)
for spec in language.test_specs(config, args)
if re.search(args.regex, spec.shortname))
# When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
if travis:
if args.travis:
massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
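The ordering policy above: on Travis the run list is sorted so runs are reproducible, while local runs are shuffled so every test gets a chance at each position. A minimal sketch with invented test names:

    import random

    runs = ['c_test', 'a_test', 'b_test']
    on_travis = False
    if on_travis:
        runs = sorted(runs)   # deterministic order for reproducible CI runs
    else:
        random.shuffle(runs)  # vary ordering between local runs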
@@ -876,7 +881,7 @@ def _build_and_run(
number_failures, resultset = jobset.run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
cache=cache if not xml_report else None,
add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
@@ -901,7 +906,7 @@ def _build_and_run(
number_failures, _ = jobset.run(
post_tests_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=travis)
newline_on_success=newline_on_success, travis=args.travis)
if number_failures:
return 3
@@ -922,7 +927,6 @@ if forever:
previous_success = success
success = _build_and_run(check_cancelled=have_files_changed,
newline_on_success=False,
travis=args.travis,
cache=test_cache) == 0
if not previous_success and success:
jobset.message('SUCCESS',
@@ -934,7 +938,6 @@ if forever:
else:
result = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
travis=args.travis,
cache=test_cache,
xml_report=args.xml_report)
if result == 0:

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff