Merge pull request #4454 from grpc/master

Re-cutting the 0.12 release from master.
pull/4488/head
Michael Lumish 9 years ago
commit da61668122
100 changed files (lines changed in parentheses):

  1. .gitignore (1)
  2. BUILD (93)
  3. Makefile (17050)
  4. README.md (38)
  5. binding.gyp (15)
  6. build.yaml (305)
  7. doc/PROTOCOL-HTTP2.md (2)
  8. doc/grpc-auth-support.md (289)
  9. gRPC.podspec (42)
  10. include/grpc++/client_context.h (13)
  11. include/grpc++/impl/rpc_service_method.h (7)
  12. include/grpc++/impl/server_builder_option.h (25)
  13. include/grpc++/impl/thd_no_cxx11.h (23)
  14. include/grpc++/server.h (23)
  15. include/grpc++/server_builder.h (6)
  16. include/grpc++/support/channel_arguments.h (5)
  17. include/grpc/compression.h (6)
  18. include/grpc/grpc.h (16)
  19. include/grpc/support/alloc.h (14)
  20. include/grpc/support/avl.h (91)
  21. include/grpc/support/cmdline.h (8)
  22. include/grpc/support/port_platform.h (9)
  23. include/grpc/support/slice.h (3)
  24. include/grpc/support/slice_buffer.h (5)
  25. include/grpc/support/time.h (4)
  26. package.json (3)
  27. src/core/census/context.h (8)
  28. src/core/census/grpc_filter.c (89)
  29. src/core/channel/channel_stack.c (59)
  30. src/core/channel/channel_stack.h (83)
  31. src/core/channel/client_channel.c (612)
  32. src/core/channel/client_channel.h (12)
  33. src/core/channel/client_uchannel.c (423)
  34. src/core/channel/client_uchannel.h (16)
  35. src/core/channel/compress_filter.c (344)
  36. src/core/channel/connected_channel.c (33)
  37. src/core/channel/connected_channel.h (2)
  38. src/core/channel/http_client_filter.c (177)
  39. src/core/channel/http_server_filter.c (208)
  40. src/core/channel/noop_filter.c (122)
  41. src/core/channel/subchannel_call_holder.c (259)
  42. src/core/channel/subchannel_call_holder.h (98)
  43. src/core/client_config/connector.h (2)
  44. src/core/client_config/default_initial_connect_string.c (14)
  45. src/core/client_config/initial_connect_string.c (53)
  46. src/core/client_config/initial_connect_string.h (50)
  47. src/core/client_config/lb_policies/pick_first.c (186)
  48. src/core/client_config/lb_policies/round_robin.c (276)
  49. src/core/client_config/lb_policy.c (92)
  50. src/core/client_config/lb_policy.h (50)
  51. src/core/client_config/resolver.c (7)
  52. src/core/client_config/resolver.h (10)
  53. src/core/client_config/resolvers/dns_resolver.c (8)
  54. src/core/client_config/resolvers/sockaddr_resolver.c (16)
  55. src/core/client_config/resolvers/zookeeper_resolver.c (7)
  56. src/core/client_config/subchannel.c (743)
  57. src/core/client_config/subchannel.h (107)
  58. src/core/client_config/subchannel_factory_decorators/merge_channel_args.c (86)
  59. src/core/compression/algorithm.c (70)
  60. src/core/compression/algorithm_metadata.h (53)
  61. src/core/compression/message_compress.c (28)
  62. src/core/httpcli/httpcli.c (9)
  63. src/core/httpcli/httpcli.h (2)
  64. src/core/httpcli/httpcli_security_connector.c (5)
  65. src/core/iomgr/closure.c (23)
  66. src/core/iomgr/closure.h (19)
  67. src/core/iomgr/endpoint_pair_posix.c (3)
  68. src/core/iomgr/exec_ctx.c (5)
  69. src/core/iomgr/executor.c (13)
  70. src/core/iomgr/fd_posix.c (23)
  71. src/core/iomgr/fd_posix.h (5)
  72. src/core/iomgr/pollset.h (5)
  73. src/core/iomgr/pollset_multipoller_with_epoll.c (36)
  74. src/core/iomgr/pollset_multipoller_with_poll_posix.c (23)
  75. src/core/iomgr/pollset_posix.c (127)
  76. src/core/iomgr/pollset_posix.h (15)
  77. src/core/iomgr/pollset_set.h (22)
  78. src/core/iomgr/pollset_set_posix.c (51)
  79. src/core/iomgr/pollset_set_posix.h (4)
  80. src/core/iomgr/pollset_set_windows.c (8)
  81. src/core/iomgr/pollset_windows.c (13)
  82. src/core/iomgr/tcp_client_posix.c (4)
  83. src/core/iomgr/tcp_posix.c (16)
  84. src/core/iomgr/tcp_posix.h (6)
  85. src/core/iomgr/tcp_server.h (19)
  86. src/core/iomgr/tcp_server_posix.c (153)
  87. src/core/iomgr/tcp_server_windows.c (125)
  88. src/core/iomgr/tcp_windows.c (14)
  89. src/core/iomgr/timer.c (12)
  90. src/core/iomgr/timer_internal.h (2)
  91. src/core/iomgr/udp_server.c (4)
  92. src/core/iomgr/wakeup_fd_posix.c (8)
  93. src/core/iomgr/wakeup_fd_posix.h (2)
  94. src/core/iomgr/workqueue_posix.c (7)
  95. src/core/json/json_reader.c (15)
  96. src/core/json/json_string.c (2)
  97. src/core/profiling/basic_timers.c (190)
  98. src/core/profiling/timers.h (2)
  99. src/core/security/client_auth_filter.c (140)
  100. src/core/security/credentials.c (21)
Some files were not shown because too many files have changed in this diff.

.gitignore (1)

@@ -38,6 +38,7 @@ cache.mk
# Temporary test reports
report.xml
latency_trace.txt
latency_trace.*.txt
# port server log
portlog.txt

BUILD (93)

@@ -57,6 +57,7 @@ cc_library(
"src/core/profiling/basic_timers.c",
"src/core/profiling/stap_timers.c",
"src/core/support/alloc.c",
"src/core/support/avl.c",
"src/core/support/cmdline.c",
"src/core/support/cpu_iphone.c",
"src/core/support/cpu_linux.c",
@@ -101,6 +102,7 @@ cc_library(
"include/grpc/support/atm_gcc_atomic.h",
"include/grpc/support/atm_gcc_sync.h",
"include/grpc/support/atm_win32.h",
"include/grpc/support/avl.h",
"include/grpc/support/cmdline.h",
"include/grpc/support/cpu.h",
"include/grpc/support/histogram.h",
@@ -159,9 +161,10 @@ cc_library(
"src/core/channel/context.h",
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/channel/subchannel_call_holder.h",
"src/core/client_config/client_config.h",
"src/core/client_config/connector.h",
"src/core/client_config/initial_connect_string.h",
"src/core/client_config/lb_policies/pick_first.h",
"src/core/client_config/lb_policies/round_robin.h",
"src/core/client_config/lb_policy.h",
@@ -174,9 +177,8 @@ cc_library(
"src/core/client_config/resolvers/sockaddr_resolver.h",
"src/core/client_config/subchannel.h",
"src/core/client_config/subchannel_factory.h",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.h",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.h",
"src/core/client_config/uri_parser.h",
"src/core/compression/algorithm_metadata.h",
"src/core/compression/message_compress.h",
"src/core/debug/trace.h",
"src/core/httpcli/format_request.h",
@@ -226,7 +228,6 @@ cc_library(
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
"src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/call_test_only.h",
"src/core/surface/channel.h",
@@ -235,6 +236,7 @@ cc_library(
"src/core/surface/init.h",
"src/core/surface/server.h",
"src/core/surface/surface_trace.h",
"src/core/transport/byte_stream.h",
"src/core/transport/chttp2/alpn.h",
"src/core/transport/chttp2/bin_encoder.h",
"src/core/transport/chttp2/frame.h",
@@ -244,6 +246,7 @@ cc_library(
"src/core/transport/chttp2/frame_rst_stream.h",
"src/core/transport/chttp2/frame_settings.h",
"src/core/transport/chttp2/frame_window_update.h",
"src/core/transport/chttp2/hpack_encoder.h",
"src/core/transport/chttp2/hpack_parser.h",
"src/core/transport/chttp2/hpack_table.h",
"src/core/transport/chttp2/http2_errors.h",
@@ -251,14 +254,14 @@ cc_library(
"src/core/transport/chttp2/incoming_metadata.h",
"src/core/transport/chttp2/internal.h",
"src/core/transport/chttp2/status_conversion.h",
"src/core/transport/chttp2/stream_encoder.h",
"src/core/transport/chttp2/stream_map.h",
"src/core/transport/chttp2/timeout_encoding.h",
"src/core/transport/chttp2/varint.h",
"src/core/transport/chttp2_transport.h",
"src/core/transport/connectivity_state.h",
"src/core/transport/metadata.h",
"src/core/transport/stream_op.h",
"src/core/transport/metadata_batch.h",
"src/core/transport/static_metadata.h",
"src/core/transport/transport.h",
"src/core/transport/transport_impl.h",
"src/core/census/aggregation.h",
@@ -295,9 +298,11 @@ cc_library(
"src/core/channel/connected_channel.c",
"src/core/channel/http_client_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/noop_filter.c",
"src/core/channel/subchannel_call_holder.c",
"src/core/client_config/client_config.c",
"src/core/client_config/connector.c",
"src/core/client_config/default_initial_connect_string.c",
"src/core/client_config/initial_connect_string.c",
"src/core/client_config/lb_policies/pick_first.c",
"src/core/client_config/lb_policies/round_robin.c",
"src/core/client_config/lb_policy.c",
@@ -310,8 +315,6 @@ cc_library(
"src/core/client_config/resolvers/sockaddr_resolver.c",
"src/core/client_config/subchannel.c",
"src/core/client_config/subchannel_factory.c",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.c",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.c",
"src/core/client_config/uri_parser.c",
"src/core/compression/algorithm.c",
"src/core/compression/message_compress.c",
@@ -365,7 +368,6 @@ cc_library(
"src/core/json/json_writer.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
"src/core/surface/byte_buffer_reader.c",
"src/core/surface/call.c",
"src/core/surface/call_details.c",
@@ -373,6 +375,7 @@ cc_library(
"src/core/surface/channel.c",
"src/core/surface/channel_connectivity.c",
"src/core/surface/channel_create.c",
"src/core/surface/channel_ping.c",
"src/core/surface/completion_queue.c",
"src/core/surface/event_string.c",
"src/core/surface/init.c",
@@ -382,6 +385,7 @@ cc_library(
"src/core/surface/server_chttp2.c",
"src/core/surface/server_create.c",
"src/core/surface/version.c",
"src/core/transport/byte_stream.c",
"src/core/transport/chttp2/alpn.c",
"src/core/transport/chttp2/bin_encoder.c",
"src/core/transport/chttp2/frame_data.c",
@@ -390,13 +394,13 @@ cc_library(
"src/core/transport/chttp2/frame_rst_stream.c",
"src/core/transport/chttp2/frame_settings.c",
"src/core/transport/chttp2/frame_window_update.c",
"src/core/transport/chttp2/hpack_encoder.c",
"src/core/transport/chttp2/hpack_parser.c",
"src/core/transport/chttp2/hpack_table.c",
"src/core/transport/chttp2/huffsyms.c",
"src/core/transport/chttp2/incoming_metadata.c",
"src/core/transport/chttp2/parsing.c",
"src/core/transport/chttp2/status_conversion.c",
"src/core/transport/chttp2/stream_encoder.c",
"src/core/transport/chttp2/stream_lists.c",
"src/core/transport/chttp2/stream_map.c",
"src/core/transport/chttp2/timeout_encoding.c",
@@ -405,7 +409,8 @@ cc_library(
"src/core/transport/chttp2_transport.c",
"src/core/transport/connectivity_state.c",
"src/core/transport/metadata.c",
"src/core/transport/stream_op.c",
"src/core/transport/metadata_batch.c",
"src/core/transport/static_metadata.c",
"src/core/transport/transport.c",
"src/core/transport/transport_op_string.c",
"src/core/census/context.c",
@@ -447,9 +452,10 @@ cc_library(
"src/core/channel/context.h",
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/channel/subchannel_call_holder.h",
"src/core/client_config/client_config.h",
"src/core/client_config/connector.h",
"src/core/client_config/initial_connect_string.h",
"src/core/client_config/lb_policies/pick_first.h",
"src/core/client_config/lb_policies/round_robin.h",
"src/core/client_config/lb_policy.h",
@@ -462,9 +468,8 @@ cc_library(
"src/core/client_config/resolvers/sockaddr_resolver.h",
"src/core/client_config/subchannel.h",
"src/core/client_config/subchannel_factory.h",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.h",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.h",
"src/core/client_config/uri_parser.h",
"src/core/compression/algorithm_metadata.h",
"src/core/compression/message_compress.h",
"src/core/debug/trace.h",
"src/core/httpcli/format_request.h",
@@ -514,7 +519,6 @@ cc_library(
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
"src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/call_test_only.h",
"src/core/surface/channel.h",
@@ -523,6 +527,7 @@ cc_library(
"src/core/surface/init.h",
"src/core/surface/server.h",
"src/core/surface/surface_trace.h",
"src/core/transport/byte_stream.h",
"src/core/transport/chttp2/alpn.h",
"src/core/transport/chttp2/bin_encoder.h",
"src/core/transport/chttp2/frame.h",
@@ -532,6 +537,7 @@ cc_library(
"src/core/transport/chttp2/frame_rst_stream.h",
"src/core/transport/chttp2/frame_settings.h",
"src/core/transport/chttp2/frame_window_update.h",
"src/core/transport/chttp2/hpack_encoder.h",
"src/core/transport/chttp2/hpack_parser.h",
"src/core/transport/chttp2/hpack_table.h",
"src/core/transport/chttp2/http2_errors.h",
@@ -539,14 +545,14 @@ cc_library(
"src/core/transport/chttp2/incoming_metadata.h",
"src/core/transport/chttp2/internal.h",
"src/core/transport/chttp2/status_conversion.h",
"src/core/transport/chttp2/stream_encoder.h",
"src/core/transport/chttp2/stream_map.h",
"src/core/transport/chttp2/timeout_encoding.h",
"src/core/transport/chttp2/varint.h",
"src/core/transport/chttp2_transport.h",
"src/core/transport/connectivity_state.h",
"src/core/transport/metadata.h",
"src/core/transport/stream_op.h",
"src/core/transport/metadata_batch.h",
"src/core/transport/static_metadata.h",
"src/core/transport/transport.h",
"src/core/transport/transport_impl.h",
"src/core/census/aggregation.h",
@@ -563,9 +569,11 @@ cc_library(
"src/core/channel/connected_channel.c",
"src/core/channel/http_client_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/noop_filter.c",
"src/core/channel/subchannel_call_holder.c",
"src/core/client_config/client_config.c",
"src/core/client_config/connector.c",
"src/core/client_config/default_initial_connect_string.c",
"src/core/client_config/initial_connect_string.c",
"src/core/client_config/lb_policies/pick_first.c",
"src/core/client_config/lb_policies/round_robin.c",
"src/core/client_config/lb_policy.c",
@@ -578,8 +586,6 @@ cc_library(
"src/core/client_config/resolvers/sockaddr_resolver.c",
"src/core/client_config/subchannel.c",
"src/core/client_config/subchannel_factory.c",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.c",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.c",
"src/core/client_config/uri_parser.c",
"src/core/compression/algorithm.c",
"src/core/compression/message_compress.c",
@@ -633,7 +639,6 @@ cc_library(
"src/core/json/json_writer.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
"src/core/surface/byte_buffer_reader.c",
"src/core/surface/call.c",
"src/core/surface/call_details.c",
@@ -641,6 +646,7 @@ cc_library(
"src/core/surface/channel.c",
"src/core/surface/channel_connectivity.c",
"src/core/surface/channel_create.c",
"src/core/surface/channel_ping.c",
"src/core/surface/completion_queue.c",
"src/core/surface/event_string.c",
"src/core/surface/init.c",
@@ -650,6 +656,7 @@ cc_library(
"src/core/surface/server_chttp2.c",
"src/core/surface/server_create.c",
"src/core/surface/version.c",
"src/core/transport/byte_stream.c",
"src/core/transport/chttp2/alpn.c",
"src/core/transport/chttp2/bin_encoder.c",
"src/core/transport/chttp2/frame_data.c",
@@ -658,13 +665,13 @@ cc_library(
"src/core/transport/chttp2/frame_rst_stream.c",
"src/core/transport/chttp2/frame_settings.c",
"src/core/transport/chttp2/frame_window_update.c",
"src/core/transport/chttp2/hpack_encoder.c",
"src/core/transport/chttp2/hpack_parser.c",
"src/core/transport/chttp2/hpack_table.c",
"src/core/transport/chttp2/huffsyms.c",
"src/core/transport/chttp2/incoming_metadata.c",
"src/core/transport/chttp2/parsing.c",
"src/core/transport/chttp2/status_conversion.c",
"src/core/transport/chttp2/stream_encoder.c",
"src/core/transport/chttp2/stream_lists.c",
"src/core/transport/chttp2/stream_map.c",
"src/core/transport/chttp2/timeout_encoding.c",
@@ -673,7 +680,8 @@ cc_library(
"src/core/transport/chttp2_transport.c",
"src/core/transport/connectivity_state.c",
"src/core/transport/metadata.c",
"src/core/transport/stream_op.c",
"src/core/transport/metadata_batch.c",
"src/core/transport/static_metadata.c",
"src/core/transport/transport.c",
"src/core/transport/transport_op_string.c",
"src/core/census/context.c",
@@ -730,14 +738,13 @@ cc_library(
"src/cpp/server/dynamic_thread_pool.h",
"src/cpp/server/fixed_size_thread_pool.h",
"src/cpp/server/thread_pool_interface.h",
"src/cpp/client/secure_channel_arguments.cc",
"src/cpp/client/secure_credentials.cc",
"src/cpp/common/auth_property_iterator.cc",
"src/cpp/common/secure_auth_context.cc",
"src/cpp/common/secure_channel_arguments.cc",
"src/cpp/common/secure_create_auth_context.cc",
"src/cpp/server/secure_server_credentials.cc",
"src/cpp/client/channel.cc",
"src/cpp/client/channel_arguments.cc",
"src/cpp/client/client_context.cc",
"src/cpp/client/create_channel.cc",
"src/cpp/client/create_channel_internal.cc",
@@ -745,6 +752,7 @@ cc_library(
"src/cpp/client/generic_stub.cc",
"src/cpp/client/insecure_credentials.cc",
"src/cpp/common/call.cc",
"src/cpp/common/channel_arguments.cc",
"src/cpp/common/completion_queue.cc",
"src/cpp/common/rpc_method.cc",
"src/cpp/proto/proto_utils.cc",
@@ -778,6 +786,7 @@ cc_library(
"include/grpc++/impl/rpc_method.h",
"include/grpc++/impl/rpc_service_method.h",
"include/grpc++/impl/serialization_traits.h",
"include/grpc++/impl/server_builder_option.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",
@@ -829,7 +838,6 @@ cc_library(
"src/cpp/server/thread_pool_interface.h",
"src/cpp/common/insecure_create_auth_context.cc",
"src/cpp/client/channel.cc",
"src/cpp/client/channel_arguments.cc",
"src/cpp/client/client_context.cc",
"src/cpp/client/create_channel.cc",
"src/cpp/client/create_channel_internal.cc",
@@ -837,6 +845,7 @@ cc_library(
"src/cpp/client/generic_stub.cc",
"src/cpp/client/insecure_credentials.cc",
"src/cpp/common/call.cc",
"src/cpp/common/channel_arguments.cc",
"src/cpp/common/completion_queue.cc",
"src/cpp/common/rpc_method.cc",
"src/cpp/proto/proto_utils.cc",
@@ -870,6 +879,7 @@ cc_library(
"include/grpc++/impl/rpc_method.h",
"include/grpc++/impl/rpc_service_method.h",
"include/grpc++/impl/serialization_traits.h",
"include/grpc++/impl/server_builder_option.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",
@@ -971,6 +981,7 @@ objc_library(
"src/core/profiling/basic_timers.c",
"src/core/profiling/stap_timers.c",
"src/core/support/alloc.c",
"src/core/support/avl.c",
"src/core/support/cmdline.c",
"src/core/support/cpu_iphone.c",
"src/core/support/cpu_linux.c",
@@ -1015,6 +1026,7 @@ objc_library(
"include/grpc/support/atm_gcc_atomic.h",
"include/grpc/support/atm_gcc_sync.h",
"include/grpc/support/atm_win32.h",
"include/grpc/support/avl.h",
"include/grpc/support/cmdline.h",
"include/grpc/support/cpu.h",
"include/grpc/support/histogram.h",
@@ -1091,9 +1103,11 @@ objc_library(
"src/core/channel/connected_channel.c",
"src/core/channel/http_client_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/noop_filter.c",
"src/core/channel/subchannel_call_holder.c",
"src/core/client_config/client_config.c",
"src/core/client_config/connector.c",
"src/core/client_config/default_initial_connect_string.c",
"src/core/client_config/initial_connect_string.c",
"src/core/client_config/lb_policies/pick_first.c",
"src/core/client_config/lb_policies/round_robin.c",
"src/core/client_config/lb_policy.c",
@@ -1106,8 +1120,6 @@ objc_library(
"src/core/client_config/resolvers/sockaddr_resolver.c",
"src/core/client_config/subchannel.c",
"src/core/client_config/subchannel_factory.c",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.c",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.c",
"src/core/client_config/uri_parser.c",
"src/core/compression/algorithm.c",
"src/core/compression/message_compress.c",
@@ -1161,7 +1173,6 @@ objc_library(
"src/core/json/json_writer.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
"src/core/surface/byte_buffer_reader.c",
"src/core/surface/call.c",
"src/core/surface/call_details.c",
@@ -1169,6 +1180,7 @@ objc_library(
"src/core/surface/channel.c",
"src/core/surface/channel_connectivity.c",
"src/core/surface/channel_create.c",
"src/core/surface/channel_ping.c",
"src/core/surface/completion_queue.c",
"src/core/surface/event_string.c",
"src/core/surface/init.c",
@@ -1178,6 +1190,7 @@ objc_library(
"src/core/surface/server_chttp2.c",
"src/core/surface/server_create.c",
"src/core/surface/version.c",
"src/core/transport/byte_stream.c",
"src/core/transport/chttp2/alpn.c",
"src/core/transport/chttp2/bin_encoder.c",
"src/core/transport/chttp2/frame_data.c",
@@ -1186,13 +1199,13 @@ objc_library(
"src/core/transport/chttp2/frame_rst_stream.c",
"src/core/transport/chttp2/frame_settings.c",
"src/core/transport/chttp2/frame_window_update.c",
"src/core/transport/chttp2/hpack_encoder.c",
"src/core/transport/chttp2/hpack_parser.c",
"src/core/transport/chttp2/hpack_table.c",
"src/core/transport/chttp2/huffsyms.c",
"src/core/transport/chttp2/incoming_metadata.c",
"src/core/transport/chttp2/parsing.c",
"src/core/transport/chttp2/status_conversion.c",
"src/core/transport/chttp2/stream_encoder.c",
"src/core/transport/chttp2/stream_lists.c",
"src/core/transport/chttp2/stream_map.c",
"src/core/transport/chttp2/timeout_encoding.c",
@@ -1201,7 +1214,8 @@ objc_library(
"src/core/transport/chttp2_transport.c",
"src/core/transport/connectivity_state.c",
"src/core/transport/metadata.c",
"src/core/transport/stream_op.c",
"src/core/transport/metadata_batch.c",
"src/core/transport/static_metadata.c",
"src/core/transport/transport.c",
"src/core/transport/transport_op_string.c",
"src/core/census/context.c",
@@ -1240,9 +1254,10 @@ objc_library(
"src/core/channel/context.h",
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/channel/subchannel_call_holder.h",
"src/core/client_config/client_config.h",
"src/core/client_config/connector.h",
"src/core/client_config/initial_connect_string.h",
"src/core/client_config/lb_policies/pick_first.h",
"src/core/client_config/lb_policies/round_robin.h",
"src/core/client_config/lb_policy.h",
@@ -1255,9 +1270,8 @@ objc_library(
"src/core/client_config/resolvers/sockaddr_resolver.h",
"src/core/client_config/subchannel.h",
"src/core/client_config/subchannel_factory.h",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.h",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.h",
"src/core/client_config/uri_parser.h",
"src/core/compression/algorithm_metadata.h",
"src/core/compression/message_compress.h",
"src/core/debug/trace.h",
"src/core/httpcli/format_request.h",
@@ -1307,7 +1321,6 @@ objc_library(
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
"src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/call_test_only.h",
"src/core/surface/channel.h",
@@ -1316,6 +1329,7 @@ objc_library(
"src/core/surface/init.h",
"src/core/surface/server.h",
"src/core/surface/surface_trace.h",
"src/core/transport/byte_stream.h",
"src/core/transport/chttp2/alpn.h",
"src/core/transport/chttp2/bin_encoder.h",
"src/core/transport/chttp2/frame.h",
@@ -1325,6 +1339,7 @@ objc_library(
"src/core/transport/chttp2/frame_rst_stream.h",
"src/core/transport/chttp2/frame_settings.h",
"src/core/transport/chttp2/frame_window_update.h",
"src/core/transport/chttp2/hpack_encoder.h",
"src/core/transport/chttp2/hpack_parser.h",
"src/core/transport/chttp2/hpack_table.h",
"src/core/transport/chttp2/http2_errors.h",
@@ -1332,14 +1347,14 @@ objc_library(
"src/core/transport/chttp2/incoming_metadata.h",
"src/core/transport/chttp2/internal.h",
"src/core/transport/chttp2/status_conversion.h",
"src/core/transport/chttp2/stream_encoder.h",
"src/core/transport/chttp2/stream_map.h",
"src/core/transport/chttp2/timeout_encoding.h",
"src/core/transport/chttp2/varint.h",
"src/core/transport/chttp2_transport.h",
"src/core/transport/connectivity_state.h",
"src/core/transport/metadata.h",
"src/core/transport/stream_op.h",
"src/core/transport/metadata_batch.h",
"src/core/transport/static_metadata.h",
"src/core/transport/transport.h",
"src/core/transport/transport_impl.h",
"src/core/census/aggregation.h",

Makefile (17050)

File diff suppressed because one or more lines are too long

README.md (38)

@@ -13,34 +13,28 @@ You can find more detailed documentation and examples in the [doc](doc) and [exa
See grpc/INSTALL for installation instructions for various platforms.
#Repository Structure
#Repository Structure & Status
This repository contains source code for gRPC libraries for multiple languages written on top
of shared C core library [src/core] (src/core).
This repository contains source code for gRPC libraries for multiple languages written on top of shared C core library [src/core] (src/core).
* C++ source code: [src/cpp] (src/cpp)
* Ruby source code: [src/ruby] (src/ruby)
* NodeJS source code: [src/node] (src/node)
* Python source code: [src/python] (src/python)
* PHP source code: [src/php] (src/php)
* C# source code: [src/csharp] (src/csharp)
* Objective-C source code: [src/objective-c] (src/objective-c)
Libraries in different languages are in different states of development. We are seeking contributions for all of these libraries.
| Language | Source | Status |
|-------------------------|-------------------------------------|---------------------------------|
| Shared C [core library] | [src/core] (src/core) | Beta - the surface API is stable |
| C++ | [src/cpp] (src/cpp) | Beta - the surface API is stable |
| Ruby | [src/ruby] (src/ruby) | Beta - the surface API is stable |
| NodeJS | [src/node] (src/node) | Beta - the surface API is stable |
| Python | [src/python] (src/python) | Beta - the surface API is stable |
| PHP | [src/php] (src/php) | Beta - the surface API is stable |
| C# | [src/csharp] (src/csharp) | Beta - the surface API is stable |
| Objective-C | [src/objective-c] (src/objective-c) | Beta - the surface API is stable |
<small>
Java source code is in [grpc-java] (http://github.com/grpc/grpc-java) repository.
Go source code is in [grpc-go] (http://github.com/grpc/grpc-go) repository.
</small>
#Current Status of libraries
Libraries in different languages are in different states of development. We are seeking contributions for all of these libraries.
* shared C core library [src/core] (src/core) : Beta - the surface API is stable
* C++ Library: [src/cpp] (src/cpp) : Beta - the surface API is stable
* Ruby Library: [src/ruby] (src/ruby) : Beta - the surface API is stable
* NodeJS Library: [src/node] (src/node) : Beta - the surface API is stable
* Python Library: [src/python] (src/python) : Beta - the surface API is stable
* C# Library: [src/csharp] (src/csharp) : Beta - the surface API is stable
* Objective-C Library: [src/objective-c] (src/objective-c): Beta - the surface API is stable
* PHP Library: [src/php] (src/php) : Beta - the surface API is stable
#Overview

binding.gyp (15)

@@ -97,6 +97,7 @@
'src/core/profiling/basic_timers.c',
'src/core/profiling/stap_timers.c',
'src/core/support/alloc.c',
'src/core/support/avl.c',
'src/core/support/cmdline.c',
'src/core/support/cpu_iphone.c',
'src/core/support/cpu_linux.c',
@@ -182,9 +183,11 @@
'src/core/channel/connected_channel.c',
'src/core/channel/http_client_filter.c',
'src/core/channel/http_server_filter.c',
'src/core/channel/noop_filter.c',
'src/core/channel/subchannel_call_holder.c',
'src/core/client_config/client_config.c',
'src/core/client_config/connector.c',
'src/core/client_config/default_initial_connect_string.c',
'src/core/client_config/initial_connect_string.c',
'src/core/client_config/lb_policies/pick_first.c',
'src/core/client_config/lb_policies/round_robin.c',
'src/core/client_config/lb_policy.c',
@@ -197,8 +200,6 @@
'src/core/client_config/resolvers/sockaddr_resolver.c',
'src/core/client_config/subchannel.c',
'src/core/client_config/subchannel_factory.c',
'src/core/client_config/subchannel_factory_decorators/add_channel_arg.c',
'src/core/client_config/subchannel_factory_decorators/merge_channel_args.c',
'src/core/client_config/uri_parser.c',
'src/core/compression/algorithm.c',
'src/core/compression/message_compress.c',
@@ -252,7 +253,6 @@
'src/core/json/json_writer.c',
'src/core/surface/api_trace.c',
'src/core/surface/byte_buffer.c',
'src/core/surface/byte_buffer_queue.c',
'src/core/surface/byte_buffer_reader.c',
'src/core/surface/call.c',
'src/core/surface/call_details.c',
@@ -260,6 +260,7 @@
'src/core/surface/channel.c',
'src/core/surface/channel_connectivity.c',
'src/core/surface/channel_create.c',
'src/core/surface/channel_ping.c',
'src/core/surface/completion_queue.c',
'src/core/surface/event_string.c',
'src/core/surface/init.c',
@@ -269,6 +270,7 @@
'src/core/surface/server_chttp2.c',
'src/core/surface/server_create.c',
'src/core/surface/version.c',
'src/core/transport/byte_stream.c',
'src/core/transport/chttp2/alpn.c',
'src/core/transport/chttp2/bin_encoder.c',
'src/core/transport/chttp2/frame_data.c',
@@ -277,13 +279,13 @@
'src/core/transport/chttp2/frame_rst_stream.c',
'src/core/transport/chttp2/frame_settings.c',
'src/core/transport/chttp2/frame_window_update.c',
'src/core/transport/chttp2/hpack_encoder.c',
'src/core/transport/chttp2/hpack_parser.c',
'src/core/transport/chttp2/hpack_table.c',
'src/core/transport/chttp2/huffsyms.c',
'src/core/transport/chttp2/incoming_metadata.c',
'src/core/transport/chttp2/parsing.c',
'src/core/transport/chttp2/status_conversion.c',
'src/core/transport/chttp2/stream_encoder.c',
'src/core/transport/chttp2/stream_lists.c',
'src/core/transport/chttp2/stream_map.c',
'src/core/transport/chttp2/timeout_encoding.c',
@@ -292,7 +294,8 @@
'src/core/transport/chttp2_transport.c',
'src/core/transport/connectivity_state.c',
'src/core/transport/metadata.c',
'src/core/transport/stream_op.c',
'src/core/transport/metadata_batch.c',
'src/core/transport/static_metadata.c',
'src/core/transport/transport.c',
'src/core/transport/transport_op_string.c',
'src/core/census/context.c',

build.yaml (305)

@@ -37,6 +37,7 @@ filegroups:
- include/grpc++/impl/rpc_method.h
- include/grpc++/impl/rpc_service_method.h
- include/grpc++/impl/serialization_traits.h
- include/grpc++/impl/server_builder_option.h
- include/grpc++/impl/service_type.h
- include/grpc++/impl/sync.h
- include/grpc++/impl/sync_cxx11.h
@@ -72,7 +73,6 @@ filegroups:
- src/cpp/server/thread_pool_interface.h
src:
- src/cpp/client/channel.cc
- src/cpp/client/channel_arguments.cc
- src/cpp/client/client_context.cc
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
@@ -80,6 +80,7 @@ filegroups:
- src/cpp/client/generic_stub.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/common/call.cc
- src/cpp/common/channel_arguments.cc
- src/cpp/common/completion_queue.cc
- src/cpp/common/rpc_method.cc
- src/cpp/proto/proto_utils.cc
@@ -115,9 +116,10 @@ filegroups:
- src/core/channel/context.h
- src/core/channel/http_client_filter.h
- src/core/channel/http_server_filter.h
- src/core/channel/noop_filter.h
- src/core/channel/subchannel_call_holder.h
- src/core/client_config/client_config.h
- src/core/client_config/connector.h
- src/core/client_config/initial_connect_string.h
- src/core/client_config/lb_policies/pick_first.h
- src/core/client_config/lb_policies/round_robin.h
- src/core/client_config/lb_policy.h
@@ -130,9 +132,8 @@ filegroups:
- src/core/client_config/resolvers/sockaddr_resolver.h
- src/core/client_config/subchannel.h
- src/core/client_config/subchannel_factory.h
- src/core/client_config/subchannel_factory_decorators/add_channel_arg.h
- src/core/client_config/subchannel_factory_decorators/merge_channel_args.h
- src/core/client_config/uri_parser.h
- src/core/compression/algorithm_metadata.h
- src/core/compression/message_compress.h
- src/core/debug/trace.h
- src/core/httpcli/format_request.h
@@ -182,7 +183,6 @@ filegroups:
- src/core/statistics/census_interface.h
- src/core/statistics/census_rpc_stats.h
- src/core/surface/api_trace.h
- src/core/surface/byte_buffer_queue.h
- src/core/surface/call.h
- src/core/surface/call_test_only.h
- src/core/surface/channel.h
@@ -191,6 +191,7 @@ filegroups:
- src/core/surface/init.h
- src/core/surface/server.h
- src/core/surface/surface_trace.h
- src/core/transport/byte_stream.h
- src/core/transport/chttp2/alpn.h
- src/core/transport/chttp2/bin_encoder.h
- src/core/transport/chttp2/frame.h
@@ -200,6 +201,7 @@ filegroups:
- src/core/transport/chttp2/frame_rst_stream.h
- src/core/transport/chttp2/frame_settings.h
- src/core/transport/chttp2/frame_window_update.h
- src/core/transport/chttp2/hpack_encoder.h
- src/core/transport/chttp2/hpack_parser.h
- src/core/transport/chttp2/hpack_table.h
- src/core/transport/chttp2/http2_errors.h
@@ -207,14 +209,14 @@ filegroups:
- src/core/transport/chttp2/incoming_metadata.h
- src/core/transport/chttp2/internal.h
- src/core/transport/chttp2/status_conversion.h
- src/core/transport/chttp2/stream_encoder.h
- src/core/transport/chttp2/stream_map.h
- src/core/transport/chttp2/timeout_encoding.h
- src/core/transport/chttp2/varint.h
- src/core/transport/chttp2_transport.h
- src/core/transport/connectivity_state.h
- src/core/transport/metadata.h
- src/core/transport/stream_op.h
- src/core/transport/metadata_batch.h
- src/core/transport/static_metadata.h
- src/core/transport/transport.h
- src/core/transport/transport_impl.h
src:
@@ -228,9 +230,11 @@ filegroups:
- src/core/channel/connected_channel.c
- src/core/channel/http_client_filter.c
- src/core/channel/http_server_filter.c
- src/core/channel/noop_filter.c
- src/core/channel/subchannel_call_holder.c
- src/core/client_config/client_config.c
- src/core/client_config/connector.c
- src/core/client_config/default_initial_connect_string.c
- src/core/client_config/initial_connect_string.c
- src/core/client_config/lb_policies/pick_first.c
- src/core/client_config/lb_policies/round_robin.c
- src/core/client_config/lb_policy.c
@@ -243,8 +247,6 @@ filegroups:
- src/core/client_config/resolvers/sockaddr_resolver.c
- src/core/client_config/subchannel.c
- src/core/client_config/subchannel_factory.c
- src/core/client_config/subchannel_factory_decorators/add_channel_arg.c
- src/core/client_config/subchannel_factory_decorators/merge_channel_args.c
- src/core/client_config/uri_parser.c
- src/core/compression/algorithm.c
- src/core/compression/message_compress.c
@@ -298,7 +300,6 @@ filegroups:
- src/core/json/json_writer.c
- src/core/surface/api_trace.c
- src/core/surface/byte_buffer.c
- src/core/surface/byte_buffer_queue.c
- src/core/surface/byte_buffer_reader.c
- src/core/surface/call.c
- src/core/surface/call_details.c
@@ -306,6 +307,7 @@ filegroups:
- src/core/surface/channel.c
- src/core/surface/channel_connectivity.c
- src/core/surface/channel_create.c
- src/core/surface/channel_ping.c
- src/core/surface/completion_queue.c
- src/core/surface/event_string.c
- src/core/surface/init.c
@@ -315,6 +317,7 @@ filegroups:
- src/core/surface/server_chttp2.c
- src/core/surface/server_create.c
- src/core/surface/version.c
- src/core/transport/byte_stream.c
- src/core/transport/chttp2/alpn.c
- src/core/transport/chttp2/bin_encoder.c
- src/core/transport/chttp2/frame_data.c
@@ -323,13 +326,13 @@ filegroups:
- src/core/transport/chttp2/frame_rst_stream.c
- src/core/transport/chttp2/frame_settings.c
- src/core/transport/chttp2/frame_window_update.c
- src/core/transport/chttp2/hpack_encoder.c
- src/core/transport/chttp2/hpack_parser.c
- src/core/transport/chttp2/hpack_table.c
- src/core/transport/chttp2/huffsyms.c
- src/core/transport/chttp2/incoming_metadata.c
- src/core/transport/chttp2/parsing.c
- src/core/transport/chttp2/status_conversion.c
- src/core/transport/chttp2/stream_encoder.c
- src/core/transport/chttp2/stream_lists.c
- src/core/transport/chttp2/stream_map.c
- src/core/transport/chttp2/timeout_encoding.c
@@ -338,7 +341,8 @@ filegroups:
- src/core/transport/chttp2_transport.c
- src/core/transport/connectivity_state.c
- src/core/transport/metadata.c
- src/core/transport/stream_op.c
- src/core/transport/metadata_batch.c
- src/core/transport/static_metadata.c
- src/core/transport/transport.c
- src/core/transport/transport_op_string.c
- name: grpc_test_util_base
@@ -346,7 +350,6 @@ filegroups:
- test/core/end2end/cq_verifier.h
- test/core/end2end/fixtures/proxy.h
- test/core/iomgr/endpoint_tests.h
- test/core/security/oauth2_utils.h
- test/core/util/grpc_profiler.h
- test/core/util/parse_hexstring.h
- test/core/util/port.h
@@ -355,7 +358,6 @@ filegroups:
- test/core/end2end/cq_verifier.c
- test/core/end2end/fixtures/proxy.c
- test/core/iomgr/endpoint_tests.c
- test/core/security/oauth2_utils.c
- test/core/util/grpc_profiler.c
- test/core/util/parse_hexstring.c
- test/core/util/port_posix.c
@@ -371,6 +373,7 @@ libs:
- include/grpc/support/atm_gcc_atomic.h
- include/grpc/support/atm_gcc_sync.h
- include/grpc/support/atm_win32.h
- include/grpc/support/avl.h
- include/grpc/support/cmdline.h
- include/grpc/support/cpu.h
- include/grpc/support/histogram.h
@@ -408,6 +411,7 @@ libs:
- src/core/profiling/basic_timers.c
- src/core/profiling/stap_timers.c
- src/core/support/alloc.c
- src/core/support/avl.c
- src/core/support/cmdline.c
- src/core/support/cpu_iphone.c
- src/core/support/cpu_linux.c
@@ -516,10 +520,12 @@ libs:
language: c
headers:
- test/core/end2end/data/ssl_test_data.h
- test/core/security/oauth2_utils.h
src:
- test/core/end2end/data/server1_cert.c
- test/core/end2end/data/server1_key.c
- test/core/end2end/data/test_root_cert.c
- test/core/security/oauth2_utils.c
deps:
- gpr
- gpr_test_util
@@ -533,7 +539,7 @@ libs:
deps:
- gpr
- gpr_test_util
- grpc
- grpc_unsecure
filegroups:
- grpc_test_util_base
secure: false
@@ -577,6 +583,19 @@ libs:
src:
- test/core/util/reconnect_server.c
deps:
- test_tcp_server
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: test_tcp_server
build: private
language: c
headers:
- test/core/util/test_tcp_server.h
src:
- test/core/util/test_tcp_server.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
@@ -589,10 +608,10 @@ libs:
- src/cpp/common/secure_auth_context.h
- src/cpp/server/secure_server_credentials.h
src:
- src/cpp/client/secure_channel_arguments.cc
- src/cpp/client/secure_credentials.cc
- src/cpp/common/auth_property_iterator.cc
- src/cpp/common/secure_auth_context.cc
- src/cpp/common/secure_channel_arguments.cc
- src/cpp/common/secure_create_auth_context.cc
- src/cpp/server/secure_server_credentials.cc
deps:
@@ -791,6 +810,24 @@ libs:
- winsock
- global
targets:
- name: algorithm_test
build: test
language: c
src:
- test/core/compression/algorithm_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: alloc_test
build: test
language: c
src:
- test/core/support/alloc_test.c
deps:
- gpr_test_util
- gpr
- name: alpn_test
build: test
language: c
@@ -811,21 +848,31 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: chttp2_status_conversion_test
- name: channel_create_test
build: test
language: c
src:
- test/core/transport/chttp2/status_conversion_test.c
- test/core/surface/channel_create_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: chttp2_hpack_encoder_test
build: test
language: c
src:
- test/core/transport/chttp2/hpack_encoder_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: chttp2_stream_encoder_test
- name: chttp2_status_conversion_test
build: test
language: c
src:
- test/core/transport/chttp2/stream_encoder_test.c
- test/core/transport/chttp2/status_conversion_test.c
deps:
- grpc_test_util
- grpc
@@ -841,6 +888,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: chttp2_varint_test
build: test
language: c
src:
- test/core/transport/chttp2/varint_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: compression_test
build: test
language: c
@@ -851,6 +908,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: dns_resolver_test
build: test
language: c
src:
- test/core/client_config/resolvers/dns_resolver_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: dualstack_socket_test
build: test
language: c
@@ -967,6 +1034,14 @@ targets:
src:
- tools/codegen/core/gen_legal_metadata_characters.c
deps: []
- name: gpr_avl_test
build: test
language: c
src:
- test/core/support/avl_test.c
deps:
- gpr_test_util
- gpr
- name: gpr_cmdline_test
build: test
language: c
@@ -1185,6 +1260,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: grpc_invalid_channel_args_test
build: test
language: c
src:
- test/core/surface/invalid_channel_args_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: grpc_json_token_test
build: test
language: c
@@ -1229,16 +1314,6 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: grpc_stream_op_test
build: test
language: c
src:
- test/core/transport/stream_op_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: grpc_verify_jwt
build: tool
language: c
@@ -1303,6 +1378,38 @@ targets:
- mac
- linux
- posix
- name: httpscli_test
build: test
language: c
src:
- test/core/httpcli/httpscli_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
platforms:
- linux
- name: init_test
build: test
language: c
src:
- test/core/surface/init_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: invalid_call_argument_test
build: test
language: c
src:
- test/core/end2end/invalid_call_argument_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: json_rewrite
build: test
run: false
@@ -1322,6 +1429,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: json_stream_error_test
build: test
language: c
src:
- test/core/json/json_stream_error_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: json_test
build: test
language: c
@@ -1376,16 +1493,6 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: multi_init_test
build: test
language: c
src:
- test/core/surface/multi_init_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: multiple_server_queues_test
build: test
language: c
@@ -1424,6 +1531,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: secure_channel_create_test
build: test
language: c
src:
- test/core/surface/secure_channel_create_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: secure_endpoint_test
build: test
language: c
@@ -1434,6 +1551,47 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: server_chttp2_test
build: test
language: c
src:
- test/core/surface/server_chttp2_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: server_test
build: test
language: c
src:
- test/core/surface/server_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: set_initial_connect_string_test
build: test
language: c
src:
- test/core/client_config/set_initial_connect_string_test.c
deps:
- test_tcp_server
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: sockaddr_resolver_test
build: test
language: c
src:
- test/core/client_config/resolvers/sockaddr_resolver_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: sockaddr_utils_test
build: test
language: c
@@ -1444,6 +1602,20 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: socket_utils_test
build: test
language: c
src:
- test/core/iomgr/socket_utils_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
platforms:
- mac
- linux
- posix
- name: tcp_client_posix_test
build: test
language: c
@@ -1536,6 +1708,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: transport_connectivity_state_test
build: test
language: c
src:
- test/core/transport/connectivity_state_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: transport_metadata_test
build: test
language: c
@@ -1660,7 +1842,7 @@ targets:
build: test
language: c++
src:
- test/cpp/client/channel_arguments_test.cc
- test/cpp/common/channel_arguments_test.cc
deps:
- grpc++
- grpc
@@ -1895,6 +2077,20 @@ targets:
- mac
- linux
- posix
- name: metrics_client
build: test
run: false
language: c++
headers:
- test/cpp/util/metrics_server.h
src:
- test/proto/metrics.proto
- test/cpp/interop/metrics_client.cc
deps:
- grpc++
- grpc
- gpr
- grpc++_test_config
- name: mock_test
build: test
language: c++
@@ -2023,6 +2219,7 @@ targets:
- test/cpp/interop/reconnect_interop_server.cc
deps:
- reconnect_server
- test_tcp_server
- grpc++_test_util
- grpc_test_util
- grpc++
@@ -2135,13 +2332,16 @@ targets:
- test/cpp/interop/client_helper.h
- test/cpp/interop/interop_client.h
- test/cpp/interop/stress_interop_client.h
- test/cpp/util/metrics_server.h
src:
- test/proto/empty.proto
- test/proto/messages.proto
- test/proto/metrics.proto
- test/proto/test.proto
- test/cpp/interop/interop_client.cc
- test/cpp/interop/stress_interop_client.cc
- test/cpp/interop/stress_test.cc
- test/cpp/util/metrics_server.cc
deps:
- grpc++_test_util
- grpc_test_util
@@ -2219,11 +2419,11 @@ vspackages:
name: grpc.dependencies.zlib
props: false
redist: true
version: 1.2.8.9
version: 1.2.8.10
- name: grpc.dependencies.openssl
props: true
redist: true
version: 1.0.2.3
version: 1.0.204.1
- name: gflags
props: false
redist: false
@@ -2236,6 +2436,23 @@ node_modules:
- deps:
- grpc
- gpr
headers:
- src/node/ext/byte_buffer.h
- src/node/ext/call.h
- src/node/ext/call_credentials.h
- src/node/ext/channel.h
- src/node/ext/channel_credentials.h
- src/node/ext/completion_queue_async_worker.h
- src/node/ext/server.h
- src/node/ext/server_credentials.h
- src/node/ext/timeval.h
js:
- src/node/index.js
- src/node/src/client.js
- src/node/src/common.js
- src/node/src/credentials.js
- src/node/src/metadata.js
- src/node/src/server.js
name: grpc_node
src:
- src/node/ext/byte_buffer.cc

doc/PROTOCOL-HTTP2.md (2)

@@ -45,7 +45,7 @@ Request-Headers are delivered as HTTP2 headers in HEADERS + CONTINUATION frames.
* **Custom-Metadata** → Binary-Header / ASCII-Header
* **Binary-Header** → {Header-Name "-bin" } {_base64 encoded value_}
* **ASCII-Header** → Header-Name ASCII-Value
* **Header-Name** → 1\*( %x30-39 / %x61-7A / "\_" / "-") ; 0-9 a-z \_ -
* **Header-Name** → 1\*( %x30-39 / %x61-7A / "\_" / "-" / ".") ; 0-9 a-z \_ - .
* **ASCII-Value** → 1\*( %x20-%x7E ) ; space and printable ASCII
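The practical effect of this change is that metadata keys may now contain ".". A minimal sketch, assuming the grpc++ `ClientContext` API (both key names below are illustrative, not part of any gRPC API):
```cpp
#include <string>
#include <grpc++/client_context.h>

void AddCustomMetadata(grpc::ClientContext* context) {
  // ASCII-Header: "." is now a legal character in a header name.
  context->AddMetadata("client.build-id", "42");
  // Binary-Header: a "-bin" suffix marks a value the library
  // base64-encodes on the wire, per the grammar above.
  context->AddMetadata("trace-context-bin", std::string("\x01\x02", 2));
}
```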

doc/grpc-auth-support.md (289)

@@ -1,289 +0,0 @@
#gRPC Authentication support
gRPC is designed to plug in a number of authentication mechanisms. This document
provides a quick overview of the various auth mechanisms supported, discusses
the API with some examples, and concludes with a discussion of extensibility.
More documentation and examples are coming soon!
## Supported auth mechanisms
###SSL/TLS
gRPC has SSL/TLS integration and promotes the use of SSL/TLS to authenticate the
server, and encrypt all the data exchanged between the client and the server.
Optional mechanisms are available for clients to provide certificates to
accomplish mutual authentication.
###OAuth 2.0
gRPC provides a generic mechanism (described below) to attach metadata to
requests and responses. This mechanism can be used to attach OAuth 2.0 Access
Tokens to RPCs being made at a client. Additional support for acquiring Access
Tokens while accessing Google APIs through gRPC is provided for certain auth
flows, demonstrated through code examples below.
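As a hedged sketch of the simplest case, wrapping a token that was already acquired out of band, assuming grpc++'s `AccessTokenCredentials` factory (the header path and exact return type vary by release):
```cpp
#include <string>
#include <grpc++/security/credentials.h>

void UseAccessToken(const std::string& access_token) {
  // Wraps the raw token; RPCs made with this credential carry an
  // "authorization: Bearer <token>" header.
  auto creds = grpc::AccessTokenCredentials(access_token);
  (void)creds;  // combine with channel creation as shown in the API section
}
```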
## API
To reduce complexity and minimize API clutter, gRPC works with a unified concept
of a Credentials object. Users construct gRPC credentials using corresponding
bootstrap credentials (e.g., SSL client certs or Service Account Keys), and use
the credentials while creating a gRPC channel to any server. Depending on the
type of credential supplied, the channel uses the credentials during the initial
SSL/TLS handshake with the server, or uses the credential to generate and
attach Access Tokens to each request being made on the channel.
###SSL/TLS for server authentication and encryption
This is the simplest authentication scenario, where a client just wants to
authenticate the server and encrypt all data.
```cpp
SslCredentialsOptions ssl_opts; // Options to override SSL params, empty by default
// Create the channel credentials object from the SSL options
std::shared_ptr<ChannelCredentials> creds = SslCredentials(ssl_opts);
// Create a channel using the credentials created in the previous step
std::shared_ptr<Channel> channel = CreateChannel(server_name, creds);
// Create a stub on the channel
std::unique_ptr<Greeter::Stub> stub(Greeter::NewStub(channel));
// Make actual RPC calls on the stub.
grpc::Status s = stub->SayHello(&context, request, &response);
```
For advanced use cases such as modifying the root CA or using client certs,
the corresponding options can be set in the SslCredentialsOptions parameter
passed to the factory method.
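A minimal sketch of those advanced cases, assuming the `pem_root_certs`, `pem_private_key`, and `pem_cert_chain` fields of `grpc::SslCredentialsOptions` (`ReadFile` is a local helper, not a gRPC API):
```cpp
#include <fstream>
#include <memory>
#include <sstream>
#include <string>
#include <grpc++/create_channel.h>
#include <grpc++/security/credentials.h>

// Local helper: slurp a PEM file into a string.
std::string ReadFile(const std::string& path) {
  std::ifstream in(path);
  std::ostringstream buf;
  buf << in.rdbuf();
  return buf.str();
}

std::shared_ptr<grpc::Channel> MakeMutualTlsChannel(
    const std::string& server_name) {
  grpc::SslCredentialsOptions ssl_opts;
  ssl_opts.pem_root_certs = ReadFile("ca.pem");       // custom root CA
  ssl_opts.pem_private_key = ReadFile("client.key");  // client key (mutual auth)
  ssl_opts.pem_cert_chain = ReadFile("client.pem");   // client cert (mutual auth)
  return grpc::CreateChannel(server_name, grpc::SslCredentials(ssl_opts));
}
```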
###Authenticating with Google
gRPC applications can use a simple API to create a credential that works in various deployment scenarios.
```cpp
std::shared_ptr<ChannelCredentials> creds = GoogleDefaultCredentials();
// Create a channel, stub and make RPC calls (same as in the previous example)
std::shared_ptr<Channel> channel = CreateChannel(server_name, creds);
std::unique_ptr<Greeter::Stub> stub(Greeter::NewStub(channel));
grpc::Status s = stub->SayHello(&context, request, &response);
```
This credential works for applications using Service Accounts as well as for
applications running in [Google Compute Engine (GCE)](https://cloud.google.com/compute/). In the former case, the
service account’s private keys are loaded from the file named in the environment
variable `GOOGLE_APPLICATION_CREDENTIALS`. The
keys are used to generate bearer tokens that are attached to each outgoing RPC
on the corresponding channel.
For applications running in GCE, a default service account and corresponding
OAuth scopes can be configured during VM setup. At run-time, this credential
handles communication with the authentication systems to obtain OAuth2 access
tokens and attaches them to each outgoing RPC on the corresponding channel.
## Extending gRPC to support other authentication mechanisms
The gRPC protocol is designed with a general mechanism for sending metadata
associated with an RPC. Clients can send metadata at the beginning of an RPC and
servers can send back metadata at the beginning and end of the RPC. This
provides a natural mechanism to support OAuth2 and other authentication
mechanisms that need to attach bearer tokens to individual requests.
In the simplest case, there is a single line of code required on the client
to add a specific token as metadata to an RPC and a corresponding access on
the server to retrieve this piece of metadata. The generation of the token
on the client side and its verification at the server can be done separately.
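A minimal sketch of both halves of that pattern, assuming the grpc++ `ClientContext` and `ServerContext` APIs (the "authorization" key and helper names are illustrative):
```cpp
#include <string>
#include <grpc++/client_context.h>
#include <grpc++/server_context.h>

// Client side: one line to attach a token to a single RPC.
void AttachToken(grpc::ClientContext* context, const std::string& token) {
  context->AddMetadata("authorization", "Bearer " + token);
}

// Server side: the corresponding lookup inside a handler.
std::string ReadToken(const grpc::ServerContext& context) {
  const auto& md = context.client_metadata();
  auto it = md.find("authorization");
  if (it == md.end()) return "";
  return std::string(it->second.data(), it->second.size());
}
```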
A deeper integration can be achieved by plugging in a gRPC credentials implementation for any custom authentication mechanism that needs to attach per-request tokens. gRPC internals also allow switching out SSL/TLS with other encryption mechanisms.
## Examples
These authentication mechanisms will be available in all of gRPC's supported languages.
The following sections demonstrate how authentication and authorization features described above appear in each language: more languages are coming soon.
###SSL/TLS for server authentication and encryption (Ruby)
```ruby
# Base case - No encryption
stub = Helloworld::Greeter::Stub.new('localhost:50051')
...
# With server authentication SSL/TLS
creds = GRPC::Core::Credentials.new(load_certs) # load_certs typically loads a CA roots file
stub = Helloworld::Greeter::Stub.new('localhost:50051', creds: creds)
```
###SSL/TLS for server authentication and encryption (C#)
```csharp
// Base case - No encryption
var channel = new Channel("localhost:50051");
var client = new Greeter.GreeterClient(channel);
...
// With server authentication SSL/TLS
var credentials = new SslCredentials(File.ReadAllText("ca.pem")); // Load a CA file
var channel = new Channel("localhost:50051", credentials);
var client = new Greeter.GreeterClient(channel);
```
###SSL/TLS for server authentication and encryption (Objective-C)
The default for Objective-C is to use SSL/TLS, as that's the most common use case when accessing
remote APIs.
```objective-c
// Base case - With server authentication SSL/TLS
HLWGreeter *client = [[HLWGreeter alloc] initWithHost:@"localhost:50051"];
// Same as using @"https://localhost:50051".
...
// No encryption
HLWGreeter *client = [[HLWGreeter alloc] initWithHost:@"http://localhost:50051"];
// Specifying the HTTP scheme explicitly forces no encryption.
```
###SSL/TLS for server authentication and encryption (Python)
```python
# Base case - No encryption
stub = early_adopter_create_GreeterService_stub('localhost', 50051)
...
# With server authentication SSL/TLS
stub = early_adopter_create_GreeterService_stub(
'localhost', 50051, secure=True, root_certificates=open('ca.pem').read())
...
```
n.b.: the beta API will look different
###Authenticating with Google (Ruby)
```ruby
# Base case - No encryption/authorization
stub = Helloworld::Greeter::Stub.new('localhost:50051')
...
# Authenticating with Google
require 'googleauth' # from http://www.rubydoc.info/gems/googleauth/0.1.0
...
creds = GRPC::Core::Credentials.new(load_certs) # load_certs typically loads a CA roots file
scope = 'https://www.googleapis.com/auth/grpc-testing'
authorization = Google::Auth.get_application_default(scope)
stub = Helloworld::Greeter::Stub.new('localhost:50051',
creds: creds,
update_metadata: authorization.updater_proc)
```
###Authenticating with Google (Node.js)
```node
// Base case - No encryption/authorization
var stub = new helloworld.Greeter('localhost:50051');
...
// Authenticating with Google
var GoogleAuth = require('google-auth-library'); // from https://www.npmjs.com/package/google-auth-library
...
var creds = grpc.Credentials.createSsl(load_certs); // load_certs typically loads a CA roots file
var scope = 'https://www.googleapis.com/auth/grpc-testing';
(new GoogleAuth()).getApplicationDefault(function(err, auth) {
if (auth.createScopedRequired()) {
auth = auth.createScoped(scope);
}
var stub = new helloworld.Greeter('localhost:50051',
{credentials: creds},
grpc.getGoogleAuthDelegate(auth));
});
```
###Authenticating with Google (C#)
```csharp
// Base case - No encryption/authorization
var channel = new Channel("localhost:50051");
var client = new Greeter.GreeterClient(channel);
...
// Authenticating with Google
using Grpc.Auth; // from Grpc.Auth NuGet package
...
var credentials = new SslCredentials(File.ReadAllText("ca.pem")); // Load a CA file
var channel = new Channel("localhost:50051", credentials);
string scope = "https://www.googleapis.com/auth/grpc-testing";
var authorization = GoogleCredential.GetApplicationDefault();
if (authorization.IsCreateScopedRequired)
{
authorization = authorization.CreateScoped(new[] { scope });
}
var client = new Greeter.GreeterClient(channel,
new StubConfiguration(OAuth2InterceptorFactory.Create(authorization)));
```
###Authenticating with Google (PHP)
```php
// Base case - No encryption/authorization
$client = new helloworld\GreeterClient(
new Grpc\BaseStub('localhost:50051', []));
...
// Authenticating with Google
// the environment variable "GOOGLE_APPLICATION_CREDENTIALS" needs to be set
$scope = "https://www.googleapis.com/auth/grpc-testing";
$auth = Google\Auth\ApplicationDefaultCredentials::getCredentials($scope);
$opts = [
'credentials' => Grpc\Credentials::createSsl(file_get_contents('ca.pem')),
'update_metadata' => $auth->getUpdateMetadataFunc(),
];
$client = new helloworld\GreeterClient(
new Grpc\BaseStub('localhost:50051', $opts));
```
###Authenticating with Google (Objective-C)
This example uses the [Google iOS Sign-In library](https://developers.google.com/identity/sign-in/ios/),
but it's easily extrapolated to any other OAuth2 library.
```objective-c
// Base case - No authentication
[client sayHelloWithRequest:request handler:^(HLWHelloReply *response, NSError *error) {
...
}];
...
// Authenticating with Google
// When signing the user in, ask her for the relevant scopes.
GIDSignIn.sharedInstance.scopes = @[@"https://www.googleapis.com/auth/grpc-testing"];
...
#import <ProtoRPC/ProtoRPC.h>
// Create a not-yet-started RPC. We want to set the request headers on this object before starting
// it.
ProtoRPC *call =
[client RPCToSayHelloWithRequest:request handler:^(HLWHelloReply *response, NSError *error) {
...
}];
// Set the access token to be used.
NSString *accessToken = GIDSignIn.sharedInstance.currentUser.authentication.accessToken;
call.requestMetadata[@"Authorization"] = [@"Bearer " stringByAppendingString:accessToken]}];
// Start the RPC.
[call start];
```
You can see a working example app, with a more detailed explanation, [here](examples/objective-c/auth_sample).
### Authenticating with Google (Python)
```python
# Base case - No encryption
stub = early_adopter_create_GreeterService_stub('localhost', 50051)
...
# Authenticating with Google (over an SSL/TLS-authenticated connection)
import oauth2client.client
credentials = oauth2client.client.GoogleCredentials.get_application_default()
scope = 'https://www.googleapis.com/auth/grpc-testing'
scoped_credentials = credentials.create_scoped([scope])
access_token = scoped_credentials.get_access_token().access_token
metadata_transformer = (
lambda x: [('Authorization', 'Bearer {}'.format(access_token))])
stub = early_adopter_create_GreeterService_stub(
'localhost', 50051, secure=True, root_certificates=open('ca.pem').read(),
metadata_transformer=metadata_transformer)
...
```
N.B.: the beta API will look different.

@ -78,6 +78,7 @@ Pod::Spec.new do |s|
'include/grpc/support/atm_gcc_atomic.h',
'include/grpc/support/atm_gcc_sync.h',
'include/grpc/support/atm_win32.h',
'include/grpc/support/avl.h',
'include/grpc/support/cmdline.h',
'include/grpc/support/cpu.h',
'include/grpc/support/histogram.h',
@ -103,6 +104,7 @@ Pod::Spec.new do |s|
'src/core/profiling/basic_timers.c',
'src/core/profiling/stap_timers.c',
'src/core/support/alloc.c',
'src/core/support/avl.c',
'src/core/support/cmdline.c',
'src/core/support/cpu_iphone.c',
'src/core/support/cpu_linux.c',
@ -163,9 +165,10 @@ Pod::Spec.new do |s|
'src/core/channel/context.h',
'src/core/channel/http_client_filter.h',
'src/core/channel/http_server_filter.h',
'src/core/channel/noop_filter.h',
'src/core/channel/subchannel_call_holder.h',
'src/core/client_config/client_config.h',
'src/core/client_config/connector.h',
'src/core/client_config/initial_connect_string.h',
'src/core/client_config/lb_policies/pick_first.h',
'src/core/client_config/lb_policies/round_robin.h',
'src/core/client_config/lb_policy.h',
@ -178,9 +181,8 @@ Pod::Spec.new do |s|
'src/core/client_config/resolvers/sockaddr_resolver.h',
'src/core/client_config/subchannel.h',
'src/core/client_config/subchannel_factory.h',
'src/core/client_config/subchannel_factory_decorators/add_channel_arg.h',
'src/core/client_config/subchannel_factory_decorators/merge_channel_args.h',
'src/core/client_config/uri_parser.h',
'src/core/compression/algorithm_metadata.h',
'src/core/compression/message_compress.h',
'src/core/debug/trace.h',
'src/core/httpcli/format_request.h',
@ -230,7 +232,6 @@ Pod::Spec.new do |s|
'src/core/statistics/census_interface.h',
'src/core/statistics/census_rpc_stats.h',
'src/core/surface/api_trace.h',
'src/core/surface/byte_buffer_queue.h',
'src/core/surface/call.h',
'src/core/surface/call_test_only.h',
'src/core/surface/channel.h',
@ -239,6 +240,7 @@ Pod::Spec.new do |s|
'src/core/surface/init.h',
'src/core/surface/server.h',
'src/core/surface/surface_trace.h',
'src/core/transport/byte_stream.h',
'src/core/transport/chttp2/alpn.h',
'src/core/transport/chttp2/bin_encoder.h',
'src/core/transport/chttp2/frame.h',
@ -248,6 +250,7 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/frame_rst_stream.h',
'src/core/transport/chttp2/frame_settings.h',
'src/core/transport/chttp2/frame_window_update.h',
'src/core/transport/chttp2/hpack_encoder.h',
'src/core/transport/chttp2/hpack_parser.h',
'src/core/transport/chttp2/hpack_table.h',
'src/core/transport/chttp2/http2_errors.h',
@ -255,14 +258,14 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/incoming_metadata.h',
'src/core/transport/chttp2/internal.h',
'src/core/transport/chttp2/status_conversion.h',
'src/core/transport/chttp2/stream_encoder.h',
'src/core/transport/chttp2/stream_map.h',
'src/core/transport/chttp2/timeout_encoding.h',
'src/core/transport/chttp2/varint.h',
'src/core/transport/chttp2_transport.h',
'src/core/transport/connectivity_state.h',
'src/core/transport/metadata.h',
'src/core/transport/stream_op.h',
'src/core/transport/metadata_batch.h',
'src/core/transport/static_metadata.h',
'src/core/transport/transport.h',
'src/core/transport/transport_impl.h',
'src/core/census/aggregation.h',
@ -306,9 +309,11 @@ Pod::Spec.new do |s|
'src/core/channel/connected_channel.c',
'src/core/channel/http_client_filter.c',
'src/core/channel/http_server_filter.c',
'src/core/channel/noop_filter.c',
'src/core/channel/subchannel_call_holder.c',
'src/core/client_config/client_config.c',
'src/core/client_config/connector.c',
'src/core/client_config/default_initial_connect_string.c',
'src/core/client_config/initial_connect_string.c',
'src/core/client_config/lb_policies/pick_first.c',
'src/core/client_config/lb_policies/round_robin.c',
'src/core/client_config/lb_policy.c',
@ -321,8 +326,6 @@ Pod::Spec.new do |s|
'src/core/client_config/resolvers/sockaddr_resolver.c',
'src/core/client_config/subchannel.c',
'src/core/client_config/subchannel_factory.c',
'src/core/client_config/subchannel_factory_decorators/add_channel_arg.c',
'src/core/client_config/subchannel_factory_decorators/merge_channel_args.c',
'src/core/client_config/uri_parser.c',
'src/core/compression/algorithm.c',
'src/core/compression/message_compress.c',
@ -376,7 +379,6 @@ Pod::Spec.new do |s|
'src/core/json/json_writer.c',
'src/core/surface/api_trace.c',
'src/core/surface/byte_buffer.c',
'src/core/surface/byte_buffer_queue.c',
'src/core/surface/byte_buffer_reader.c',
'src/core/surface/call.c',
'src/core/surface/call_details.c',
@ -384,6 +386,7 @@ Pod::Spec.new do |s|
'src/core/surface/channel.c',
'src/core/surface/channel_connectivity.c',
'src/core/surface/channel_create.c',
'src/core/surface/channel_ping.c',
'src/core/surface/completion_queue.c',
'src/core/surface/event_string.c',
'src/core/surface/init.c',
@ -393,6 +396,7 @@ Pod::Spec.new do |s|
'src/core/surface/server_chttp2.c',
'src/core/surface/server_create.c',
'src/core/surface/version.c',
'src/core/transport/byte_stream.c',
'src/core/transport/chttp2/alpn.c',
'src/core/transport/chttp2/bin_encoder.c',
'src/core/transport/chttp2/frame_data.c',
@ -401,13 +405,13 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/frame_rst_stream.c',
'src/core/transport/chttp2/frame_settings.c',
'src/core/transport/chttp2/frame_window_update.c',
'src/core/transport/chttp2/hpack_encoder.c',
'src/core/transport/chttp2/hpack_parser.c',
'src/core/transport/chttp2/hpack_table.c',
'src/core/transport/chttp2/huffsyms.c',
'src/core/transport/chttp2/incoming_metadata.c',
'src/core/transport/chttp2/parsing.c',
'src/core/transport/chttp2/status_conversion.c',
'src/core/transport/chttp2/stream_encoder.c',
'src/core/transport/chttp2/stream_lists.c',
'src/core/transport/chttp2/stream_map.c',
'src/core/transport/chttp2/timeout_encoding.c',
@ -416,7 +420,8 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2_transport.c',
'src/core/transport/connectivity_state.c',
'src/core/transport/metadata.c',
'src/core/transport/stream_op.c',
'src/core/transport/metadata_batch.c',
'src/core/transport/static_metadata.c',
'src/core/transport/transport.c',
'src/core/transport/transport_op_string.c',
'src/core/census/context.c',
@ -457,9 +462,10 @@ Pod::Spec.new do |s|
'src/core/channel/context.h',
'src/core/channel/http_client_filter.h',
'src/core/channel/http_server_filter.h',
'src/core/channel/noop_filter.h',
'src/core/channel/subchannel_call_holder.h',
'src/core/client_config/client_config.h',
'src/core/client_config/connector.h',
'src/core/client_config/initial_connect_string.h',
'src/core/client_config/lb_policies/pick_first.h',
'src/core/client_config/lb_policies/round_robin.h',
'src/core/client_config/lb_policy.h',
@ -472,9 +478,8 @@ Pod::Spec.new do |s|
'src/core/client_config/resolvers/sockaddr_resolver.h',
'src/core/client_config/subchannel.h',
'src/core/client_config/subchannel_factory.h',
'src/core/client_config/subchannel_factory_decorators/add_channel_arg.h',
'src/core/client_config/subchannel_factory_decorators/merge_channel_args.h',
'src/core/client_config/uri_parser.h',
'src/core/compression/algorithm_metadata.h',
'src/core/compression/message_compress.h',
'src/core/debug/trace.h',
'src/core/httpcli/format_request.h',
@ -524,7 +529,6 @@ Pod::Spec.new do |s|
'src/core/statistics/census_interface.h',
'src/core/statistics/census_rpc_stats.h',
'src/core/surface/api_trace.h',
'src/core/surface/byte_buffer_queue.h',
'src/core/surface/call.h',
'src/core/surface/call_test_only.h',
'src/core/surface/channel.h',
@ -533,6 +537,7 @@ Pod::Spec.new do |s|
'src/core/surface/init.h',
'src/core/surface/server.h',
'src/core/surface/surface_trace.h',
'src/core/transport/byte_stream.h',
'src/core/transport/chttp2/alpn.h',
'src/core/transport/chttp2/bin_encoder.h',
'src/core/transport/chttp2/frame.h',
@ -542,6 +547,7 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/frame_rst_stream.h',
'src/core/transport/chttp2/frame_settings.h',
'src/core/transport/chttp2/frame_window_update.h',
'src/core/transport/chttp2/hpack_encoder.h',
'src/core/transport/chttp2/hpack_parser.h',
'src/core/transport/chttp2/hpack_table.h',
'src/core/transport/chttp2/http2_errors.h',
@ -549,14 +555,14 @@ Pod::Spec.new do |s|
'src/core/transport/chttp2/incoming_metadata.h',
'src/core/transport/chttp2/internal.h',
'src/core/transport/chttp2/status_conversion.h',
'src/core/transport/chttp2/stream_encoder.h',
'src/core/transport/chttp2/stream_map.h',
'src/core/transport/chttp2/timeout_encoding.h',
'src/core/transport/chttp2/varint.h',
'src/core/transport/chttp2_transport.h',
'src/core/transport/connectivity_state.h',
'src/core/transport/metadata.h',
'src/core/transport/stream_op.h',
'src/core/transport/metadata_batch.h',
'src/core/transport/static_metadata.h',
'src/core/transport/transport.h',
'src/core/transport/transport_impl.h',
'src/core/census/aggregation.h',

@ -268,7 +268,7 @@ class ClientContext {
/// \return The call's peer URI.
grpc::string peer() const;
/// Get and set census context
/// Get and set census context.
void set_census_context(struct census_context* ccp) { census_context_ = ccp; }
struct census_context* census_context() const {
return census_context_;
@ -280,6 +280,17 @@ class ClientContext {
/// There is no guarantee the call will be cancelled.
void TryCancel();
/// Global Callbacks
///
/// Can be set exactly once per application to install hooks whenever
/// a client context is constructed and destructed.
class GlobalCallbacks {
public:
virtual void DefaultConstructor(ClientContext* context) = 0;
virtual void Destructor(ClientContext* context) = 0;
};
static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
private:
// Disallow copy and assign.
ClientContext(const ClientContext&);
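A minimal sketch of how an application might use the new `ClientContext::GlobalCallbacks` hook; the `CountingHooks` class and its counter are illustrative, not part of this change:
```cpp
#include <grpc++/client_context.h>

// Illustrative hook that tracks how many ClientContext objects are live.
class CountingHooks : public grpc::ClientContext::GlobalCallbacks {
 public:
  CountingHooks() : live_(0) {}
  virtual void DefaultConstructor(grpc::ClientContext* context) { ++live_; }
  virtual void Destructor(grpc::ClientContext* context) { --live_; }

 private:
  int live_;  // sketch only; a real hook would synchronize access
};

// Install exactly once, before any ClientContext is constructed:
// static CountingHooks hooks;
// grpc::ClientContext::SetGlobalCallbacks(&hooks);
```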

@ -34,6 +34,7 @@
#ifndef GRPCXX_IMPL_RPC_SERVICE_METHOD_H
#define GRPCXX_IMPL_RPC_SERVICE_METHOD_H
#include <climits>
#include <functional>
#include <map>
#include <memory>
@ -251,7 +252,11 @@ class RpcService {
void AddMethod(RpcServiceMethod* method) { methods_.emplace_back(method); }
RpcServiceMethod* GetMethod(int i) { return methods_[i].get(); }
int GetMethodCount() const { return methods_.size(); }
int GetMethodCount() const {
// On win x64, int is only 32bit
GPR_ASSERT(methods_.size() <= INT_MAX);
return (int)methods_.size();
}
private:
std::vector<std::unique_ptr<RpcServiceMethod>> methods_;

@ -31,16 +31,21 @@
*
*/
#ifndef GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H
#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H
#ifndef GRPCXX_IMPL_SERVER_BUILDER_OPTION_H
#define GRPCXX_IMPL_SERVER_BUILDER_OPTION_H
#include "src/core/client_config/subchannel_factory.h"
#include <grpc++/support/channel_arguments.h>
/** Takes a subchannel factory, returns a new one that mutates incoming
channel_args by adding a new argument; ownership of input, arg is retained
by the caller. */
grpc_subchannel_factory *grpc_subchannel_factory_add_channel_arg(
grpc_subchannel_factory *input, const grpc_arg *arg);
namespace grpc {
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_ADD_CHANNEL_ARG_H \
*/
/// Interface to pass an option to a \a ServerBuilder.
class ServerBuilderOption {
public:
virtual ~ServerBuilderOption() {}
/// Alter the \a ChannelArguments used to create the gRPC server.
virtual void UpdateArguments(ChannelArguments* args) = 0;
};
} // namespace grpc
#endif // GRPCXX_IMPL_SERVER_BUILDER_OPTION_H
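As a sketch of the intended use, a `ServerBuilderOption` subclass can inject arbitrary channel arguments; the option class and the argument value below are illustrative:
```cpp
#include <grpc++/impl/server_builder_option.h>
#include <grpc++/support/channel_arguments.h>

// Illustrative option: cap message sizes via a channel argument.
class MaxMessageSizeOption : public grpc::ServerBuilderOption {
 public:
  virtual void UpdateArguments(grpc::ChannelArguments* args) {
    args->SetInt("grpc.max_message_length", 4 * 1024 * 1024);
  }
};
```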

@ -46,10 +46,21 @@ class thread {
joined_ = false;
start();
}
template <class T, class U>
thread(void (T::*fptr)(U arg), T *obj, U arg) {
func_ = new thread_function_arg<T, U>(fptr, obj, arg);
joined_ = false;
start();
}
~thread() {
if (!joined_) std::terminate();
delete func_;
}
thread(thread &&other)
: func_(other.func_), thd_(other.thd_), joined_(other.joined_) {
other.joined_ = true;
other.func_ = NULL;
}
void join() {
gpr_thd_join(thd_);
joined_ = true;
@ -80,6 +91,18 @@ class thread {
void (T::*fptr_)();
T *obj_;
};
template <class T, class U>
class thread_function_arg : public thread_function_base {
public:
thread_function_arg(void (T::*fptr)(U arg), T *obj, U arg)
: fptr_(fptr), obj_(obj), arg_(arg) {}
virtual void call() { (obj_->*fptr_)(arg_); }
private:
void (T::*fptr_)(U arg);
T *obj_;
U arg_;
};
thread_function_base *func_;
gpr_thd_id thd_;
bool joined_;
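The new constructor overload lets callers spawn a member function taking one by-value argument on toolchains without C++11 `<thread>`. A hypothetical use, assuming the fallback class is exposed as `grpc::thread` via `<grpc++/impl/thd.h>`:
```cpp
#include <grpc++/impl/thd.h>

// Illustrative worker type; not part of this change.
class Worker {
 public:
  void Run(int shard) { /* process shard */ }
};

void spawn_worker() {
  Worker w;
  grpc::thread t(&Worker::Run, &w, 42);  // runs w.Run(42) on a gpr thread
  t.join();                              // must join before destruction
}
```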

@ -37,14 +37,15 @@
#include <list>
#include <memory>
#include <grpc/compression.h>
#include <grpc++/completion_queue.h>
#include <grpc++/impl/call.h>
#include <grpc++/impl/grpc_library.h>
#include <grpc++/impl/sync.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/support/channel_arguments.h>
#include <grpc++/support/config.h>
#include <grpc++/support/status.h>
#include <grpc/compression.h>
struct grpc_server;
@ -56,6 +57,7 @@ class AsyncGenericService;
class RpcService;
class RpcServiceMethod;
class ServerAsyncStreamingInterface;
class ServerContext;
class ThreadPoolInterface;
/// Models a gRPC server.
@ -84,6 +86,23 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
/// call \a Shutdown for this function to ever return.
void Wait();
/// Global Callbacks
///
/// Can be set exactly once per application to install hooks whenever
/// a server event occurs
class GlobalCallbacks {
public:
virtual ~GlobalCallbacks() {}
/// Called before application callback for each synchronous server request
virtual void PreSynchronousRequest(ServerContext* context) = 0;
/// Called after application callback for each synchronous server request
virtual void PostSynchronousRequest(ServerContext* context) = 0;
};
/// Set the global callback object. Can only be called once. Does not take
/// ownership of callbacks, and expects the pointed to object to be alive
/// until all server objects in the process have been destroyed.
static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
private:
friend class AsyncGenericService;
friend class AsynchronousService;
@ -100,7 +119,7 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
/// \param max_message_size Maximum message length that the channel can
/// receive.
Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned,
int max_message_size, grpc_compression_options compression_options);
int max_message_size, const ChannelArguments& args);
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance.
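A sketch of the server-side hooks (the `RequestTracer` class is illustrative): the callbacks bracket every synchronous request, and the installed object must outlive all servers.
```cpp
#include <grpc++/server.h>

// Illustrative hooks bracketing each synchronous request, e.g. for tracing.
class RequestTracer : public grpc::Server::GlobalCallbacks {
 public:
  virtual void PreSynchronousRequest(grpc::ServerContext* context) {
    // start a trace span here
  }
  virtual void PostSynchronousRequest(grpc::ServerContext* context) {
    // end the trace span here
  }
};

// Install once, before creating any servers:
// static RequestTracer tracer;
// grpc::Server::SetGlobalCallbacks(&tracer);
```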

@ -37,8 +37,9 @@
#include <memory>
#include <vector>
#include <grpc/compression.h>
#include <grpc++/impl/server_builder_option.h>
#include <grpc++/support/config.h>
#include <grpc/compression.h>
namespace grpc {
@ -98,6 +99,8 @@ class ServerBuilder {
compression_options_ = options;
}
void SetOption(std::unique_ptr<ServerBuilderOption> option);
/// Tries to bind \a server to the given \a addr.
///
/// It can be invoked multiple times.
@ -140,6 +143,7 @@ class ServerBuilder {
int max_message_size_;
grpc_compression_options compression_options_;
std::vector<std::unique_ptr<ServerBuilderOption>> options_;
std::vector<std::unique_ptr<NamedService<RpcService>>> services_;
std::vector<std::unique_ptr<NamedService<AsynchronousService>>>
async_services_;
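Tying the additions together, a hypothetical `ServerBuilder::SetOption` call, reusing the illustrative `MaxMessageSizeOption` sketched earlier:
```cpp
#include <memory>
#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

std::unique_ptr<grpc::Server> build_server() {
  grpc::ServerBuilder builder;
  builder.SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
      new MaxMessageSizeOption()));
  builder.AddListeningPort("0.0.0.0:50051",
                           grpc::InsecureServerCredentials());
  return builder.BuildAndStart();
}
```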

@ -80,6 +80,11 @@ class ChannelArguments {
// Generic channel argument setters. Only for advanced use cases.
/// Set an integer argument \a value under \a key.
void SetInt(const grpc::string& key, int value);
// Generic channel argument setter. Only for advanced use cases.
/// Set a pointer argument \a value under \a key. Ownership is not transferred.
void SetPointer(const grpc::string& key, void* value);
/// Set a textual argument \a value under \a key.
void SetString(const grpc::string& key, const grpc::string& value);
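A short sketch of the generic setters; the pointer key below is hypothetical, and since `SetPointer` does not take ownership, the pointee must outlive the channel:
```cpp
#include <grpc++/support/channel_arguments.h>

static int opaque_state;  // illustrative pointee; must outlive the channel

void configure_args(grpc::ChannelArguments* args) {
  args->SetInt("grpc.http2.lookahead_bytes", 128 * 1024);
  args->SetString("grpc.primary_user_agent", "my-app/1.0");
  args->SetPointer("my.opaque_state", &opaque_state);  // hypothetical key
}
```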

@ -79,12 +79,6 @@ int grpc_compression_algorithm_parse(const char *name, size_t name_length,
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
char **name);
/** Returns the compression level corresponding to \a algorithm.
*
* It abort()s for unknown algorithms. */
grpc_compression_level grpc_compression_level_for_algorithm(
grpc_compression_algorithm algorithm);
/** Returns the compression algorithm corresponding to \a level.
*
* It abort()s for unknown levels. */

@ -127,6 +127,17 @@ typedef struct {
/** Initial sequence number for http2 transports */
#define GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER \
"grpc.http2.initial_sequence_number"
/** Amount to read ahead on individual streams. Defaults to 64kb, larger
values can help throughput on high-latency connections.
NOTE: at some point we'd like to auto-tune this, and this parameter
will become a no-op. */
#define GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES "grpc.http2.lookahead_bytes"
/** How much memory to use for hpack decoding */
#define GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER \
"grpc.http2.hpack_table_size.decoder"
/** How much memory to use for hpack encoding */
#define GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER \
"grpc.http2.hpack_table_size.encoder"
/** Default authority to pass if none specified on call construction */
#define GRPC_ARG_DEFAULT_AUTHORITY "grpc.default_authority"
/** Primary user agent: goes at the start of the user-agent metadata
@ -520,6 +531,11 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
const char *method, const char *host,
gpr_timespec deadline, void *reserved);
/** Ping the channel's peer (load-balanced channels will select one sub-channel
to ping); if the channel is not connected, posts a failed ping event on
\a cq. */
void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
void *tag, void *reserved);
/** Pre-register a method/host pair on a channel. */
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
const char *host, void *reserved);
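A sketch combining the new pieces of this hunk: an insecure channel created with the new HTTP/2 tuning arguments, then pinged via `grpc_channel_ping`. It assumes `grpc_init()` has been called; completion-queue draining and cleanup are elided.
```cpp
#include <grpc/grpc.h>

void ping_peer(void) {
  grpc_arg tuning[2];
  tuning[0].type = GRPC_ARG_INTEGER;
  tuning[0].key = (char*)GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES;
  tuning[0].value.integer = 128 * 1024;  // read further ahead per stream
  tuning[1].type = GRPC_ARG_INTEGER;
  tuning[1].key = (char*)GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER;
  tuning[1].value.integer = 8192;        // shrink hpack decoder memory
  grpc_channel_args args = {2, tuning};

  grpc_channel* channel =
      grpc_insecure_channel_create("localhost:50051", &args, NULL);
  grpc_completion_queue* cq = grpc_completion_queue_create(NULL);
  grpc_channel_ping(channel, cq, (void*)1, NULL);
  // grpc_completion_queue_next(...) would surface the ping result here.
}
```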

@ -40,6 +40,12 @@
extern "C" {
#endif
typedef struct gpr_allocation_functions {
void *(*malloc_fn)(size_t size);
void *(*realloc_fn)(void *ptr, size_t size);
void (*free_fn)(void *ptr);
} gpr_allocation_functions;
/* malloc, never returns NULL */
void *gpr_malloc(size_t size);
/* free */
@ -51,6 +57,14 @@ void *gpr_malloc_aligned(size_t size, size_t alignment_log);
/* free memory allocated by gpr_malloc_aligned */
void gpr_free_aligned(void *ptr);
/** Request the family of allocation functions in \a functions be used. NOTE
* that this request will be honored on a *best effort* basis and that no
* guarantees are made about the default functions (e.g., malloc) being called. */
void gpr_set_allocation_functions(gpr_allocation_functions functions);
/** Return the family of allocation functions currently in effect. */
gpr_allocation_functions gpr_get_allocation_functions();
#ifdef __cplusplus
}
#endif
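A minimal sketch of swapping in custom allocators; the wrappers simply forward to libc here, where a real hook might count or pool allocations:
```cpp
#include <stdlib.h>

#include <grpc/support/alloc.h>

static void* my_malloc(size_t size) { return malloc(size); }
static void* my_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
static void my_free(void* ptr) { free(ptr); }

void install_allocators(void) {
  // Field order matches gpr_allocation_functions: malloc, realloc, free.
  gpr_allocation_functions fns = {my_malloc, my_realloc, my_free};
  gpr_set_allocation_functions(fns);  // honored on a best-effort basis
}
```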

@ -0,0 +1,91 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_SUPPORT_AVL_H
#define GRPC_SUPPORT_AVL_H
#include <grpc/support/sync.h>
/** internal node of an AVL tree */
typedef struct gpr_avl_node {
gpr_refcount refs;
void *key;
void *value;
struct gpr_avl_node *left;
struct gpr_avl_node *right;
long height;
} gpr_avl_node;
typedef struct gpr_avl_vtable {
/** destroy a key */
void (*destroy_key)(void *key);
/** copy a key, returning new value */
void *(*copy_key)(void *key);
/** compare key1, key2; return <0 if key1 < key2,
>0 if key1 > key2, 0 if key1 == key2 */
long (*compare_keys)(void *key1, void *key2);
/** destroy a value */
void (*destroy_value)(void *value);
/** copy a value */
void *(*copy_value)(void *value);
} gpr_avl_vtable;
/** "pointer" to an AVL tree - this is a reference
counted object - use gpr_avl_ref to add a reference,
gpr_avl_unref when done with a reference */
typedef struct gpr_avl {
const gpr_avl_vtable *vtable;
gpr_avl_node *root;
} gpr_avl;
/** create an immutable AVL tree */
gpr_avl gpr_avl_create(const gpr_avl_vtable *vtable);
/** add a reference to an existing tree - returns
the tree as a convenience */
gpr_avl gpr_avl_ref(gpr_avl avl);
/** remove a reference to a tree - destroying it if there
are no references left */
void gpr_avl_unref(gpr_avl avl);
/** return a new tree with (key, value) added to avl.
implicitly unrefs avl to allow easy chaining.
if key exists in avl, the new tree's key entry is updated
(i.e. a duplicate is not created) */
gpr_avl gpr_avl_add(gpr_avl avl, void *key, void *value);
/** return a new tree with key deleted */
gpr_avl gpr_avl_remove(gpr_avl avl, void *key);
/** lookup key, and return the associated value.
does not mutate avl.
returns NULL if key is not found. */
void *gpr_avl_get(gpr_avl avl, void *key);
#endif
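A sketch of the API with a string-to-string map; the vtable duplicates keys and values with `gpr_strdup` and releases them with `gpr_free` (assumed to live in `<grpc/support/string_util.h>` and `<grpc/support/alloc.h>`):
```cpp
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/avl.h>
#include <grpc/support/string_util.h>

static void destroy_str(void* p) { gpr_free(p); }
static void* copy_str(void* p) { return gpr_strdup((const char*)p); }
static long compare_str(void* a, void* b) {
  return strcmp((const char*)a, (const char*)b);
}

// Field order: destroy_key, copy_key, compare_keys, destroy_value, copy_value.
static const gpr_avl_vtable str_vtable = {destroy_str, copy_str, compare_str,
                                          destroy_str, copy_str};

void avl_example(void) {
  gpr_avl map = gpr_avl_create(&str_vtable);
  // gpr_avl_add consumes its input reference and returns a new tree.
  map = gpr_avl_add(map, gpr_strdup("color"), gpr_strdup("blue"));
  const char* value = (const char*)gpr_avl_get(map, (void*)"color");  // "blue"
  (void)value;
  gpr_avl_unref(map);
}
```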

@ -83,8 +83,12 @@ void gpr_cmdline_add_string(gpr_cmdline *cl, const char *name, const char *help,
void gpr_cmdline_on_extra_arg(
gpr_cmdline *cl, const char *name, const char *help,
void (*on_extra_arg)(void *user_data, const char *arg), void *user_data);
/* Parse the command line */
void gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv);
/* Enable surviving failure: default behavior is to exit the process */
void gpr_cmdline_set_survive_failure(gpr_cmdline *cl);
/* Parse the command line; returns 1 on success. On failure it either exits
the process (by default) or returns 0 if gpr_cmdline_set_survive_failure()
has been called */
int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv);
/* Destroy the parser */
void gpr_cmdline_destroy(gpr_cmdline *cl);
/* Get a string describing usage */
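With survive-failure enabled, a bad command line now reports failure instead of exiting the process; a sketch:
```cpp
#include <grpc/support/cmdline.h>

int parse_flags(int argc, char** argv) {
  int port = 50051;
  gpr_cmdline* cl = gpr_cmdline_create("example server");
  gpr_cmdline_add_int(cl, "port", "Port to listen on", &port);
  gpr_cmdline_set_survive_failure(cl);         // don't exit on bad input
  int ok = gpr_cmdline_parse(cl, argc, argv);  // 1 on success, 0 on failure
  gpr_cmdline_destroy(cl);
  return ok;
}
```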

@ -181,9 +181,9 @@
#ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#define GPR_FORBID_UNREACHABLE_CODE
#define GPR_MSG_IOVLEN_TYPE int
#if TARGET_OS_IPHONE
#define GPR_FORBID_UNREACHABLE_CODE 1
#define GPR_PLATFORM_STRING "ios"
#define GPR_CPU_IPHONE 1
#define GPR_PTHREAD_TLS 1
@ -252,6 +252,11 @@
#define GPR_PLATFORM_STRING "unknown"
#endif
#ifdef GPR_GCOV
#undef GPR_FORBID_UNREACHABLE_CODE
#define GPR_FORBID_UNREACHABLE_CODE 1
#endif
/* For a common case, assume that the platform has a C99-like stdint.h */
#include <stdint.h>
@ -337,7 +342,7 @@ typedef uintptr_t gpr_uintptr;
#endif
#endif
#ifdef GPR_FORBID_UNREACHABLE_CODE
#if GPR_FORBID_UNREACHABLE_CODE
#define GPR_UNREACHABLE_CODE(STATEMENT)
#else
#define GPR_UNREACHABLE_CODE(STATEMENT) \

@ -144,6 +144,9 @@ gpr_slice gpr_slice_from_copied_string(const char *source);
memcpy(slice->data, source, len); */
gpr_slice gpr_slice_from_copied_buffer(const char *source, size_t len);
/* Create a slice pointing to constant memory */
gpr_slice gpr_slice_from_static_string(const char *source);
/* Return a result slice derived from s, which shares a ref count with s, where
result.data==s.data+begin, and result.length==end-begin.
The ref count of s is increased by one.
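The new constructor wraps constant memory without copying or reference counting; the caller guarantees the bytes outlive every use of the slice. A one-line sketch:
```cpp
#include <grpc/support/slice.h>

// No copy, no refcount: the literal's storage is static, so this is safe.
gpr_slice greeting = gpr_slice_from_static_string("hello");
```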

@ -89,6 +89,11 @@ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst);
/* remove n bytes from the end of a slice buffer */
void gpr_slice_buffer_trim_end(gpr_slice_buffer *src, size_t n,
gpr_slice_buffer *garbage);
/* move the first n bytes of src into dst */
void gpr_slice_buffer_move_first(gpr_slice_buffer *src, size_t n,
gpr_slice_buffer *dst);
/* take the first slice in the slice buffer */
gpr_slice gpr_slice_buffer_take_first(gpr_slice_buffer *src);
#ifdef __cplusplus
}
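A sketch of the two new operations working together on a framing-style workload (the sizes are illustrative):
```cpp
#include <grpc/support/slice_buffer.h>

void split_frame(gpr_slice_buffer* src) {
  gpr_slice_buffer header;
  gpr_slice_buffer_init(&header);
  if (src->length >= 16) {
    gpr_slice_buffer_move_first(src, 16, &header);  // first 16 bytes -> header
  }
  if (src->count > 0) {
    gpr_slice first = gpr_slice_buffer_take_first(src);  // pop leading slice
    gpr_slice_unref(first);
  }
  gpr_slice_buffer_destroy(&header);
}
```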

@ -61,8 +61,8 @@ typedef enum {
} gpr_clock_type;
typedef struct gpr_timespec {
time_t tv_sec;
int tv_nsec;
gpr_int64 tv_sec;
gpr_int32 tv_nsec;
/** Against which clock was this time measured? (or GPR_TIMESPAN if
this is a relative time measure) */
gpr_clock_type clock_type;

@ -39,7 +39,8 @@
"minimist": "^1.1.0",
"mocha": "~1.21.0",
"mocha-jenkins-reporter": "^0.1.9",
"mustache": "^2.0.0"
"mustache": "^2.0.0",
"poisson-process": "^0.2.1"
},
"engines": {
"node": ">=0.10.13"

@ -36,14 +36,12 @@
#include <grpc/census.h>
#define GRPC_CENSUS_MAX_ON_THE_WIRE_TAG_BYTES 2048
/* census_context is the in-memory representation of information needed to
* maintain tracing, RPC statistics and resource usage. */
struct census_context {
gpr_uint64 op_id; /* Operation identifier - unique per-context */
gpr_uint64 trace_id; /* Globally unique trace identifier */
/* TODO(aveitch) Add census tags:
const census_tag_set *tags;
*/
census_tag_set *tags; /* Opaque data structure for census tags. */
};
#endif /* GRPC_INTERNAL_CORE_CENSUS_CONTEXT_H */

@ -36,16 +36,17 @@
#include <stdio.h>
#include <string.h>
#include "src/core/channel/channel_stack.h"
#include "src/core/channel/noop_filter.h"
#include "src/core/statistics/census_interface.h"
#include "src/core/statistics/census_rpc_stats.h"
#include <grpc/census.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include <grpc/support/time.h>
#include "src/core/channel/channel_stack.h"
#include "src/core/statistics/census_interface.h"
#include "src/core/statistics/census_rpc_stats.h"
#include "src/core/transport/static_metadata.h"
typedef struct call_data {
census_op_id op_id;
census_context *ctxt;
@ -53,28 +54,22 @@ typedef struct call_data {
int error;
/* recv callback */
grpc_stream_op_buffer *recv_ops;
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *on_done_recv;
grpc_closure finish_recv;
} call_data;
typedef struct channel_data {
grpc_mdstr *path_str; /* pointer to meta data str with key == ":path" */
} channel_data;
typedef struct channel_data { gpr_uint8 unused; } channel_data;
static void extract_and_annotate_method_tag(grpc_stream_op_buffer *sopb,
static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
call_data *calld,
channel_data *chand) {
grpc_linked_mdelem *m;
size_t i;
for (i = 0; i < sopb->nops; i++) {
grpc_stream_op *op = &sopb->ops[i];
if (op->type != GRPC_OP_METADATA) continue;
for (m = op->data.metadata.list.head; m != NULL; m = m->next) {
if (m->md->key == chand->path_str) {
gpr_log(GPR_DEBUG, "%s",
(const char *)GPR_SLICE_START_PTR(m->md->value->slice));
/* Add method tag here */
}
for (m = md->list.head; m != NULL; m = m->next) {
if (m->md->key == GRPC_MDSTR_PATH) {
gpr_log(GPR_DEBUG, "%s",
(const char *)GPR_SLICE_START_PTR(m->md->value->slice));
/* Add method tag here */
}
}
}
@ -83,8 +78,8 @@ static void client_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (op->send_ops) {
extract_and_annotate_method_tag(op->send_ops, calld, chand);
if (op->send_initial_metadata) {
extract_and_annotate_method_tag(op->send_initial_metadata, calld, chand);
}
}
@ -101,7 +96,7 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (success) {
extract_and_annotate_method_tag(calld->recv_ops, calld, chand);
extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
}
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
@ -109,32 +104,33 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
if (op->recv_ops) {
if (op->recv_initial_metadata) {
/* substitute our callback for the op callback */
calld->recv_ops = op->recv_ops;
calld->on_done_recv = op->on_done_recv;
op->on_done_recv = calld->on_done_recv;
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->on_done_recv = op->on_complete;
op->on_complete = &calld->finish_recv;
}
}
static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* TODO(ctiller): this code fails. I don't know why. I expect it's
incomplete, and someone should look at it soon.
call_data *calld = elem->call_data;
GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0)); */
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
static void client_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
if (initial_op) client_mutate_op(elem, initial_op);
}
static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
@ -146,15 +142,13 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static void server_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
/* TODO(hongyu): call census_tracing_start_op here. */
grpc_closure_init(d->on_done_recv, server_on_done_recv, elem);
if (initial_op) server_mutate_op(elem, initial_op);
grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
}
static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
@ -165,33 +159,26 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
}
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(chand != NULL);
chand->path_str = grpc_mdstr_from_string(mdctx, ":path");
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(chand != NULL);
if (chand->path_str != NULL) {
GRPC_MDSTR_UNREF(chand->path_str);
}
}
const grpc_channel_filter grpc_client_census_filter = {
client_start_transport_op, grpc_channel_next_op,
sizeof(call_data), client_init_call_elem,
client_destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "census-client"};
client_start_transport_op, grpc_channel_next_op, sizeof(call_data),
client_init_call_elem, grpc_call_stack_ignore_set_pollset,
client_destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, grpc_call_next_get_peer, "census-client"};
const grpc_channel_filter grpc_server_census_filter = {
server_start_transport_op, grpc_channel_next_op,
sizeof(call_data), server_init_call_elem,
server_destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "census-server"};
server_start_transport_op, grpc_channel_next_op, sizeof(call_data),
server_init_call_elem, grpc_call_stack_ignore_set_pollset,
server_destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, grpc_call_next_get_peer, "census-server"};

@ -101,20 +101,23 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
return CALL_ELEMS_FROM_STACK(call_stack) + index;
}
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
grpc_iomgr_cb_func destroy, void *destroy_arg,
const grpc_channel_filter **filters,
size_t filter_count, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context,
grpc_channel_stack *stack) {
size_t filter_count,
const grpc_channel_args *channel_args,
const char *name, grpc_channel_stack *stack) {
size_t call_size =
ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
grpc_channel_element *elems;
grpc_channel_element_args args;
char *user_data;
size_t i;
stack->count = filter_count;
GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
name);
elems = CHANNEL_ELEMS_FROM_STACK(stack);
user_data =
((char *)elems) +
@ -122,11 +125,13 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
/* init per-filter data */
for (i = 0; i < filter_count; i++) {
args.channel_stack = stack;
args.channel_args = channel_args;
args.is_first = i == 0;
args.is_last = i == (filter_count - 1);
elems[i].filter = filters[i];
elems[i].channel_data = user_data;
elems[i].filter->init_channel_elem(exec_ctx, &elems[i], master, args,
metadata_context, i == 0,
i == (filter_count - 1));
elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
}
@ -151,33 +156,63 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
}
void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
grpc_channel_stack *channel_stack,
grpc_channel_stack *channel_stack, int initial_refs,
grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context,
const void *transport_server_data,
grpc_transport_stream_op *initial_op,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
size_t count = channel_stack->count;
grpc_call_element *call_elems;
char *user_data;
size_t i;
call_stack->count = count;
GRPC_STREAM_REF_INIT(&call_stack->refcount, initial_refs, destroy,
destroy_arg, "CALL_STACK");
call_elems = CALL_ELEMS_FROM_STACK(call_stack);
user_data = ((char *)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
for (i = 0; i < count; i++) {
args.call_stack = call_stack;
args.server_transport_data = transport_server_data;
args.context = context;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i],
transport_server_data, initial_op);
call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i], &args);
user_data +=
ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
}
void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_call_stack *call_stack,
grpc_pollset *pollset) {
size_t count = call_stack->count;
grpc_call_element *call_elems;
char *user_data;
size_t i;
call_elems = CALL_ELEMS_FROM_STACK(call_stack);
user_data = ((char *)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
for (i = 0; i < count; i++) {
call_elems[i].filter->set_pollset(exec_ctx, &call_elems[i], pollset);
user_data +=
ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
}
void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_pollset *pollset) {}
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack) {
grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;

@ -51,6 +51,22 @@
typedef struct grpc_channel_element grpc_channel_element;
typedef struct grpc_call_element grpc_call_element;
typedef struct grpc_channel_stack grpc_channel_stack;
typedef struct grpc_call_stack grpc_call_stack;
typedef struct {
grpc_channel_stack *channel_stack;
const grpc_channel_args *channel_args;
int is_first;
int is_last;
} grpc_channel_element_args;
typedef struct {
grpc_call_stack *call_stack;
const void *server_transport_data;
grpc_call_context_element *context;
} grpc_call_element_args;
/* Channel filters specify:
1. the amount of memory needed in the channel & call (via the sizeof_XXX
members)
@ -84,8 +100,9 @@ typedef struct {
transport and is on the server. Most filters want to ignore this
argument. */
void (*init_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op);
grpc_call_element_args *args);
void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset);
/* Destroy per call data.
The filter does not need to do any chaining */
void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
@ -99,9 +116,7 @@ typedef struct {
useful for asserting correct configuration by upper layer code.
The filter does not need to do any chaining */
void (*init_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_channel *master, const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last);
grpc_channel_element_args *args);
/* Destroy per channel data.
The filter does not need to do any chaining */
void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
@ -132,16 +147,24 @@ struct grpc_call_element {
/* A channel stack tracks a set of related filters for one channel, and
guarantees they live within a single malloc() allocation */
typedef struct {
struct grpc_channel_stack {
grpc_stream_refcount refcount;
size_t count;
/* Memory required for a call stack (computed at channel stack
initialization) */
size_t call_stack_size;
} grpc_channel_stack;
};
/* A call stack tracks a set of related filters for one call, and guarantees
they live within a single malloc() allocation */
typedef struct { size_t count; } grpc_call_stack;
struct grpc_call_stack {
/* shared refcount for this channel stack.
MUST be the first element: the underlying code calls destroy
with the address of the refcount, but higher layers prefer to think
about the address of the call stack itself. */
grpc_stream_refcount refcount;
size_t count;
};
/* Get a channel element given a channel stack and its index */
grpc_channel_element *grpc_channel_stack_element(grpc_channel_stack *stack,
@ -156,12 +179,11 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i);
size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
size_t filter_count);
/* Initialize a channel stack given some filters */
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
grpc_iomgr_cb_func destroy, void *destroy_arg,
const grpc_channel_filter **filters,
size_t filter_count, grpc_channel *master,
const grpc_channel_args *args,
grpc_mdctx *metadata_context,
grpc_channel_stack *stack);
size_t filter_count, const grpc_channel_args *args,
const char *name, grpc_channel_stack *stack);
/* Destroy a channel stack */
void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
grpc_channel_stack *stack);
@ -170,13 +192,44 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
expected to be NULL on a client, or an opaque transport owned pointer on the
server. */
void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
grpc_channel_stack *channel_stack,
grpc_channel_stack *channel_stack, int initial_refs,
grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context,
const void *transport_server_data,
grpc_transport_stream_op *initial_op,
grpc_call_stack *call_stack);
/* Set a pollset for a call stack: must occur before the first op is started */
void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_call_stack *call_stack,
grpc_pollset *pollset);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_CALL_STACK_REF(call_stack, reason) \
grpc_stream_ref(&(call_stack)->refcount, reason)
#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
grpc_stream_unref(exec_ctx, &(call_stack)->refcount, reason)
#define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
grpc_stream_ref(&(channel_stack)->refcount, reason)
#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
grpc_stream_unref(exec_ctx, &(channel_stack)->refcount, reason)
#else
#define GRPC_CALL_STACK_REF(call_stack, reason) \
grpc_stream_ref(&(call_stack)->refcount)
#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
grpc_stream_unref(exec_ctx, &(call_stack)->refcount)
#define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
grpc_stream_ref(&(channel_stack)->refcount)
#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
grpc_stream_unref(exec_ctx, &(channel_stack)->refcount)
#endif
/* Destroy a call stack */
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack);
/* Ignore set pollset - used by filters to implement the set_pollset method
if they don't care about pollsets at all. Does nothing. */
void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_pollset *pollset);
/* Call the next operation in a call stack */
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op *op);

@ -43,6 +43,7 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/subchannel_call_holder.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
@ -51,20 +52,13 @@
/* Client channel implementation */
typedef struct call_data call_data;
typedef grpc_subchannel_call_holder call_data;
typedef struct client_channel_channel_data {
/** metadata context for this channel */
grpc_mdctx *mdctx;
/** resolver for this channel */
grpc_resolver *resolver;
/** have we started resolving this channel */
int started_resolving;
/** master channel - the grpc_channel instance that ultimately owns
this channel_data via its channel stack.
We occasionally use this to bump the refcount on the master channel
to keep ourselves alive through an asynchronous operation. */
grpc_channel *master;
/** mutex protecting client configuration, including all
variables below in this data structure */
@ -82,8 +76,10 @@ typedef struct client_channel_channel_data {
grpc_connectivity_state_tracker state_tracker;
/** when an lb_policy arrives, should we try to exit idle */
int exit_idle_when_lb_policy_arrives;
/** pollset_set of interested parties in a new connection */
grpc_pollset_set pollset_set;
/** owning stack */
grpc_channel_stack *owning_stack;
/** interested parties */
grpc_pollset_set interested_parties;
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
@ -98,360 +94,20 @@ typedef struct {
grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
typedef enum {
CALL_CREATED,
CALL_WAITING_FOR_SEND,
CALL_WAITING_FOR_CONFIG,
CALL_WAITING_FOR_PICK,
CALL_WAITING_FOR_CALL,
CALL_ACTIVE,
CALL_CANCELLED
} call_state;
struct call_data {
/* owning element */
grpc_call_element *elem;
gpr_mu mu_state;
call_state state;
gpr_timespec deadline;
grpc_subchannel *picked_channel;
grpc_closure async_setup_task;
grpc_transport_stream_op waiting_op;
/* our child call stack */
grpc_subchannel_call *subchannel_call;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
};
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
grpc_transport_stream_op *new_op)
GRPC_MUST_USE_RESULT;
static void handle_op_after_cancellation(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (op->send_ops) {
grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
}
if (op->recv_ops) {
char status[GPR_LTOA_MIN_BUFSIZE];
grpc_metadata_batch mdb;
gpr_ltoa(GRPC_STATUS_CANCELLED, status);
calld->status.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-status", status);
calld->details.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-message", "Cancelled");
calld->status.prev = calld->details.next = NULL;
calld->status.next = &calld->details;
calld->details.prev = &calld->status;
mdb.list.head = &calld->status;
mdb.list.tail = &calld->details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
}
if (op->on_consumed) {
op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
}
}
typedef struct {
grpc_closure closure;
grpc_call_element *elem;
} waiting_call;
static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation);
static void continue_with_pick(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
waiting_call *wc = arg;
call_data *calld = wc->elem->call_data;
perform_transport_stream_op(exec_ctx, wc->elem, &calld->waiting_op, 1);
gpr_free(wc);
}
static void add_to_lb_policy_wait_queue_locked_state_config(
grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
waiting_call *wc = gpr_malloc(sizeof(*wc));
grpc_closure_init(&wc->closure, continue_with_pick, wc);
wc->elem = elem;
grpc_closure_list_add(&chand->waiting_for_config_closures, &wc->closure, 1);
}
static int is_empty(void *p, int len) {
char *ptr = p;
int i;
for (i = 0; i < len; i++) {
if (ptr[i] != 0) return 0;
}
return 1;
}
static void started_call_locked(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
grpc_transport_stream_op op;
int have_waiting;
if (calld->state == CALL_CANCELLED && calld->subchannel_call != NULL) {
memset(&op, 0, sizeof(op));
op.cancel_with_status = GRPC_STATUS_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, &op);
} else if (calld->state == CALL_WAITING_FOR_CALL) {
have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
if (calld->subchannel_call != NULL) {
calld->state = CALL_ACTIVE;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
&calld->waiting_op);
}
} else {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
}
}
} else {
GPR_ASSERT(calld->state == CALL_CANCELLED);
gpr_mu_unlock(&calld->mu_state);
}
}
static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
gpr_mu_lock(&calld->mu_state);
started_call_locked(exec_ctx, arg, iomgr_success);
}
static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
grpc_pollset *pollset;
grpc_subchannel_call_create_status call_creation_status;
GPR_TIMER_BEGIN("picked_target", 0);
if (calld->picked_channel == NULL) {
/* treat this like a cancellation */
calld->waiting_op.cancel_with_status = GRPC_STATUS_UNAVAILABLE;
perform_transport_stream_op(exec_ctx, calld->elem, &calld->waiting_op, 1);
} else {
gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_CANCELLED) {
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
} else {
GPR_ASSERT(calld->state == CALL_WAITING_FOR_PICK);
calld->state = CALL_WAITING_FOR_CALL;
pollset = calld->waiting_op.bind_pollset;
grpc_closure_init(&calld->async_setup_task, started_call, calld);
call_creation_status = grpc_subchannel_create_call(
exec_ctx, calld->picked_channel, pollset, &calld->subchannel_call,
&calld->async_setup_task);
if (call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
started_call_locked(exec_ctx, calld, iomgr_success);
} else {
gpr_mu_unlock(&calld->mu_state);
}
}
}
GPR_TIMER_END("picked_target", 0);
}
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
grpc_transport_stream_op *new_op) {
call_data *calld = elem->call_data;
grpc_closure *consumed_op = NULL;
grpc_transport_stream_op *waiting_op = &calld->waiting_op;
GPR_ASSERT((waiting_op->send_ops != NULL) + (new_op->send_ops != NULL) <= 1);
GPR_ASSERT((waiting_op->recv_ops != NULL) + (new_op->recv_ops != NULL) <= 1);
if (new_op->send_ops != NULL) {
waiting_op->send_ops = new_op->send_ops;
waiting_op->is_last_send = new_op->is_last_send;
waiting_op->on_done_send = new_op->on_done_send;
}
if (new_op->recv_ops != NULL) {
waiting_op->recv_ops = new_op->recv_ops;
waiting_op->recv_state = new_op->recv_state;
waiting_op->on_done_recv = new_op->on_done_recv;
}
if (new_op->on_consumed != NULL) {
if (waiting_op->on_consumed != NULL) {
consumed_op = waiting_op->on_consumed;
}
waiting_op->on_consumed = new_op->on_consumed;
}
if (new_op->cancel_with_status != GRPC_STATUS_OK) {
waiting_op->cancel_with_status = new_op->cancel_with_status;
}
return consumed_op;
}
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
char *result;
gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_ACTIVE) {
subchannel_call = calld->subchannel_call;
GRPC_SUBCHANNEL_CALL_REF(subchannel_call, "get_peer");
gpr_mu_unlock(&calld->mu_state);
result = grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "get_peer");
return result;
} else {
gpr_mu_unlock(&calld->mu_state);
return grpc_channel_get_target(chand->master);
}
}
static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
grpc_lb_policy *lb_policy;
grpc_transport_stream_op op2;
GPR_TIMER_BEGIN("perform_transport_stream_op", 0);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
gpr_mu_lock(&calld->mu_state);
switch (calld->state) {
case CALL_ACTIVE:
GPR_ASSERT(!continuation);
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
grpc_subchannel_call_process_op(exec_ctx, subchannel_call, op);
break;
case CALL_CANCELLED:
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
break;
case CALL_WAITING_FOR_SEND:
GPR_ASSERT(!continuation);
grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
if (!calld->waiting_op.send_ops &&
calld->waiting_op.cancel_with_status == GRPC_STATUS_OK) {
gpr_mu_unlock(&calld->mu_state);
break;
}
*op = calld->waiting_op;
memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
continuation = 1;
/* fall through */
case CALL_WAITING_FOR_CONFIG:
case CALL_WAITING_FOR_PICK:
case CALL_WAITING_FOR_CALL:
if (!continuation) {
if (op->cancel_with_status != GRPC_STATUS_OK) {
calld->state = CALL_CANCELLED;
op2 = calld->waiting_op;
memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
if (op->on_consumed) {
calld->waiting_op.on_consumed = op->on_consumed;
op->on_consumed = NULL;
} else if (op2.on_consumed) {
calld->waiting_op.on_consumed = op2.on_consumed;
op2.on_consumed = NULL;
}
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
handle_op_after_cancellation(exec_ctx, elem, &op2);
} else {
grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
gpr_mu_unlock(&calld->mu_state);
}
break;
}
/* fall through */
case CALL_CREATED:
if (op->cancel_with_status != GRPC_STATUS_OK) {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
} else {
calld->waiting_op = *op;
if (op->send_ops == NULL) {
/* need to have some send ops before we can select the
lb target */
calld->state = CALL_WAITING_FOR_SEND;
gpr_mu_unlock(&calld->mu_state);
} else {
gpr_mu_lock(&chand->mu_config);
lb_policy = chand->lb_policy;
if (lb_policy) {
grpc_transport_stream_op *waiting_op = &calld->waiting_op;
grpc_pollset *bind_pollset = waiting_op->bind_pollset;
grpc_metadata_batch *initial_metadata =
&waiting_op->send_ops->ops[0].data.metadata;
GRPC_LB_POLICY_REF(lb_policy, "pick");
gpr_mu_unlock(&chand->mu_config);
calld->state = CALL_WAITING_FOR_PICK;
GPR_ASSERT(waiting_op->bind_pollset);
GPR_ASSERT(waiting_op->send_ops);
GPR_ASSERT(waiting_op->send_ops->nops >= 1);
GPR_ASSERT(waiting_op->send_ops->ops[0].type == GRPC_OP_METADATA);
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->async_setup_task, picked_target, calld);
grpc_lb_policy_pick(exec_ctx, lb_policy, bind_pollset,
initial_metadata, &calld->picked_channel,
&calld->async_setup_task);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick");
} else if (chand->resolver != NULL) {
calld->state = CALL_WAITING_FOR_CONFIG;
add_to_lb_policy_wait_queue_locked_state_config(elem);
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
chand->started_resolving = 1;
grpc_resolver_next(exec_ctx, chand->resolver,
&chand->incoming_configuration,
&chand->on_config_changed);
}
gpr_mu_unlock(&chand->mu_config);
gpr_mu_unlock(&calld->mu_state);
} else {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&chand->mu_config);
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
}
}
}
break;
}
GPR_TIMER_END("perform_transport_stream_op", 0);
return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
}
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
perform_transport_stream_op(exec_ctx, elem, op, 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
}
static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
@ -460,10 +116,18 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
static void on_lb_policy_state_changed_locked(
grpc_exec_ctx *exec_ctx, lb_policy_connectivity_watcher *w) {
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for a stale policy */
if (w->lb_policy != w->chand->lb_policy) return;
grpc_connectivity_state_set(exec_ctx, &w->chand->state_tracker, w->state,
if (publish_state == GRPC_CHANNEL_FATAL_FAILURE &&
w->chand->resolver != NULL) {
publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
w->chand->lb_policy = NULL;
}
grpc_connectivity_state_set(exec_ctx, &w->chand->state_tracker, publish_state,
"lb_changed");
if (w->state != GRPC_CHANNEL_FATAL_FAILURE) {
watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
@ -478,7 +142,7 @@ static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
on_lb_policy_state_changed_locked(exec_ctx, w);
gpr_mu_unlock(&w->chand->mu_config);
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->chand->master, "watch_lb_policy");
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
gpr_free(w);
}
@ -486,7 +150,7 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
GRPC_CHANNEL_INTERNAL_REF(chand->master, "watch_lb_policy");
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
@ -518,6 +182,11 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
chand->incoming_configuration = NULL;
if (lb_policy != NULL) {
grpc_pollset_set_add_pollset_set(exec_ctx, &lb_policy->interested_parties,
&chand->interested_parties);
}
gpr_mu_lock(&chand->mu_config);
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
@@ -539,7 +208,7 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
watch_lb_policy(exec_ctx, chand, lb_policy, state);
}
gpr_mu_unlock(&chand->mu_config);
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
&chand->on_config_changed);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel-next");
@@ -561,7 +230,9 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
if (old_lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
&old_lb_policy->interested_parties,
&chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
}
@@ -569,20 +240,22 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
}
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->master, "resolver");
GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
}
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
channel_data *chand = elem->channel_data;
grpc_resolver *destroy_resolver = NULL;
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
GPR_ASSERT(op->set_accept_stream == NULL);
if (op->bind_pollset != NULL) {
grpc_pollset_set_add_pollset(exec_ctx, &chand->interested_parties,
op->bind_pollset);
}
gpr_mu_lock(&chand->mu_config);
if (op->on_connectivity_state_change != NULL) {
@@ -593,11 +266,14 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
op->connectivity_state = NULL;
}
if (op->send_ping != NULL) {
if (chand->lb_policy == NULL) {
grpc_exec_ctx_enqueue(exec_ctx, op->send_ping, 0);
} else {
grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
}
op->send_ping = NULL;
}
if (op->disconnect && chand->resolver != NULL) {
@@ -606,7 +282,9 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
destroy_resolver = chand->resolver;
chand->resolver = NULL;
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
&chand->lb_policy->interested_parties,
&chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
chand->lb_policy = NULL;
}
@@ -617,79 +295,119 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_resolver_shutdown(exec_ctx, destroy_resolver);
GRPC_RESOLVER_UNREF(exec_ctx, destroy_resolver, "channel");
}
}
typedef struct {
grpc_metadata_batch *initial_metadata;
grpc_connected_subchannel **connected_subchannel;
grpc_closure *on_ready;
grpc_call_element *elem;
grpc_closure closure;
} continue_picking_args;
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready);
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, int success) {
continue_picking_args *cpa = arg;
if (!success) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
} else if (cpa->connected_subchannel == NULL) {
/* cancelled, do nothing */
} else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->connected_subchannel, cpa->on_ready)) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 1);
}
gpr_free(cpa);
}
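/* continue_picking fires once the resolver delivers a configuration: on
   failure the parked pick completes unsuccessfully; if the pick was cancelled
   in the meantime (connected_subchannel cleared below) it is dropped; and if
   the retried pick now completes synchronously, on_ready is scheduled. */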
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready) {
grpc_call_element *elem = elemp;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
continue_picking_args *cpa;
grpc_closure *closure;
GPR_ASSERT(connected_subchannel);
gpr_mu_lock(&chand->mu_config);
if (initial_metadata == NULL) {
if (chand->lb_policy != NULL) {
grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
connected_subchannel);
}
for (closure = chand->waiting_for_config_closures.head; closure != NULL;
closure = grpc_closure_next(closure)) {
cpa = closure->cb_arg;
if (cpa->connected_subchannel == connected_subchannel) {
cpa->connected_subchannel = NULL;
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
}
}
gpr_mu_unlock(&chand->mu_config);
return 1;
}
if (chand->lb_policy != NULL) {
int r =
grpc_lb_policy_pick(exec_ctx, chand->lb_policy, calld->pollset,
initial_metadata, connected_subchannel, on_ready);
gpr_mu_unlock(&chand->mu_config);
return r;
}
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = 1;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, chand->resolver,
&chand->incoming_configuration,
&chand->on_config_changed);
}
cpa = gpr_malloc(sizeof(*cpa));
cpa->initial_metadata = initial_metadata;
cpa->connected_subchannel = connected_subchannel;
cpa->on_ready = on_ready;
cpa->elem = elem;
grpc_closure_init(&cpa->closure, continue_picking, cpa);
grpc_closure_list_add(&chand->waiting_for_config_closures, &cpa->closure, 1);
gpr_mu_unlock(&chand->mu_config);
return 0;
}
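/* Pick resolution above, in order: a NULL initial_metadata cancels an
   in-flight pick (both in the LB policy and in the wait-for-config queue);
   an available LB policy handles the pick directly; otherwise the resolver
   is kicked if idle and the pick is parked on waiting_for_config_closures as
   a continue_picking closure. */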
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {
grpc_subchannel_call_holder_init(elem->call_data, cc_pick_subchannel, elem,
args->call_stack);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call;
/* if the call got activated, we need to destroy the child stack also, and
remove it from the in-flight requests tracked by the child_entry we
picked */
gpr_mu_lock(&calld->mu_state);
switch (calld->state) {
case CALL_ACTIVE:
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "client_channel");
break;
case CALL_CREATED:
case CALL_CANCELLED:
gpr_mu_unlock(&calld->mu_state);
break;
case CALL_WAITING_FOR_PICK:
case CALL_WAITING_FOR_CONFIG:
case CALL_WAITING_FOR_CALL:
case CALL_WAITING_FOR_SEND:
GPR_UNREACHABLE_CODE(return );
}
grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
gpr_mu_init(&chand->mu_config);
grpc_closure_init(&chand->on_config_changed, cc_on_config_changed, chand);
chand->owning_stack = args->channel_stack;
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
grpc_pollset_set_init(&chand->interested_parties);
}
/* Destructor for channel_data */
@@ -702,17 +420,26 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
}
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
&chand->lb_policy->interested_parties,
&chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(&chand->interested_parties);
gpr_mu_destroy(&chand->mu_config);
}
static void cc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset) {
call_data *calld = elem->call_data;
calld->pollset = pollset;
}
const grpc_channel_filter grpc_client_channel_filter = {
cc_start_transport_stream_op, cc_start_transport_op, sizeof(call_data),
init_call_elem, cc_set_pollset, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, cc_get_peer, "client-channel",
};
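/* For reference, a minimal pass-through filter against the same vtable
   layout as above: stream-op handler, transport-op handler, call_data size,
   call constructor, set_pollset hook, call destructor, channel_data size,
   channel constructor, channel destructor, get_peer, name. This is an
   illustrative sketch, not part of this change; it only assumes the generic
   helpers that appear elsewhere in this diff (grpc_call_next_op,
   grpc_channel_next_op, grpc_call_stack_ignore_set_pollset,
   grpc_call_next_get_peer). */
typedef struct { char unused; } example_call_data;
typedef struct { char unused; } example_channel_data;
static void example_init_call_elem(grpc_exec_ctx *exec_ctx,
                                   grpc_call_element *elem,
                                   grpc_call_element_args *args) {}
static void example_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem) {}
static void example_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_channel_element *elem,
                                      grpc_channel_element_args *args) {}
static void example_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                         grpc_channel_element *elem) {}
static const grpc_channel_filter example_passthrough_filter = {
    grpc_call_next_op, grpc_channel_next_op, sizeof(example_call_data),
    example_init_call_elem, grpc_call_stack_ignore_set_pollset,
    example_destroy_call_elem, sizeof(example_channel_data),
    example_init_channel_elem, example_destroy_channel_elem,
    grpc_call_next_get_peer, "example-passthrough"};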
void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
@@ -728,7 +455,7 @@ void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
chand->exit_idle_when_lb_policy_arrives) {
chand->started_resolving = 1;
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
&chand->on_config_changed);
}
@@ -747,7 +474,7 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
} else {
chand->exit_idle_when_lb_policy_arrives = 1;
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
chand->started_resolving = 1;
grpc_resolver_next(exec_ctx, chand->resolver,
&chand->incoming_configuration,
@@ -759,32 +486,39 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
return out;
}
typedef struct {
channel_data *chand;
grpc_pollset *pollset;
grpc_closure *on_complete;
grpc_closure my_closure;
} external_connectivity_watcher;
static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
external_connectivity_watcher *w = arg;
grpc_closure *follow_up = w->on_complete;
grpc_pollset_set_del_pollset(exec_ctx, &w->chand->interested_parties,
w->pollset);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
"external_connectivity_watcher");
gpr_free(w);
follow_up->cb(exec_ctx, follow_up->cb_arg, iomgr_success);
}
void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete) {
channel_data *chand = elem->channel_data;
external_connectivity_watcher *w = gpr_malloc(sizeof(*w));
w->chand = chand;
w->pollset = pollset;
w->on_complete = on_complete;
grpc_pollset_set_add_pollset(exec_ctx, &chand->interested_parties, pollset);
grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
"external_connectivity_watcher");
gpr_mu_lock(&chand->mu_config);
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &chand->state_tracker, state, &w->my_closure);
gpr_mu_unlock(&chand->mu_config);
}
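/* Lifecycle note: the watcher above pins both the channel stack and the
   caller's pollset for the duration of the watch (STACK_REF + add_pollset
   before registering), and on_external_watch_complete releases them in the
   reverse order before invoking the user's closure. */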
grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
return &chand->pollset_set;
}
void grpc_client_channel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_pollset *pollset) {
channel_data *chand = elem->channel_data;
grpc_pollset_set_add_pollset(exec_ctx, &chand->pollset_set, pollset);
}
void grpc_client_channel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_pollset *pollset) {
channel_data *chand = elem->channel_data;
grpc_pollset_set_del_pollset(exec_ctx, &chand->pollset_set, pollset);
}

@@ -57,17 +57,7 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete);
grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
grpc_channel_element *elem);
void grpc_client_channel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *channel,
grpc_pollset *pollset);
void grpc_client_channel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *channel,
grpc_pollset *pollset);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_CHANNEL_H */

@@ -39,6 +39,7 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/client_channel.h"
#include "src/core/channel/compress_filter.h"
#include "src/core/channel/subchannel_call_holder.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
@@ -52,23 +53,18 @@
/** Microchannel (uchannel) implementation: a lightweight channel without any
* load-balancing mechanisms meant for communication from within the core. */
typedef struct call_data call_data;
typedef struct client_uchannel_channel_data {
grpc_channel_stack *owning_stack;
/** connectivity state being tracked */
grpc_connectivity_state_tracker state_tracker;
/** the subchannel wrapped by the microchannel */
grpc_connected_subchannel *connected_subchannel;
/** the callback used to stay subscribed to subchannel connectivity
* notifications */
@@ -80,85 +76,7 @@ typedef struct client_uchannel_channel_data {
gpr_mu mu_state;
} channel_data;
typedef enum {
CALL_CREATED,
CALL_WAITING_FOR_SEND,
CALL_WAITING_FOR_CALL,
CALL_ACTIVE,
CALL_CANCELLED
} call_state;
struct call_data {
/* owning element */
grpc_call_element *elem;
gpr_mu mu_state;
call_state state;
gpr_timespec deadline;
grpc_closure async_setup_task;
grpc_transport_stream_op waiting_op;
/* our child call stack */
grpc_subchannel_call *subchannel_call;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
};
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
grpc_transport_stream_op *new_op)
GRPC_MUST_USE_RESULT;
static void handle_op_after_cancellation(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (op->send_ops) {
grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
op->on_done_send->cb(exec_ctx, op->on_done_send->cb_arg, 0);
}
if (op->recv_ops) {
char status[GPR_LTOA_MIN_BUFSIZE];
grpc_metadata_batch mdb;
gpr_ltoa(GRPC_STATUS_CANCELLED, status);
calld->status.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-status", status);
calld->details.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-message", "Cancelled");
calld->status.prev = calld->details.next = NULL;
calld->status.next = &calld->details;
calld->details.prev = &calld->status;
mdb.list.head = &calld->status;
mdb.list.tail = &calld->details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv->cb(exec_ctx, op->on_done_recv->cb_arg, 1);
}
if (op->on_consumed) {
op->on_consumed->cb(exec_ctx, op->on_consumed->cb_arg, 0);
}
}
typedef struct {
grpc_closure closure;
grpc_call_element *elem;
} waiting_call;
static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation);
static int is_empty(void *p, int len) {
char *ptr = p;
int i;
for (i = 0; i < len; i++) {
if (ptr[i] != 0) return 0;
}
return 1;
}
typedef grpc_subchannel_call_holder call_data;
static void monitor_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
@@ -166,206 +84,20 @@ static void monitor_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
chand->subchannel_connectivity,
"uchannel_monitor_subchannel");
grpc_subchannel_notify_on_state_change(exec_ctx, chand->subchannel,
&chand->subchannel_connectivity,
&chand->connectivity_cb);
}
static void started_call_locked(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
grpc_transport_stream_op op;
int have_waiting;
if (calld->state == CALL_CANCELLED && iomgr_success == 0) {
have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
}
} else if (calld->state == CALL_CANCELLED && calld->subchannel_call != NULL) {
memset(&op, 0, sizeof(op));
op.cancel_with_status = GRPC_STATUS_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, &op);
} else if (calld->state == CALL_WAITING_FOR_CALL) {
have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
if (calld->subchannel_call != NULL) {
calld->state = CALL_ACTIVE;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
&calld->waiting_op);
}
} else {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
if (have_waiting) {
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
}
}
} else {
GPR_ASSERT(calld->state == CALL_CANCELLED);
gpr_mu_unlock(&calld->mu_state);
have_waiting = !is_empty(&calld->waiting_op, sizeof(calld->waiting_op));
if (have_waiting) {
handle_op_after_cancellation(exec_ctx, calld->elem, &calld->waiting_op);
}
}
}
static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
gpr_mu_lock(&calld->mu_state);
started_call_locked(exec_ctx, arg, iomgr_success);
}
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
grpc_transport_stream_op *new_op) {
call_data *calld = elem->call_data;
grpc_closure *consumed_op = NULL;
grpc_transport_stream_op *waiting_op = &calld->waiting_op;
GPR_ASSERT((waiting_op->send_ops != NULL) + (new_op->send_ops != NULL) <= 1);
GPR_ASSERT((waiting_op->recv_ops != NULL) + (new_op->recv_ops != NULL) <= 1);
if (new_op->send_ops != NULL) {
waiting_op->send_ops = new_op->send_ops;
waiting_op->is_last_send = new_op->is_last_send;
waiting_op->on_done_send = new_op->on_done_send;
}
if (new_op->recv_ops != NULL) {
waiting_op->recv_ops = new_op->recv_ops;
waiting_op->recv_state = new_op->recv_state;
waiting_op->on_done_recv = new_op->on_done_recv;
}
if (new_op->on_consumed != NULL) {
if (waiting_op->on_consumed != NULL) {
consumed_op = waiting_op->on_consumed;
}
waiting_op->on_consumed = new_op->on_consumed;
}
if (new_op->cancel_with_status != GRPC_STATUS_OK) {
waiting_op->cancel_with_status = new_op->cancel_with_status;
}
return consumed_op;
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, chand->connected_subchannel, NULL,
&chand->subchannel_connectivity, &chand->connectivity_cb);
}
static char *cuc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
char *result;
gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_ACTIVE) {
subchannel_call = calld->subchannel_call;
GRPC_SUBCHANNEL_CALL_REF(subchannel_call, "get_peer");
gpr_mu_unlock(&calld->mu_state);
result = grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "get_peer");
return result;
} else {
gpr_mu_unlock(&calld->mu_state);
return grpc_channel_get_target(chand->master);
}
}
static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op,
int continuation) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
grpc_transport_stream_op op2;
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
gpr_mu_lock(&calld->mu_state);
/* make sure the wrapped subchannel has been set (see
* grpc_client_uchannel_set_subchannel) */
GPR_ASSERT(chand->subchannel != NULL);
switch (calld->state) {
case CALL_ACTIVE:
GPR_ASSERT(!continuation);
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
grpc_subchannel_call_process_op(exec_ctx, subchannel_call, op);
break;
case CALL_CANCELLED:
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
break;
case CALL_WAITING_FOR_SEND:
GPR_ASSERT(!continuation);
grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
if (!calld->waiting_op.send_ops &&
calld->waiting_op.cancel_with_status == GRPC_STATUS_OK) {
gpr_mu_unlock(&calld->mu_state);
break;
}
*op = calld->waiting_op;
memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
continuation = 1;
/* fall through */
case CALL_WAITING_FOR_CALL:
if (!continuation) {
if (op->cancel_with_status != GRPC_STATUS_OK) {
calld->state = CALL_CANCELLED;
op2 = calld->waiting_op;
memset(&calld->waiting_op, 0, sizeof(calld->waiting_op));
if (op->on_consumed) {
calld->waiting_op.on_consumed = op->on_consumed;
op->on_consumed = NULL;
} else if (op2.on_consumed) {
calld->waiting_op.on_consumed = op2.on_consumed;
op2.on_consumed = NULL;
}
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
handle_op_after_cancellation(exec_ctx, elem, &op2);
grpc_subchannel_cancel_waiting_call(exec_ctx, chand->subchannel, 1);
} else {
grpc_exec_ctx_enqueue(exec_ctx, merge_into_waiting_op(elem, op), 1);
gpr_mu_unlock(&calld->mu_state);
}
break;
}
/* fall through */
case CALL_CREATED:
if (op->cancel_with_status != GRPC_STATUS_OK) {
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&calld->mu_state);
handle_op_after_cancellation(exec_ctx, elem, op);
} else {
calld->waiting_op = *op;
if (op->send_ops == NULL) {
calld->state = CALL_WAITING_FOR_SEND;
gpr_mu_unlock(&calld->mu_state);
} else {
grpc_subchannel_call_create_status call_creation_status;
grpc_pollset *pollset = calld->waiting_op.bind_pollset;
calld->state = CALL_WAITING_FOR_CALL;
grpc_closure_init(&calld->async_setup_task, started_call, calld);
call_creation_status = grpc_subchannel_create_call(
exec_ctx, chand->subchannel, pollset, &calld->subchannel_call,
&calld->async_setup_task);
if (call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
started_call_locked(exec_ctx, calld, 1);
} else {
gpr_mu_unlock(&calld->mu_state);
}
}
}
break;
}
return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
}
static void cuc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
}
static void cuc_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -392,64 +124,39 @@ static void cuc_start_transport_op(grpc_exec_ctx *exec_ctx,
}
}
static int cuc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready) {
channel_data *chand = arg;
GPR_ASSERT(initial_metadata != NULL);
*connected_subchannel = chand->connected_subchannel;
return 1;
}
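/* Unlike cc_pick_subchannel in client_channel.c, the microchannel pick never
   waits: it always resolves synchronously to the single wrapped
   connected_subchannel, so the on_ready closure is never needed. */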
/* Constructor for call_data */
static void cuc_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {
grpc_subchannel_call_holder_init(elem->call_data, cuc_pick_subchannel,
elem->channel_data, args->call_stack);
}
/* Destructor for call_data */
static void cuc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
grpc_subchannel_call *subchannel_call;
/* if the call got activated, we need to destroy the child stack also, and
remove it from the in-flight requests tracked by the child_entry we
picked */
gpr_mu_lock(&calld->mu_state);
switch (calld->state) {
case CALL_ACTIVE:
subchannel_call = calld->subchannel_call;
gpr_mu_unlock(&calld->mu_state);
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, subchannel_call, "client_uchannel");
break;
case CALL_CREATED:
case CALL_CANCELLED:
gpr_mu_unlock(&calld->mu_state);
break;
case CALL_WAITING_FOR_CALL:
case CALL_WAITING_FOR_SEND:
GPR_UNREACHABLE_CODE(return );
}
grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
}
/* Constructor for channel_data */
static void cuc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
memset(chand, 0, sizeof(*chand));
grpc_closure_init(&chand->connectivity_cb, monitor_subchannel, chand);
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
chand->owning_stack = args->channel_stack;
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_uchannel");
gpr_mu_init(&chand->mu_state);
@@ -459,40 +166,41 @@ static void cuc_init_channel_elem(grpc_exec_ctx *exec_ctx,
static void cuc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
/* cancel subscription */
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, chand->connected_subchannel, NULL, NULL,
&chand->connectivity_cb);
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
gpr_mu_destroy(&chand->mu_state);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, chand->connected_subchannel,
"uchannel");
}
static void cuc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset) {
call_data *calld = elem->call_data;
calld->pollset = pollset;
}
const grpc_channel_filter grpc_client_uchannel_filter = {
cuc_start_transport_stream_op, cuc_start_transport_op, sizeof(call_data),
cuc_init_call_elem, cuc_set_pollset, cuc_destroy_call_elem,
sizeof(channel_data), cuc_init_channel_elem, cuc_destroy_channel_elem,
cuc_get_peer, "client-uchannel",
};
grpc_connectivity_state grpc_client_uchannel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
channel_data *chand = elem->channel_data;
grpc_connectivity_state out;
out = grpc_connectivity_state_check(&chand->state_tracker);
gpr_mu_lock(&chand->mu_state);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
GRPC_CHANNEL_CONNECTING,
"uchannel_connecting_changed");
chand->subchannel_connectivity = out;
grpc_subchannel_notify_on_state_change(exec_ctx, chand->subchannel,
&chand->subchannel_connectivity,
&chand->connectivity_cb);
}
out = grpc_connectivity_state_check(&chand->state_tracker);
gpr_mu_unlock(&chand->mu_state);
return out;
}
void grpc_client_uchannel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete) {
channel_data *chand = elem->channel_data;
gpr_mu_lock(&chand->mu_state);
@@ -501,45 +209,14 @@ void grpc_client_uchannel_watch_connectivity_state(
gpr_mu_unlock(&chand->mu_state);
}
grpc_pollset_set *grpc_client_uchannel_get_connecting_pollset_set(
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
grpc_channel_element *parent_elem;
gpr_mu_lock(&chand->mu_state);
parent_elem = grpc_channel_stack_last_element(grpc_channel_get_channel_stack(
grpc_subchannel_get_master(chand->subchannel)));
gpr_mu_unlock(&chand->mu_state);
return grpc_client_channel_get_connecting_pollset_set(parent_elem);
}
void grpc_client_uchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_pollset *pollset) {
grpc_pollset_set *master_pollset_set =
grpc_client_uchannel_get_connecting_pollset_set(elem);
grpc_pollset_set_add_pollset(exec_ctx, master_pollset_set, pollset);
}
void grpc_client_uchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_pollset *pollset) {
grpc_pollset_set *master_pollset_set =
grpc_client_uchannel_get_connecting_pollset_set(elem);
grpc_pollset_set_del_pollset(exec_ctx, master_pollset_set, pollset);
}
grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
grpc_channel_args *args) {
grpc_channel *channel = NULL;
#define MAX_FILTERS 3
const grpc_channel_filter *filters[MAX_FILTERS];
grpc_mdctx *mdctx = grpc_subchannel_get_mdctx(subchannel);
grpc_channel *master = grpc_subchannel_get_master(subchannel);
char *target = grpc_channel_get_target(master);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
size_t n = 0;
grpc_mdctx_ref(mdctx);
if (grpc_channel_args_is_census_enabled(args)) {
filters[n++] = &grpc_client_census_filter;
}
@@ -547,20 +224,20 @@ grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
filters[n++] = &grpc_client_uchannel_filter;
GPR_ASSERT(n <= MAX_FILTERS);
channel =
grpc_channel_create_from_filters(&exec_ctx, NULL, filters, n, args, 1);
gpr_free(target);
return channel;
}
void grpc_client_uchannel_set_connected_subchannel(
grpc_channel *uchannel, grpc_connected_subchannel *connected_subchannel) {
grpc_channel_element *elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(uchannel));
channel_data *chand = elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
gpr_mu_lock(&chand->mu_state);
chand->connected_subchannel = connected_subchannel;
GRPC_CONNECTED_SUBCHANNEL_REF(connected_subchannel, "uchannel");
gpr_mu_unlock(&chand->mu_state);
}

@@ -48,23 +48,13 @@ grpc_connectivity_state grpc_client_uchannel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
void grpc_client_uchannel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete);
grpc_pollset_set *grpc_client_uchannel_get_connecting_pollset_set(
grpc_channel_element *elem);
void grpc_client_uchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *channel,
grpc_pollset *pollset);
void grpc_client_uchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *channel,
grpc_pollset *pollset);
grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
grpc_channel_args *args);
void grpc_client_uchannel_set_connected_subchannel(
grpc_channel *uchannel, grpc_connected_subchannel *connected_subchannel);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_MICROCHANNEL_H */

@@ -39,61 +39,44 @@
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
#include "src/core/channel/compress_filter.h"
#include "src/core/channel/channel_args.h"
#include "src/core/profiling/timers.h"
#include "src/core/channel/compress_filter.h"
#include "src/core/compression/algorithm_metadata.h"
#include "src/core/compression/message_compress.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#include "src/core/transport/static_metadata.h"
typedef struct call_data {
gpr_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
/** If true, contents of \a compression_algorithm are authoritative */
int has_compression_algorithm;
grpc_transport_stream_op send_op;
gpr_uint32 send_length;
gpr_uint32 send_flags;
gpr_slice incoming_slice;
grpc_slice_buffer_stream replacement_stream;
grpc_closure *post_send;
grpc_closure send_done;
grpc_closure got_slice;
} call_data;
typedef struct channel_data {
/** Metadata key for the incoming (requested) compression algorithm */
grpc_mdstr *mdstr_request_compression_algorithm_key;
/** Metadata key for the outgoing (used) compression algorithm */
grpc_mdstr *mdstr_outgoing_compression_algorithm_key;
/** Metadata key for the accepted encodings */
grpc_mdstr *mdstr_compression_capabilities_key;
/** Precomputed metadata elements for all available compression algorithms */
grpc_mdelem *mdelem_compression_algorithms[GRPC_COMPRESS_ALGORITHMS_COUNT];
/** Precomputed metadata elements for the accepted encodings */
grpc_mdelem *mdelem_accept_encoding;
/** The default, channel-level, compression algorithm */
grpc_compression_algorithm default_compression_algorithm;
/** Compression options for the channel */
grpc_compression_options compression_options;
/** Supported compression algorithms */
gpr_uint32 supported_compression_algorithms;
} channel_data;
/** Compress \a slices in place using \a algorithm. Returns 1 if compression did
* actually happen, 0 otherwise (for example if the compressed output size was
* larger than the raw input).
*/
static int compress_send_sb(grpc_compression_algorithm algorithm,
gpr_slice_buffer *slices) {
int did_compress;
gpr_slice_buffer tmp;
gpr_slice_buffer_init(&tmp);
did_compress = grpc_msg_compress(algorithm, slices, &tmp);
if (did_compress) {
gpr_slice_buffer_swap(slices, &tmp);
}
gpr_slice_buffer_destroy(&tmp);
return did_compress;
}
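/* An illustrative (hypothetical) caller of the helper above, showing the
   in-place contract: on success the buffer holds the compressed slices, on
   failure it is left untouched so the raw bytes can still be sent. */
static int example_maybe_compress(gpr_slice_buffer *payload) {
  if (compress_send_sb(GRPC_COMPRESS_GZIP, payload)) {
    return 1; /* payload was swapped to the gzip output; flag the message */
  }
  return 0; /* gzip would not have shrunk the payload; send it raw */
}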
/** For each \a md element from the incoming metadata, filter out the entry for
* "grpc-encoding", using its value to populate the call data's
* compression_algorithm field. */
@@ -102,7 +85,7 @@ static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
if (md->key == GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST) {
const char *md_c_str = grpc_mdstr_as_c_string(md->value);
if (!grpc_compression_algorithm_parse(md_c_str, strlen(md_c_str),
&calld->compression_algorithm)) {
@@ -127,7 +110,9 @@ static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
return md;
}
static int skip_compression(grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
if (calld->has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
return 1;
@@ -138,169 +123,126 @@ static int skip_compression(channel_data *channeld, call_data *calld) {
return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
}
/** Assembles a new grpc_stream_op_buffer with the compressed slices, modifying
* the associated GRPC_OP_BEGIN_MESSAGE accordingly (new compressed length,
* flags indicating compression is in effect) and replaces \a send_ops with it.
* */
static void finish_compressed_sopb(grpc_stream_op_buffer *send_ops,
grpc_call_element *elem) {
size_t i;
/** Filter initial metadata */
static void process_send_initial_metadata(
grpc_call_element *elem, grpc_metadata_batch *initial_metadata) {
call_data *calld = elem->call_data;
int new_slices_added = 0; /* GPR_FALSE */
grpc_metadata_batch metadata;
grpc_stream_op_buffer new_send_ops;
grpc_sopb_init(&new_send_ops);
for (i = 0; i < send_ops->nops; i++) {
grpc_stream_op *sop = &send_ops->ops[i];
switch (sop->type) {
case GRPC_OP_BEGIN_MESSAGE:
GPR_ASSERT(calld->slices.length <= GPR_UINT32_MAX);
grpc_sopb_add_begin_message(
&new_send_ops, (gpr_uint32)calld->slices.length,
sop->data.begin_message.flags | GRPC_WRITE_INTERNAL_COMPRESS);
break;
case GRPC_OP_SLICE:
/* Once we reach the slices section of the original buffer, simply add
* all the new (compressed) slices. We obviously want to do this only
* once, hence the "new_slices_added" guard. */
if (!new_slices_added) {
size_t j;
for (j = 0; j < calld->slices.count; ++j) {
grpc_sopb_add_slice(&new_send_ops,
gpr_slice_ref(calld->slices.slices[j]));
}
new_slices_added = 1; /* GPR_TRUE */
}
break;
case GRPC_OP_METADATA:
/* move the metadata to the new buffer. */
grpc_metadata_batch_move(&metadata, &sop->data.metadata);
grpc_sopb_add_metadata(&new_send_ops, metadata);
break;
case GRPC_NO_OP:
break;
}
channel_data *channeld = elem->channel_data;
/* Parse incoming request for compression. If any, it'll be available
* at calld->compression_algorithm */
grpc_metadata_batch_filter(initial_metadata, compression_md_filter, elem);
if (!calld->has_compression_algorithm) {
/* If no algorithm was found in the metadata and we aren't
* exceptionally skipping compression, fall back to the channel
* default */
calld->compression_algorithm = channeld->default_compression_algorithm;
calld->has_compression_algorithm = 1; /* GPR_TRUE */
}
/* hint compression algorithm */
grpc_metadata_batch_add_tail(
initial_metadata, &calld->compression_algorithm_storage,
grpc_compression_encoding_mdelem(calld->compression_algorithm));
/* convey supported compression algorithms */
grpc_metadata_batch_add_tail(initial_metadata,
&calld->accept_encoding_storage,
GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(
channeld->supported_compression_algorithms));
}
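/* Net effect on the outgoing initial metadata (illustrative values, assuming
   gzip was selected and identity/deflate/gzip are the enabled algorithms):
     grpc-encoding: gzip
     grpc-accept-encoding: identity,deflate,gzip */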
/** Filter's "main" function, called for any incoming grpc_transport_stream_op
* instance that holds a non-zero number of send operations, accessible to this
* function in \a send_ops. */
static void process_send_ops(grpc_call_element *elem,
grpc_stream_op_buffer *send_ops) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
size_t i;
int did_compress = 0;
static void continue_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem);
/* In streaming calls, we need to reset the previously accumulated slices */
static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, int success) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
gpr_slice_buffer_reset_and_unref(&calld->slices);
for (i = 0; i < send_ops->nops; ++i) {
grpc_stream_op *sop = &send_ops->ops[i];
switch (sop->type) {
case GRPC_OP_BEGIN_MESSAGE:
/* buffer up slices until we've processed all the expected ones (as
* given by GRPC_OP_BEGIN_MESSAGE) */
calld->remaining_slice_bytes = sop->data.begin_message.length;
if (sop->data.begin_message.flags & GRPC_WRITE_NO_COMPRESS) {
calld->has_compression_algorithm = 1; /* GPR_TRUE */
calld->compression_algorithm = GRPC_COMPRESS_NONE;
}
break;
case GRPC_OP_METADATA:
if (!calld->written_initial_metadata) {
/* Parse incoming request for compression. If any, it'll be available
* at calld->compression_algorithm */
grpc_metadata_batch_filter(&(sop->data.metadata),
compression_md_filter, elem);
if (!calld->has_compression_algorithm) {
/* If no algorithm was found in the metadata and we aren't
* exceptionally skipping compression, fall back to the channel
* default */
calld->compression_algorithm =
channeld->default_compression_algorithm;
calld->has_compression_algorithm = 1; /* GPR_TRUE */
}
/* hint compression algorithm */
grpc_metadata_batch_add_tail(
&(sop->data.metadata), &calld->compression_algorithm_storage,
GRPC_MDELEM_REF(channeld->mdelem_compression_algorithms
[calld->compression_algorithm]));
/* convey supported compression algorithms */
grpc_metadata_batch_add_tail(
&(sop->data.metadata), &calld->accept_encoding_storage,
GRPC_MDELEM_REF(channeld->mdelem_accept_encoding));
calld->written_initial_metadata = 1; /* GPR_TRUE */
}
break;
case GRPC_OP_SLICE:
if (skip_compression(channeld, calld)) continue;
GPR_ASSERT(calld->remaining_slice_bytes > 0);
/* Increase input ref count, gpr_slice_buffer_add takes ownership. */
gpr_slice_buffer_add(&calld->slices, gpr_slice_ref(sop->data.slice));
GPR_ASSERT(GPR_SLICE_LENGTH(sop->data.slice) <=
calld->remaining_slice_bytes);
calld->remaining_slice_bytes -=
(gpr_uint32)GPR_SLICE_LENGTH(sop->data.slice);
if (calld->remaining_slice_bytes == 0) {
did_compress =
compress_send_sb(calld->compression_algorithm, &calld->slices);
}
break;
case GRPC_NO_OP:
break;
}
}
calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, success);
}
/* Modify the send_ops stream_op_buffer depending on whether compression was
* carried out */
static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
int did_compress;
gpr_slice_buffer tmp;
gpr_slice_buffer_init(&tmp);
did_compress =
grpc_msg_compress(calld->compression_algorithm, &calld->slices, &tmp);
if (did_compress) {
gpr_slice_buffer_swap(&calld->slices, &tmp);
calld->send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
gpr_slice_buffer_destroy(&tmp);
grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
calld->send_flags);
calld->send_op.send_message = &calld->replacement_stream.base;
calld->post_send = calld->send_op.on_complete;
calld->send_op.on_complete = &calld->send_done;
grpc_call_next_op(exec_ctx, elem, &calld->send_op);
}
static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, int success) {
grpc_call_element *elem = elemp;
call_data *calld = elem->call_data;
gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
finish_send_message(exec_ctx, elem);
} else {
continue_send_message(exec_ctx, elem);
}
}
static void continue_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
while (grpc_byte_stream_next(exec_ctx, calld->send_op.send_message,
&calld->incoming_slice, ~(size_t)0,
&calld->got_slice)) {
gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
finish_send_message(exec_ctx, elem);
break;
}
}
}
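/* grpc_byte_stream_next, as used here, returns nonzero when a slice is
   available synchronously (filling incoming_slice) and zero when it will
   invoke the got_slice closure later instead; either way the accumulation
   resumes until the buffered slices reach the full send_length. */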
/* Called either:
- in response to an API call (or similar) from above, to send something
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
GPR_TIMER_BEGIN("compress_start_transport_stream_op", 0);
if (op->send_initial_metadata) {
process_send_initial_metadata(elem, op->send_initial_metadata);
}
if (op->send_message != NULL && !skip_compression(elem) &&
0 == (op->send_message->flags & GRPC_WRITE_NO_COMPRESS)) {
calld->send_op = *op;
calld->send_length = op->send_message->length;
calld->send_flags = op->send_message->flags;
continue_send_message(exec_ctx, elem);
} else {
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
}
GPR_TIMER_END("compress_start_transport_stream_op", 0);
}
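/* Two send paths, then: compressible messages take the detour through the
   slice buffer (continue_send_message -> finish_send_message, which swaps in
   the replacement byte stream), while everything else falls straight through
   to grpc_call_next_op. */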
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
gpr_slice_buffer_init(&calld->slices);
calld->has_compression_algorithm = 0;
grpc_closure_init(&calld->got_slice, got_slice, elem);
grpc_closure_init(&calld->send_done, send_done, elem);
}
/* Destructor for call_data */
@@ -313,85 +255,43 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *channeld = elem->channel_data;
grpc_compression_algorithm algo_idx;
const char *supported_algorithms_names[GRPC_COMPRESS_ALGORITHMS_COUNT - 1];
size_t supported_algorithms_idx = 0;
char *accept_encoding_str;
size_t accept_encoding_str_len;
grpc_compression_options_init(&channeld->compression_options);
channeld->compression_options.enabled_algorithms_bitset =
(gpr_uint32)grpc_channel_args_compression_algorithm_get_states(
args->channel_args);
channeld->default_compression_algorithm =
grpc_channel_args_get_compression_algorithm(args->channel_args);
/* Make sure the default isn't disabled. */
GPR_ASSERT(grpc_compression_options_is_algorithm_enabled(
&channeld->compression_options, channeld->default_compression_algorithm));
channeld->compression_options.default_compression_algorithm =
channeld->default_compression_algorithm;
channeld->mdstr_request_compression_algorithm_key =
grpc_mdstr_from_string(mdctx, GRPC_COMPRESS_REQUEST_ALGORITHM_KEY);
channeld->mdstr_outgoing_compression_algorithm_key =
grpc_mdstr_from_string(mdctx, "grpc-encoding");
channeld->mdstr_compression_capabilities_key =
grpc_mdstr_from_string(mdctx, "grpc-accept-encoding");
channeld->supported_compression_algorithms = 0;
for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
char *algorithm_name;
/* skip disabled algorithms */
if (grpc_compression_options_is_algorithm_enabled(
&channeld->compression_options, algo_idx) == 0) {
continue;
}
GPR_ASSERT(grpc_compression_algorithm_name(algo_idx, &algorithm_name) != 0);
channeld->mdelem_compression_algorithms[algo_idx] =
grpc_mdelem_from_metadata_strings(
mdctx,
GRPC_MDSTR_REF(channeld->mdstr_outgoing_compression_algorithm_key),
grpc_mdstr_from_string(mdctx, algorithm_name));
if (algo_idx > 0) {
supported_algorithms_names[supported_algorithms_idx++] = algorithm_name;
}
channeld->supported_compression_algorithms |= 1u << algo_idx;
}
/* TODO(dgq): gpr_strjoin_sep could be made to work with statically allocated
* arrays, as to avoid the heap allocs */
accept_encoding_str =
gpr_strjoin_sep(supported_algorithms_names, supported_algorithms_idx, ",",
&accept_encoding_str_len);
channeld->mdelem_accept_encoding = grpc_mdelem_from_metadata_strings(
mdctx, GRPC_MDSTR_REF(channeld->mdstr_compression_capabilities_key),
grpc_mdstr_from_string(mdctx, accept_encoding_str));
gpr_free(accept_encoding_str);
GPR_ASSERT(!args->is_last);
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_compress_filter = {
compress_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, grpc_call_next_get_peer, "compress"};
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "compress"};

@@ -83,19 +83,26 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
int r;
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
&args->call_stack->refcount, args->server_transport_data);
GPR_ASSERT(r == 0);
}
static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_transport_set_pollset(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollset);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
@@ -108,11 +115,10 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
cd->transport = NULL;
}
@@ -132,8 +138,8 @@ static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
const grpc_channel_filter grpc_connected_channel_filter = {
con_start_transport_stream_op, con_start_transport_op, sizeof(call_data),
init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, con_get_peer, "connected",
};
void grpc_connected_channel_bind_transport(grpc_channel_stack *channel_stack,
@@ -154,3 +160,8 @@ void grpc_connected_channel_bind_transport(grpc_channel_stack *channel_stack,
channel. */
channel_stack->call_stack_size += grpc_transport_stream_size(transport);
}
grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
call_data *calld = elem->call_data;
return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}

@@ -46,4 +46,6 @@ extern const grpc_channel_filter grpc_connected_channel_filter;
void grpc_connected_channel_bind_transport(grpc_channel_stack* channel_stack,
grpc_transport* transport);
grpc_stream* grpc_connected_channel_get_stream(grpc_call_element* elem);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CONNECTED_CHANNEL_H */

@@ -31,12 +31,13 @@
*/
#include "src/core/channel/http_client_filter.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/support/string.h"
#include <string.h>
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#include "src/core/transport/static_metadata.h"
typedef struct call_data {
grpc_linked_mdelem method;
@@ -45,10 +46,8 @@ typedef struct call_data {
grpc_linked_mdelem te_trailers;
grpc_linked_mdelem content_type;
grpc_linked_mdelem user_agent;
grpc_metadata_batch *recv_initial_metadata;
/** Closure to call when finished with the hc_on_recv hook */
grpc_closure *on_done_recv;
@@ -59,12 +58,7 @@ } call_data;
} call_data;
typedef struct channel_data {
grpc_mdelem *static_scheme;
/** complete user agent mdelem */
grpc_mdelem *user_agent;
} channel_data;
@@ -75,14 +69,12 @@ typedef struct {
static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
client_recv_filter_args *a = user_data;
grpc_call_element *elem = a->elem;
if (md == GRPC_MDELEM_STATUS_200) {
return NULL;
} else if (md->key == GRPC_MDSTR_STATUS) {
grpc_call_element_send_cancel(a->exec_ctx, a->elem);
return NULL;
} else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
return NULL;
}
return md;
@@ -91,30 +83,21 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
size_t i;
size_t nops = calld->recv_ops->nops;
grpc_stream_op *ops = calld->recv_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
client_recv_filter_args a;
if (op->type != GRPC_OP_METADATA) continue;
calld->got_initial_metadata = 1;
a.elem = elem;
a.exec_ctx = exec_ctx;
grpc_metadata_batch_filter(&op->data.metadata, client_recv_filter, &a);
}
client_recv_filter_args a;
a.elem = elem;
a.exec_ctx = exec_ctx;
grpc_metadata_batch_filter(calld->recv_initial_metadata, client_recv_filter,
&a);
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
}
static grpc_mdelem *client_strip_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
/* eat the things we'd like to set ourselves */
if (md->key == GRPC_MDSTR_METHOD) return NULL;
if (md->key == GRPC_MDSTR_SCHEME) return NULL;
if (md->key == GRPC_MDSTR_TE) return NULL;
if (md->key == GRPC_MDSTR_CONTENT_TYPE) return NULL;
if (md->key == GRPC_MDSTR_USER_AGENT) return NULL;
return md;
}
@@ -123,40 +106,29 @@ static void hc_mutate_op(grpc_call_element *elem,
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
size_t i;
if (op->send_ops && !calld->sent_initial_metadata) {
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *stream_op = &ops[i];
if (stream_op->type != GRPC_OP_METADATA) continue;
calld->sent_initial_metadata = 1;
grpc_metadata_batch_filter(&stream_op->data.metadata, client_strip_filter,
elem);
/* Send : prefixed headers, which have to be before any application
layer headers. */
grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->method,
GRPC_MDELEM_REF(channeld->method));
grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->scheme,
GRPC_MDELEM_REF(channeld->scheme));
grpc_metadata_batch_add_tail(&stream_op->data.metadata,
&calld->te_trailers,
GRPC_MDELEM_REF(channeld->te_trailers));
grpc_metadata_batch_add_tail(&stream_op->data.metadata,
&calld->content_type,
GRPC_MDELEM_REF(channeld->content_type));
grpc_metadata_batch_add_tail(&stream_op->data.metadata,
&calld->user_agent,
GRPC_MDELEM_REF(channeld->user_agent));
break;
}
if (op->send_initial_metadata != NULL) {
grpc_metadata_batch_filter(op->send_initial_metadata, client_strip_filter,
elem);
/* Send : prefixed headers, which have to be before any application
layer headers. */
grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->method,
GRPC_MDELEM_METHOD_POST);
grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->scheme,
channeld->static_scheme);
grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->te_trailers,
GRPC_MDELEM_TE_TRAILERS);
grpc_metadata_batch_add_tail(
op->send_initial_metadata, &calld->content_type,
GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->user_agent,
GRPC_MDELEM_REF(channeld->user_agent));
}
if (op->recv_ops && !calld->got_initial_metadata) {
if (op->recv_initial_metadata != NULL) {
/* substitute our callback for the higher callback */
calld->recv_ops = op->recv_ops;
calld->on_done_recv = op->on_done_recv;
op->on_done_recv = &calld->hc_on_recv;
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->on_done_recv = op->on_complete;
op->on_complete = &calld->hc_on_recv;
}
}
@@ -172,35 +144,38 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
calld->sent_initial_metadata = 0;
calld->got_initial_metadata = 0;
calld->on_done_recv = NULL;
grpc_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
if (initial_op) hc_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {}
static const char *scheme_from_args(const grpc_channel_args *args) {
static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
unsigned i;
size_t j;
grpc_mdelem *valid_schemes[] = {GRPC_MDELEM_SCHEME_HTTP,
GRPC_MDELEM_SCHEME_HTTPS};
if (args != NULL) {
for (i = 0; i < args->num_args; ++i) {
if (args->args[i].type == GRPC_ARG_STRING &&
strcmp(args->args[i].key, GRPC_ARG_HTTP2_SCHEME) == 0) {
return args->args[i].value.string;
for (j = 0; j < GPR_ARRAY_SIZE(valid_schemes); j++) {
if (0 == strcmp(grpc_mdstr_as_c_string(valid_schemes[j]->value),
args->args[i].value.string)) {
return valid_schemes[j];
}
}
}
}
}
return "http";
return GRPC_MDELEM_SCHEME_HTTP;
}
static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
const grpc_channel_args *args) {
static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args) {
gpr_strvec v;
size_t i;
int is_first = 1;
@@ -242,7 +217,7 @@ static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
tmp = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
result = grpc_mdstr_from_string(mdctx, tmp);
result = grpc_mdstr_from_string(tmp);
gpr_free(tmp);
return result;
@@ -250,46 +225,24 @@ static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *channel_args,
grpc_mdctx *mdctx, int is_first, int is_last) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!is_last);
/* initialize members */
channeld->te_trailers = grpc_mdelem_from_strings(mdctx, "te", "trailers");
channeld->method = grpc_mdelem_from_strings(mdctx, ":method", "POST");
channeld->scheme = grpc_mdelem_from_strings(mdctx, ":scheme",
scheme_from_args(channel_args));
channeld->content_type =
grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
channeld->status = grpc_mdelem_from_strings(mdctx, ":status", "200");
channeld->user_agent = grpc_mdelem_from_metadata_strings(
mdctx, grpc_mdstr_from_string(mdctx, "user-agent"),
user_agent_from_args(mdctx, channel_args));
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(!args->is_last);
chand->static_scheme = scheme_from_args(args->channel_args);
chand->user_agent = grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_USER_AGENT, user_agent_from_args(args->channel_args));
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
GRPC_MDELEM_UNREF(channeld->te_trailers);
GRPC_MDELEM_UNREF(channeld->method);
GRPC_MDELEM_UNREF(channeld->scheme);
GRPC_MDELEM_UNREF(channeld->content_type);
GRPC_MDELEM_UNREF(channeld->status);
GRPC_MDELEM_UNREF(channeld->user_agent);
channel_data *chand = elem->channel_data;
GRPC_MDELEM_UNREF(chand->user_agent);
}
const grpc_channel_filter grpc_http_client_filter = {
hc_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"http-client"};
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "http-client"};

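The rewritten filter leans on the callback contract of grpc_metadata_batch_filter: return NULL to drop an element, return the input to keep it, or return a substitute element (as the host -> :authority translation in the server filter below does). What follows is a minimal standalone sketch of that contract; hdr, filter_list and strip_reserved are illustrative names, not the gRPC metadata API.

#include <stdio.h>
#include <string.h>

typedef struct hdr { const char *key, *value; struct hdr *next; } hdr;

/* Contract mirrored from the diff: return NULL to drop the element,
   the input to keep it, or a different element to substitute it. */
typedef hdr *(*hdr_filter)(void *user_data, hdr *h);

static hdr *filter_list(hdr *head, hdr_filter f, void *user_data) {
  hdr **tail = &head;
  while (*tail != NULL) {
    hdr *out = f(user_data, *tail);
    if (out == NULL) {
      *tail = (*tail)->next; /* drop */
    } else {
      out->next = (*tail)->next; /* keep or substitute */
      *tail = out;
      tail = &out->next;
    }
  }
  return head;
}

/* Mimics client_strip_filter: eat the headers the filter sets itself. */
static hdr *strip_reserved(void *user_data, hdr *h) {
  (void)user_data;
  if (strcmp(h->key, ":method") == 0 || strcmp(h->key, ":scheme") == 0 ||
      strcmp(h->key, "te") == 0 || strcmp(h->key, "content-type") == 0 ||
      strcmp(h->key, "user-agent") == 0) {
    return NULL;
  }
  return h;
}

int main(void) {
  hdr ua = {"user-agent", "stale", NULL};
  hdr custom = {"x-custom", "keep-me", &ua};
  hdr *head = filter_list(&custom, strip_reserved, NULL);
  for (hdr *h = head; h != NULL; h = h->next) {
    printf("%s: %s\n", h->key, h->value); /* prints only x-custom */
  }
  return 0;
}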
@@ -33,13 +33,13 @@
#include "src/core/channel/http_server_filter.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <string.h>
#include "src/core/profiling/timers.h"
#include "src/core/transport/static_metadata.h"
typedef struct call_data {
gpr_uint8 got_initial_metadata;
gpr_uint8 seen_path;
gpr_uint8 seen_post;
gpr_uint8 sent_status;
@@ -49,7 +49,7 @@ typedef struct call_data {
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
grpc_stream_op_buffer *recv_ops;
grpc_metadata_batch *recv_initial_metadata;
/** Closure to call when finished with the hs_on_recv hook */
grpc_closure *on_done_recv;
/** Receive closures are chained: we inject this closure as the on_done_recv
@@ -58,22 +58,7 @@ typedef struct call_data {
grpc_closure hs_on_recv;
} call_data;
typedef struct channel_data {
grpc_mdelem *te_trailers;
grpc_mdelem *method_post;
grpc_mdelem *http_scheme;
grpc_mdelem *https_scheme;
/* TODO(klempner): Remove this once we stop using it */
grpc_mdelem *grpc_scheme;
grpc_mdelem *content_type;
grpc_mdelem *status_ok;
grpc_mdelem *status_not_found;
grpc_mdstr *path_key;
grpc_mdstr *authority_key;
grpc_mdstr *host_key;
grpc_mdctx *mdctx;
} channel_data;
typedef struct channel_data { gpr_uint8 unused; } channel_data;
typedef struct {
grpc_call_element *elem;
@@ -83,25 +68,24 @@ typedef struct {
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
server_filter_args *a = user_data;
grpc_call_element *elem = a->elem;
channel_data *channeld = elem->channel_data;
call_data *calld = elem->call_data;
/* Check if it is one of the headers we care about. */
if (md == channeld->te_trailers || md == channeld->method_post ||
md == channeld->http_scheme || md == channeld->https_scheme ||
md == channeld->grpc_scheme || md == channeld->content_type) {
if (md == GRPC_MDELEM_TE_TRAILERS || md == GRPC_MDELEM_METHOD_POST ||
md == GRPC_MDELEM_SCHEME_HTTP || md == GRPC_MDELEM_SCHEME_HTTPS ||
md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
/* swallow it */
if (md == channeld->method_post) {
if (md == GRPC_MDELEM_METHOD_POST) {
calld->seen_post = 1;
} else if (md->key == channeld->http_scheme->key) {
} else if (md->key == GRPC_MDSTR_SCHEME) {
calld->seen_scheme = 1;
} else if (md == channeld->te_trailers) {
} else if (md == GRPC_MDELEM_TE_TRAILERS) {
calld->seen_te_trailers = 1;
}
/* TODO(klempner): Track that we've seen all the headers we should
require */
return NULL;
} else if (md->key == channeld->content_type->key) {
} else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
if (strncmp(grpc_mdstr_as_c_string(md->value), "application/grpc+", 17) ==
0) {
/* Although the C implementation doesn't (currently) generate them,
@@ -113,12 +97,11 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
/* TODO(klempner): We're currently allowing this, but we shouldn't
see it without a proxy so log for now. */
gpr_log(GPR_INFO, "Unexpected content-type %s",
channeld->content_type->key);
grpc_mdstr_as_c_string(md->value));
}
return NULL;
} else if (md->key == channeld->te_trailers->key ||
md->key == channeld->method_post->key ||
md->key == channeld->http_scheme->key) {
} else if (md->key == GRPC_MDSTR_TE || md->key == GRPC_MDSTR_METHOD ||
md->key == GRPC_MDSTR_SCHEME) {
gpr_log(GPR_ERROR, "Invalid %s: header: '%s'",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value));
/* swallow it and error everything out. */
@@ -126,23 +109,21 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
on the wire here. */
grpc_call_element_send_cancel(a->exec_ctx, elem);
return NULL;
} else if (md->key == channeld->path_key) {
} else if (md->key == GRPC_MDSTR_PATH) {
if (calld->seen_path) {
gpr_log(GPR_ERROR, "Received :path twice");
return NULL;
}
calld->seen_path = 1;
return md;
} else if (md->key == channeld->authority_key) {
} else if (md->key == GRPC_MDSTR_AUTHORITY) {
calld->seen_authority = 1;
return md;
} else if (md->key == channeld->host_key) {
} else if (md->key == GRPC_MDSTR_HOST) {
/* translate host to :authority since :authority may be
omitted */
grpc_mdelem *authority = grpc_mdelem_from_metadata_strings(
channeld->mdctx, GRPC_MDSTR_REF(channeld->authority_key),
GRPC_MDSTR_REF(md->value));
GRPC_MDELEM_UNREF(md);
GRPC_MDSTR_AUTHORITY, GRPC_MDSTR_REF(md->value));
calld->seen_authority = 1;
return authority;
} else {
@@ -154,43 +135,35 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, int success) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (success) {
size_t i;
size_t nops = calld->recv_ops->nops;
grpc_stream_op *ops = calld->recv_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
server_filter_args a;
if (op->type != GRPC_OP_METADATA) continue;
calld->got_initial_metadata = 1;
a.elem = elem;
a.exec_ctx = exec_ctx;
grpc_metadata_batch_filter(&op->data.metadata, server_filter, &a);
/* Have we seen the required http2 transport headers?
(:method, :scheme, content-type, with :path and :authority covered
at the channel level right now) */
if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
calld->seen_path && calld->seen_authority) {
/* do nothing */
} else {
if (!calld->seen_path) {
gpr_log(GPR_ERROR, "Missing :path header");
}
if (!calld->seen_authority) {
gpr_log(GPR_ERROR, "Missing :authority header");
}
if (!calld->seen_post) {
gpr_log(GPR_ERROR, "Missing :method header");
}
if (!calld->seen_scheme) {
gpr_log(GPR_ERROR, "Missing :scheme header");
}
if (!calld->seen_te_trailers) {
gpr_log(GPR_ERROR, "Missing te trailers header");
}
/* Error this call out */
success = 0;
grpc_call_element_send_cancel(exec_ctx, elem);
server_filter_args a;
a.elem = elem;
a.exec_ctx = exec_ctx;
grpc_metadata_batch_filter(calld->recv_initial_metadata, server_filter, &a);
/* Have we seen the required http2 transport headers?
(:method, :scheme, content-type, with :path and :authority covered
at the channel level right now) */
if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
calld->seen_path && calld->seen_authority) {
/* do nothing */
} else {
if (!calld->seen_path) {
gpr_log(GPR_ERROR, "Missing :path header");
}
if (!calld->seen_authority) {
gpr_log(GPR_ERROR, "Missing :authority header");
}
if (!calld->seen_post) {
gpr_log(GPR_ERROR, "Missing :method header");
}
if (!calld->seen_scheme) {
gpr_log(GPR_ERROR, "Missing :scheme header");
}
if (!calld->seen_te_trailers) {
gpr_log(GPR_ERROR, "Missing te trailers header");
}
/* Error this call out */
success = 0;
grpc_call_element_send_cancel(exec_ctx, elem);
}
}
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, success);
@@ -200,30 +173,21 @@ static void hs_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
size_t i;
if (op->send_ops && !calld->sent_status) {
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *stream_op = &ops[i];
if (stream_op->type != GRPC_OP_METADATA) continue;
calld->sent_status = 1;
grpc_metadata_batch_add_head(&stream_op->data.metadata, &calld->status,
GRPC_MDELEM_REF(channeld->status_ok));
grpc_metadata_batch_add_tail(&stream_op->data.metadata,
&calld->content_type,
GRPC_MDELEM_REF(channeld->content_type));
break;
}
if (op->send_initial_metadata != NULL && !calld->sent_status) {
calld->sent_status = 1;
grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->status,
GRPC_MDELEM_STATUS_200);
grpc_metadata_batch_add_tail(
op->send_initial_metadata, &calld->content_type,
GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
}
if (op->recv_ops && !calld->got_initial_metadata) {
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
calld->recv_ops = op->recv_ops;
calld->on_done_recv = op->on_done_recv;
op->on_done_recv = &calld->hs_on_recv;
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->on_done_recv = op->on_complete;
op->on_complete = &calld->hs_on_recv;
}
}
@@ -239,14 +203,12 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
if (initial_op) hs_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
@@ -255,57 +217,17 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
/* initialize members */
channeld->te_trailers = grpc_mdelem_from_strings(mdctx, "te", "trailers");
channeld->status_ok = grpc_mdelem_from_strings(mdctx, ":status", "200");
channeld->status_not_found =
grpc_mdelem_from_strings(mdctx, ":status", "404");
channeld->method_post = grpc_mdelem_from_strings(mdctx, ":method", "POST");
channeld->http_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "http");
channeld->https_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "https");
channeld->grpc_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "grpc");
channeld->path_key = grpc_mdstr_from_string(mdctx, ":path");
channeld->authority_key = grpc_mdstr_from_string(mdctx, ":authority");
channeld->host_key = grpc_mdstr_from_string(mdctx, "host");
channeld->content_type =
grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
channeld->mdctx = mdctx;
grpc_channel_element *elem,
grpc_channel_element_args *args) {
GPR_ASSERT(!args->is_last);
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
GRPC_MDELEM_UNREF(channeld->te_trailers);
GRPC_MDELEM_UNREF(channeld->status_ok);
GRPC_MDELEM_UNREF(channeld->status_not_found);
GRPC_MDELEM_UNREF(channeld->method_post);
GRPC_MDELEM_UNREF(channeld->http_scheme);
GRPC_MDELEM_UNREF(channeld->https_scheme);
GRPC_MDELEM_UNREF(channeld->grpc_scheme);
GRPC_MDELEM_UNREF(channeld->content_type);
GRPC_MDSTR_UNREF(channeld->path_key);
GRPC_MDSTR_UNREF(channeld->authority_key);
GRPC_MDSTR_UNREF(channeld->host_key);
}
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_http_server_filter = {
hs_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"http-server"};
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "http-server"};

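Both filters now intercept op->on_complete rather than op->on_done_recv: stash the upper layer's closure, substitute their own hook, validate the received metadata, and forward a possibly-downgraded success bit. A standalone sketch of that interception idiom follows; closure, call and the callback names are illustrative stand-ins for the gRPC types.

#include <stdio.h>

typedef void (*cb_fn)(void *arg, int success);
typedef struct closure { cb_fn cb; void *cb_arg; } closure;

typedef struct call {
  int seen_path, seen_authority; /* flags set while filtering */
  closure *on_done_recv;         /* the intercepted upper callback */
  closure hs_on_recv;            /* our injected hook */
} call;

static void hs_on_recv(void *arg, int success) {
  call *c = arg;
  if (success && !(c->seen_path && c->seen_authority)) {
    success = 0; /* required headers missing: error the call out */
  }
  c->on_done_recv->cb(c->on_done_recv->cb_arg, success);
}

static void original_cb(void *arg, int success) {
  (void)arg;
  printf("call finished: success=%d\n", success);
}

int main(void) {
  closure original = {original_cb, NULL};
  call c = {1, 0, &original, {hs_on_recv, &c}}; /* :authority missing */
  /* the transport only ever sees the injected hook */
  c.hs_on_recv.cb(c.hs_on_recv.cb_arg, 1); /* prints success=0 */
  return 0;
}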
@@ -1,122 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/channel/noop_filter.h"
#include <grpc/support/log.h>
typedef struct call_data {
int unused; /* C89 requires at least one struct element */
} call_data;
typedef struct channel_data {
int unused; /* C89 requires at least one struct element */
} channel_data;
/* used to silence 'variable not used' warnings */
static void ignore_unused(void *ignored) {}
static void noop_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
ignore_unused(calld);
ignore_unused(channeld);
/* do nothing */
}
/* Called either:
- in response to an API call (or similar) from above, to send something
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void noop_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
noop_mutate_op(elem, op);
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_stream_op *initial_op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
/* initialize members */
calld->unused = channeld->unused;
if (initial_op) noop_mutate_op(elem, initial_op);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
/* initialize members */
channeld->unused = 0;
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
ignore_unused(channeld);
}
const grpc_channel_filter grpc_no_op_filter = {
noop_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, grpc_call_next_get_peer, "no-op"};

@@ -0,0 +1,259 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/channel/subchannel_call_holder.h"
#include <grpc/support/alloc.h>
#include "src/core/profiling/timers.h"
#define GET_CALL(holder) \
((grpc_subchannel_call *)(gpr_atm_acq_load(&(holder)->subchannel_call)))
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *holder,
int success);
static void retry_ops(grpc_exec_ctx *exec_ctx, void *retry_ops_args,
int success);
static void add_waiting_locked(grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op);
static void fail_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
void grpc_subchannel_call_holder_init(
grpc_subchannel_call_holder *holder,
grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
void *pick_subchannel_arg, grpc_call_stack *owning_call) {
gpr_atm_rel_store(&holder->subchannel_call, 0);
holder->pick_subchannel = pick_subchannel;
holder->pick_subchannel_arg = pick_subchannel_arg;
gpr_mu_init(&holder->mu);
holder->connected_subchannel = NULL;
holder->waiting_ops = NULL;
holder->waiting_ops_count = 0;
holder->waiting_ops_capacity = 0;
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
holder->owning_call = owning_call;
}
void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder) {
grpc_subchannel_call *call = GET_CALL(holder);
if (call != NULL && call != CANCELLED_CALL) {
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "holder");
}
GPR_ASSERT(holder->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
gpr_mu_destroy(&holder->mu);
GPR_ASSERT(holder->waiting_ops_count == 0);
gpr_free(holder->waiting_ops);
}
void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op) {
/* try to (atomically) get the call */
grpc_subchannel_call *call = GET_CALL(holder);
GPR_TIMER_BEGIN("grpc_subchannel_call_holder_perform_op", 0);
if (call == CANCELLED_CALL) {
grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
if (call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, call, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
/* we failed; lock and figure out what to do */
gpr_mu_lock(&holder->mu);
retry:
/* need to recheck that another thread hasn't set the call */
call = GET_CALL(holder);
if (call == CANCELLED_CALL) {
gpr_mu_unlock(&holder->mu);
grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
if (call != NULL) {
gpr_mu_unlock(&holder->mu);
grpc_subchannel_call_process_op(exec_ctx, call, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
/* if this is a cancellation, then we can raise our cancelled flag */
if (op->cancel_with_status != GRPC_STATUS_OK) {
if (!gpr_atm_rel_cas(&holder->subchannel_call, 0, 1)) {
goto retry;
} else {
switch (holder->creation_phase) {
case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
fail_locked(exec_ctx, holder);
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
&holder->connected_subchannel, NULL);
break;
}
gpr_mu_unlock(&holder->mu);
grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
return;
}
}
/* if we don't have a subchannel, try to get one */
if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
holder->connected_subchannel == NULL &&
op->send_initial_metadata != NULL) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
grpc_closure_init(&holder->next_step, subchannel_ready, holder);
GRPC_CALL_STACK_REF(holder->owning_call, "pick_subchannel");
if (holder->pick_subchannel(
exec_ctx, holder->pick_subchannel_arg, op->send_initial_metadata,
&holder->connected_subchannel, &holder->next_step)) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
}
}
/* if we've got a subchannel, then let's ask it to create a call */
if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
holder->connected_subchannel != NULL) {
gpr_atm_rel_store(
&holder->subchannel_call,
(gpr_atm)(gpr_uintptr)grpc_connected_subchannel_create_call(
exec_ctx, holder->connected_subchannel, holder->pollset));
retry_waiting_locked(exec_ctx, holder);
goto retry;
}
/* nothing to be done but wait */
add_waiting_locked(holder, op);
gpr_mu_unlock(&holder->mu);
GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
}
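grpc_subchannel_call_holder_perform_op reads the atomic call pointer without the lock on the fast path, then takes the lock and re-reads before deciding what to do, because another thread may have installed or cancelled the call in between. The same fast-path/slow-path shape in isolation, assuming C11 atomics and pthreads; get_or_create is a hypothetical name, not the gRPC code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uintptr_t g_value;
static pthread_mutex_t g_mu = PTHREAD_MUTEX_INITIALIZER;

static uintptr_t get_or_create(void) {
  uintptr_t v = atomic_load_explicit(&g_value, memory_order_acquire);
  if (v != 0) return v; /* fast path: no lock taken */
  pthread_mutex_lock(&g_mu);
  v = atomic_load(&g_value); /* recheck under the lock */
  if (v == 0) {
    v = 42; /* expensive creation elided */
    atomic_store_explicit(&g_value, v, memory_order_release);
  }
  pthread_mutex_unlock(&g_mu);
  return v;
}

int main(void) {
  printf("%lu\n", (unsigned long)get_or_create());
  printf("%lu\n", (unsigned long)get_or_create()); /* fast path now */
  return 0;
}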
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
grpc_subchannel_call_holder *holder = arg;
grpc_subchannel_call *call;
gpr_mu_lock(&holder->mu);
GPR_ASSERT(holder->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
call = GET_CALL(holder);
GPR_ASSERT(call == NULL || call == CANCELLED_CALL);
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (holder->connected_subchannel == NULL) {
fail_locked(exec_ctx, holder);
} else {
gpr_atm_rel_store(
&holder->subchannel_call,
(gpr_atm)(gpr_uintptr)grpc_connected_subchannel_create_call(
exec_ctx, holder->connected_subchannel, holder->pollset));
retry_waiting_locked(exec_ctx, holder);
}
gpr_mu_unlock(&holder->mu);
GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
}
typedef struct {
grpc_transport_stream_op *ops;
size_t nops;
grpc_subchannel_call *call;
} retry_ops_args;
static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder) {
retry_ops_args *a = gpr_malloc(sizeof(*a));
a->ops = holder->waiting_ops;
a->nops = holder->waiting_ops_count;
a->call = GET_CALL(holder);
if (a->call == CANCELLED_CALL) {
gpr_free(a);
fail_locked(exec_ctx, holder);
return;
}
holder->waiting_ops = NULL;
holder->waiting_ops_count = 0;
holder->waiting_ops_capacity = 0;
GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(retry_ops, a), 1);
}
static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, int success) {
retry_ops_args *a = args;
size_t i;
for (i = 0; i < a->nops; i++) {
grpc_subchannel_call_process_op(exec_ctx, a->call, &a->ops[i]);
}
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
gpr_free(a->ops);
gpr_free(a);
}
static void add_waiting_locked(grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("add_waiting_locked", 0);
if (holder->waiting_ops_count == holder->waiting_ops_capacity) {
holder->waiting_ops_capacity = GPR_MAX(3, 2 * holder->waiting_ops_capacity);
holder->waiting_ops =
gpr_realloc(holder->waiting_ops, holder->waiting_ops_capacity *
sizeof(*holder->waiting_ops));
}
holder->waiting_ops[holder->waiting_ops_count++] = *op;
GPR_TIMER_END("add_waiting_locked", 0);
}
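add_waiting_locked grows its queue geometrically, capacity = GPR_MAX(3, 2 * capacity), so queueing n ops costs amortized O(1) per append. The same growth rule on an illustrative queue type (error handling elided):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int *items; size_t count, capacity; } queue;

static void queue_push(queue *q, int item) {
  if (q->count == q->capacity) {
    size_t doubled = 2 * q->capacity;       /* GPR_MAX(3, 2 * capacity) */
    q->capacity = doubled > 3 ? doubled : 3;
    q->items = realloc(q->items, q->capacity * sizeof(*q->items));
  }
  q->items[q->count++] = item;
}

int main(void) {
  queue q = {NULL, 0, 0};
  for (int i = 0; i < 10; i++) queue_push(&q, i);
  printf("count=%zu capacity=%zu\n", q.count, q.capacity); /* 10, 12 */
  free(q.items);
  return 0;
}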
static void fail_locked(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder) {
size_t i;
for (i = 0; i < holder->waiting_ops_count; i++) {
grpc_exec_ctx_enqueue(exec_ctx, holder->waiting_ops[i].on_complete, 0);
grpc_exec_ctx_enqueue(exec_ctx, holder->waiting_ops[i].recv_message_ready,
0);
}
holder->waiting_ops_count = 0;
}
char *grpc_subchannel_call_holder_get_peer(
grpc_exec_ctx *exec_ctx, grpc_subchannel_call_holder *holder) {
grpc_subchannel_call *subchannel_call = GET_CALL(holder);
if (subchannel_call) {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
} else {
return NULL;
}
}

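The holder packs three states into one atomic word: 0 means no call yet, the sentinel 1 (CANCELLED_CALL) means cancelled, and any other value is a live grpc_subchannel_call pointer; a cancellation race is resolved with a compare-and-swap from 0 to 1. A standalone model of that encoding, assuming C11 atomics (all names here are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CANCELLED ((uintptr_t)1) /* plays the role of CANCELLED_CALL */

typedef struct { const char *name; } fake_call;
typedef struct { atomic_uintptr_t call; } holder;

/* Returns 1 if the cancel won the race, 0 if a call already exists
   (mirrors gpr_atm_rel_cas(&holder->subchannel_call, 0, 1) above). */
static int try_cancel(holder *h) {
  uintptr_t expected = 0;
  return atomic_compare_exchange_strong(&h->call, &expected, CANCELLED);
}

static void set_call(holder *h, fake_call *c) {
  atomic_store_explicit(&h->call, (uintptr_t)c, memory_order_release);
}

int main(void) {
  holder a, b;
  fake_call c = {"live"};
  atomic_init(&a.call, 0);
  atomic_init(&b.call, 0);

  if (try_cancel(&a)) printf("a: cancelled before any call existed\n");

  set_call(&b, &c);
  if (!try_cancel(&b)) {
    fake_call *live = (fake_call *)atomic_load(&b.call);
    printf("b: call already set (%s), cancel must go through it\n",
           live->name);
  }
  return 0;
}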
@@ -0,0 +1,98 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_CHANNEL_SUBCHANNEL_CALL_HOLDER_H
#define GRPC_INTERNAL_CORE_CHANNEL_SUBCHANNEL_CALL_HOLDER_H
#include "src/core/client_config/subchannel.h"
/** Pick a subchannel for grpc_subchannel_call_holder;
Return 1 if subchannel is available immediately (in which case on_ready
should not be called), or 0 otherwise (in which case on_ready should be
called when the subchannel is available) */
typedef int (*grpc_subchannel_call_holder_pick_subchannel)(
grpc_exec_ctx *exec_ctx, void *arg, grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready);
typedef enum {
GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} grpc_subchannel_call_holder_creation_phase;
/** Wrapper for holding a pointer to grpc_subchannel_call, and the
associated machinery to create such a pointer.
Handles queueing of stream ops until a call object is ready, waiting
for initial metadata before trying to create a call object,
and handling cancellation gracefully.
Both the channel and uchannel filter use this as their call_data. */
typedef struct grpc_subchannel_call_holder {
/** either 0 for no call, 1 for cancelled, or a pointer to a
grpc_subchannel_call */
gpr_atm subchannel_call;
/** Helper function to choose the subchannel on which to create
the call object. Channel filter delegates to the load
balancing policy (once it's ready); uchannel returns
immediately */
grpc_subchannel_call_holder_pick_subchannel pick_subchannel;
void *pick_subchannel_arg;
gpr_mu mu;
grpc_subchannel_call_holder_creation_phase creation_phase;
grpc_connected_subchannel *connected_subchannel;
grpc_pollset *pollset;
grpc_transport_stream_op *waiting_ops;
size_t waiting_ops_count;
size_t waiting_ops_capacity;
grpc_closure next_step;
grpc_call_stack *owning_call;
} grpc_subchannel_call_holder;
void grpc_subchannel_call_holder_init(
grpc_subchannel_call_holder *holder,
grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
void *pick_subchannel_arg, grpc_call_stack *owning_call);
void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op);
char *grpc_subchannel_call_holder_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
#endif

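The pick_subchannel contract documented above is the usual sync-or-async split: returning 1 means the result was produced inline and on_ready must not fire; returning 0 means the caller parks until on_ready runs. A sketch of that contract under hypothetical names:

#include <stdio.h>

typedef void (*ready_fn)(void *arg);
typedef struct { ready_fn fn; void *arg; } on_ready;

static int g_cached = 7; /* stands in for an already-picked subchannel */

/* Contract from the header: return 1 and fill *result synchronously
   (on_ready must NOT fire), or return 0 and fire on_ready later. */
static int pick(int have_cached, int *result, on_ready *ready) {
  (void)ready;
  if (have_cached) {
    *result = g_cached;
    return 1;
  }
  /* asynchronous path: stash `ready` and invoke it once a result
     exists (elided here) */
  return 0;
}

static void on_pick_done(void *arg) {
  (void)arg;
  printf("async pick completed\n");
}

int main(void) {
  int result;
  on_ready ready = {on_pick_done, NULL};
  if (pick(1, &result, &ready)) printf("sync pick: %d\n", result);
  return 0;
}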
@@ -51,6 +51,8 @@ typedef struct {
/** address to connect to */
const struct sockaddr *addr;
size_t addr_len;
/** initial connect string to send */
gpr_slice initial_connect_string;
/** deadline for connection */
gpr_timespec deadline;
/** channel arguments (to be passed to transport) */

@@ -31,13 +31,9 @@
*
*/
#include "src/core/client_config/subchannel_factory_decorators/add_channel_arg.h"
#include "src/core/client_config/subchannel_factory_decorators/merge_channel_args.h"
#include <grpc/support/slice.h>
#include "src/core/iomgr/sockaddr.h"
grpc_subchannel_factory *grpc_subchannel_factory_add_channel_arg(
grpc_subchannel_factory *input, const grpc_arg *arg) {
grpc_channel_args args;
args.num_args = 1;
args.args = (grpc_arg *)arg;
return grpc_subchannel_factory_merge_channel_args(input, &args);
}
void grpc_set_default_initial_connect_string(struct sockaddr **addr,
size_t *addr_len,
gpr_slice *initial_str) {}

@@ -0,0 +1,53 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/client_config/initial_connect_string.h"
#include <stddef.h>
extern void grpc_set_default_initial_connect_string(struct sockaddr **addr,
size_t *addr_len,
gpr_slice *initial_str);
static grpc_set_initial_connect_string_func g_set_initial_connect_string_func =
grpc_set_default_initial_connect_string;
void grpc_test_set_initial_connect_string_function(
grpc_set_initial_connect_string_func func) {
g_set_initial_connect_string_func = func;
}
void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
gpr_slice *initial_str) {
g_set_initial_connect_string_func(addr, addr_len, initial_str);
}

@@ -0,0 +1,50 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
#include <grpc/support/slice.h>
#include "src/core/iomgr/sockaddr.h"
typedef void (*grpc_set_initial_connect_string_func)(struct sockaddr **addr,
size_t *addr_len,
gpr_slice *initial_str);
void grpc_test_set_initial_connect_string_function(
grpc_set_initial_connect_string_func func);
/** Set a string to be sent once connected. Optionally reset addr. */
void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
gpr_slice *connect_string);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H */

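The two files above implement a test-override hook: a global function pointer initialized to the default implementation, which grpc_test_set_initial_connect_string_function can swap out. The pattern in isolation, with hypothetical names and a plain const char * standing in for gpr_slice:

#include <stdio.h>

typedef void (*connect_string_fn)(const char **str);

static void default_connect_string(const char **str) { *str = ""; }

/* global hook, initialized to the default implementation */
static connect_string_fn g_connect_string_fn = default_connect_string;

static void set_connect_string_fn_for_test(connect_string_fn fn) {
  g_connect_string_fn = fn;
}

static void fake_connect_string(const char **str) { *str = "PROXY hello"; }

int main(void) {
  const char *s;
  g_connect_string_fn(&s);
  printf("default: '%s'\n", s);
  set_connect_string_fn_for_test(fake_connect_string);
  g_connect_string_fn(&s);
  printf("overridden: '%s'\n", s);
  return 0;
}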
@@ -42,7 +42,7 @@
typedef struct pending_pick {
struct pending_pick *next;
grpc_pollset *pollset;
grpc_subchannel **target;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
} pending_pick;
@@ -60,7 +60,7 @@ typedef struct {
/** the selected channel
TODO(ctiller): this should be atomically set so we don't
need to take a mutex in the common case */
grpc_subchannel *selected;
grpc_connected_subchannel *selected;
/** have we started picking? */
int started_picking;
/** are we shut down? */
@@ -76,24 +76,6 @@ typedef struct {
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
static void del_interested_parties_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
pending_pick *pp;
for (pp = p->pending_picks; pp; pp = pp->next) {
grpc_subchannel_del_interested_party(
exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
}
}
static void add_interested_parties_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
pending_pick *pp;
for (pp = p->pending_picks; pp; pp = pp->next) {
grpc_subchannel_add_interested_party(
exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
}
}
void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
size_t i;
@@ -102,7 +84,7 @@ void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
if (p->selected) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
@@ -114,30 +96,65 @@ void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
del_interested_parties_locked(exec_ctx, p);
p->shutdown = 1;
pp = p->pending_picks;
p->pending_picks = NULL;
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
/* cancel subscription */
if (p->selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
} else {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
}
gpr_mu_unlock(&p->mu);
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
pp = next;
}
}
static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
}
pp = next;
}
gpr_mu_unlock(&p->mu);
}
static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
p->started_picking = 1;
p->checking_subchannel = 0;
p->checking_connectivity = GRPC_CHANNEL_IDLE;
GRPC_LB_POLICY_REF(&p->base, "pick_first_connectivity");
GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
&p->checking_connectivity, &p->connectivity_changed);
&p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
}
void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
@@ -149,22 +166,22 @@ void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_mu_unlock(&p->mu);
}
void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete) {
int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **target, grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
if (p->selected) {
gpr_mu_unlock(&p->mu);
*target = p->selected;
grpc_exec_ctx_enqueue(exec_ctx, on_complete, 1);
return 1;
} else {
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
grpc_subchannel_add_interested_party(
exec_ctx, p->subchannels[p->checking_subchannel], pollset);
grpc_pollset_set_add_pollset(exec_ctx, &p->base.interested_parties,
pollset);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollset = pollset;
@@ -172,6 +189,7 @@ void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pp->on_complete = on_complete;
p->pending_picks = pp;
gpr_mu_unlock(&p->mu);
return 0;
}
}
@@ -179,25 +197,17 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
size_t i;
grpc_transport_op op;
size_t num_subchannels = p->num_subchannels;
grpc_subchannel **subchannels;
grpc_subchannel *exclude_subchannel;
gpr_mu_lock(&p->mu);
subchannels = p->subchannels;
p->num_subchannels = 0;
p->subchannels = NULL;
exclude_subchannel = p->selected;
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "destroy_subchannels");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
for (i = 0; i < num_subchannels; i++) {
if (subchannels[i] != exclude_subchannel) {
memset(&op, 0, sizeof(op));
op.disconnect = 1;
grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], &op);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
}
@@ -207,23 +217,28 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
gpr_mu_lock(&p->mu);
if (p->shutdown) {
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
return;
} else if (p->selected != NULL) {
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */
p->checking_connectivity = GRPC_CHANNEL_FATAL_FAILURE;
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
p->checking_connectivity, "selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
grpc_subchannel_notify_on_state_change(exec_ctx, p->selected,
&p->checking_connectivity,
&p->connectivity_changed);
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, &p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
} else {
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
}
} else {
loop:
@@ -231,39 +246,41 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
case GRPC_CHANNEL_READY:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, "connecting_ready");
p->selected = p->subchannels[p->checking_subchannel];
GRPC_SUBCHANNEL_REF(p->selected, "picked_first");
selected_subchannel = p->subchannels[p->checking_subchannel];
p->selected =
grpc_subchannel_get_connected_subchannel(selected_subchannel);
GPR_ASSERT(p->selected);
GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked_first");
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_REF(&p->base, "destroy_subchannels");
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
grpc_exec_ctx_enqueue(exec_ctx,
grpc_closure_create(destroy_subchannels, p), 1);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = p->selected;
grpc_subchannel_del_interested_party(exec_ctx, p->selected,
pp->pollset);
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(exec_ctx, p->selected,
&p->checking_connectivity,
&p->connectivity_changed);
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, &p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"connecting_transient_failure");
del_interested_parties_locked(exec_ctx, p);
p->checking_subchannel =
(p->checking_subchannel + 1) % p->num_subchannels;
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel]);
add_interested_parties_locked(exec_ctx, p);
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
&p->checking_connectivity, &p->connectivity_changed);
&p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
} else {
goto loop;
}
@@ -275,13 +292,13 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"connecting_changed");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
&p->checking_connectivity, &p->connectivity_changed);
&p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
break;
case GRPC_CHANNEL_FATAL_FAILURE:
del_interested_parties_locked(exec_ctx, p);
GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
p->subchannels[p->num_subchannels - 1]);
p->num_subchannels--;
GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
p->subchannels[p->num_subchannels]);
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
"pick_first");
if (p->num_subchannels == 0) {
@@ -294,7 +311,8 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
"pick_first_connectivity");
} else {
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
@@ -302,7 +320,6 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
p->checking_subchannel %= p->num_subchannels;
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel]);
add_interested_parties_locked(exec_ctx, p);
goto loop;
}
}
@@ -311,39 +328,6 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&p->mu);
}
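On FATAL_FAILURE the policy removes the dead subchannel with GPR_SWAP against the last array slot: O(1) removal at the cost of ordering, which pick-first does not need. The trick in isolation, with an illustrative int array standing in for the subchannel array:

#include <stdio.h>

/* Swap the victim with the last live element, then shrink the count;
   the diff decrements first and swaps with the new last index, which
   has the same effect. */
static void remove_at(int *items, size_t *count, size_t i) {
  int tmp = items[i];
  items[i] = items[*count - 1];
  items[*count - 1] = tmp;
  (*count)--;
}

int main(void) {
  int subchannels[] = {10, 20, 30, 40};
  size_t count = 4;
  remove_at(subchannels, &count, 1); /* drop 20 */
  for (size_t i = 0; i < count; i++) printf("%d ", subchannels[i]);
  printf("\n"); /* 10 40 30 */
  return 0;
}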
static void pf_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_transport_op *op) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
size_t i;
size_t n;
grpc_subchannel **subchannels;
grpc_subchannel *selected;
gpr_mu_lock(&p->mu);
n = p->num_subchannels;
subchannels = gpr_malloc(n * sizeof(*subchannels));
selected = p->selected;
if (selected) {
GRPC_SUBCHANNEL_REF(selected, "pf_broadcast_to_selected");
}
for (i = 0; i < n; i++) {
subchannels[i] = p->subchannels[i];
GRPC_SUBCHANNEL_REF(subchannels[i], "pf_broadcast");
}
gpr_mu_unlock(&p->mu);
for (i = 0; i < n; i++) {
if (selected == subchannels[i]) continue;
grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pf_broadcast");
}
if (p->selected) {
grpc_subchannel_process_transport_op(exec_ctx, selected, op);
GRPC_SUBCHANNEL_UNREF(exec_ctx, selected, "pf_broadcast_to_selected");
}
gpr_free(subchannels);
}
static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
@@ -364,8 +348,20 @@ void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_unlock(&p->mu);
}
void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
gpr_mu_lock(&p->mu);
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
} else {
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
}
gpr_mu_unlock(&p->mu);
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_destroy, pf_shutdown, pf_pick, pf_exit_idle, pf_broadcast,
pf_destroy, pf_shutdown, pf_pick, pf_cancel_pick, pf_ping_one, pf_exit_idle,
pf_check_connectivity, pf_notify_on_state_change};
static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}

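The vtable gains pf_cancel_pick and pf_ping_one entries; this struct-of-function-pointers layout is how each LB policy supplies its own behavior behind a single interface. A trimmed-down sketch of the layout, with a hypothetical two-entry vtable rather than the real grpc_lb_policy_vtable:

#include <stdio.h>

typedef struct policy policy;

typedef struct {
  void (*shutdown)(policy *p);
  int (*pick)(policy *p, int *target); /* 1 = sync, 0 = queued */
} policy_vtable;

struct policy {
  const policy_vtable *vtable; /* per-policy dispatch table */
  int selected;
};

static void demo_shutdown(policy *p) {
  (void)p;
  printf("shutdown\n");
}

static int demo_pick(policy *p, int *target) {
  if (p->selected != 0) {
    *target = p->selected; /* synchronous completion */
    return 1;
  }
  return 0; /* a real policy would queue the pick here */
}

static const policy_vtable demo_vtable = {demo_shutdown, demo_pick};

int main(void) {
  policy p = {&demo_vtable, 42};
  int target;
  if (p.vtable->pick(&p, &target)) printf("picked %d\n", target);
  p.vtable->shutdown(&p);
  return 0;
}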
@@ -38,6 +38,8 @@
#include <grpc/support/alloc.h>
#include "src/core/transport/connectivity_state.h"
typedef struct round_robin_lb_policy round_robin_lb_policy;
int grpc_lb_round_robin_trace = 0;
/** List of entities waiting for a pick.
@@ -46,7 +48,7 @@ int grpc_lb_round_robin_trace = 0;
typedef struct pending_pick {
struct pending_pick *next;
grpc_pollset *pollset;
grpc_subchannel **target;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
} pending_pick;
@@ -58,22 +60,27 @@ typedef struct ready_list {
} ready_list;
typedef struct {
size_t subchannel_idx; /**< Index over p->subchannels */
void *p; /**< round_robin_lb_policy instance */
} connectivity_changed_cb_arg;
typedef struct {
/** index within policy->subchannels */
size_t index;
/** backpointer to owning policy */
round_robin_lb_policy *policy;
/** subchannel itself */
grpc_subchannel *subchannel;
/** notification that connectivity has changed on subchannel */
grpc_closure connectivity_changed_closure;
/** this subchannels current position in subchannel->ready_list */
ready_list *ready_list_node;
/** last observed connectivity */
grpc_connectivity_state connectivity_state;
} subchannel_data;
struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
/** all our subchannels */
grpc_subchannel **subchannels;
size_t num_subchannels;
/** Callbacks, one per subchannel being watched, to be called when their
* respective connectivity changes */
grpc_closure *connectivity_changed_cbs;
connectivity_changed_cb_arg *cb_args;
subchannel_data **subchannels;
/** mutex protecting remaining members */
gpr_mu mu;
@@ -81,8 +88,6 @@ typedef struct {
int started_picking;
/** are we shutting down? */
int shutdown;
/** Connectivity state of the subchannels being watched */
grpc_connectivity_state *subchannel_connectivity;
/** List of picks that are waiting on connectivity */
pending_pick *pending_picks;
@@ -93,13 +98,7 @@ typedef struct {
ready_list ready_list;
/** Last pick from the ready list. */
ready_list *ready_list_last_pick;
/** Subchannel index to ready_list node.
*
* Kept in order to remove nodes from the ready list associated with a
* subchannel */
ready_list **subchannel_index_to_readylist_node;
} round_robin_lb_policy;
};
/** Returns the next subchannel from the connected list or NULL if the list is
* empty.
@@ -144,9 +143,9 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
/** Prepends (relative to the root at p->ready_list) the subchannel \a sc
 * to the list of ready subchannels. */
static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
grpc_subchannel *csc) {
grpc_subchannel *sc) {
ready_list *new_elem = gpr_malloc(sizeof(ready_list));
new_elem->subchannel = csc;
new_elem->subchannel = sc;
if (p->ready_list.prev == NULL) {
/* first element */
new_elem->next = &p->ready_list;
@@ -160,7 +159,7 @@ static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
p->ready_list.prev = new_elem;
}
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (SC %p)", new_elem, csc);
gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (SC %p)", new_elem, sc);
}
return new_elem;
}
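The ready list is a circular doubly linked list rooted at a dummy node embedded in the policy (p->ready_list), so insertion has a single empty-list special case and round-robin traversal simply skips the root when it wraps. A standalone sketch of that structure, with an illustrative node type:

#include <stdio.h>

typedef struct node { int value; struct node *next, *prev; } node;

static void push_front(node *root, node *n) {
  if (root->prev == NULL) { /* first element: close the ring */
    n->next = n->prev = root;
    root->next = root->prev = n;
  } else {
    n->next = root->next;
    n->prev = root;
    root->next->prev = n;
    root->next = n;
  }
}

/* Advance one step, skipping the dummy root (the round-robin step). */
static node *advance(node *root, node *cur) {
  node *next = cur->next;
  return next == root ? next->next : next;
}

int main(void) {
  node root = {0, NULL, NULL};
  node a = {1}, b = {2}, c = {3};
  push_front(&root, &a);
  push_front(&root, &b);
  push_front(&root, &c);
  node *cur = root.next;
  for (int i = 0; i < 7; i++) { /* wraps: 3 2 1 3 2 1 3 */
    printf("%d ", cur->value);
    cur = advance(&root, cur);
  }
  printf("\n");
  return 0;
}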
@@ -200,28 +199,15 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
gpr_free(node);
}
static void del_interested_parties_locked(grpc_exec_ctx *exec_ctx,
round_robin_lb_policy *p,
const size_t subchannel_idx) {
pending_pick *pp;
for (pp = p->pending_picks; pp; pp = pp->next) {
grpc_subchannel_del_interested_party(
exec_ctx, p->subchannels[subchannel_idx], pp->pollset);
}
}
void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
size_t i;
ready_list *elem;
for (i = 0; i < p->num_subchannels; i++) {
del_interested_parties_locked(exec_ctx, p, i);
subchannel_data *sd = p->subchannels[i];
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
gpr_free(sd);
}
for (i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "round_robin");
}
gpr_free(p->connectivity_changed_cbs);
gpr_free(p->subchannel_connectivity);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
@@ -237,20 +223,15 @@ void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(elem);
elem = tmp;
}
gpr_free(p->subchannel_index_to_readylist_node);
gpr_free(p->cb_args);
gpr_free(p);
}
void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
size_t i;
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
size_t i;
for (i = 0; i < p->num_subchannels; i++) {
del_interested_parties_locked(exec_ctx, p, i);
}
gpr_mu_lock(&p->mu);
p->shutdown = 1;
while ((pp = p->pending_picks)) {
@ -261,6 +242,35 @@ void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
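/* Passing NULL for both interested_parties and state below asks
   grpc_subchannel_notify_on_state_change to cancel the previously
   registered connectivity notification for each subchannel, rather than
   to register a new one. */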
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}
gpr_mu_unlock(&p->mu);
}
static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
p->pending_picks = pp;
}
pp = next;
}
gpr_mu_unlock(&p->mu);
}
@ -268,12 +278,16 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%d", p,
p->num_subchannels);
for (i = 0; i < p->num_subchannels; i++) {
p->subchannel_connectivity[i] = GRPC_CHANNEL_IDLE;
grpc_subchannel_notify_on_state_change(exec_ctx, p->subchannels[i],
&p->subchannel_connectivity[i],
&p->connectivity_changed_cbs[i]);
GRPC_LB_POLICY_REF(&p->base, "round_robin_connectivity");
subchannel_data *sd = p->subchannels[i];
sd->connectivity_state = GRPC_CHANNEL_IDLE;
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
GRPC_LB_POLICY_WEAK_REF(&p->base, "round_robin_connectivity");
}
}
@ -286,32 +300,30 @@ void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_mu_unlock(&p->mu);
}
void rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete) {
size_t i;
int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **target, grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
ready_list *selected;
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
gpr_mu_unlock(&p->mu);
*target = selected->subchannel;
*target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "[RR PICK] TARGET <-- SUBCHANNEL %p (NODE %p)",
gpr_log(GPR_DEBUG,
"[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
selected->subchannel, selected);
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
on_complete->cb(exec_ctx, on_complete->cb_arg, 1);
return 1;
} else {
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
for (i = 0; i < p->num_subchannels; i++) {
grpc_subchannel_add_interested_party(exec_ctx, p->subchannels[i],
pollset);
}
grpc_pollset_set_add_pollset(exec_ctx, &p->base.interested_parties,
pollset);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollset = pollset;
@ -319,38 +331,31 @@ void rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pp->on_complete = on_complete;
p->pending_picks = pp;
gpr_mu_unlock(&p->mu);
return 0;
}
}
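/* A caller-side sketch of the synchronous/asynchronous contract above
   (handle_ready_target is a hypothetical helper): a non-zero return means
   *target was filled in immediately and on_complete will NOT run; zero
   means the pick is pending and on_complete fires once *target is set. */
static void handle_ready_target(grpc_exec_ctx *exec_ctx,
                                grpc_connected_subchannel *target);
static void pick_usage_sketch(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                              grpc_pollset *pollset,
                              grpc_metadata_batch *initial_metadata,
                              grpc_connected_subchannel **target,
                              grpc_closure *on_complete) {
  if (grpc_lb_policy_pick(exec_ctx, policy, pollset, initial_metadata, target,
                          on_complete)) {
    handle_ready_target(exec_ctx, *target); /* completed synchronously */
  } /* otherwise on_complete will be invoked with the result later */
}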
static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
connectivity_changed_cb_arg *cb_arg = arg;
round_robin_lb_policy *p = cb_arg->p;
/* index over p->subchannels of this cb's subchannel */
const size_t this_idx = cb_arg->subchannel_idx;
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
ready_list *selected;
int unref = 0;
/* connectivity state of this cb's subchannel */
grpc_connectivity_state *this_connectivity;
gpr_mu_lock(&p->mu);
this_connectivity = &p->subchannel_connectivity[this_idx];
if (p->shutdown) {
unref = 1;
} else {
switch (*this_connectivity) {
switch (sd->connectivity_state) {
case GRPC_CHANNEL_READY:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, "connecting_ready");
/* add the newly connected subchannel to the list of connected ones.
* Note that it goes to the "end of the line". */
p->subchannel_index_to_readylist_node[this_idx] =
add_connected_sc_locked(p, p->subchannels[this_idx]);
sd->ready_list_node = add_connected_sc_locked(p, sd->subchannel);
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
* p->pending_picks. This preemptively replicates rr_pick()'s actions. */
@ -362,60 +367,60 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = selected->subchannel;
*pp->target =
grpc_subchannel_get_connected_subchannel(selected->subchannel);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
selected->subchannel, selected);
}
grpc_subchannel_del_interested_party(exec_ctx, selected->subchannel,
pp->pollset);
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[this_idx], this_connectivity,
&p->connectivity_changed_cbs[this_idx]);
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
*this_connectivity, "connecting_changed");
sd->connectivity_state,
"connecting_changed");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[this_idx], this_connectivity,
&p->connectivity_changed_cbs[this_idx]);
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
del_interested_parties_locked(exec_ctx, p, this_idx);
/* renew state notification */
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[this_idx], this_connectivity,
&p->connectivity_changed_cbs[this_idx]);
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
/* remove from ready list if still present */
if (p->subchannel_index_to_readylist_node[this_idx] != NULL) {
remove_disconnected_sc_locked(
p, p->subchannel_index_to_readylist_node[this_idx]);
p->subchannel_index_to_readylist_node[this_idx] = NULL;
if (sd->ready_list_node != NULL) {
remove_disconnected_sc_locked(p, sd->ready_list_node);
sd->ready_list_node = NULL;
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"connecting_transient_failure");
break;
case GRPC_CHANNEL_FATAL_FAILURE:
del_interested_parties_locked(exec_ctx, p, this_idx);
if (p->subchannel_index_to_readylist_node[this_idx] != NULL) {
remove_disconnected_sc_locked(
p, p->subchannel_index_to_readylist_node[this_idx]);
p->subchannel_index_to_readylist_node[this_idx] = NULL;
if (sd->ready_list_node != NULL) {
remove_disconnected_sc_locked(p, sd->ready_list_node);
sd->ready_list_node = NULL;
}
GPR_SWAP(grpc_subchannel *, p->subchannels[this_idx],
p->subchannels[p->num_subchannels - 1]);
p->num_subchannels--;
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
"round_robin");
GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
p->subchannels[p->num_subchannels]);
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
p->subchannels[sd->index]->index = sd->index;
gpr_free(sd);
unref = 1;
if (p->num_subchannels == 0) {
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE,
@ -426,7 +431,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
unref = 1;
} else {
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
@ -438,33 +442,10 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&p->mu);
if (unref) {
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
}
}
static void rr_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_transport_op *op) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
size_t i;
size_t n;
grpc_subchannel **subchannels;
gpr_mu_lock(&p->mu);
n = p->num_subchannels;
subchannels = gpr_malloc(n * sizeof(*subchannels));
for (i = 0; i < n; i++) {
subchannels[i] = p->subchannels[i];
GRPC_SUBCHANNEL_REF(subchannels[i], "rr_broadcast");
}
gpr_mu_unlock(&p->mu);
for (i = 0; i < n; i++) {
grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "rr_broadcast");
}
gpr_free(subchannels);
}
static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
@ -486,8 +467,24 @@ static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&p->mu);
}
static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *selected;
grpc_connected_subchannel *target;
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
gpr_mu_unlock(&p->mu);
target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
grpc_connected_subchannel_ping(exec_ctx, target, closure);
} else {
gpr_mu_unlock(&p->mu);
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
}
}
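/* Note that rr_ping_one only peeks at the next connected subchannel and does
   not call advance_last_picked_locked(), so pings leave the round-robin
   rotation used by rr_pick undisturbed. */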
static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_destroy, rr_shutdown, rr_pick, rr_exit_idle, rr_broadcast,
rr_destroy, rr_shutdown, rr_pick, rr_cancel_pick, rr_ping_one, rr_exit_idle,
rr_check_connectivity, rr_notify_on_state_change};
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
@ -501,27 +498,22 @@ static grpc_lb_policy *create_round_robin(grpc_lb_policy_factory *factory,
GPR_ASSERT(args->num_subchannels > 0);
memset(p, 0, sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
p->subchannels =
gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
p->num_subchannels = args->num_subchannels;
p->subchannels = gpr_malloc(sizeof(*p->subchannels) * p->num_subchannels);
memset(p->subchannels, 0, sizeof(*p->subchannels) * p->num_subchannels);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
memcpy(p->subchannels, args->subchannels,
sizeof(grpc_subchannel *) * args->num_subchannels);
gpr_mu_init(&p->mu);
p->connectivity_changed_cbs =
gpr_malloc(sizeof(grpc_closure) * args->num_subchannels);
p->subchannel_connectivity =
gpr_malloc(sizeof(grpc_connectivity_state) * args->num_subchannels);
p->cb_args =
gpr_malloc(sizeof(connectivity_changed_cb_arg) * args->num_subchannels);
for (i = 0; i < args->num_subchannels; i++) {
p->cb_args[i].subchannel_idx = i;
p->cb_args[i].p = p;
grpc_closure_init(&p->connectivity_changed_cbs[i], rr_connectivity_changed,
&p->cb_args[i]);
subchannel_data *sd = gpr_malloc(sizeof(*sd));
memset(sd, 0, sizeof(*sd));
p->subchannels[i] = sd;
sd->policy = p;
sd->index = i;
sd->subchannel = args->subchannels[i];
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed, sd);
}
/* The dummy root node of the ready list */
@ -530,10 +522,6 @@ static grpc_lb_policy *create_round_robin(grpc_lb_policy_factory *factory,
p->ready_list.next = NULL;
p->ready_list_last_pick = &p->ready_list;
p->subchannel_index_to_readylist_node =
gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
memset(p->subchannel_index_to_readylist_node, 0,
sizeof(grpc_subchannel *) * args->num_subchannels);
return &p->base;
}

@ -33,58 +33,94 @@
#include "src/core/client_config/lb_policy.h"
#define WEAK_REF_BITS 16
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable) {
policy->vtable = vtable;
gpr_ref_init(&policy->refs, 1);
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
grpc_pollset_set_init(&policy->interested_parties);
}
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "LB_POLICY:%p ref %d -> %d %s",
policy, (int)policy->refs.count, (int)policy->refs.count + 1, reason);
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
#else
void grpc_lb_policy_ref(grpc_lb_policy *policy) {
#define REF_FUNC_EXTRA_ARGS
#define REF_MUTATE_EXTRA_ARGS
#define REF_FUNC_PASS_ARGS(new_reason)
#define REF_MUTATE_PASS_ARGS(x)
#endif
gpr_ref(&policy->refs);
}
static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
void grpc_lb_policy_unref(grpc_lb_policy *policy,
grpc_closure_list *closure_list, const char *file,
int line, const char *reason) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "LB_POLICY:%p unref %d -> %d %s",
policy, (int)policy->refs.count, (int)policy->refs.count - 1, reason);
#else
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"LB_POLICY: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
old_val + delta, reason);
#endif
if (gpr_unref(&policy->refs)) {
policy->vtable->destroy(exec_ctx, policy);
return old_val;
}
void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
policy->vtable->shutdown(exec_ctx, policy);
}
grpc_lb_policy_weak_unref(exec_ctx,
policy REF_FUNC_PASS_ARGS("strong-unref"));
}
void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
policy->vtable->shutdown(exec_ctx, policy);
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
}
void grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete) {
policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata, target,
on_complete);
void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(&policy->interested_parties);
policy->vtable->destroy(exec_ctx, policy);
}
}
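/* A worked sketch of the packed counter arithmetic, assuming
   WEAK_REF_BITS == 16: strong refs live in the high bits, weak refs in the
   low 16, and ref_pair starts at 1 << 16 (one strong ref, no weak refs). */
static gpr_atm packed_refs_sketch(void) {
  gpr_atm pair = (gpr_atm)1 << WEAK_REF_BITS; /* init: 1 strong, 0 weak */
  pair += (gpr_atm)1 << WEAK_REF_BITS;        /* STRONG_REF: 2 strong */
  /* STRONG_UNREF trades a strong ref for a weak one, so shutdown (last
     strong ref gone) always precedes destroy (last weak ref gone): */
  pair += (gpr_atm)1 - ((gpr_atm)1 << WEAK_REF_BITS); /* 1 strong, 1 weak */
  return pair;
}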
void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_transport_op *op) {
policy->vtable->broadcast(exec_ctx, policy, op);
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **target,
grpc_closure *on_complete) {
return policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata,
target, on_complete);
}
void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connected_subchannel **target) {
policy->vtable->cancel_pick(exec_ctx, policy, target);
}
void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
policy->vtable->exit_idle(exec_ctx, policy);
}
void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure) {
policy->vtable->ping_one(exec_ctx, policy, closure);
}
void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connectivity_state *state,

@ -47,7 +47,8 @@ typedef void (*grpc_lb_completion)(void *cb_arg, grpc_subchannel *subchannel,
struct grpc_lb_policy {
const grpc_lb_policy_vtable *vtable;
gpr_refcount refs;
gpr_atm ref_pair;
grpc_pollset_set interested_parties;
};
struct grpc_lb_policy_vtable {
@ -56,17 +57,18 @@ struct grpc_lb_policy_vtable {
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** implement grpc_lb_policy_pick */
void (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete);
int (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **target, grpc_closure *on_complete);
void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connected_subchannel **target);
void (*ping_one)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure);
/** try to enter a READY connectivity state */
void (*exit_idle)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** broadcast a transport op to all subchannels */
void (*broadcast)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_transport_op *op);
/** check the current connectivity of the lb_policy */
grpc_connectivity_state (*check_connectivity)(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy);
@ -79,40 +81,54 @@ struct grpc_lb_policy_vtable {
grpc_closure *closure);
};
/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_REF(p, r) \
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason);
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const char *file, int line, const char *reason);
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason);
void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const char *file, int line, const char *reason);
#else
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
#define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p))
void grpc_lb_policy_ref(grpc_lb_policy *policy);
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy);
void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
#endif
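/* Call sites go through the macros so that, with
   GRPC_LB_POLICY_REFCOUNT_DEBUG defined, a file/line/reason triple travels
   with every ref operation; e.g. GRPC_LB_POLICY_WEAK_REF(&p->base, "pick")
   expands to grpc_lb_policy_weak_ref(&p->base, __FILE__, __LINE__, "pick")
   in debug builds, and to grpc_lb_policy_weak_ref(&p->base) otherwise. */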
/** called by concrete implementations to initialize the base struct */
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable);
/** Start shutting down (fail any pending picks) */
void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** Given initial metadata in \a initial_metadata, find an appropriate
target for this rpc, and 'return' it by calling \a on_complete after setting
\a target.
Returns 1 if the pick completed synchronously (in which case \a on_complete
is not invoked); returns 0 if the pick is pending, in which case \a
on_complete runs once \a target has been set.
Picking can be asynchronous. Any IO should be done under \a pollset. */
void grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete);
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **target,
grpc_closure *on_complete);
void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure);
void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_transport_op *op);
void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connected_subchannel **target);
void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);

@ -71,11 +71,8 @@ void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
}
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
struct sockaddr *failing_address,
int failing_address_len) {
resolver->vtable->channel_saw_error(exec_ctx, resolver, failing_address,
failing_address_len);
grpc_resolver *resolver) {
resolver->vtable->channel_saw_error(exec_ctx, resolver);
}
void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,

@ -35,8 +35,8 @@
#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_RESOLVER_H
#include "src/core/client_config/client_config.h"
#include "src/core/client_config/subchannel.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/sockaddr.h"
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;
@ -51,9 +51,7 @@ struct grpc_resolver {
struct grpc_resolver_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
struct sockaddr *failing_address,
int failing_address_len);
void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config, grpc_closure *on_complete);
};
@ -81,9 +79,7 @@ void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
/** Notification that the channel has seen an error on some address.
Can be used as a hint that re-resolution is desirable soon. */
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
struct sockaddr *failing_address,
int failing_address_len);
grpc_resolver *resolver);
/** Get the next client config. Called by the channel to fetch a new
configuration. Expected to set *target_config with a new configuration,

@ -40,7 +40,6 @@
#include <grpc/support/string_util.h>
#include "src/core/client_config/lb_policy_registry.h"
#include "src/core/client_config/subchannel_factory_decorators/add_channel_arg.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/support/string.h"
@ -81,9 +80,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r);
static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_client_config **target_config,
grpc_closure *on_complete);
@ -103,8 +100,7 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
}
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver, struct sockaddr *sa,
int len) {
grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (!r->resolving) {

@ -83,9 +83,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
grpc_resolver *r);
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_client_config **target_config,
grpc_closure *on_complete);
@ -107,8 +105,13 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
}
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
struct sockaddr *sa, int len) {}
grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
r->published = 0;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
gpr_mu_unlock(&r->mu);
}
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config,
@ -344,6 +347,9 @@ static grpc_resolver *sockaddr_create(
gpr_slice_buffer_destroy(&path_parts);
gpr_slice_unref(path_slice);
if (errors_found) {
gpr_free(r->lb_policy_name);
gpr_free(r->addrs);
gpr_free(r->addrs_len);
gpr_free(r);
return NULL;
}

@ -96,9 +96,7 @@ static void zookeeper_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
grpc_resolver *r);
static void zookeeper_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_client_config **target_config,
grpc_closure *on_complete);
@ -125,8 +123,7 @@ static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx,
}
static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
struct sockaddr *sa, int len) {
grpc_resolver *resolver) {
zookeeper_resolver *r = (zookeeper_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->resolving == 0) {

File diff suppressed because it is too large

@ -41,14 +41,23 @@
/** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */
typedef struct grpc_subchannel grpc_subchannel;
typedef struct grpc_connected_subchannel grpc_connected_subchannel;
typedef struct grpc_subchannel_call grpc_subchannel_call;
typedef struct grpc_subchannel_args grpc_subchannel_args;
#ifdef GRPC_SUBCHANNEL_REFCOUNT_DEBUG
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_SUBCHANNEL_REF(p, r) \
grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_UNREF(cl, p, r) \
grpc_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_WEAK_REF(p, r) \
grpc_subchannel_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
grpc_subchannel_weak_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) \
grpc_connected_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
grpc_connected_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) \
grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
@ -58,6 +67,12 @@ typedef struct grpc_subchannel_args grpc_subchannel_args;
#else
#define GRPC_SUBCHANNEL_REF(p, r) grpc_subchannel_ref((p))
#define GRPC_SUBCHANNEL_UNREF(cl, p, r) grpc_subchannel_unref((cl), (p))
#define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
grpc_subchannel_weak_unref((cl), (p))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) grpc_connected_subchannel_ref((p))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
grpc_connected_subchannel_unref((cl), (p))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
grpc_subchannel_call_unref((cl), (p))
@ -69,38 +84,31 @@ void grpc_subchannel_ref(grpc_subchannel *channel
void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_ref(grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_connected_subchannel_ref(grpc_connected_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_connected_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
typedef enum {
GRPC_SUBCHANNEL_CALL_CREATE_READY,
GRPC_SUBCHANNEL_CALL_CREATE_PENDING
} grpc_subchannel_call_create_status;
/** construct a subchannel call (possibly asynchronously).
*
* If the returned status is \a GRPC_SUBCHANNEL_CALL_CREATE_READY, the call will
* return immediately and \a target will point to a connected \a subchannel_call
* instance. Note that \a notify will \em not be invoked in this case.
* Otherwise, if the returned status is GRPC_SUBCHANNEL_CALL_CREATE_PENDING, the
* subchannel call will be created asynchronously, invoking the \a notify
* callback upon completion. */
grpc_subchannel_call_create_status grpc_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_subchannel *subchannel, grpc_pollset *pollset,
grpc_subchannel_call **target, grpc_closure *notify);
/** cancel \a call in the waiting state. */
void grpc_subchannel_cancel_waiting_call(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
int iomgr_success);
/** construct a subchannel call */
grpc_subchannel_call *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
grpc_pollset *pollset);
/** process a transport level op */
void grpc_subchannel_process_transport_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
grpc_transport_op *op);
void grpc_connected_subchannel_process_transport_op(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *subchannel,
grpc_transport_op *op);
/** poll the current connectivity state of a channel */
grpc_connectivity_state grpc_subchannel_check_connectivity(
@ -108,26 +116,22 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the channel */
void grpc_subchannel_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel,
grpc_connectivity_state *state,
grpc_closure *notify);
/** Remove \a subscribed_notify from the list of closures to be called on a
* state change if present, returning 1. Otherwise, nothing is done and 0 is
* returned. */
int grpc_subchannel_state_change_unsubscribe(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel,
grpc_closure *subscribed_notify);
/** express interest in \a channel's activities through \a pollset. */
void grpc_subchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel,
grpc_pollset *pollset);
/** stop following \a channel's activity through \a pollset. */
void grpc_subchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel,
grpc_pollset *pollset);
void grpc_subchannel_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
grpc_closure *notify);
void grpc_connected_subchannel_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *channel,
grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
grpc_closure *notify);
void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
grpc_connected_subchannel *channel,
grpc_closure *notify);
/** retrieve the grpc_connected_subchannel - or NULL if called before
the subchannel becomes connected */
grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_subchannel *subchannel);
/** continue processing a transport op */
void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
@ -138,6 +142,9 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call);
grpc_call_stack *grpc_subchannel_call_get_call_stack(
grpc_subchannel_call *subchannel_call);
struct grpc_subchannel_args {
/** Channel filters for this channel - wrapped factories will likely
want to mutate this */
@ -149,20 +156,10 @@ struct grpc_subchannel_args {
/** Address to connect to */
struct sockaddr *addr;
size_t addr_len;
/** metadata context to use */
grpc_mdctx *mdctx;
/** master channel */
grpc_channel *master;
};
/** create a subchannel given a connector */
grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
grpc_subchannel_args *args);
/** Return the metadata context associated with the subchannel */
grpc_mdctx *grpc_subchannel_get_mdctx(grpc_subchannel *subchannel);
/** Return the master channel associated with the subchannel */
grpc_channel *grpc_subchannel_get_master(grpc_subchannel *subchannel);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_H */

@ -1,86 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/client_config/subchannel_factory_decorators/merge_channel_args.h"
#include <grpc/support/alloc.h>
#include "src/core/channel/channel_args.h"
typedef struct {
grpc_subchannel_factory base;
gpr_refcount refs;
grpc_subchannel_factory *wrapped;
grpc_channel_args *merge_args;
} merge_args_factory;
static void merge_args_factory_ref(grpc_subchannel_factory *scf) {
merge_args_factory *f = (merge_args_factory *)scf;
gpr_ref(&f->refs);
}
static void merge_args_factory_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_factory *scf) {
merge_args_factory *f = (merge_args_factory *)scf;
if (gpr_unref(&f->refs)) {
grpc_subchannel_factory_unref(exec_ctx, f->wrapped);
grpc_channel_args_destroy(f->merge_args);
gpr_free(f);
}
}
static grpc_subchannel *merge_args_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
grpc_subchannel_args *args) {
merge_args_factory *f = (merge_args_factory *)scf;
grpc_channel_args *final_args =
grpc_channel_args_merge(args->args, f->merge_args);
grpc_subchannel *s;
args->args = final_args;
s = grpc_subchannel_factory_create_subchannel(exec_ctx, f->wrapped, args);
grpc_channel_args_destroy(final_args);
return s;
}
static const grpc_subchannel_factory_vtable merge_args_factory_vtable = {
merge_args_factory_ref, merge_args_factory_unref,
merge_args_factory_create_subchannel};
grpc_subchannel_factory *grpc_subchannel_factory_merge_channel_args(
grpc_subchannel_factory *input, const grpc_channel_args *args) {
merge_args_factory *f = gpr_malloc(sizeof(*f));
f->base.vtable = &merge_args_factory_vtable;
gpr_ref_init(&f->refs, 1);
grpc_subchannel_factory_ref(input);
f->wrapped = input;
f->merge_args = grpc_channel_args_copy(args);
return &f->base;
}

@ -37,7 +37,9 @@
#include <grpc/compression.h>
#include <grpc/support/useful.h>
#include "src/core/compression/algorithm_metadata.h"
#include "src/core/surface/api_trace.h"
#include "src/core/transport/static_metadata.h"
int grpc_compression_algorithm_parse(const char *name, size_t name_length,
grpc_compression_algorithm *algorithm) {
@ -72,17 +74,55 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
switch (algorithm) {
case GRPC_COMPRESS_NONE:
*name = "identity";
break;
return 1;
case GRPC_COMPRESS_DEFLATE:
*name = "deflate";
break;
return 1;
case GRPC_COMPRESS_GZIP:
*name = "gzip";
break;
default:
return 1;
case GRPC_COMPRESS_ALGORITHMS_COUNT:
return 0;
}
return 1;
return 0;
}
grpc_compression_algorithm grpc_compression_algorithm_from_mdstr(
grpc_mdstr *str) {
if (str == GRPC_MDSTR_IDENTITY) return GRPC_COMPRESS_NONE;
if (str == GRPC_MDSTR_DEFLATE) return GRPC_COMPRESS_DEFLATE;
if (str == GRPC_MDSTR_GZIP) return GRPC_COMPRESS_GZIP;
return GRPC_COMPRESS_ALGORITHMS_COUNT;
}
grpc_mdstr *grpc_compression_algorithm_mdstr(
grpc_compression_algorithm algorithm) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
return GRPC_MDSTR_IDENTITY;
case GRPC_COMPRESS_DEFLATE:
return GRPC_MDSTR_DEFLATE;
case GRPC_COMPRESS_GZIP:
return GRPC_MDSTR_GZIP;
case GRPC_COMPRESS_ALGORITHMS_COUNT:
return NULL;
}
return NULL;
}
grpc_mdelem *grpc_compression_encoding_mdelem(
grpc_compression_algorithm algorithm) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
return GRPC_MDELEM_GRPC_ENCODING_IDENTITY;
case GRPC_COMPRESS_DEFLATE:
return GRPC_MDELEM_GRPC_ENCODING_DEFLATE;
case GRPC_COMPRESS_GZIP:
return GRPC_MDELEM_GRPC_ENCODING_GZIP;
default:
break;
}
return NULL;
}
/* TODO(dgq): Add the ability to specify parameters to the individual
@ -99,25 +139,9 @@ grpc_compression_algorithm grpc_compression_algorithm_for_level(
case GRPC_COMPRESS_LEVEL_HIGH:
return GRPC_COMPRESS_DEFLATE;
default:
/* we shouldn't be making it here */
abort();
return GRPC_COMPRESS_NONE;
}
}
grpc_compression_level grpc_compression_level_for_algorithm(
grpc_compression_algorithm algorithm) {
grpc_compression_level clevel;
GRPC_API_TRACE("grpc_compression_level_for_algorithm(algorithm=%d)", 1,
((int)algorithm));
for (clevel = GRPC_COMPRESS_LEVEL_NONE; clevel < GRPC_COMPRESS_LEVEL_COUNT;
++clevel) {
if (grpc_compression_algorithm_for_level(clevel) == algorithm) {
return clevel;
}
break;
}
abort();
return GRPC_COMPRESS_LEVEL_NONE;
GPR_UNREACHABLE_CODE(return GRPC_COMPRESS_NONE);
}
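/* A round-trip sketch using the public API above: parse a wire-format name
   into an algorithm and map it back to its canonical name. */
static int compression_roundtrip_sketch(void) {
  grpc_compression_algorithm algorithm;
  char *name;
  if (!grpc_compression_algorithm_parse("gzip", 4, &algorithm)) return 0;
  if (!grpc_compression_algorithm_name(algorithm, &name)) return 0;
  return name != NULL; /* name now points at the literal "gzip" */
}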
void grpc_compression_options_init(grpc_compression_options *opts) {

@ -0,0 +1,53 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_COMPRESSION_ALGORITHM_METADATA_H
#define GRPC_INTERNAL_CORE_COMPRESSION_ALGORITHM_METADATA_H
#include <grpc/compression.h>
#include "src/core/transport/metadata.h"
/** Return the metadata string (mdstr) for the given compression algorithm */
grpc_mdstr *grpc_compression_algorithm_mdstr(
grpc_compression_algorithm algorithm);
/** Return the metadata element (grpc-encoding: xxx) for the given compression algorithm */
grpc_mdelem *grpc_compression_encoding_mdelem(
grpc_compression_algorithm algorithm);
/** Find the compression algorithm corresponding to the given mdstr; returns
* GRPC_COMPRESS_ALGORITHMS_COUNT on failure */
grpc_compression_algorithm grpc_compression_algorithm_from_mdstr(
grpc_mdstr *str);
#endif /* GRPC_INTERNAL_CORE_COMPRESSION_ALGORITHM_METADATA_H */

@ -69,8 +69,8 @@ static int zlib_body(z_stream* zs, gpr_slice_buffer* input,
zs->next_out = GPR_SLICE_START_PTR(outbuf);
}
r = flate(zs, flush);
if (r == Z_STREAM_ERROR) {
gpr_log(GPR_INFO, "zlib: stream error");
if (r < 0 && r != Z_BUF_ERROR /* not fatal */) {
gpr_log(GPR_INFO, "zlib error (%d)", r);
goto error;
}
} while (zs->avail_out == 0);
@ -91,6 +91,12 @@ error:
return 0;
}
static void* zalloc_gpr(void* opaque, unsigned int items, unsigned int size) {
return gpr_malloc(items * size);
}
static void zfree_gpr(void* opaque, void* address) { gpr_free(address); }
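/* Wiring zalloc_gpr/zfree_gpr into each z_stream below routes zlib's
   internal allocations through gpr_malloc/gpr_free instead of raw
   malloc/free. */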
static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
int gzip) {
z_stream zs;
@ -99,12 +105,11 @@ static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
size_t count_before = output->count;
size_t length_before = output->length;
memset(&zs, 0, sizeof(zs));
zs.zalloc = zalloc_gpr;
zs.zfree = zfree_gpr;
r = deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 | (gzip ? 16 : 0),
8, Z_DEFAULT_STRATEGY);
if (r != Z_OK) {
gpr_log(GPR_ERROR, "deflateInit2 returns %d", r);
return 0;
}
GPR_ASSERT(r == Z_OK);
r = zlib_body(&zs, input, output, deflate) && output->length < input->length;
if (!r) {
for (i = count_before; i < output->count; i++) {
@ -125,11 +130,10 @@ static int zlib_decompress(gpr_slice_buffer* input, gpr_slice_buffer* output,
size_t count_before = output->count;
size_t length_before = output->length;
memset(&zs, 0, sizeof(zs));
zs.zalloc = zalloc_gpr;
zs.zfree = zfree_gpr;
r = inflateInit2(&zs, 15 | (gzip ? 16 : 0));
if (r != Z_OK) {
gpr_log(GPR_ERROR, "inflateInit2 returns %d", r);
return 0;
}
GPR_ASSERT(r == Z_OK);
r = zlib_body(&zs, input, output, inflate);
if (!r) {
for (i = count_before; i < output->count; i++) {
@ -150,8 +154,8 @@ static int copy(gpr_slice_buffer* input, gpr_slice_buffer* output) {
return 1;
}
int compress_inner(grpc_compression_algorithm algorithm,
gpr_slice_buffer* input, gpr_slice_buffer* output) {
static int compress_inner(grpc_compression_algorithm algorithm,
gpr_slice_buffer* input, gpr_slice_buffer* output) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:
/* the fallback path always needs to be sent uncompressed: we simply

@ -53,6 +53,7 @@ typedef struct {
size_t next_address;
grpc_endpoint *ep;
char *host;
char *ssl_host_override;
gpr_timespec deadline;
int have_read_byte;
const grpc_httpcli_handshaker *handshaker;
@ -106,6 +107,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
}
gpr_slice_unref(req->request_text);
gpr_free(req->host);
gpr_free(req->ssl_host_override);
grpc_iomgr_unregister_object(&req->iomgr_obj);
gpr_slice_buffer_destroy(&req->incoming);
gpr_slice_buffer_destroy(&req->outgoing);
@ -180,8 +182,10 @@ static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
next_address(exec_ctx, req);
return;
}
req->handshaker->handshake(exec_ctx, req, req->ep, req->host,
on_handshake_done);
req->handshaker->handshake(
exec_ctx, req, req->ep,
req->ssl_host_override ? req->ssl_host_override : req->host,
on_handshake_done);
}
static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req) {
@ -231,6 +235,7 @@ static void internal_request_begin(
gpr_slice_buffer_init(&req->outgoing);
grpc_iomgr_register_object(&req->iomgr_obj, name);
req->host = gpr_strdup(request->host);
req->ssl_host_override = gpr_strdup(request->ssl_host_override);
grpc_pollset_set_add_pollset(exec_ctx, &req->context->pollset_set,
req->pollset);

@ -74,6 +74,8 @@ extern const grpc_httpcli_handshaker grpc_httpcli_ssl;
typedef struct grpc_httpcli_request {
/* The host name to connect to */
char *host;
/* The host to verify in the SSL handshake (or NULL) */
char *ssl_host_override;
/* The path of the resource to fetch */
char *path;
/* Additional headers: count and key/values; the following are supplied

@ -68,7 +68,7 @@ static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
tsi_result result = TSI_OK;
tsi_handshaker *handshaker;
if (c->handshaker_factory == NULL) {
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
return;
}
result = tsi_ssl_handshaker_factory_create_handshaker(
@ -76,7 +76,7 @@ static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
@ -149,7 +149,6 @@ typedef struct {
static void on_secure_transport_setup_done(grpc_exec_ctx *exec_ctx, void *rp,
grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
on_done_closure *c = rp;
if (status != GRPC_SECURITY_OK) {

@ -39,18 +39,17 @@ void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
closure->cb_arg = cb_arg;
closure->next = NULL;
closure->final_data = 0;
}
void grpc_closure_list_add(grpc_closure_list *closure_list,
grpc_closure *closure, int success) {
if (closure == NULL) return;
closure->next = NULL;
closure->success = success;
closure->final_data = (success != 0);
if (closure_list->head == NULL) {
closure_list->head = closure;
} else {
closure_list->tail->next = closure;
closure_list->tail->final_data |= (gpr_uintptr)closure;
}
closure_list->tail = closure;
}
@ -66,22 +65,12 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
if (dst->head == NULL) {
*dst = *src;
} else {
dst->tail->next = src->head;
dst->tail->final_data |= (gpr_uintptr)src->head;
dst->tail = src->tail;
}
src->head = src->tail = NULL;
}
grpc_closure *grpc_closure_list_pop(grpc_closure_list *list) {
grpc_closure *head;
if (list->head == NULL) {
return NULL;
}
head = list->head;
list->head = list->head->next;
return head;
}
typedef struct {
grpc_iomgr_cb_func cb;
void *cb_arg;
@ -103,3 +92,7 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
return &wc->wrapper;
}
grpc_closure *grpc_closure_next(grpc_closure *closure) {
return (grpc_closure *)(closure->final_data & ~(gpr_uintptr)1);
}
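/* A self-contained sketch of the tagging scheme described in closure.h:
   grpc_closure objects are word-aligned, so the low bit of their address is
   free to carry the success flag while the remaining bits hold the next
   pointer. */
static void tag_sketch(grpc_closure *prev, grpc_closure *next, int success) {
  next->final_data = (gpr_uintptr)(success != 0); /* success in the low bit */
  prev->final_data |= (gpr_uintptr)next;          /* next pointer above it */
}
static int tagged_success_sketch(grpc_closure *c) {
  return (int)(c->final_data & 1); /* as read back in grpc_exec_ctx_flush */
}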

@ -34,7 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H
#define GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H
#include <stddef.h>
#include <grpc/support/port_platform.h>
struct grpc_closure;
typedef struct grpc_closure grpc_closure;
@ -64,13 +64,10 @@ struct grpc_closure {
/** Arguments to be passed to "cb". */
void *cb_arg;
/** Internal. A boolean indication to "cb" on the state of the iomgr.
* For instance, closures created during a shutdown would have this field set
* to false. */
int success;
/**< Internal. Do not touch */
struct grpc_closure *next;
/** Once enqueued, contains in the lower bit the success of the closure,
and in the upper bits the pointer to the next closure in the list.
Before enqueuing for execution, this is usable for scratch data. */
gpr_uintptr final_data;
};
/** Initializes \a closure with \a cb and \a cb_arg. */
@ -91,10 +88,10 @@ void grpc_closure_list_add(grpc_closure_list *list, grpc_closure *closure,
/** append all closures from \a src to \a dst and empty \a src. */
void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
/** pop (return and remove) the head closure from \a list. */
grpc_closure *grpc_closure_list_pop(grpc_closure_list *list);
/** return whether \a list is empty. */
int grpc_closure_list_empty(grpc_closure_list list);
/** return the next closure in the queue following \a closure */
grpc_closure *grpc_closure_next(grpc_closure *closure);
#endif /* GRPC_INTERNAL_CORE_IOMGR_CLOSURE_H */

@ -36,6 +36,7 @@
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/endpoint_pair.h"
#include "src/core/iomgr/socket_utils_posix.h"
#include <errno.h>
#include <fcntl.h>
@ -56,6 +57,8 @@ static void create_sockets(int sv[2]) {
GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
flags = fcntl(sv[1], F_GETFL, 0);
GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[0]));
GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]));
}
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,

@ -44,10 +44,11 @@ int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next;
int success = (int)(c->final_data & 1);
grpc_closure *next = (grpc_closure *)(c->final_data & ~(gpr_uintptr)1);
did_something++;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
c->cb(exec_ctx, c->cb_arg, c->success);
c->cb(exec_ctx, c->cb_arg, success);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
}

@ -63,8 +63,6 @@ void grpc_executor_init() {
/* thread body */
static void closure_exec_thread_func(void *ignored) {
grpc_closure *closure;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (1) {
gpr_mu_lock(&g_executor.mu);
@ -72,16 +70,16 @@ static void closure_exec_thread_func(void *ignored) {
gpr_mu_unlock(&g_executor.mu);
break;
}
closure = grpc_closure_list_pop(&g_executor.closures);
if (closure == NULL) {
if (grpc_closure_list_empty(g_executor.closures)) {
/* no more work, time to die */
GPR_ASSERT(g_executor.busy == 1);
g_executor.busy = 0;
gpr_mu_unlock(&g_executor.mu);
break;
} else {
grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures);
}
gpr_mu_unlock(&g_executor.mu);
closure->cb(&exec_ctx, closure->cb_arg, closure->success);
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
@ -125,7 +123,6 @@ void grpc_executor_enqueue(grpc_closure *closure, int success) {
void grpc_executor_shutdown() {
int pending_join;
grpc_closure *closure;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&g_executor.mu);
@ -136,9 +133,7 @@ void grpc_executor_shutdown() {
* list below because we aren't accepting new work */
/* Execute pending callbacks, some may be performing cleanups */
while ((closure = grpc_closure_list_pop(&g_executor.closures)) != NULL) {
closure->cb(&exec_ctx, closure->cb_arg, closure->success);
}
grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
if (pending_join) {

@ -43,6 +43,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#define CLOSURE_NOT_READY ((grpc_closure *)0)
@ -158,7 +159,10 @@ void grpc_fd_global_shutdown(void) {
grpc_fd *grpc_fd_create(int fd, const char *name) {
grpc_fd *r = alloc_fd(fd);
grpc_iomgr_register_object(&r->iomgr_object, name);
char *name2;
gpr_asprintf(&name2, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&r->iomgr_object, name2);
gpr_free(name2);
#ifdef GRPC_FD_REF_COUNT_DEBUG
gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name);
#endif
@ -207,14 +211,21 @@ static int has_watchers(grpc_fd *fd) {
}
void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
const char *reason) {
int *release_fd, const char *reason) {
fd->on_done_closure = on_done;
shutdown(fd->fd, SHUT_RDWR);
fd->released = release_fd != NULL;
if (!fd->released) {
shutdown(fd->fd, SHUT_RDWR);
} else {
*release_fd = fd->fd;
}
gpr_mu_lock(&fd->mu);
REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
if (!has_watchers(fd)) {
fd->closed = 1;
close(fd->fd);
if (!fd->released) {
close(fd->fd);
}
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
} else {
wake_all_watchers_locked(fd);
@ -406,7 +417,9 @@ void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
}
if (grpc_fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
fd->closed = 1;
close(fd->fd);
if (!fd->released) {
close(fd->fd);
}
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
}
gpr_mu_unlock(&fd->mu);

@ -62,6 +62,7 @@ struct grpc_fd {
gpr_mu mu;
int shutdown;
int closed;
int released;
/* The watcher list.
@ -107,11 +108,12 @@ grpc_fd *grpc_fd_create(int fd, const char *name);
/* Releases fd to be asynchronously destroyed.
on_done is called when the underlying file descriptor is definitely close()d.
If on_done is NULL, no callback will be made.
If release_fd is not NULL, it is set to fd's underlying descriptor, and that descriptor will not be closed.
Requires: *fd initialized; no outstanding notify_on_read or
notify_on_write.
MUST NOT be called with a pollset lock taken */
void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
const char *reason);
int *release_fd, const char *reason);
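/* A minimal caller sketch (names are illustrative): take ownership of the
   raw descriptor instead of letting gRPC close() it. */
static void take_fd_sketch(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  int raw_fd = -1;
  grpc_fd_orphan(exec_ctx, fd, NULL /* no on_done callback */, &raw_fd,
                 "transfer_to_app");
  /* raw_fd now holds the descriptor; gRPC will not close it. */
}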
/* Begin polling on an fd.
Registers that the given pollset is interested in this fd - so that if read
@ -168,6 +170,7 @@ void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
/* Reference counting for fds */
/*#define GRPC_FD_REF_COUNT_DEBUG*/
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file, int line);

@ -55,8 +55,13 @@
#endif
void grpc_pollset_init(grpc_pollset *pollset);
/* Begin shutting down the pollset, and call closure when done.
* GRPC_POLLSET_MU(pollset) must be held */
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure);
/** Reset the pollset to its initial state (perhaps with some cached objects);
* must have been previously shut down */
void grpc_pollset_reset(grpc_pollset *pollset);
void grpc_pollset_destroy(grpc_pollset *pollset);
/* Do some work on a pollset.

@ -47,21 +47,13 @@
#include "src/core/support/block_annotate.h"
#include "src/core/profiling/timers.h"
typedef struct wakeup_fd_hdl {
grpc_wakeup_fd wakeup_fd;
struct wakeup_fd_hdl *next;
} wakeup_fd_hdl;
typedef struct {
grpc_pollset *pollset;
grpc_fd *fd;
grpc_closure closure;
} delayed_add;
typedef struct {
int epoll_fd;
wakeup_fd_hdl *free_wakeup_fds;
} pollset_hdr;
typedef struct { int epoll_fd; } pollset_hdr;
static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
@ -131,26 +123,6 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
}
}
static void multipoll_with_epoll_pollset_del_fd(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
grpc_fd *fd,
int and_unlock_pollset) {
pollset_hdr *h = pollset->data.ptr;
int err;
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
/* Note that this can race with concurrent poll, but that should be fine since
* at worst it creates a spurious read event on a reused grpc_fd object. */
err = epoll_ctl(h->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
if (err < 0) {
gpr_log(GPR_ERROR, "epoll_ctl del for %d failed: %s", fd->fd,
strerror(errno));
}
}
/* TODO(klempner): We probably want to turn this down a bit */
#define GRPC_EPOLL_MAX_EVENTS 1000
@ -174,7 +146,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);
pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
pfds[0].events = POLLIN;
pfds[0].revents = 0;
pfds[1].fd = h->epoll_fd;
@ -197,7 +169,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
/* do nothing */
} else {
if (pfds[0].revents) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
}
if (pfds[1].revents) {
do {
@ -243,7 +215,7 @@ static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
multipoll_with_epoll_pollset_add_fd, multipoll_with_epoll_pollset_del_fd,
multipoll_with_epoll_pollset_add_fd,
multipoll_with_epoll_pollset_maybe_work_and_unlock,
multipoll_with_epoll_pollset_finish_shutdown,
multipoll_with_epoll_pollset_destroy};

@ -82,23 +82,6 @@ exit:
}
}
static void multipoll_with_poll_pollset_del_fd(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
grpc_fd *fd,
int and_unlock_pollset) {
/* will get removed next poll cycle */
pollset_hdr *h = pollset->data.ptr;
if (h->del_count == h->del_capacity) {
h->del_capacity = GPR_MAX(h->del_capacity + 8, h->del_count * 3 / 2);
h->dels = gpr_realloc(h->dels, sizeof(grpc_fd *) * h->del_capacity);
}
h->dels[h->del_count++] = fd;
GRPC_FD_REF(fd, "multipoller_del");
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void multipoll_with_poll_pollset_maybe_work_and_unlock(
grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline, gpr_timespec now) {
@ -124,7 +107,7 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
pfds[0].events = POLLIN;
pfds[0].revents = 0;
pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
pfds[1].events = POLLIN;
pfds[1].revents = 0;
for (i = 0; i < h->fd_count; i++) {
@ -174,7 +157,7 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
if (pfds[1].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
}
for (i = 2; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {
@ -212,7 +195,7 @@ static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable multipoll_with_poll_pollset = {
multipoll_with_poll_pollset_add_fd, multipoll_with_poll_pollset_del_fd,
multipoll_with_poll_pollset_add_fd,
multipoll_with_poll_pollset_maybe_work_and_unlock,
multipoll_with_poll_pollset_finish_shutdown,
multipoll_with_poll_pollset_destroy};

@ -111,7 +111,7 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
for (specific_worker = p->root_worker.next;
specific_worker != &p->root_worker;
specific_worker = specific_worker->next) {
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
p->kicked_without_pollers = 1;
GPR_TIMER_END("grpc_pollset_kick_ext.broadcast", 0);
@ -122,14 +122,14 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
specific_worker->reevaluate_polling_on_wakeup = 1;
}
specific_worker->kicked_specifically = 1;
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
} else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
GPR_TIMER_MARK("kick_yoself", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = 1;
}
specific_worker->kicked_specifically = 1;
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
} else if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
@ -151,7 +151,7 @@ void grpc_pollset_kick_ext(grpc_pollset *p,
if (specific_worker != NULL) {
GPR_TIMER_MARK("finally_kick", 0);
push_back_worker(p, specific_worker);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
}
} else {
GPR_TIMER_MARK("kicked_no_pollers", 0);
@ -177,9 +177,9 @@ void grpc_pollset_global_init(void) {
void grpc_pollset_global_shutdown(void) {
grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
grpc_wakeup_fd_global_destroy();
gpr_tls_destroy(&g_current_thread_poller);
gpr_tls_destroy(&g_current_thread_worker);
grpc_wakeup_fd_global_destroy();
}
void grpc_kick_poller(void) { grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); }
@ -194,30 +194,45 @@ void grpc_pollset_init(grpc_pollset *pollset) {
pollset->in_flight_cbs = 0;
pollset->shutting_down = 0;
pollset->called_shutdown = 0;
pollset->kicked_without_pollers = 0;
pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL;
pollset->local_wakeup_cache = NULL;
pollset->kicked_without_pollers = 0;
become_basic_pollset(pollset, NULL);
}
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
gpr_mu_lock(&pollset->mu);
gpr_mu_unlock(&pollset->mu);
#endif
void grpc_pollset_destroy(grpc_pollset *pollset) {
GPR_ASSERT(pollset->in_flight_cbs == 0);
GPR_ASSERT(!grpc_pollset_has_workers(pollset));
GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
pollset->vtable->destroy(pollset);
gpr_mu_destroy(&pollset->mu);
while (pollset->local_wakeup_cache) {
grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
gpr_free(pollset->local_wakeup_cache);
pollset->local_wakeup_cache = next;
}
}
void grpc_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
void grpc_pollset_reset(grpc_pollset *pollset) {
GPR_ASSERT(pollset->shutting_down);
GPR_ASSERT(pollset->in_flight_cbs == 0);
GPR_ASSERT(!grpc_pollset_has_workers(pollset));
GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
pollset->vtable->destroy(pollset);
pollset->shutting_down = 0;
pollset->called_shutdown = 0;
pollset->kicked_without_pollers = 0;
become_basic_pollset(pollset, NULL);
}
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->del_fd(exec_ctx, pollset, fd, 1);
pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
our lock - meaning that if the unlocking flag passed to add_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
@ -244,13 +259,19 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* this must happen before we (potentially) drop pollset->mu */
worker->next = worker->prev = NULL;
worker->reevaluate_polling_on_wakeup = 0;
if (pollset->local_wakeup_cache != NULL) {
worker->wakeup_fd = pollset->local_wakeup_cache;
pollset->local_wakeup_cache = worker->wakeup_fd->next;
} else {
worker->wakeup_fd = gpr_malloc(sizeof(*worker->wakeup_fd));
grpc_wakeup_fd_init(&worker->wakeup_fd->fd);
}
worker->kicked_specifically = 0;
/* TODO(ctiller): pool these */
grpc_wakeup_fd_init(&worker->wakeup_fd);
/* If there's work waiting for the pollset to be idle, and the
pollset is idle, then do that work */
if (!grpc_pollset_has_workers(pollset) &&
!grpc_closure_list_empty(pollset->idle_jobs)) {
GPR_TIMER_MARK("grpc_pollset_work.idle_jobs", 0);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
goto done;
}
@ -259,16 +280,19 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
May update deadline to ensure timely wakeups.
TODO(ctiller): can this work be localized? */
if (grpc_timer_check(exec_ctx, now, &deadline)) {
GPR_TIMER_MARK("grpc_pollset_work.alarm_triggered", 0);
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
}
/* If we're shutting down then we don't execute any extended work */
if (pollset->shutting_down) {
GPR_TIMER_MARK("grpc_pollset_work.shutting_down", 0);
goto done;
}
/* Give do_promote priority so we don't starve it out */
if (pollset->in_flight_cbs) {
GPR_TIMER_MARK("grpc_pollset_work.in_flight_cbs", 0);
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
@ -293,6 +317,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
locked = 0;
gpr_tls_set(&g_current_thread_poller, 0);
} else {
GPR_TIMER_MARK("grpc_pollset_work.kicked_without_pollers", 0);
pollset->kicked_without_pollers = 0;
}
/* Finished execution - start cleaning up.
@ -323,7 +348,10 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
remove_worker(pollset, worker);
gpr_tls_set(&g_current_thread_worker, 0);
}
grpc_wakeup_fd_destroy(&worker->wakeup_fd);
/* release wakeup fd to the local pool */
worker->wakeup_fd->next = pollset->local_wakeup_cache;
pollset->local_wakeup_cache = worker->wakeup_fd;
/* check shutdown conditions */
if (pollset->shutting_down) {
if (grpc_pollset_has_workers(pollset)) {
grpc_pollset_kick(pollset, NULL);
@ -338,8 +366,8 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
} else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
@ -349,35 +377,20 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
int call_shutdown = 0;
gpr_mu_lock(&pollset->mu);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
!grpc_pollset_has_workers(pollset)) {
pollset->called_shutdown = 1;
call_shutdown = 1;
}
pollset->shutdown_done = closure;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!grpc_pollset_has_workers(pollset)) {
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
}
pollset->shutdown_done = closure;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
gpr_mu_unlock(&pollset->mu);
if (call_shutdown) {
if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
!grpc_pollset_has_workers(pollset)) {
pollset->called_shutdown = 1;
finish_shutdown(exec_ctx, pollset);
}
}
void grpc_pollset_destroy(grpc_pollset *pollset) {
GPR_ASSERT(pollset->shutting_down);
GPR_ASSERT(pollset->in_flight_cbs == 0);
GPR_ASSERT(!grpc_pollset_has_workers(pollset));
pollset->vtable->destroy(pollset);
gpr_mu_destroy(&pollset->mu);
}
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now) {
gpr_timespec timeout;
@ -520,19 +533,6 @@ exit:
}
}
static void basic_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd, int and_unlock_pollset) {
GPR_ASSERT(fd);
if (fd == pollset->data.ptr) {
GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
pollset->data.ptr = NULL;
}
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
grpc_pollset_worker *worker,
@ -557,7 +557,7 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
pfd[0].events = POLLIN;
pfd[0].revents = 0;
pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
pfd[1].events = POLLIN;
pfd[1].revents = 0;
nfds = 2;
@ -586,7 +586,9 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("poll", 0);
if (r < 0) {
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
if (errno != EINTR) {
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
}
if (fd) {
grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
}
@ -599,7 +601,7 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
if (pfd[1].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
}
if (nfds > 2) {
grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
@ -622,9 +624,8 @@ static void basic_pollset_destroy(grpc_pollset *pollset) {
}
static const grpc_pollset_vtable basic_pollset = {
basic_pollset_add_fd, basic_pollset_del_fd,
basic_pollset_maybe_work_and_unlock, basic_pollset_destroy,
basic_pollset_destroy};
basic_pollset_add_fd, basic_pollset_maybe_work_and_unlock,
basic_pollset_destroy, basic_pollset_destroy};
static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
pollset->vtable = &basic_pollset;

@ -48,8 +48,13 @@ typedef struct grpc_pollset_vtable grpc_pollset_vtable;
use the struct tag */
struct grpc_fd;
typedef struct grpc_cached_wakeup_fd {
grpc_wakeup_fd fd;
struct grpc_cached_wakeup_fd *next;
} grpc_cached_wakeup_fd;
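The cache is a plain singly linked free list: grpc_pollset_work pops a wakeup fd on entry and pushes it back on exit, so grpc_wakeup_fd_init/destroy are paid once per cached entry rather than once per work call. A sketch of the pop/push pair that the .c changes inline (illustrative helpers; pollset->mu held):
static grpc_cached_wakeup_fd *wakeup_cache_pop(grpc_pollset *pollset) {
  grpc_cached_wakeup_fd *w = pollset->local_wakeup_cache;
  if (w != NULL) {
    pollset->local_wakeup_cache = w->next;
  } else {
    w = gpr_malloc(sizeof(*w));
    grpc_wakeup_fd_init(&w->fd);
  }
  return w;
}
static void wakeup_cache_push(grpc_pollset *pollset, grpc_cached_wakeup_fd *w) {
  /* entries are finally destroyed in grpc_pollset_destroy */
  w->next = pollset->local_wakeup_cache;
  pollset->local_wakeup_cache = w;
}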
typedef struct grpc_pollset_worker {
grpc_wakeup_fd wakeup_fd;
grpc_cached_wakeup_fd *wakeup_fd;
int reevaluate_polling_on_wakeup;
int kicked_specifically;
struct grpc_pollset_worker *next;
@ -74,13 +79,13 @@ typedef struct grpc_pollset {
int fd;
void *ptr;
} data;
/* Local cache of eventfds for workers */
grpc_cached_wakeup_fd *local_wakeup_cache;
} grpc_pollset;
struct grpc_pollset_vtable {
void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd, int and_unlock_pollset);
void (*del_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd, int and_unlock_pollset);
void (*maybe_work_and_unlock)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker,
gpr_timespec deadline, gpr_timespec now);
@ -93,10 +98,6 @@ struct grpc_pollset_vtable {
/* Add an fd to a pollset */
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd);
/* Force remove an fd from a pollset (normally they are removed on the next
poll after an fd is orphaned) */
void grpc_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd);
/* Returns the fd to listen on for kicks */
int grpc_kick_read_fd(grpc_pollset *p);

@ -49,13 +49,19 @@
#include "src/core/iomgr/pollset_set_windows.h"
#endif
void grpc_pollset_set_init(grpc_pollset_set* pollset_set);
void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set);
void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* pollset_set,
grpc_pollset* pollset);
void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* pollset_set,
grpc_pollset* pollset);
void grpc_pollset_set_init(grpc_pollset_set *pollset_set);
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set);
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item);
void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */

@ -52,9 +52,10 @@ void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
size_t i;
gpr_mu_destroy(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
GRPC_FD_UNREF(pollset_set->fds[i], "pollset");
GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
}
gpr_free(pollset_set->pollsets);
gpr_free(pollset_set->pollset_sets);
gpr_free(pollset_set->fds);
}
@ -73,7 +74,7 @@ void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
if (grpc_fd_is_orphaned(pollset_set->fds[i])) {
GRPC_FD_UNREF(pollset_set->fds[i], "pollset");
GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
} else {
grpc_pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
pollset_set->fds[j++] = pollset_set->fds[i];
@ -99,6 +100,46 @@ void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item) {
size_t i, j;
gpr_mu_lock(&bag->mu);
if (bag->pollset_set_count == bag->pollset_set_capacity) {
bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
bag->pollset_sets =
gpr_realloc(bag->pollset_sets,
bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
}
bag->pollset_sets[bag->pollset_set_count++] = item;
for (i = 0, j = 0; i < bag->fd_count; i++) {
if (grpc_fd_is_orphaned(bag->fds[i])) {
GRPC_FD_UNREF(bag->fds[i], "pollset_set");
} else {
grpc_pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
bag->fds[j++] = bag->fds[i];
}
}
bag->fd_count = j;
gpr_mu_unlock(&bag->mu);
}
void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item) {
size_t i;
gpr_mu_lock(&bag->mu);
for (i = 0; i < bag->pollset_set_count; i++) {
if (bag->pollset_sets[i] == item) {
bag->pollset_set_count--;
GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
bag->pollset_sets[bag->pollset_set_count]);
break;
}
}
gpr_mu_unlock(&bag->mu);
}
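With pollset_sets now nestable, an fd added to a bag is propagated into every contained pollset and, recursively, into every child pollset_set. A hedged sketch of the wiring (`bag` and `child` are illustrative; the functions are those declared in pollset_set.h):
static void nesting_sketch(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {
  grpc_pollset_set bag, child;
  grpc_pollset_set_init(&bag);
  grpc_pollset_set_init(&child);
  grpc_pollset_set_add_pollset(exec_ctx, &child, pollset);
  grpc_pollset_set_add_pollset_set(exec_ctx, &bag, &child);
  /* add_fd walks bag->pollsets and bag->pollset_sets, so fd ends up
     polled by `pollset` through the child set */
  grpc_pollset_set_add_fd(exec_ctx, &bag, fd);
  grpc_pollset_set_del_fd(exec_ctx, &bag, fd);
  grpc_pollset_set_del_pollset_set(exec_ctx, &bag, &child);
  grpc_pollset_set_destroy(&child);
  grpc_pollset_set_destroy(&bag);
}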
void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set, grpc_fd *fd) {
size_t i;
@ -113,6 +154,9 @@ void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
for (i = 0; i < pollset_set->pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
}
for (i = 0; i < pollset_set->pollset_set_count; i++) {
grpc_pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
}
gpr_mu_unlock(&pollset_set->mu);
}
@ -129,6 +173,9 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
break;
}
}
for (i = 0; i < pollset_set->pollset_set_count; i++) {
grpc_pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
}
gpr_mu_unlock(&pollset_set->mu);
}

@ -44,6 +44,10 @@ typedef struct grpc_pollset_set {
size_t pollset_capacity;
grpc_pollset **pollsets;
size_t pollset_set_count;
size_t pollset_set_capacity;
struct grpc_pollset_set **pollset_sets;
size_t fd_count;
size_t fd_capacity;
grpc_fd **fds;

@ -49,4 +49,12 @@ void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {}
void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* bag,
grpc_pollset_set* item) {}
void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* bag,
grpc_pollset_set* item) {}
#endif /* GPR_WINSOCK_SOCKET */

@ -35,6 +35,7 @@
#ifdef GPR_WINSOCK_SOCKET
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include "src/core/iomgr/timer_internal.h"
@ -112,7 +113,6 @@ void grpc_pollset_init(grpc_pollset *pollset) {
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
gpr_mu_lock(&grpc_polling_mu);
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
@ -120,11 +120,20 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
} else {
pollset->on_shutdown = closure;
}
gpr_mu_unlock(&grpc_polling_mu);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {}
void grpc_pollset_reset(grpc_pollset *pollset) {
GPR_ASSERT(pollset->shutting_down);
GPR_ASSERT(
!has_workers(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET));
pollset->shutting_down = 0;
pollset->is_iocp_worker = 0;
pollset->kicked_without_pollers = 0;
pollset->on_shutdown = NULL;
}
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker, gpr_timespec now,
gpr_timespec deadline) {

@ -196,7 +196,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, int success) {
finish:
if (fd != NULL) {
grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
grpc_fd_orphan(exec_ctx, fd, NULL, "tcp_client_orphan");
grpc_fd_orphan(exec_ctx, fd, NULL, NULL, "tcp_client_orphan");
fd = NULL;
}
done = (--ac->refs == 0);
@ -265,7 +265,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno));
grpc_fd_orphan(exec_ctx, fdobj, NULL, "tcp_client_connect_error");
grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
goto done;
}

@ -90,6 +90,8 @@ typedef struct {
grpc_closure *read_cb;
grpc_closure *write_cb;
grpc_closure *release_fd_cb;
int *release_fd;
grpc_closure read_closure;
grpc_closure write_closure;
@ -108,7 +110,8 @@ static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
}
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_fd_orphan(exec_ctx, tcp->em_fd, NULL, "tcp_unref_orphan");
grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
"tcp_unref_orphan");
gpr_slice_buffer_destroy(&tcp->last_read_buffer);
gpr_free(tcp->peer_string);
gpr_free(tcp);
@ -452,6 +455,8 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
tcp->fd = em_fd->fd;
tcp->read_cb = NULL;
tcp->write_cb = NULL;
tcp->release_fd_cb = NULL;
tcp->release_fd = NULL;
tcp->incoming_buffer = NULL;
tcp->slice_size = slice_size;
tcp->iov_size = 1;
@ -468,4 +473,13 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
return &tcp->base;
}
void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int *fd, grpc_closure *done) {
grpc_tcp *tcp = (grpc_tcp *)ep;
GPR_ASSERT(ep->vtable == &vtable);
tcp->release_fd = fd;
tcp->release_fd_cb = done;
TCP_UNREF(exec_ctx, tcp, "destroy");
}
#endif

@ -56,4 +56,10 @@ extern int grpc_tcp_trace;
grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
const char *peer_string);
/* Destroy the tcp endpoint without closing its fd. *fd will be set and done
* will be called when the endpoint is destroyed.
* Requires: ep must be a tcp endpoint and fd must not be NULL. */
void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int *fd, grpc_closure *done);
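A hedged usage sketch for this entry point: reclaiming a connected endpoint's descriptor (the callback and storage are illustrative; *raw_fd must outlive the endpoint):
static void on_released(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  int *raw_fd = arg;
  /* *raw_fd is now a plain descriptor the caller owns; the endpoint is
     gone and its fd was NOT close()d */
}
static void take_back_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                         int *raw_fd, grpc_closure *done) {
  grpc_closure_init(done, on_released, raw_fd);
  grpc_tcp_destroy_and_release_fd(exec_ctx, ep, raw_fd, done);
}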
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_POSIX_H */

@ -39,6 +39,9 @@
/* Forward decl of grpc_tcp_server */
typedef struct grpc_tcp_server grpc_tcp_server;
/* Forward decl of grpc_tcp_listener */
typedef struct grpc_tcp_listener grpc_tcp_listener;
/* Called for newly connected TCP connections. */
typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *ep);
@ -51,19 +54,17 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
grpc_pollset **pollsets, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb, void *cb_arg);
/* Add a port to the server, returning port number on success, or negative
on failure.
/* Add a port to the server, returning the newly created listener on success,
or a null pointer on failure.
The :: and 0.0.0.0 wildcard addresses are treated identically, accepting
both IPv4 and IPv6 connections, but :: is the preferred style. This usually
creates one socket, but possibly two on systems which support IPv6
but not dualstack sockets.
For raw access to the underlying sockets, see grpc_tcp_server_get_fd(). */
but not dualstack sockets. */
/* TODO(ctiller): deprecate this, and make grpc_tcp_server_add_ports handle
all of the multiple-socket port matching logic in one place */
int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
size_t addr_len);
grpc_tcp_listener *grpc_tcp_server_add_port(grpc_tcp_server *s,
const void *addr, size_t addr_len);
/* Returns the file descriptor of the Nth listening socket on this server,
or -1 if the index is out of bounds.
@ -75,4 +76,8 @@ int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index);
void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
grpc_closure *closure);
int grpc_tcp_listener_get_port(grpc_tcp_listener *listener);
void grpc_tcp_listener_ref(grpc_tcp_listener *listener);
void grpc_tcp_listener_unref(grpc_tcp_listener *listener);
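Since add_port now hands back a listener instead of a port number, callers read the bound port through the accessor. A hedged sketch (address setup elided):
static int bind_and_get_port(grpc_tcp_server *s, const void *addr,
                             size_t addr_len) {
  grpc_tcp_listener *l = grpc_tcp_server_add_port(s, addr, addr_len);
  if (l == NULL) return -1;             /* socket/bind/listen failed */
  return grpc_tcp_listener_get_port(l); /* kernel-assigned for wildcard 0 */
}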
#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_SERVER_H */

@ -67,14 +67,13 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#define INIT_PORT_CAP 2
#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100
static gpr_once s_init_max_accept_queue_size;
static int s_max_accept_queue_size;
/* one listening port */
typedef struct {
struct grpc_tcp_listener {
int fd;
grpc_fd *emfd;
grpc_tcp_server *server;
@ -84,9 +83,18 @@ typedef struct {
struct sockaddr_un un;
} addr;
size_t addr_len;
int port;
grpc_closure read_closure;
grpc_closure destroyed_closure;
} server_port;
gpr_refcount refs;
struct grpc_tcp_listener *next;
/* When we add a listener, more than one can be created, mainly because of
IPv6. A sibling will still be in the normal list, but will be flagged
as such. Any action, such as ref or unref, will affect all of the
siblings in the list. */
struct grpc_tcp_listener *sibling;
int is_sibling;
};
static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) {
struct stat st;
@ -112,10 +120,9 @@ struct grpc_tcp_server {
/* is this server shutting down? (boolean) */
int shutdown;
/* all listening ports */
server_port *ports;
size_t nports;
size_t port_capacity;
/* linked list of server ports */
grpc_tcp_listener *head;
unsigned nports;
/* shutdown callback */
grpc_closure *shutdown_complete;
@ -134,9 +141,8 @@ grpc_tcp_server *grpc_tcp_server_create(void) {
s->shutdown = 0;
s->on_accept_cb = NULL;
s->on_accept_cb_arg = NULL;
s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
s->head = NULL;
s->nports = 0;
s->port_capacity = INIT_PORT_CAP;
return s;
}
@ -145,7 +151,12 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_mu_destroy(&s->mu);
gpr_free(s->ports);
while (s->head) {
grpc_tcp_listener *sp = s->head;
s->head = sp->next;
grpc_tcp_listener_unref(sp);
}
gpr_free(s);
}
@ -166,8 +177,6 @@ static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, int success) {
events will be received on them - at this point it's safe to destroy
things */
static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
size_t i;
/* delete ALL the things */
gpr_mu_lock(&s->mu);
@ -176,15 +185,15 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
return;
}
if (s->nports) {
for (i = 0; i < s->nports; i++) {
server_port *sp = &s->ports[i];
if (s->head) {
grpc_tcp_listener *sp;
for (sp = s->head; sp; sp = sp->next) {
if (sp->addr.sockaddr.sa_family == AF_UNIX) {
unlink_if_unix_domain_socket(&sp->addr.un);
}
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure,
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
"tcp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
@ -196,7 +205,6 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
grpc_closure *closure) {
size_t i;
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
@ -206,8 +214,9 @@ void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
/* shutdown all fd's */
if (s->active_ports) {
for (i = 0; i < s->nports; i++) {
grpc_fd_shutdown(exec_ctx, s->ports[i].emfd);
grpc_tcp_listener *sp;
for (sp = s->head; sp; sp = sp->next) {
grpc_fd_shutdown(exec_ctx, sp->emfd);
}
gpr_mu_unlock(&s->mu);
} else {
@ -298,7 +307,7 @@ error:
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
server_port *sp = arg;
grpc_tcp_listener *sp = arg;
grpc_fd *fdobj;
size_t i;
@ -364,9 +373,10 @@ error:
}
}
static int add_socket_to_server(grpc_tcp_server *s, int fd,
const struct sockaddr *addr, size_t addr_len) {
server_port *sp;
static grpc_tcp_listener *add_socket_to_server(grpc_tcp_server *s, int fd,
const struct sockaddr *addr,
size_t addr_len) {
grpc_tcp_listener *sp = NULL;
int port;
char *addr_str;
char *name;
@ -376,32 +386,33 @@ static int add_socket_to_server(grpc_tcp_server *s, int fd,
grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
gpr_mu_lock(&s->mu);
s->nports++;
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
/* append it to the list under a lock */
if (s->nports == s->port_capacity) {
s->port_capacity *= 2;
s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
}
sp = &s->ports[s->nports++];
sp = gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = s->head;
s->head = sp;
sp->server = s;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
memcpy(sp->addr.untyped, addr, addr_len);
sp->addr_len = addr_len;
sp->port = port;
sp->is_sibling = 0;
sp->sibling = NULL;
gpr_ref_init(&sp->refs, 1);
GPR_ASSERT(sp->emfd);
gpr_mu_unlock(&s->mu);
gpr_free(addr_str);
gpr_free(name);
}
return port;
return sp;
}
int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
size_t addr_len) {
int allocated_port1 = -1;
int allocated_port2 = -1;
unsigned i;
grpc_tcp_listener *grpc_tcp_server_add_port(grpc_tcp_server *s,
const void *addr, size_t addr_len) {
grpc_tcp_listener *sp;
grpc_tcp_listener *sp2 = NULL;
int fd;
grpc_dualstack_mode dsmode;
struct sockaddr_in6 addr6_v4mapped;
@ -420,9 +431,9 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
for (i = 0; i < s->nports; i++) {
for (sp = s->head; sp; sp = sp->next) {
sockname_len = sizeof(sockname_temp);
if (0 == getsockname(s->ports[i].fd, (struct sockaddr *)&sockname_temp,
if (0 == getsockname(sp->fd, (struct sockaddr *)&sockname_temp,
&sockname_len)) {
port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
if (port > 0) {
@ -436,6 +447,8 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
}
}
sp = NULL;
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
addr = (const struct sockaddr *)&addr6_v4mapped;
addr_len = sizeof(addr6_v4mapped);
@ -449,14 +462,15 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
addr = (struct sockaddr *)&wild6;
addr_len = sizeof(wild6);
fd = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode);
allocated_port1 = add_socket_to_server(s, fd, addr, addr_len);
sp = add_socket_to_server(s, fd, addr, addr_len);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
}
/* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
if (port == 0 && allocated_port1 > 0) {
grpc_sockaddr_set_port((struct sockaddr *)&wild4, allocated_port1);
if (port == 0 && sp != NULL) {
grpc_sockaddr_set_port((struct sockaddr *)&wild4, sp->port);
sp2 = sp;
}
addr = (struct sockaddr *)&wild4;
addr_len = sizeof(wild4);
@ -471,22 +485,32 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
addr = (struct sockaddr *)&addr4_copy;
addr_len = sizeof(addr4_copy);
}
allocated_port2 = add_socket_to_server(s, fd, addr, addr_len);
sp = add_socket_to_server(s, fd, addr, addr_len);
if (sp != NULL) sp->sibling = sp2;
if (sp2 != NULL) sp2->is_sibling = 1;
done:
gpr_free(allocated_addr);
return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
return sp;
}
int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned port_index) {
return (port_index < s->nports) ? s->ports[port_index].fd : -1;
grpc_tcp_listener *sp;
for (sp = s->head; sp && port_index != 0; sp = sp->next, port_index--)
;
if (port_index == 0 && sp) {
return sp->fd;
} else {
return -1;
}
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
grpc_pollset **pollsets, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb,
void *on_accept_cb_arg) {
size_t i, j;
size_t i;
grpc_tcp_listener *sp;
GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb);
@ -495,17 +519,44 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
s->on_accept_cb_arg = on_accept_cb_arg;
s->pollsets = pollsets;
s->pollset_count = pollset_count;
for (i = 0; i < s->nports; i++) {
for (j = 0; j < pollset_count; j++) {
grpc_pollset_add_fd(exec_ctx, pollsets[j], s->ports[i].emfd);
for (sp = s->head; sp; sp = sp->next) {
for (i = 0; i < pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
s->ports[i].read_closure.cb = on_read;
s->ports[i].read_closure.cb_arg = &s->ports[i];
grpc_fd_notify_on_read(exec_ctx, s->ports[i].emfd,
&s->ports[i].read_closure);
sp->read_closure.cb = on_read;
sp->read_closure.cb_arg = sp;
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
}
int grpc_tcp_listener_get_port(grpc_tcp_listener *listener) {
if (listener != NULL) {
grpc_tcp_listener *sp = listener;
return sp->port;
} else {
return 0;
}
}
void grpc_tcp_listener_ref(grpc_tcp_listener *listener) {
grpc_tcp_listener *sp = listener;
gpr_ref(&sp->refs);
}
void grpc_tcp_listener_unref(grpc_tcp_listener *listener) {
grpc_tcp_listener *sp = listener;
if (sp->is_sibling) return;
if (gpr_unref(&sp->refs)) {
grpc_tcp_listener *sibling = sp->sibling;
while (sibling) {
sp = sibling;
sibling = sp->sibling;
gpr_free(sp);
}
gpr_free(listener);
}
}
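This encodes the sibling rule documented in the struct above: unref is a no-op on a sibling, and the head listener's final unref frees the entire chain. A short ownership sketch (illustrative; assumes a non-dualstack system where one add_port call yields two linked listeners):
static void ownership_sketch(grpc_tcp_server *s, const void *addr,
                             size_t addr_len) {
  grpc_tcp_listener *head = grpc_tcp_server_add_port(s, addr, addr_len);
  grpc_tcp_listener_ref(head);   /* one refcount covers the whole chain */
  grpc_tcp_listener_unref(head); /* drops the extra ref; chain survives */
  /* an unref on the sibling itself returns immediately (is_sibling);
     the head's last unref walks sp->sibling and frees each node */
}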
#endif

@ -35,7 +35,8 @@
#ifdef GPR_WINSOCK_SOCKET
#define _GNU_SOURCE
#include <io.h>
#include "src/core/iomgr/sockaddr_utils.h"
#include <grpc/support/alloc.h>
@ -51,25 +52,29 @@
#include "src/core/iomgr/tcp_server.h"
#include "src/core/iomgr/tcp_windows.h"
#define INIT_PORT_CAP 2
#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100
/* one listening port */
typedef struct server_port {
struct grpc_tcp_listener {
/* This seemingly magic number comes from AcceptEx's documentation: each
address buffer needs at least 16 extra bytes at its end. */
gpr_uint8 addresses[(sizeof(struct sockaddr_in6) + 16) * 2];
/* This will hold the socket for the next accept. */
SOCKET new_socket;
/* The listener winsocked. */
/* The listener winsocket. */
grpc_winsocket *socket;
/* The actual TCP port number. */
int port;
grpc_tcp_server *server;
/* The cached AcceptEx for that port. */
LPFN_ACCEPTEX AcceptEx;
int shutting_down;
/* closure for socket notification of accept being ready */
grpc_closure on_accept;
} server_port;
gpr_refcount refs;
/* linked list */
struct grpc_tcp_listener *next;
};
/* the overall server */
struct grpc_tcp_server {
@ -82,10 +87,8 @@ struct grpc_tcp_server {
/* active port count: how many ports are actually still listening */
int active_ports;
/* all listening ports */
server_port *ports;
size_t nports;
size_t port_capacity;
/* linked list of server ports */
grpc_tcp_listener *head;
/* shutdown callback */
grpc_closure *shutdown_complete;
@ -99,9 +102,7 @@ grpc_tcp_server *grpc_tcp_server_create(void) {
s->active_ports = 0;
s->on_accept_cb = NULL;
s->on_accept_cb_arg = NULL;
s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
s->nports = 0;
s->port_capacity = INIT_PORT_CAP;
s->head = NULL;
s->shutdown_complete = NULL;
return s;
}
@ -109,26 +110,26 @@ grpc_tcp_server *grpc_tcp_server_create(void) {
static void dont_care_about_shutdown_completion(void *arg) {}
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
size_t i;
grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1);
/* Now that the accepts have been aborted, we can destroy the sockets.
The IOCP won't get notified on these, so we can flag them as already
closed by the system. */
for (i = 0; i < s->nports; i++) {
server_port *sp = &s->ports[i];
while (s->head) {
grpc_tcp_listener *sp = s->head;
s->head = sp->next;
sp->next = NULL;
grpc_winsocket_destroy(sp->socket);
grpc_tcp_listener_unref(sp);
}
gpr_free(s->ports);
gpr_free(s);
}
/* Public function. Stops and destroys a grpc_tcp_server. */
void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
grpc_closure *shutdown_complete) {
size_t i;
int immediately_done = 0;
grpc_tcp_listener *sp;
gpr_mu_lock(&s->mu);
s->shutdown_complete = shutdown_complete;
@ -138,8 +139,7 @@ void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
if (s->active_ports == 0) {
immediately_done = 1;
}
for (i = 0; i < s->nports; i++) {
server_port *sp = &s->ports[i];
for (sp = s->head; sp; sp = sp->next) {
sp->shutting_down = 1;
grpc_winsocket_shutdown(sp->socket);
}
@ -199,7 +199,7 @@ error:
}
static void decrement_active_ports_and_notify(grpc_exec_ctx *exec_ctx,
server_port *sp) {
grpc_tcp_listener *sp) {
int notify = 0;
sp->shutting_down = 0;
gpr_mu_lock(&sp->server->mu);
@ -216,7 +216,7 @@ static void decrement_active_ports_and_notify(grpc_exec_ctx *exec_ctx,
/* In order to do an async accept, we need to create a socket first which
will be the one assigned to the new incoming connection. */
static void start_accept(grpc_exec_ctx *exec_ctx, server_port *port) {
static void start_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *port) {
SOCKET sock = INVALID_SOCKET;
char *message;
char *utf8_message;
@ -276,7 +276,7 @@ failure:
/* Event manager callback when reads are ready. */
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, int from_iocp) {
server_port *sp = arg;
grpc_tcp_listener *sp = arg;
SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info *info = &sp->socket->read_info;
grpc_endpoint *ep = NULL;
@ -351,16 +351,17 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, int from_iocp) {
start_accept(exec_ctx, sp);
}
static int add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
const struct sockaddr *addr, size_t addr_len) {
server_port *sp;
static grpc_tcp_listener *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
const struct sockaddr *addr,
size_t addr_len) {
grpc_tcp_listener *sp = NULL;
int port;
int status;
GUID guid = WSAID_ACCEPTEX;
DWORD ioctl_num_bytes;
LPFN_ACCEPTEX AcceptEx;
if (sock == INVALID_SOCKET) return -1;
if (sock == INVALID_SOCKET) return NULL;
/* We need to grab the AcceptEx pointer for that port, as it may be
interface-dependent. We'll cache it to avoid doing that again. */
@ -373,37 +374,34 @@ static int add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message);
gpr_free(utf8_message);
closesocket(sock);
return -1;
return NULL;
}
port = prepare_socket(sock, addr, addr_len);
if (port >= 0) {
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
/* append it to the list under a lock */
if (s->nports == s->port_capacity) {
/* too many ports, and we need to store their address in a closure */
/* TODO(ctiller): make server_port a linked list */
abort();
}
sp = &s->ports[s->nports++];
sp = gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = s->head;
s->head = sp;
sp->server = s;
sp->socket = grpc_winsocket_create(sock, "listener");
sp->shutting_down = 0;
sp->AcceptEx = AcceptEx;
sp->new_socket = INVALID_SOCKET;
sp->port = port;
gpr_ref_init(&sp->refs, 1);
grpc_closure_init(&sp->on_accept, on_accept, sp);
GPR_ASSERT(sp->socket);
gpr_mu_unlock(&s->mu);
}
return port;
return sp;
}
int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
size_t addr_len) {
int allocated_port = -1;
unsigned i;
grpc_tcp_listener *grpc_tcp_server_add_port(grpc_tcp_server *s,
const void *addr, size_t addr_len) {
grpc_tcp_listener *sp;
SOCKET sock;
struct sockaddr_in6 addr6_v4mapped;
struct sockaddr_in6 wildcard;
@ -415,9 +413,9 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
for (i = 0; i < s->nports; i++) {
for (sp = s->head; sp; sp = sp->next) {
sockname_len = sizeof(sockname_temp);
if (0 == getsockname(s->ports[i].socket->socket,
if (0 == getsockname(sp->socket->socket,
(struct sockaddr *)&sockname_temp, &sockname_len)) {
port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
if (port > 0) {
@ -452,33 +450,60 @@ int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
gpr_free(utf8_message);
}
allocated_port = add_socket_to_server(s, sock, addr, addr_len);
sp = add_socket_to_server(s, sock, addr, addr_len);
gpr_free(allocated_addr);
return allocated_port;
return sp;
}
SOCKET
grpc_tcp_server_get_socket(grpc_tcp_server *s, unsigned index) {
return (index < s->nports) ? s->ports[index].socket->socket : INVALID_SOCKET;
int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned port_index) {
grpc_tcp_listener *sp;
for (sp = s->head; sp && port_index != 0; sp = sp->next, port_index--)
;
if (port_index == 0 && sp) {
return _open_osfhandle(sp->socket->socket, 0);
} else {
return -1;
}
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
grpc_pollset **pollset, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb,
void *on_accept_cb_arg) {
size_t i;
grpc_tcp_listener *sp;
GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb);
GPR_ASSERT(s->active_ports == 0);
s->on_accept_cb = on_accept_cb;
s->on_accept_cb_arg = on_accept_cb_arg;
for (i = 0; i < s->nports; i++) {
start_accept(exec_ctx, s->ports + i);
for (sp = s->head; sp; sp = sp->next) {
start_accept(exec_ctx, sp);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
}
int grpc_tcp_listener_get_port(grpc_tcp_listener *listener) {
if (listener != NULL) {
grpc_tcp_listener *sp = listener;
return sp->port;
} else {
return 0;
}
}
void grpc_tcp_listener_ref(grpc_tcp_listener *listener) {
grpc_tcp_listener *sp = listener;
gpr_ref(&sp->refs);
}
void grpc_tcp_listener_unref(grpc_tcp_listener *listener) {
grpc_tcp_listener *sp = listener;
if (gpr_unref(&sp->refs)) {
gpr_free(listener);
}
}
#endif /* GPR_WINSOCK_SOCKET */

@ -197,7 +197,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->read_slice = gpr_slice_malloc(8192);
buffer.len = GPR_SLICE_LENGTH(tcp->read_slice);
buffer.len = (ULONG)GPR_SLICE_LENGTH(
tcp->read_slice); // we know the slice size fits in 32 bits.
buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
TCP_REF(tcp, "read");
@ -273,6 +274,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
WSABUF local_buffers[16];
WSABUF *allocated = NULL;
WSABUF *buffers = local_buffers;
size_t len;
if (tcp->shutting_down) {
grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
@ -281,19 +283,21 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
tcp->write_slices = slices;
GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
allocated = buffers;
}
for (i = 0; i < tcp->write_slices->count; i++) {
buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
GPR_ASSERT(len <= ULONG_MAX);
buffers[i].len = (ULONG)len;
buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]);
}
/* First, let's try a synchronous, non-blocking write. */
status = WSASend(socket->socket, buffers, tcp->write_slices->count,
status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
&bytes_sent, 0, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError();
@ -322,7 +326,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
operation, this time asynchronously. */
memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
status = WSASend(socket->socket, buffers, tcp->write_slices->count,
status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
&bytes_sent, 0, &socket->write_info.overlapped, NULL);
if (allocated) gpr_free(allocated);

@ -126,8 +126,8 @@ static double ts_to_dbl(gpr_timespec ts) {
static gpr_timespec dbl_to_ts(double d) {
gpr_timespec ts;
ts.tv_sec = (time_t)d;
ts.tv_nsec = (int)(1e9 * (d - (double)ts.tv_sec));
ts.tv_sec = (gpr_int64)d;
ts.tv_nsec = (gpr_int32)(1e9 * (d - (double)ts.tv_sec));
ts.clock_type = GPR_TIMESPAN;
return ts;
}
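The casts pin the conversion to gpr's fixed-width fields; the arithmetic itself is unchanged. A standalone check of the same truncation logic in plain C (illustrative only):
#include <stdint.h>
#include <stdio.h>
int main(void) {
  double d = 2.25;
  int64_t sec = (int64_t)d;                          /* truncates toward zero */
  int32_t nsec = (int32_t)(1e9 * (d - (double)sec)); /* fractional part in ns */
  printf("%lld.%09d\n", (long long)sec, nsec);       /* prints 2.250000000 */
  return 0;
}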
@ -343,11 +343,3 @@ int grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
exec_ctx, now, next,
gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0);
}
gpr_timespec grpc_timer_list_next_timeout(void) {
gpr_timespec out;
gpr_mu_lock(&g_mu);
out = g_shard_queue[0]->min_deadline;
gpr_mu_unlock(&g_mu);
return out;
}

@ -54,8 +54,6 @@ int grpc_timer_check(grpc_exec_ctx* exec_ctx, gpr_timespec now,
void grpc_timer_list_init(gpr_timespec now);
void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx);
gpr_timespec grpc_timer_list_next_timeout(void);
/* the following must be implemented by each iomgr implementation */
void grpc_kick_poller(void);

@ -38,6 +38,7 @@
#include <grpc/support/port_platform.h>
#ifdef GRPC_NEED_UDP
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/udp_server.h"
@ -179,7 +180,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
}
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure,
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
"udp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
@ -435,3 +436,4 @@ void grpc_udp_server_write(server_port *sp, const char *buffer, size_t buf_len,
}
#endif
#endif

@ -40,19 +40,17 @@
#include <stddef.h>
static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL;
int grpc_allow_specialized_wakeup_fd = 1;
void grpc_wakeup_fd_global_init(void) {
if (grpc_specialized_wakeup_fd_vtable.check_availability()) {
if (grpc_allow_specialized_wakeup_fd &&
grpc_specialized_wakeup_fd_vtable.check_availability()) {
wakeup_fd_vtable = &grpc_specialized_wakeup_fd_vtable;
} else {
wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
}
}
void grpc_wakeup_fd_global_init_force_fallback(void) {
wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
}
void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = NULL; }
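The new grpc_allow_specialized_wakeup_fd flag gives tests a second way, besides the force_fallback initializer, to pin the pipe-based implementation. A hedged sketch; both must run before any wakeup fds are created:
static void use_pipe_wakeup_fds(void) {
  grpc_allow_specialized_wakeup_fd = 0; /* init skips the specialized vtable */
  grpc_wakeup_fd_global_init();
  /* equivalently: grpc_wakeup_fd_global_init_force_fallback(); */
}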
void grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) {

@ -85,6 +85,8 @@ struct grpc_wakeup_fd {
int write_fd;
};
extern int grpc_allow_specialized_wakeup_fd;
#define GRPC_WAKEUP_FD_GET_READ_FD(fd_info) ((fd_info)->read_fd)
void grpc_wakeup_fd_init(grpc_wakeup_fd* fd_info);

@ -103,6 +103,9 @@ void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
gpr_mu_lock(&workqueue->mu);
if (grpc_closure_list_empty(workqueue->closure_list)) {
grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
}
grpc_closure_list_move(&exec_ctx->closure_list, &workqueue->closure_list);
gpr_mu_unlock(&workqueue->mu);
}
@ -115,7 +118,7 @@ static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, int success) {
/* HACK: let wakeup_fd code know that we stole the fd */
workqueue->wakeup_fd.read_fd = 0;
grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, "destroy");
grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
gpr_free(workqueue);
} else {
gpr_mu_lock(&workqueue->mu);
@ -129,8 +132,6 @@ static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, int success) {
void grpc_workqueue_push(grpc_workqueue *workqueue, grpc_closure *closure,
int success) {
closure->success = success;
closure->next = NULL;
gpr_mu_lock(&workqueue->mu);
if (grpc_closure_list_empty(workqueue->closure_list)) {
grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);

@ -35,6 +35,8 @@
#include <grpc/support/port_platform.h>
#include <grpc/support/log.h>
#include "src/core/json/json_reader.h"
static void json_reader_string_clear(grpc_json_reader *reader) {
@ -224,13 +226,13 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
reader->in_array = 1;
break;
case GRPC_JSON_TOP_LEVEL:
if (reader->depth != 0) return GRPC_JSON_INTERNAL_ERROR;
GPR_ASSERT(reader->depth == 0);
reader->in_object = 0;
reader->in_array = 0;
reader->state = GRPC_JSON_STATE_END;
break;
default:
return GRPC_JSON_INTERNAL_ERROR;
GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
}
}
break;
@ -279,8 +281,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
break;
case GRPC_JSON_STATE_OBJECT_KEY_STRING:
if (reader->unicode_high_surrogate != 0)
return GRPC_JSON_PARSE_ERROR;
GPR_ASSERT(reader->unicode_high_surrogate == 0);
if (c == '"') {
reader->state = GRPC_JSON_STATE_OBJECT_KEY_END;
json_reader_set_key(reader);
@ -461,7 +462,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
}
break;
default:
return GRPC_JSON_INTERNAL_ERROR;
GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
}
break;
@ -641,7 +642,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
case ',':
case '}':
case ']':
return GRPC_JSON_INTERNAL_ERROR;
GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
break;
default:
@ -655,5 +656,5 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
}
}
return GRPC_JSON_INTERNAL_ERROR;
GPR_UNREACHABLE_CODE(return GRPC_JSON_INTERNAL_ERROR);
}

@ -353,7 +353,7 @@ static void json_dump_recursive(grpc_json_writer *writer, grpc_json *json,
grpc_json_writer_value_raw_with_len(writer, "null", 4);
break;
default:
abort();
GPR_UNREACHABLE_CODE(abort());
}
json = json->next;
}

@ -50,60 +50,198 @@ typedef struct gpr_timer_entry {
gpr_timespec tm;
const char *tagstr;
const char *file;
int line;
short line;
char type;
gpr_uint8 important;
int thd;
} gpr_timer_entry;
#define MAX_COUNT (1024 * 1024 / sizeof(gpr_timer_entry))
#define MAX_COUNT 1000000
static __thread gpr_timer_entry g_log[MAX_COUNT];
static __thread int g_count;
typedef struct gpr_timer_log {
size_t num_entries;
struct gpr_timer_log *next;
struct gpr_timer_log *prev;
gpr_timer_entry log[MAX_COUNT];
} gpr_timer_log;
typedef struct gpr_timer_log_list {
gpr_timer_log *head;
/* valid iff head!=NULL */
gpr_timer_log *tail;
} gpr_timer_log_list;
static __thread gpr_timer_log *g_thread_log;
static gpr_once g_once_init = GPR_ONCE_INIT;
static FILE *output_file;
static const char *output_filename = "latency_trace.txt";
static pthread_mutex_t g_mu;
static pthread_cond_t g_cv;
static gpr_timer_log_list g_in_progress_logs;
static gpr_timer_log_list g_done_logs;
static int g_shutdown;
static gpr_thd_id g_writing_thread;
static __thread int g_thread_id;
static int g_next_thread_id;
static void close_output() { fclose(output_file); }
static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
if (list->head == NULL) {
list->head = list->tail = log;
log->next = log->prev = NULL;
return 1;
} else {
log->prev = list->tail;
log->next = NULL;
list->tail->next = log;
list->tail = log;
return 0;
}
}
static void init_output() {
output_file = fopen("latency_trace.txt", "w");
GPR_ASSERT(output_file);
atexit(close_output);
static gpr_timer_log *timer_log_pop_front(gpr_timer_log_list *list) {
gpr_timer_log *out = list->head;
if (out != NULL) {
list->head = out->next;
if (list->head != NULL) {
list->head->prev = NULL;
} else {
list->tail = NULL;
}
}
return out;
}
static void log_report() {
int i;
gpr_once_init(&g_once_init, init_output);
for (i = 0; i < g_count; i++) {
gpr_timer_entry *entry = &(g_log[i]);
static void timer_log_remove(gpr_timer_log_list *list, gpr_timer_log *log) {
if (log->prev == NULL) {
list->head = log->next;
if (list->head != NULL) {
list->head->prev = NULL;
}
} else {
log->prev->next = log->next;
}
if (log->next == NULL) {
list->tail = log->prev;
if (list->tail != NULL) {
list->tail->next = NULL;
}
} else {
log->next->prev = log->prev;
}
}
static void write_log(gpr_timer_log *log) {
size_t i;
if (output_file == NULL) {
output_file = fopen(output_filename, "w");
}
for (i = 0; i < log->num_entries; i++) {
gpr_timer_entry *entry = &(log->log[i]);
if (gpr_time_cmp(entry->tm, gpr_time_0(entry->tm.clock_type)) < 0) {
entry->tm = gpr_time_0(entry->tm.clock_type);
}
fprintf(output_file,
"{\"t\": %ld.%09d, \"thd\": \"%p\", \"type\": \"%c\", \"tag\": "
"{\"t\": %lld.%09d, \"thd\": \"%d\", \"type\": \"%c\", \"tag\": "
"\"%s\", \"file\": \"%s\", \"line\": %d, \"imp\": %d}\n",
entry->tm.tv_sec, entry->tm.tv_nsec,
(void *)(gpr_intptr)gpr_thd_currentid(), entry->type, entry->tagstr,
entry->file, entry->line, entry->important);
(long long)entry->tm.tv_sec, (int)entry->tm.tv_nsec, entry->thd,
entry->type, entry->tagstr, entry->file, entry->line,
entry->important);
}
}
static void writing_thread(void *unused) {
gpr_timer_log *log;
pthread_mutex_lock(&g_mu);
for (;;) {
while ((log = timer_log_pop_front(&g_done_logs)) == NULL && !g_shutdown) {
pthread_cond_wait(&g_cv, &g_mu);
}
if (log != NULL) {
pthread_mutex_unlock(&g_mu);
write_log(log);
free(log);
pthread_mutex_lock(&g_mu);
}
if (g_shutdown) {
pthread_mutex_unlock(&g_mu);
return;
}
}
}
/* Now clear out the log */
g_count = 0;
static void flush_logs(gpr_timer_log_list *list) {
gpr_timer_log *log;
while ((log = timer_log_pop_front(list)) != NULL) {
write_log(log);
free(log);
}
}
static void finish_writing() {
pthread_mutex_lock(&g_mu);
g_shutdown = 1;
pthread_cond_signal(&g_cv);
pthread_mutex_unlock(&g_mu);
gpr_thd_join(g_writing_thread);
gpr_log(GPR_INFO, "flushing logs");
pthread_mutex_lock(&g_mu);
flush_logs(&g_done_logs);
flush_logs(&g_in_progress_logs);
pthread_mutex_unlock(&g_mu);
if (output_file) {
fclose(output_file);
}
}
void gpr_timers_set_log_filename(const char *filename) {
output_filename = filename;
}
static void init_output() {
gpr_thd_options options = gpr_thd_options_default();
gpr_thd_options_set_joinable(&options);
gpr_thd_new(&g_writing_thread, writing_thread, NULL, &options);
atexit(finish_writing);
}
static void rotate_log() {
gpr_timer_log *new = malloc(sizeof(*new));
gpr_once_init(&g_once_init, init_output);
new->num_entries = 0;
pthread_mutex_lock(&g_mu);
if (g_thread_log != NULL) {
timer_log_remove(&g_in_progress_logs, g_thread_log);
if (timer_log_push_back(&g_done_logs, g_thread_log)) {
pthread_cond_signal(&g_cv);
}
} else {
g_thread_id = g_next_thread_id++;
}
timer_log_push_back(&g_in_progress_logs, new);
pthread_mutex_unlock(&g_mu);
g_thread_log = new;
}
static void gpr_timers_log_add(const char *tagstr, marker_type type,
int important, const char *file, int line) {
gpr_timer_entry *entry;
/* TODO (vpai) : Improve concurrency */
if (g_count == MAX_COUNT) {
log_report();
if (g_thread_log == NULL || g_thread_log->num_entries == MAX_COUNT) {
rotate_log();
}
entry = &g_log[g_count++];
entry = &g_thread_log->log[g_thread_log->num_entries++];
entry->tm = gpr_now(GPR_CLOCK_PRECISE);
entry->tagstr = tagstr;
entry->type = type;
entry->file = file;
entry->line = line;
entry->line = (short)line;
entry->important = important != 0;
entry->thd = g_thread_id;
}
/* Latency profiler API implementation. */
@ -131,4 +269,6 @@ void gpr_timers_global_destroy(void) {}
void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
void gpr_timers_set_log_filename(const char *filename) {}
#endif /* GRPC_BASIC_PROFILER */

@ -48,6 +48,8 @@ void gpr_timer_begin(const char *tagstr, int important, const char *file,
void gpr_timer_end(const char *tagstr, int important, const char *file,
int line);
void gpr_timers_set_log_filename(const char *filename);
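The writer opens its output lazily on first flush, so the filename should be set before the first timer event is logged. A hedged usage sketch (the path is hypothetical; only meaningful in GRPC_BASIC_PROFILER builds):
static void profiled_run(void) {
  gpr_timers_set_log_filename("/tmp/my_trace.txt");
  GPR_TIMER_MARK("run_begin", 0);
  /* ... code under measurement ... */
  GPR_TIMER_MARK("run_end", 0);
}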
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \

@ -39,12 +39,13 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/support/string.h"
#include "src/core/channel/channel_stack.h"
#include "src/core/security/security_context.h"
#include "src/core/security/security_connector.h"
#include "src/core/security/credentials.h"
#include "src/core/security/security_connector.h"
#include "src/core/security/security_context.h"
#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/transport/static_metadata.h"
#define MAX_CREDENTIALS_METADATA_COUNT 4
@ -59,8 +60,6 @@ typedef struct {
progress */
grpc_pollset *pollset;
grpc_transport_stream_op op;
size_t op_md_idx;
int sent_initial_metadata;
gpr_uint8 security_context_set;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
@ -69,11 +68,6 @@ typedef struct {
/* We can have a per-channel credentials. */
typedef struct {
grpc_channel_security_connector *security_connector;
grpc_mdctx *md_ctx;
grpc_mdstr *authority_string;
grpc_mdstr *path_string;
grpc_mdstr *error_msg_key;
grpc_mdstr *status_key;
} channel_data;
static void reset_auth_metadata_context(
@ -106,7 +100,6 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_credentials_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data;
grpc_transport_stream_op *op = &calld->op;
grpc_metadata_batch *mdb;
size_t i;
@ -117,13 +110,12 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
return;
}
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
GPR_ASSERT(op->send_initial_metadata != NULL);
mdb = op->send_initial_metadata;
for (i = 0; i < num_md; i++) {
grpc_metadata_batch_add_tail(
mdb, &calld->md_links[i],
grpc_mdelem_from_slices(gpr_slice_ref(md_elems[i].key),
gpr_slice_ref(md_elems[i].value)));
}
grpc_call_next_op(exec_ctx, elem, op);
@ -223,7 +215,6 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_linked_mdelem *l;
grpc_client_security_context *sec_ctx = NULL;
if (calld->security_context_set == 0 &&
@ -242,53 +233,41 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
chand->security_connector->base.auth_context, "client_auth_filter");
}
if (op->send_initial_metadata != NULL) {
for (l = op->send_initial_metadata->list.head; l != NULL; l = l->next) {
grpc_mdelem *md = l->md;
/* Pointer comparison is OK for md_elems created from the same context.
*/
if (md->key == GRPC_MDSTR_AUTHORITY) {
if (calld->host != NULL) GRPC_MDSTR_UNREF(calld->host);
calld->host = GRPC_MDSTR_REF(md->value);
} else if (md->key == GRPC_MDSTR_PATH) {
if (calld->method != NULL) GRPC_MDSTR_UNREF(calld->method);
calld->method = GRPC_MDSTR_REF(md->value);
}
}
if (calld->host != NULL) {
grpc_security_status status;
const char *call_host = grpc_mdstr_as_c_string(calld->host);
calld->op = *op; /* Copy op (originates from the caller's stack). */
status = grpc_channel_security_connector_check_call_host(
exec_ctx, chand->security_connector, call_host, on_host_checked,
elem);
if (status != GRPC_SECURITY_OK) {
if (status == GRPC_SECURITY_ERROR) {
char *error_msg;
gpr_asprintf(&error_msg,
"Invalid host %s set in :authority metadata.",
call_host);
bubble_up_error(exec_ctx, elem, GRPC_STATUS_INVALID_ARGUMENT,
error_msg);
gpr_free(error_msg);
}
return; /* early exit */
}
}
send_security_metadata(exec_ctx, elem, op);
return; /* early exit */
}
/* pass control down the stack */
@ -297,11 +276,15 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const void *server_transport_data,
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(*calld));
}
static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_pollset *pollset) {
call_data *calld = elem->call_data;
calld->pollset = pollset;
}
/* Destructor for call_data */
@ -320,18 +303,17 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
grpc_security_connector *sc =
grpc_find_security_connector_in_args(args->channel_args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!args->is_last);
GPR_ASSERT(sc != NULL);
/* initialize members */
@ -339,11 +321,6 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->security_connector =
(grpc_channel_security_connector *)GRPC_SECURITY_CONNECTOR_REF(
sc, "client_auth_filter");
}
/* Destructor for channel data */
@ -352,24 +329,13 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
grpc_channel_security_connector *ctx = chand->security_connector;
if (ctx != NULL) {
GRPC_SECURITY_CONNECTOR_UNREF(&ctx->base, "client_auth_filter");
}
const grpc_channel_filter grpc_client_auth_filter = {
auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, set_pollset, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"client-auth"};

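One point worth noting in the filter rewrite above: the metadata scan now compares keys against the static GRPC_MDSTR_AUTHORITY and GRPC_MDSTR_PATH by pointer, which is sound only because metadata strings are interned, so one canonical object exists per distinct string. A toy illustration of that invariant (hypothetical structs, not the grpc metadata API):

#include <stdio.h>

typedef struct mdstr { const char *str; } mdstr;

/* Interned singletons playing the role of GRPC_MDSTR_AUTHORITY / GRPC_MDSTR_PATH. */
static const mdstr MD_AUTHORITY = {":authority"};
static const mdstr MD_PATH = {":path"};

typedef struct mdelem {
  const mdstr *key; /* always points at an interned singleton */
  const char *value;
  struct mdelem *next;
} mdelem;

/* Walk the linked metadata list; pointer equality suffices for interned keys. */
static void scan(const mdelem *head, const char **host, const char **method) {
  for (const mdelem *l = head; l != NULL; l = l->next) {
    if (l->key == &MD_AUTHORITY) {
      *host = l->value;
    } else if (l->key == &MD_PATH) {
      *method = l->value;
    }
  }
}

int main(void) {
  mdelem path = {&MD_PATH, "/helloworld.Greeter/SayHello", NULL};
  mdelem authority = {&MD_AUTHORITY, "greeter.example.com", &path};
  const char *host = NULL, *method = NULL;
  scan(&authority, &host, &method);
  printf("host=%s method=%s\n", host, method);
  return 0;
}
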
@ -39,7 +39,7 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/http_client_filter.h"
#include "src/core/httpcli/httpcli.h"
#include "src/core/iomgr/executor.h"
#include "src/core/json/json.h"
#include "src/core/support/string.h"
#include "src/core/surface/api_trace.h"
@ -48,7 +48,6 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
/* -- Common. -- */
@ -511,10 +510,11 @@ grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
"grpc_service_account_jwt_access_credentials_create("
"json_key=%s, "
"token_lifetime="
"gpr_timespec { tv_sec: %ld, tv_nsec: %d, clock_type: %d }, "
"gpr_timespec { tv_sec: %lld, tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
5,
(json_key, (long long)token_lifetime.tv_sec, (int)token_lifetime.tv_nsec,
(int)token_lifetime.clock_type, reserved));
GPR_ASSERT(reserved == NULL);
return grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
grpc_auth_json_key_create_from_string(json_key), token_lifetime);
@ -792,15 +792,14 @@ static void md_only_test_destruct(grpc_call_credentials *creds) {
grpc_credentials_md_store_unref(c->md_store);
}
static void on_simulated_token_fetch_done(grpc_exec_ctx *exec_ctx,
void *user_data, int success) {
grpc_credentials_metadata_request *r =
(grpc_credentials_metadata_request *)user_data;
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)r->creds;
r->cb(exec_ctx, r->user_data, c->md_store->entries, c->md_store->num_entries,
GRPC_CREDENTIALS_OK);
grpc_credentials_metadata_request_destroy(r);
}
static void md_only_test_get_request_metadata(
@ -810,10 +809,10 @@ static void md_only_test_get_request_metadata(
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds;
if (c->is_async) {
grpc_credentials_metadata_request *cb_arg =
grpc_credentials_metadata_request_create(creds, cb, user_data);
grpc_executor_enqueue(
grpc_closure_create(on_simulated_token_fetch_done, cb_arg), 1);
} else {
cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK);
}

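The credentials hunks above replace a thread spawned per simulated token fetch (gpr_thd_new) with a closure queued on the shared executor, and they thread the caller's grpc_exec_ctx into the callback instead of creating and finishing a fresh one. A self-contained sketch of the enqueue-on-an-executor pattern (illustrative queue and names; grpc's exec_ctx parameter is omitted for brevity):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct closure {
  void (*cb)(void *arg, int success);
  void *arg;
  struct closure *next;
} closure;

static pthread_mutex_t q_mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cv = PTHREAD_COND_INITIALIZER;
static closure *q_head = NULL;
static int q_shutdown = 0;

/* Enqueue a closure and wake the executor; far cheaper than a new thread. */
static void executor_enqueue(closure *c) {
  pthread_mutex_lock(&q_mu);
  c->next = q_head;
  q_head = c;
  pthread_cond_signal(&q_cv);
  pthread_mutex_unlock(&q_mu);
}

/* Executor thread: drain queued closures until shutdown is requested. */
static void *executor_loop(void *unused) {
  (void)unused;
  for (;;) {
    pthread_mutex_lock(&q_mu);
    while (q_head == NULL && !q_shutdown) pthread_cond_wait(&q_cv, &q_mu);
    if (q_head == NULL && q_shutdown) {
      pthread_mutex_unlock(&q_mu);
      return NULL;
    }
    closure *c = q_head;
    q_head = c->next;
    pthread_mutex_unlock(&q_mu);
    c->cb(c->arg, 1 /* success */);
    free(c);
  }
}

static void fetch_done(void *arg, int success) {
  printf("token fetch for %s finished (success=%d)\n", (const char *)arg,
         success);
}

int main(void) {
  pthread_t thd;
  pthread_create(&thd, NULL, executor_loop, NULL);
  closure *c = malloc(sizeof(*c));
  c->cb = fetch_done;
  c->arg = "test-credentials";
  executor_enqueue(c);
  pthread_mutex_lock(&q_mu);
  q_shutdown = 1;
  pthread_cond_signal(&q_cv);
  pthread_mutex_unlock(&q_mu);
  pthread_join(thd, NULL);
  return 0;
}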