Merge branch 'master' into grpc_namespace_credentials

pull/18444/head
Karthik Ravi Shankar 6 years ago
commit 5a132b8245
  1. 8
      .gitignore
  2. 30
      .vscode/launch.json
  3. 4
      BUILD
  4. 15
      BUILD.gn
  5. 35
      CMakeLists.txt
  6. 13
      CONTRIBUTING.md
  7. 52
      Makefile
  8. 28
      build.yaml
  9. 7
      config.m4
  10. 7
      config.w32
  11. 8
      gRPC-C++.podspec
  12. 30
      gRPC-Core.podspec
  13. 11
      grpc.gemspec
  14. 22
      grpc.gyp
  15. 7
      include/grpc/impl/codegen/grpc_types.h
  16. 11
      package.xml
  17. 151
      src/core/ext/filters/client_channel/client_channel.cc
  18. 2
      src/core/ext/filters/client_channel/client_channel_plugin.cc
  19. 24
      src/core/ext/filters/client_channel/health/health_check_client.cc
  20. 6
      src/core/ext/filters/client_channel/health/health_check_client.h
  21. 26
      src/core/ext/filters/client_channel/lb_policy.cc
  22. 40
      src/core/ext/filters/client_channel/lb_policy.h
  23. 113
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
  24. 14
      src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
  25. 14
      src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
  26. 770
      src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
  27. 5
      src/core/ext/filters/client_channel/lb_policy_factory.h
  28. 107
      src/core/ext/filters/client_channel/lb_policy_registry.cc
  29. 11
      src/core/ext/filters/client_channel/lb_policy_registry.h
  30. 3
      src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc
  31. 589
      src/core/ext/filters/client_channel/resolver_result_parsing.cc
  32. 183
      src/core/ext/filters/client_channel/resolver_result_parsing.h
  33. 12
      src/core/ext/filters/client_channel/resolving_lb_policy.cc
  34. 26
      src/core/ext/filters/client_channel/resolving_lb_policy.h
  35. 79
      src/core/ext/filters/client_channel/service_config.cc
  36. 273
      src/core/ext/filters/client_channel/service_config.h
  37. 39
      src/core/ext/filters/client_channel/subchannel.cc
  38. 6
      src/core/ext/filters/client_channel/subchannel.h
  39. 5
      src/core/ext/filters/deadline/deadline_filter.cc
  40. 5
      src/core/ext/filters/deadline/deadline_filter.h
  41. 2
      src/core/ext/filters/http/client/http_client_filter.cc
  42. 2
      src/core/ext/filters/http/client_authority_filter.cc
  43. 2
      src/core/ext/filters/http/message_compress/message_compress_filter.cc
  44. 2
      src/core/ext/filters/http/server/http_server_filter.cc
  45. 195
      src/core/ext/filters/message_size/message_size_filter.cc
  46. 33
      src/core/ext/filters/message_size/message_size_filter.h
  47. 7
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  48. 2
      src/core/ext/transport/chttp2/transport/incoming_metadata.cc
  49. 5
      src/core/ext/transport/chttp2/transport/incoming_metadata.h
  50. 4
      src/core/ext/transport/chttp2/transport/internal.h
  51. 13
      src/core/ext/transport/cronet/transport/cronet_transport.cc
  52. 18
      src/core/ext/transport/inproc/inproc_transport.cc
  53. 6
      src/core/lib/channel/channel_stack.h
  54. 4
      src/core/lib/channel/connected_channel.cc
  55. 3
      src/core/lib/channel/context.h
  56. 152
      src/core/lib/gpr/arena.cc
  57. 20
      src/core/lib/gpr/arena.h
  58. 103
      src/core/lib/gprpp/arena.cc
  59. 121
      src/core/lib/gprpp/arena.h
  60. 1
      src/core/lib/gprpp/optional.h
  61. 146
      src/core/lib/iomgr/call_combiner.cc
  62. 155
      src/core/lib/iomgr/call_combiner.h
  63. 4
      src/core/lib/iomgr/endpoint_pair_windows.cc
  64. 23
      src/core/lib/iomgr/error.h
  65. 2
      src/core/lib/iomgr/internal_errqueue.cc
  66. 1
      src/core/lib/iomgr/iomgr_windows.cc
  67. 19
      src/core/lib/iomgr/socket_windows.cc
  68. 8
      src/core/lib/iomgr/socket_windows.h
  69. 2
      src/core/lib/iomgr/tcp_client_windows.cc
  70. 4
      src/core/lib/iomgr/tcp_server_windows.cc
  71. 16
      src/core/lib/iomgr/tcp_windows.cc
  72. 2
      src/core/lib/iomgr/timer_generic.cc
  73. 13
      src/core/lib/security/context/security_context.cc
  74. 7
      src/core/lib/security/context/security_context.h
  75. 18
      src/core/lib/security/transport/client_auth_filter.cc
  76. 5
      src/core/lib/security/transport/server_auth_filter.cc
  77. 60
      src/core/lib/surface/call.cc
  78. 3
      src/core/lib/surface/call.h
  79. 1
      src/core/lib/surface/call_details.cc
  80. 14
      src/core/lib/surface/completion_queue.cc
  81. 2
      src/core/lib/surface/lame_client.cc
  82. 38
      src/core/lib/surface/server.cc
  83. 480
      src/core/lib/transport/metadata.cc
  84. 201
      src/core/lib/transport/metadata.h
  85. 5
      src/core/lib/transport/transport.cc
  86. 7
      src/core/lib/transport/transport.h
  87. 2
      src/core/lib/transport/transport_impl.h
  88. 1
      src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
  89. 11
      src/csharp/README.md
  90. 2
      src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
  91. 2
      src/objective-c/!ProtoCompiler.podspec
  92. 14
      src/objective-c/GRPCClient/GRPCCall.h
  93. 133
      src/objective-c/GRPCClient/GRPCCall.m
  94. 21
      src/objective-c/GRPCClient/GRPCCallOptions.h
  95. 16
      src/objective-c/GRPCClient/GRPCCallOptions.m
  96. 27
      src/objective-c/ProtoRPC/ProtoRPC.h
  97. 26
      src/objective-c/ProtoRPC/ProtoRPC.m
  98. 296
      src/objective-c/tests/APIv2Tests/APIv2Tests.m
  99. 86
      src/objective-c/tests/InteropTests.m
  100. 7
      src/php/bin/run_tests.sh
  101. Some files were not shown because too many files have changed in this diff Show More

8
.gitignore vendored

@ -134,3 +134,11 @@ bm_*.json
# cmake build files
/cmake/build
# Visual Studio Code artifacts
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json

@ -1,30 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"type": "node",
"request": "launch",
"name": "Mocha Tests",
"cwd": "${workspaceRoot}",
"runtimeExecutable": "${workspaceRoot}/node_modules/.bin/mocha",
"windows": {
"runtimeExecutable": "${workspaceRoot}/node_modules/.bin/mocha.cmd"
},
"runtimeArgs": [
"-u",
"tdd",
"--timeout",
"999999",
"--colors",
"${workspaceRoot}/src/node/test"
],
"internalConsoleOptions": "openOnSessionStart"
},
{
"type": "node",
"request": "attach",
"name": "Attach to Process",
"port": 5858
}
]
}

@ -543,7 +543,6 @@ grpc_cc_library(
name = "gpr_base",
srcs = [
"src/core/lib/gpr/alloc.cc",
"src/core/lib/gpr/arena.cc",
"src/core/lib/gpr/atm.cc",
"src/core/lib/gpr/cpu_iphone.cc",
"src/core/lib/gpr/cpu_linux.cc",
@ -576,6 +575,7 @@ grpc_cc_library(
"src/core/lib/gpr/tmpfile_posix.cc",
"src/core/lib/gpr/tmpfile_windows.cc",
"src/core/lib/gpr/wrap_memcpy.cc",
"src/core/lib/gprpp/arena.cc",
"src/core/lib/gprpp/fork.cc",
"src/core/lib/gprpp/thd_posix.cc",
"src/core/lib/gprpp/thd_windows.cc",
@ -600,6 +600,8 @@ grpc_cc_library(
"src/core/lib/gpr/tmpfile.h",
"src/core/lib/gpr/useful.h",
"src/core/lib/gprpp/abstract.h",
"src/core/lib/gprpp/arena.h",
"src/core/lib/gprpp/atomic.h",
"src/core/lib/gprpp/fork.h",
"src/core/lib/gprpp/manual_constructor.h",
"src/core/lib/gprpp/map.h",

@ -131,7 +131,6 @@ config("grpc_config") {
"include/grpc/support/time.h",
"src/core/lib/gpr/alloc.cc",
"src/core/lib/gpr/alloc.h",
"src/core/lib/gpr/arena.cc",
"src/core/lib/gpr/arena.h",
"src/core/lib/gpr/atm.cc",
"src/core/lib/gpr/cpu_iphone.cc",
@ -180,6 +179,8 @@ config("grpc_config") {
"src/core/lib/gpr/useful.h",
"src/core/lib/gpr/wrap_memcpy.cc",
"src/core/lib/gprpp/abstract.h",
"src/core/lib/gprpp/arena.cc",
"src/core/lib/gprpp/arena.h",
"src/core/lib/gprpp/atomic.h",
"src/core/lib/gprpp/fork.cc",
"src/core/lib/gprpp/fork.h",
@ -481,18 +482,24 @@ config("grpc_config") {
"src/core/lib/iomgr/buffer_list.h",
"src/core/lib/iomgr/call_combiner.cc",
"src/core/lib/iomgr/call_combiner.h",
"src/core/lib/iomgr/cfstream_handle.cc",
"src/core/lib/iomgr/cfstream_handle.h",
"src/core/lib/iomgr/closure.h",
"src/core/lib/iomgr/combiner.cc",
"src/core/lib/iomgr/combiner.h",
"src/core/lib/iomgr/dynamic_annotations.h",
"src/core/lib/iomgr/endpoint.cc",
"src/core/lib/iomgr/endpoint.h",
"src/core/lib/iomgr/endpoint_cfstream.cc",
"src/core/lib/iomgr/endpoint_cfstream.h",
"src/core/lib/iomgr/endpoint_pair.h",
"src/core/lib/iomgr/endpoint_pair_posix.cc",
"src/core/lib/iomgr/endpoint_pair_uv.cc",
"src/core/lib/iomgr/endpoint_pair_windows.cc",
"src/core/lib/iomgr/error.cc",
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/error_cfstream.cc",
"src/core/lib/iomgr/error_cfstream.h",
"src/core/lib/iomgr/error_internal.h",
"src/core/lib/iomgr/ev_epoll1_linux.cc",
"src/core/lib/iomgr/ev_epoll1_linux.h",
@ -528,6 +535,7 @@ config("grpc_config") {
"src/core/lib/iomgr/iomgr_internal.h",
"src/core/lib/iomgr/iomgr_posix.cc",
"src/core/lib/iomgr/iomgr_posix.h",
"src/core/lib/iomgr/iomgr_posix_cfstream.cc",
"src/core/lib/iomgr/iomgr_uv.cc",
"src/core/lib/iomgr/iomgr_windows.cc",
"src/core/lib/iomgr/is_epollexclusive_available.cc",
@ -583,6 +591,7 @@ config("grpc_config") {
"src/core/lib/iomgr/sys_epoll_wrapper.h",
"src/core/lib/iomgr/tcp_client.cc",
"src/core/lib/iomgr/tcp_client.h",
"src/core/lib/iomgr/tcp_client_cfstream.cc",
"src/core/lib/iomgr/tcp_client_custom.cc",
"src/core/lib/iomgr/tcp_client_posix.cc",
"src/core/lib/iomgr/tcp_client_posix.h",
@ -1159,6 +1168,7 @@ config("grpc_config") {
"src/core/lib/gpr/tmpfile.h",
"src/core/lib/gpr/useful.h",
"src/core/lib/gprpp/abstract.h",
"src/core/lib/gprpp/arena.h",
"src/core/lib/gprpp/atomic.h",
"src/core/lib/gprpp/debug_location.h",
"src/core/lib/gprpp/fork.h",
@ -1179,12 +1189,15 @@ config("grpc_config") {
"src/core/lib/iomgr/block_annotate.h",
"src/core/lib/iomgr/buffer_list.h",
"src/core/lib/iomgr/call_combiner.h",
"src/core/lib/iomgr/cfstream_handle.h",
"src/core/lib/iomgr/closure.h",
"src/core/lib/iomgr/combiner.h",
"src/core/lib/iomgr/dynamic_annotations.h",
"src/core/lib/iomgr/endpoint.h",
"src/core/lib/iomgr/endpoint_cfstream.h",
"src/core/lib/iomgr/endpoint_pair.h",
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/error_cfstream.h",
"src/core/lib/iomgr/error_internal.h",
"src/core/lib/iomgr/ev_epoll1_linux.h",
"src/core/lib/iomgr/ev_epollex_linux.h",

@ -839,7 +839,6 @@ endif (gRPC_BUILD_TESTS)
add_library(gpr
src/core/lib/gpr/alloc.cc
src/core/lib/gpr/arena.cc
src/core/lib/gpr/atm.cc
src/core/lib/gpr/cpu_iphone.cc
src/core/lib/gpr/cpu_linux.cc
@ -872,6 +871,7 @@ add_library(gpr
src/core/lib/gpr/tmpfile_posix.cc
src/core/lib/gpr/tmpfile_windows.cc
src/core/lib/gpr/wrap_memcpy.cc
src/core/lib/gprpp/arena.cc
src/core/lib/gprpp/fork.cc
src/core/lib/gprpp/thd_posix.cc
src/core/lib/gprpp/thd_windows.cc
@ -997,12 +997,15 @@ add_library(grpc
src/core/lib/http/parser.cc
src/core/lib/iomgr/buffer_list.cc
src/core/lib/iomgr/call_combiner.cc
src/core/lib/iomgr/cfstream_handle.cc
src/core/lib/iomgr/combiner.cc
src/core/lib/iomgr/endpoint.cc
src/core/lib/iomgr/endpoint_cfstream.cc
src/core/lib/iomgr/endpoint_pair_posix.cc
src/core/lib/iomgr/endpoint_pair_uv.cc
src/core/lib/iomgr/endpoint_pair_windows.cc
src/core/lib/iomgr/error.cc
src/core/lib/iomgr/error_cfstream.cc
src/core/lib/iomgr/ev_epoll1_linux.cc
src/core/lib/iomgr/ev_epollex_linux.cc
src/core/lib/iomgr/ev_poll_posix.cc
@ -1023,6 +1026,7 @@ add_library(grpc
src/core/lib/iomgr/iomgr_custom.cc
src/core/lib/iomgr/iomgr_internal.cc
src/core/lib/iomgr/iomgr_posix.cc
src/core/lib/iomgr/iomgr_posix_cfstream.cc
src/core/lib/iomgr/iomgr_uv.cc
src/core/lib/iomgr/iomgr_windows.cc
src/core/lib/iomgr/is_epollexclusive_available.cc
@ -1051,6 +1055,7 @@ add_library(grpc
src/core/lib/iomgr/socket_utils_windows.cc
src/core/lib/iomgr/socket_windows.cc
src/core/lib/iomgr/tcp_client.cc
src/core/lib/iomgr/tcp_client_cfstream.cc
src/core/lib/iomgr/tcp_client_custom.cc
src/core/lib/iomgr/tcp_client_posix.cc
src/core/lib/iomgr/tcp_client_windows.cc
@ -1424,12 +1429,15 @@ add_library(grpc_cronet
src/core/lib/http/parser.cc
src/core/lib/iomgr/buffer_list.cc
src/core/lib/iomgr/call_combiner.cc
src/core/lib/iomgr/cfstream_handle.cc
src/core/lib/iomgr/combiner.cc
src/core/lib/iomgr/endpoint.cc
src/core/lib/iomgr/endpoint_cfstream.cc
src/core/lib/iomgr/endpoint_pair_posix.cc
src/core/lib/iomgr/endpoint_pair_uv.cc
src/core/lib/iomgr/endpoint_pair_windows.cc
src/core/lib/iomgr/error.cc
src/core/lib/iomgr/error_cfstream.cc
src/core/lib/iomgr/ev_epoll1_linux.cc
src/core/lib/iomgr/ev_epollex_linux.cc
src/core/lib/iomgr/ev_poll_posix.cc
@ -1450,6 +1458,7 @@ add_library(grpc_cronet
src/core/lib/iomgr/iomgr_custom.cc
src/core/lib/iomgr/iomgr_internal.cc
src/core/lib/iomgr/iomgr_posix.cc
src/core/lib/iomgr/iomgr_posix_cfstream.cc
src/core/lib/iomgr/iomgr_uv.cc
src/core/lib/iomgr/iomgr_windows.cc
src/core/lib/iomgr/is_epollexclusive_available.cc
@ -1478,6 +1487,7 @@ add_library(grpc_cronet
src/core/lib/iomgr/socket_utils_windows.cc
src/core/lib/iomgr/socket_windows.cc
src/core/lib/iomgr/tcp_client.cc
src/core/lib/iomgr/tcp_client_cfstream.cc
src/core/lib/iomgr/tcp_client_custom.cc
src/core/lib/iomgr/tcp_client_posix.cc
src/core/lib/iomgr/tcp_client_windows.cc
@ -1836,12 +1846,15 @@ add_library(grpc_test_util
src/core/lib/http/parser.cc
src/core/lib/iomgr/buffer_list.cc
src/core/lib/iomgr/call_combiner.cc
src/core/lib/iomgr/cfstream_handle.cc
src/core/lib/iomgr/combiner.cc
src/core/lib/iomgr/endpoint.cc
src/core/lib/iomgr/endpoint_cfstream.cc
src/core/lib/iomgr/endpoint_pair_posix.cc
src/core/lib/iomgr/endpoint_pair_uv.cc
src/core/lib/iomgr/endpoint_pair_windows.cc
src/core/lib/iomgr/error.cc
src/core/lib/iomgr/error_cfstream.cc
src/core/lib/iomgr/ev_epoll1_linux.cc
src/core/lib/iomgr/ev_epollex_linux.cc
src/core/lib/iomgr/ev_poll_posix.cc
@ -1862,6 +1875,7 @@ add_library(grpc_test_util
src/core/lib/iomgr/iomgr_custom.cc
src/core/lib/iomgr/iomgr_internal.cc
src/core/lib/iomgr/iomgr_posix.cc
src/core/lib/iomgr/iomgr_posix_cfstream.cc
src/core/lib/iomgr/iomgr_uv.cc
src/core/lib/iomgr/iomgr_windows.cc
src/core/lib/iomgr/is_epollexclusive_available.cc
@ -1890,6 +1904,7 @@ add_library(grpc_test_util
src/core/lib/iomgr/socket_utils_windows.cc
src/core/lib/iomgr/socket_windows.cc
src/core/lib/iomgr/tcp_client.cc
src/core/lib/iomgr/tcp_client_cfstream.cc
src/core/lib/iomgr/tcp_client_custom.cc
src/core/lib/iomgr/tcp_client_posix.cc
src/core/lib/iomgr/tcp_client_windows.cc
@ -2161,12 +2176,15 @@ add_library(grpc_test_util_unsecure
src/core/lib/http/parser.cc
src/core/lib/iomgr/buffer_list.cc
src/core/lib/iomgr/call_combiner.cc
src/core/lib/iomgr/cfstream_handle.cc
src/core/lib/iomgr/combiner.cc
src/core/lib/iomgr/endpoint.cc
src/core/lib/iomgr/endpoint_cfstream.cc
src/core/lib/iomgr/endpoint_pair_posix.cc
src/core/lib/iomgr/endpoint_pair_uv.cc
src/core/lib/iomgr/endpoint_pair_windows.cc
src/core/lib/iomgr/error.cc
src/core/lib/iomgr/error_cfstream.cc
src/core/lib/iomgr/ev_epoll1_linux.cc
src/core/lib/iomgr/ev_epollex_linux.cc
src/core/lib/iomgr/ev_poll_posix.cc
@ -2187,6 +2205,7 @@ add_library(grpc_test_util_unsecure
src/core/lib/iomgr/iomgr_custom.cc
src/core/lib/iomgr/iomgr_internal.cc
src/core/lib/iomgr/iomgr_posix.cc
src/core/lib/iomgr/iomgr_posix_cfstream.cc
src/core/lib/iomgr/iomgr_uv.cc
src/core/lib/iomgr/iomgr_windows.cc
src/core/lib/iomgr/is_epollexclusive_available.cc
@ -2215,6 +2234,7 @@ add_library(grpc_test_util_unsecure
src/core/lib/iomgr/socket_utils_windows.cc
src/core/lib/iomgr/socket_windows.cc
src/core/lib/iomgr/tcp_client.cc
src/core/lib/iomgr/tcp_client_cfstream.cc
src/core/lib/iomgr/tcp_client_custom.cc
src/core/lib/iomgr/tcp_client_posix.cc
src/core/lib/iomgr/tcp_client_windows.cc
@ -2462,12 +2482,15 @@ add_library(grpc_unsecure
src/core/lib/http/parser.cc
src/core/lib/iomgr/buffer_list.cc
src/core/lib/iomgr/call_combiner.cc
src/core/lib/iomgr/cfstream_handle.cc
src/core/lib/iomgr/combiner.cc
src/core/lib/iomgr/endpoint.cc
src/core/lib/iomgr/endpoint_cfstream.cc
src/core/lib/iomgr/endpoint_pair_posix.cc
src/core/lib/iomgr/endpoint_pair_uv.cc
src/core/lib/iomgr/endpoint_pair_windows.cc
src/core/lib/iomgr/error.cc
src/core/lib/iomgr/error_cfstream.cc
src/core/lib/iomgr/ev_epoll1_linux.cc
src/core/lib/iomgr/ev_epollex_linux.cc
src/core/lib/iomgr/ev_poll_posix.cc
@ -2488,6 +2511,7 @@ add_library(grpc_unsecure
src/core/lib/iomgr/iomgr_custom.cc
src/core/lib/iomgr/iomgr_internal.cc
src/core/lib/iomgr/iomgr_posix.cc
src/core/lib/iomgr/iomgr_posix_cfstream.cc
src/core/lib/iomgr/iomgr_uv.cc
src/core/lib/iomgr/iomgr_windows.cc
src/core/lib/iomgr/is_epollexclusive_available.cc
@ -2516,6 +2540,7 @@ add_library(grpc_unsecure
src/core/lib/iomgr/socket_utils_windows.cc
src/core/lib/iomgr/socket_windows.cc
src/core/lib/iomgr/tcp_client.cc
src/core/lib/iomgr/tcp_client_cfstream.cc
src/core/lib/iomgr/tcp_client_custom.cc
src/core/lib/iomgr/tcp_client_posix.cc
src/core/lib/iomgr/tcp_client_windows.cc
@ -3366,12 +3391,15 @@ add_library(grpc++_cronet
src/core/lib/http/parser.cc
src/core/lib/iomgr/buffer_list.cc
src/core/lib/iomgr/call_combiner.cc
src/core/lib/iomgr/cfstream_handle.cc
src/core/lib/iomgr/combiner.cc
src/core/lib/iomgr/endpoint.cc
src/core/lib/iomgr/endpoint_cfstream.cc
src/core/lib/iomgr/endpoint_pair_posix.cc
src/core/lib/iomgr/endpoint_pair_uv.cc
src/core/lib/iomgr/endpoint_pair_windows.cc
src/core/lib/iomgr/error.cc
src/core/lib/iomgr/error_cfstream.cc
src/core/lib/iomgr/ev_epoll1_linux.cc
src/core/lib/iomgr/ev_epollex_linux.cc
src/core/lib/iomgr/ev_poll_posix.cc
@ -3392,6 +3420,7 @@ add_library(grpc++_cronet
src/core/lib/iomgr/iomgr_custom.cc
src/core/lib/iomgr/iomgr_internal.cc
src/core/lib/iomgr/iomgr_posix.cc
src/core/lib/iomgr/iomgr_posix_cfstream.cc
src/core/lib/iomgr/iomgr_uv.cc
src/core/lib/iomgr/iomgr_windows.cc
src/core/lib/iomgr/is_epollexclusive_available.cc
@ -3420,6 +3449,7 @@ add_library(grpc++_cronet
src/core/lib/iomgr/socket_utils_windows.cc
src/core/lib/iomgr/socket_windows.cc
src/core/lib/iomgr/tcp_client.cc
src/core/lib/iomgr/tcp_client_cfstream.cc
src/core/lib/iomgr/tcp_client_custom.cc
src/core/lib/iomgr/tcp_client_posix.cc
src/core/lib/iomgr/tcp_client_windows.cc
@ -12696,6 +12726,7 @@ target_include_directories(client_crash_test_server
target_link_libraries(client_crash_test_server
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_config
grpc++_test_util
grpc_test_util
grpc++
@ -13435,6 +13466,7 @@ target_include_directories(golden_file_test
target_link_libraries(golden_file_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_config
grpc++
grpc
gpr
@ -15679,6 +15711,7 @@ target_include_directories(server_crash_test_client
target_link_libraries(server_crash_test_client
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_config
grpc++_test_util
grpc_test_util
grpc++

@ -110,5 +110,16 @@ How to get your contributions merged smoothly and quickly.
- Exceptions to the rules can be made if there's a compelling reason for doing
so.
## Obtaining Commit Access
We grant Commit Access to contributors based on the following criteria:
* Sustained contribution to the gRPC project.
* Deep understanding of the areas contributed to, and good consideration of various reliability, usability and performance tradeoffs.
* Contributions demonstrate that obtaining Commit Access will significantly reduce friction for the contributors or others.
In addition to submitting PRs, a Contributor with Commit Access can:
* Review PRs and merge once other checks and criteria pass.
* Triage bugs and PRs and assign appropriate labels and reviewers.
### Obtaining Commit Access without Code Contributions
The [gRPC organization](https://github.com/grpc) is comprised of multiple repositories and commit access is usually restricted to one or more of these repositories. Some repositories such as the [grpc.github.io](https://github.com/grpc/grpc.github.io/) do not have code, but the same principle of sustained, high quality contributions, with a good understanding of the fundamentals, apply.

@ -3320,7 +3320,6 @@ endif
LIBGPR_SRC = \
src/core/lib/gpr/alloc.cc \
src/core/lib/gpr/arena.cc \
src/core/lib/gpr/atm.cc \
src/core/lib/gpr/cpu_iphone.cc \
src/core/lib/gpr/cpu_linux.cc \
@ -3353,6 +3352,7 @@ LIBGPR_SRC = \
src/core/lib/gpr/tmpfile_posix.cc \
src/core/lib/gpr/tmpfile_windows.cc \
src/core/lib/gpr/wrap_memcpy.cc \
src/core/lib/gprpp/arena.cc \
src/core/lib/gprpp/fork.cc \
src/core/lib/gprpp/thd_posix.cc \
src/core/lib/gprpp/thd_windows.cc \
@ -3457,12 +3457,15 @@ LIBGRPC_SRC = \
src/core/lib/http/parser.cc \
src/core/lib/iomgr/buffer_list.cc \
src/core/lib/iomgr/call_combiner.cc \
src/core/lib/iomgr/cfstream_handle.cc \
src/core/lib/iomgr/combiner.cc \
src/core/lib/iomgr/endpoint.cc \
src/core/lib/iomgr/endpoint_cfstream.cc \
src/core/lib/iomgr/endpoint_pair_posix.cc \
src/core/lib/iomgr/endpoint_pair_uv.cc \
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
@ -3483,6 +3486,7 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/iomgr_custom.cc \
src/core/lib/iomgr/iomgr_internal.cc \
src/core/lib/iomgr/iomgr_posix.cc \
src/core/lib/iomgr/iomgr_posix_cfstream.cc \
src/core/lib/iomgr/iomgr_uv.cc \
src/core/lib/iomgr/iomgr_windows.cc \
src/core/lib/iomgr/is_epollexclusive_available.cc \
@ -3511,6 +3515,7 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/socket_utils_windows.cc \
src/core/lib/iomgr/socket_windows.cc \
src/core/lib/iomgr/tcp_client.cc \
src/core/lib/iomgr/tcp_client_cfstream.cc \
src/core/lib/iomgr/tcp_client_custom.cc \
src/core/lib/iomgr/tcp_client_posix.cc \
src/core/lib/iomgr/tcp_client_windows.cc \
@ -3878,12 +3883,15 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/http/parser.cc \
src/core/lib/iomgr/buffer_list.cc \
src/core/lib/iomgr/call_combiner.cc \
src/core/lib/iomgr/cfstream_handle.cc \
src/core/lib/iomgr/combiner.cc \
src/core/lib/iomgr/endpoint.cc \
src/core/lib/iomgr/endpoint_cfstream.cc \
src/core/lib/iomgr/endpoint_pair_posix.cc \
src/core/lib/iomgr/endpoint_pair_uv.cc \
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
@ -3904,6 +3912,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/iomgr_custom.cc \
src/core/lib/iomgr/iomgr_internal.cc \
src/core/lib/iomgr/iomgr_posix.cc \
src/core/lib/iomgr/iomgr_posix_cfstream.cc \
src/core/lib/iomgr/iomgr_uv.cc \
src/core/lib/iomgr/iomgr_windows.cc \
src/core/lib/iomgr/is_epollexclusive_available.cc \
@ -3932,6 +3941,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/socket_utils_windows.cc \
src/core/lib/iomgr/socket_windows.cc \
src/core/lib/iomgr/tcp_client.cc \
src/core/lib/iomgr/tcp_client_cfstream.cc \
src/core/lib/iomgr/tcp_client_custom.cc \
src/core/lib/iomgr/tcp_client_posix.cc \
src/core/lib/iomgr/tcp_client_windows.cc \
@ -4283,12 +4293,15 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/http/parser.cc \
src/core/lib/iomgr/buffer_list.cc \
src/core/lib/iomgr/call_combiner.cc \
src/core/lib/iomgr/cfstream_handle.cc \
src/core/lib/iomgr/combiner.cc \
src/core/lib/iomgr/endpoint.cc \
src/core/lib/iomgr/endpoint_cfstream.cc \
src/core/lib/iomgr/endpoint_pair_posix.cc \
src/core/lib/iomgr/endpoint_pair_uv.cc \
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
@ -4309,6 +4322,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/iomgr_custom.cc \
src/core/lib/iomgr/iomgr_internal.cc \
src/core/lib/iomgr/iomgr_posix.cc \
src/core/lib/iomgr/iomgr_posix_cfstream.cc \
src/core/lib/iomgr/iomgr_uv.cc \
src/core/lib/iomgr/iomgr_windows.cc \
src/core/lib/iomgr/is_epollexclusive_available.cc \
@ -4337,6 +4351,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/socket_utils_windows.cc \
src/core/lib/iomgr/socket_windows.cc \
src/core/lib/iomgr/tcp_client.cc \
src/core/lib/iomgr/tcp_client_cfstream.cc \
src/core/lib/iomgr/tcp_client_custom.cc \
src/core/lib/iomgr/tcp_client_posix.cc \
src/core/lib/iomgr/tcp_client_windows.cc \
@ -4595,12 +4610,15 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/lib/http/parser.cc \
src/core/lib/iomgr/buffer_list.cc \
src/core/lib/iomgr/call_combiner.cc \
src/core/lib/iomgr/cfstream_handle.cc \
src/core/lib/iomgr/combiner.cc \
src/core/lib/iomgr/endpoint.cc \
src/core/lib/iomgr/endpoint_cfstream.cc \
src/core/lib/iomgr/endpoint_pair_posix.cc \
src/core/lib/iomgr/endpoint_pair_uv.cc \
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
@ -4621,6 +4639,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/lib/iomgr/iomgr_custom.cc \
src/core/lib/iomgr/iomgr_internal.cc \
src/core/lib/iomgr/iomgr_posix.cc \
src/core/lib/iomgr/iomgr_posix_cfstream.cc \
src/core/lib/iomgr/iomgr_uv.cc \
src/core/lib/iomgr/iomgr_windows.cc \
src/core/lib/iomgr/is_epollexclusive_available.cc \
@ -4649,6 +4668,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/lib/iomgr/socket_utils_windows.cc \
src/core/lib/iomgr/socket_windows.cc \
src/core/lib/iomgr/tcp_client.cc \
src/core/lib/iomgr/tcp_client_cfstream.cc \
src/core/lib/iomgr/tcp_client_custom.cc \
src/core/lib/iomgr/tcp_client_posix.cc \
src/core/lib/iomgr/tcp_client_windows.cc \
@ -4870,12 +4890,15 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/http/parser.cc \
src/core/lib/iomgr/buffer_list.cc \
src/core/lib/iomgr/call_combiner.cc \
src/core/lib/iomgr/cfstream_handle.cc \
src/core/lib/iomgr/combiner.cc \
src/core/lib/iomgr/endpoint.cc \
src/core/lib/iomgr/endpoint_cfstream.cc \
src/core/lib/iomgr/endpoint_pair_posix.cc \
src/core/lib/iomgr/endpoint_pair_uv.cc \
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
@ -4896,6 +4919,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/iomgr_custom.cc \
src/core/lib/iomgr/iomgr_internal.cc \
src/core/lib/iomgr/iomgr_posix.cc \
src/core/lib/iomgr/iomgr_posix_cfstream.cc \
src/core/lib/iomgr/iomgr_uv.cc \
src/core/lib/iomgr/iomgr_windows.cc \
src/core/lib/iomgr/is_epollexclusive_available.cc \
@ -4924,6 +4948,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/socket_utils_windows.cc \
src/core/lib/iomgr/socket_windows.cc \
src/core/lib/iomgr/tcp_client.cc \
src/core/lib/iomgr/tcp_client_cfstream.cc \
src/core/lib/iomgr/tcp_client_custom.cc \
src/core/lib/iomgr/tcp_client_posix.cc \
src/core/lib/iomgr/tcp_client_windows.cc \
@ -5750,12 +5775,15 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/http/parser.cc \
src/core/lib/iomgr/buffer_list.cc \
src/core/lib/iomgr/call_combiner.cc \
src/core/lib/iomgr/cfstream_handle.cc \
src/core/lib/iomgr/combiner.cc \
src/core/lib/iomgr/endpoint.cc \
src/core/lib/iomgr/endpoint_cfstream.cc \
src/core/lib/iomgr/endpoint_pair_posix.cc \
src/core/lib/iomgr/endpoint_pair_uv.cc \
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
@ -5776,6 +5804,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/iomgr/iomgr_custom.cc \
src/core/lib/iomgr/iomgr_internal.cc \
src/core/lib/iomgr/iomgr_posix.cc \
src/core/lib/iomgr/iomgr_posix_cfstream.cc \
src/core/lib/iomgr/iomgr_uv.cc \
src/core/lib/iomgr/iomgr_windows.cc \
src/core/lib/iomgr/is_epollexclusive_available.cc \
@ -5804,6 +5833,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/iomgr/socket_utils_windows.cc \
src/core/lib/iomgr/socket_windows.cc \
src/core/lib/iomgr/tcp_client.cc \
src/core/lib/iomgr/tcp_client_cfstream.cc \
src/core/lib/iomgr/tcp_client_custom.cc \
src/core/lib/iomgr/tcp_client_posix.cc \
src/core/lib/iomgr/tcp_client_windows.cc \
@ -15643,16 +15673,16 @@ $(BINDIR)/$(CONFIG)/client_crash_test_server: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/client_crash_test_server: $(PROTOBUF_DEP) $(CLIENT_CRASH_TEST_SERVER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/client_crash_test_server: $(PROTOBUF_DEP) $(CLIENT_CRASH_TEST_SERVER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(CLIENT_CRASH_TEST_SERVER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/client_crash_test_server
$(Q) $(LDXX) $(LDFLAGS) $(CLIENT_CRASH_TEST_SERVER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/client_crash_test_server
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/client_crash_test_server.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/client_crash_test_server.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_client_crash_test_server: $(CLIENT_CRASH_TEST_SERVER_OBJS:.o=.dep)
@ -16390,18 +16420,18 @@ $(BINDIR)/$(CONFIG)/golden_file_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/golden_file_test: $(PROTOBUF_DEP) $(GOLDEN_FILE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/golden_file_test: $(PROTOBUF_DEP) $(GOLDEN_FILE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(GOLDEN_FILE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/golden_file_test
$(Q) $(LDXX) $(LDFLAGS) $(GOLDEN_FILE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/golden_file_test
endif
endif
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/compiler_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/compiler_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/golden_file_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/codegen/golden_file_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_golden_file_test: $(GOLDEN_FILE_TEST_OBJS:.o=.dep)
@ -18606,16 +18636,16 @@ $(BINDIR)/$(CONFIG)/server_crash_test_client: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/server_crash_test_client: $(PROTOBUF_DEP) $(SERVER_CRASH_TEST_CLIENT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(BINDIR)/$(CONFIG)/server_crash_test_client: $(PROTOBUF_DEP) $(SERVER_CRASH_TEST_CLIENT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(SERVER_CRASH_TEST_CLIENT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/server_crash_test_client
$(Q) $(LDXX) $(LDFLAGS) $(SERVER_CRASH_TEST_CLIENT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/server_crash_test_client
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/server_crash_test_client.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/server_crash_test_client.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_server_crash_test_client: $(SERVER_CRASH_TEST_CLIENT_OBJS:.o=.dep)

@ -114,7 +114,6 @@ filegroups:
- name: gpr_base
src:
- src/core/lib/gpr/alloc.cc
- src/core/lib/gpr/arena.cc
- src/core/lib/gpr/atm.cc
- src/core/lib/gpr/cpu_iphone.cc
- src/core/lib/gpr/cpu_linux.cc
@ -147,6 +146,7 @@ filegroups:
- src/core/lib/gpr/tmpfile_posix.cc
- src/core/lib/gpr/tmpfile_windows.cc
- src/core/lib/gpr/wrap_memcpy.cc
- src/core/lib/gprpp/arena.cc
- src/core/lib/gprpp/fork.cc
- src/core/lib/gprpp/thd_posix.cc
- src/core/lib/gprpp/thd_windows.cc
@ -191,6 +191,7 @@ filegroups:
- src/core/lib/gpr/tmpfile.h
- src/core/lib/gpr/useful.h
- src/core/lib/gprpp/abstract.h
- src/core/lib/gprpp/arena.h
- src/core/lib/gprpp/atomic.h
- src/core/lib/gprpp/fork.h
- src/core/lib/gprpp/manual_constructor.h
@ -258,12 +259,15 @@ filegroups:
- src/core/lib/http/parser.cc
- src/core/lib/iomgr/buffer_list.cc
- src/core/lib/iomgr/call_combiner.cc
- src/core/lib/iomgr/cfstream_handle.cc
- src/core/lib/iomgr/combiner.cc
- src/core/lib/iomgr/endpoint.cc
- src/core/lib/iomgr/endpoint_cfstream.cc
- src/core/lib/iomgr/endpoint_pair_posix.cc
- src/core/lib/iomgr/endpoint_pair_uv.cc
- src/core/lib/iomgr/endpoint_pair_windows.cc
- src/core/lib/iomgr/error.cc
- src/core/lib/iomgr/error_cfstream.cc
- src/core/lib/iomgr/ev_epoll1_linux.cc
- src/core/lib/iomgr/ev_epollex_linux.cc
- src/core/lib/iomgr/ev_poll_posix.cc
@ -284,6 +288,7 @@ filegroups:
- src/core/lib/iomgr/iomgr_custom.cc
- src/core/lib/iomgr/iomgr_internal.cc
- src/core/lib/iomgr/iomgr_posix.cc
- src/core/lib/iomgr/iomgr_posix_cfstream.cc
- src/core/lib/iomgr/iomgr_uv.cc
- src/core/lib/iomgr/iomgr_windows.cc
- src/core/lib/iomgr/is_epollexclusive_available.cc
@ -312,6 +317,7 @@ filegroups:
- src/core/lib/iomgr/socket_utils_windows.cc
- src/core/lib/iomgr/socket_windows.cc
- src/core/lib/iomgr/tcp_client.cc
- src/core/lib/iomgr/tcp_client_cfstream.cc
- src/core/lib/iomgr/tcp_client_custom.cc
- src/core/lib/iomgr/tcp_client_posix.cc
- src/core/lib/iomgr/tcp_client_windows.cc
@ -439,12 +445,15 @@ filegroups:
- src/core/lib/iomgr/block_annotate.h
- src/core/lib/iomgr/buffer_list.h
- src/core/lib/iomgr/call_combiner.h
- src/core/lib/iomgr/cfstream_handle.h
- src/core/lib/iomgr/closure.h
- src/core/lib/iomgr/combiner.h
- src/core/lib/iomgr/dynamic_annotations.h
- src/core/lib/iomgr/endpoint.h
- src/core/lib/iomgr/endpoint_cfstream.h
- src/core/lib/iomgr/endpoint_pair.h
- src/core/lib/iomgr/error.h
- src/core/lib/iomgr/error_cfstream.h
- src/core/lib/iomgr/error_internal.h
- src/core/lib/iomgr/ev_epoll1_linux.h
- src/core/lib/iomgr/ev_epollex_linux.h
@ -545,20 +554,6 @@ filegroups:
uses:
- grpc_codegen
- grpc_trace_headers
- name: grpc_cfstream
headers:
- src/core/lib/iomgr/cfstream_handle.h
- src/core/lib/iomgr/endpoint_cfstream.h
- src/core/lib/iomgr/error_cfstream.h
src:
- src/core/lib/iomgr/cfstream_handle.cc
- src/core/lib/iomgr/endpoint_cfstream.cc
- src/core/lib/iomgr/error_cfstream.cc
- src/core/lib/iomgr/iomgr_posix_cfstream.cc
- src/core/lib/iomgr/tcp_client_cfstream.cc
uses:
- grpc_base_headers
- gpr_base_headers
- name: grpc_client_authority_filter
headers:
- src/core/ext/filters/http/client_authority_filter.h
@ -4524,6 +4519,7 @@ targets:
src:
- test/cpp/end2end/client_crash_test_server.cc
deps:
- grpc++_test_config
- grpc++_test_util
- grpc_test_util
- grpc++
@ -4736,6 +4732,7 @@ targets:
- src/proto/grpc/testing/compiler_test.proto
- test/cpp/codegen/golden_file_test.cc
deps:
- grpc++_test_config
- grpc++
- grpc
- gpr
@ -5451,6 +5448,7 @@ targets:
src:
- test/cpp/end2end/server_crash_test_client.cc
deps:
- grpc++_test_config
- grpc++_test_util
- grpc_test_util
- grpc++

@ -45,7 +45,6 @@ if test "$PHP_GRPC" != "no"; then
third_party/address_sorting/address_sorting_posix.c \
third_party/address_sorting/address_sorting_windows.c \
src/core/lib/gpr/alloc.cc \
src/core/lib/gpr/arena.cc \
src/core/lib/gpr/atm.cc \
src/core/lib/gpr/cpu_iphone.cc \
src/core/lib/gpr/cpu_linux.cc \
@ -78,6 +77,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/gpr/tmpfile_posix.cc \
src/core/lib/gpr/tmpfile_windows.cc \
src/core/lib/gpr/wrap_memcpy.cc \
src/core/lib/gprpp/arena.cc \
src/core/lib/gprpp/fork.cc \
src/core/lib/gprpp/thd_posix.cc \
src/core/lib/gprpp/thd_windows.cc \
@ -110,12 +110,15 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/http/parser.cc \
src/core/lib/iomgr/buffer_list.cc \
src/core/lib/iomgr/call_combiner.cc \
src/core/lib/iomgr/cfstream_handle.cc \
src/core/lib/iomgr/combiner.cc \
src/core/lib/iomgr/endpoint.cc \
src/core/lib/iomgr/endpoint_cfstream.cc \
src/core/lib/iomgr/endpoint_pair_posix.cc \
src/core/lib/iomgr/endpoint_pair_uv.cc \
src/core/lib/iomgr/endpoint_pair_windows.cc \
src/core/lib/iomgr/error.cc \
src/core/lib/iomgr/error_cfstream.cc \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_epollex_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
@ -136,6 +139,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/iomgr_custom.cc \
src/core/lib/iomgr/iomgr_internal.cc \
src/core/lib/iomgr/iomgr_posix.cc \
src/core/lib/iomgr/iomgr_posix_cfstream.cc \
src/core/lib/iomgr/iomgr_uv.cc \
src/core/lib/iomgr/iomgr_windows.cc \
src/core/lib/iomgr/is_epollexclusive_available.cc \
@ -164,6 +168,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/socket_utils_windows.cc \
src/core/lib/iomgr/socket_windows.cc \
src/core/lib/iomgr/tcp_client.cc \
src/core/lib/iomgr/tcp_client_cfstream.cc \
src/core/lib/iomgr/tcp_client_custom.cc \
src/core/lib/iomgr/tcp_client_posix.cc \
src/core/lib/iomgr/tcp_client_windows.cc \

@ -20,7 +20,6 @@ if (PHP_GRPC != "no") {
"third_party\\address_sorting\\address_sorting_posix.c " +
"third_party\\address_sorting\\address_sorting_windows.c " +
"src\\core\\lib\\gpr\\alloc.cc " +
"src\\core\\lib\\gpr\\arena.cc " +
"src\\core\\lib\\gpr\\atm.cc " +
"src\\core\\lib\\gpr\\cpu_iphone.cc " +
"src\\core\\lib\\gpr\\cpu_linux.cc " +
@ -53,6 +52,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\gpr\\tmpfile_posix.cc " +
"src\\core\\lib\\gpr\\tmpfile_windows.cc " +
"src\\core\\lib\\gpr\\wrap_memcpy.cc " +
"src\\core\\lib\\gprpp\\arena.cc " +
"src\\core\\lib\\gprpp\\fork.cc " +
"src\\core\\lib\\gprpp\\thd_posix.cc " +
"src\\core\\lib\\gprpp\\thd_windows.cc " +
@ -85,12 +85,15 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\http\\parser.cc " +
"src\\core\\lib\\iomgr\\buffer_list.cc " +
"src\\core\\lib\\iomgr\\call_combiner.cc " +
"src\\core\\lib\\iomgr\\cfstream_handle.cc " +
"src\\core\\lib\\iomgr\\combiner.cc " +
"src\\core\\lib\\iomgr\\endpoint.cc " +
"src\\core\\lib\\iomgr\\endpoint_cfstream.cc " +
"src\\core\\lib\\iomgr\\endpoint_pair_posix.cc " +
"src\\core\\lib\\iomgr\\endpoint_pair_uv.cc " +
"src\\core\\lib\\iomgr\\endpoint_pair_windows.cc " +
"src\\core\\lib\\iomgr\\error.cc " +
"src\\core\\lib\\iomgr\\error_cfstream.cc " +
"src\\core\\lib\\iomgr\\ev_epoll1_linux.cc " +
"src\\core\\lib\\iomgr\\ev_epollex_linux.cc " +
"src\\core\\lib\\iomgr\\ev_poll_posix.cc " +
@ -111,6 +114,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\iomgr\\iomgr_custom.cc " +
"src\\core\\lib\\iomgr\\iomgr_internal.cc " +
"src\\core\\lib\\iomgr\\iomgr_posix.cc " +
"src\\core\\lib\\iomgr\\iomgr_posix_cfstream.cc " +
"src\\core\\lib\\iomgr\\iomgr_uv.cc " +
"src\\core\\lib\\iomgr\\iomgr_windows.cc " +
"src\\core\\lib\\iomgr\\is_epollexclusive_available.cc " +
@ -139,6 +143,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\iomgr\\socket_utils_windows.cc " +
"src\\core\\lib\\iomgr\\socket_windows.cc " +
"src\\core\\lib\\iomgr\\tcp_client.cc " +
"src\\core\\lib\\iomgr\\tcp_client_cfstream.cc " +
"src\\core\\lib\\iomgr\\tcp_client_custom.cc " +
"src\\core\\lib\\iomgr\\tcp_client_posix.cc " +
"src\\core\\lib\\iomgr\\tcp_client_windows.cc " +

@ -266,6 +266,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile.h',
'src/core/lib/gpr/useful.h',
'src/core/lib/gprpp/abstract.h',
'src/core/lib/gprpp/arena.h',
'src/core/lib/gprpp/atomic.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/manual_constructor.h',
@ -435,12 +436,15 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/block_annotate.h',
'src/core/lib/iomgr/buffer_list.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/cfstream_handle.h',
'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h',
'src/core/lib/iomgr/dynamic_annotations.h',
'src/core/lib/iomgr/endpoint.h',
'src/core/lib/iomgr/endpoint_cfstream.h',
'src/core/lib/iomgr/endpoint_pair.h',
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/error_cfstream.h',
'src/core/lib/iomgr/error_internal.h',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.h',
@ -584,6 +588,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile.h',
'src/core/lib/gpr/useful.h',
'src/core/lib/gprpp/abstract.h',
'src/core/lib/gprpp/arena.h',
'src/core/lib/gprpp/atomic.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/manual_constructor.h',
@ -628,12 +633,15 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/block_annotate.h',
'src/core/lib/iomgr/buffer_list.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/cfstream_handle.h',
'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h',
'src/core/lib/iomgr/dynamic_annotations.h',
'src/core/lib/iomgr/endpoint.h',
'src/core/lib/iomgr/endpoint_cfstream.h',
'src/core/lib/iomgr/endpoint_pair.h',
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/error_cfstream.h',
'src/core/lib/iomgr/error_internal.h',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.h',

@ -205,6 +205,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile.h',
'src/core/lib/gpr/useful.h',
'src/core/lib/gprpp/abstract.h',
'src/core/lib/gprpp/arena.h',
'src/core/lib/gprpp/atomic.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/manual_constructor.h',
@ -215,7 +216,6 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/arena.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
@ -248,6 +248,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/arena.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
@ -413,12 +414,15 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/block_annotate.h',
'src/core/lib/iomgr/buffer_list.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/cfstream_handle.h',
'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h',
'src/core/lib/iomgr/dynamic_annotations.h',
'src/core/lib/iomgr/endpoint.h',
'src/core/lib/iomgr/endpoint_cfstream.h',
'src/core/lib/iomgr/endpoint_pair.h',
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/error_cfstream.h',
'src/core/lib/iomgr/error_internal.h',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.h',
@ -561,12 +565,15 @@ Pod::Spec.new do |s|
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_uv.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
@ -587,6 +594,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
@ -615,6 +623,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
@ -859,15 +868,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
'src/core/ext/filters/workarounds/workaround_utils.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/cfstream_handle.h',
'src/core/lib/iomgr/endpoint_cfstream.h',
'src/core/lib/iomgr/error_cfstream.h'
'src/core/plugin_registry/grpc_plugin_registry.cc'
ss.private_header_files = 'src/core/lib/gpr/alloc.h',
'src/core/lib/gpr/arena.h',
@ -886,6 +887,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile.h',
'src/core/lib/gpr/useful.h',
'src/core/lib/gprpp/abstract.h',
'src/core/lib/gprpp/arena.h',
'src/core/lib/gprpp/atomic.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/manual_constructor.h',
@ -1055,12 +1057,15 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/block_annotate.h',
'src/core/lib/iomgr/buffer_list.h',
'src/core/lib/iomgr/call_combiner.h',
'src/core/lib/iomgr/cfstream_handle.h',
'src/core/lib/iomgr/closure.h',
'src/core/lib/iomgr/combiner.h',
'src/core/lib/iomgr/dynamic_annotations.h',
'src/core/lib/iomgr/endpoint.h',
'src/core/lib/iomgr/endpoint_cfstream.h',
'src/core/lib/iomgr/endpoint_pair.h',
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/error_cfstream.h',
'src/core/lib/iomgr/error_internal.h',
'src/core/lib/iomgr/ev_epoll1_linux.h',
'src/core/lib/iomgr/ev_epollex_linux.h',
@ -1175,10 +1180,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/message_size/message_size_filter.h',
'src/core/ext/filters/http/client_authority_filter.h',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h',
'src/core/ext/filters/workarounds/workaround_utils.h',
'src/core/lib/iomgr/cfstream_handle.h',
'src/core/lib/iomgr/endpoint_cfstream.h',
'src/core/lib/iomgr/error_cfstream.h'
'src/core/ext/filters/workarounds/workaround_utils.h'
end
# CFStream is now default. Leaving this subspec only for compatibility purpose.

@ -99,6 +99,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gpr/tmpfile.h )
s.files += %w( src/core/lib/gpr/useful.h )
s.files += %w( src/core/lib/gprpp/abstract.h )
s.files += %w( src/core/lib/gprpp/arena.h )
s.files += %w( src/core/lib/gprpp/atomic.h )
s.files += %w( src/core/lib/gprpp/fork.h )
s.files += %w( src/core/lib/gprpp/manual_constructor.h )
@ -109,7 +110,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gprpp/thd.h )
s.files += %w( src/core/lib/profiling/timers.h )
s.files += %w( src/core/lib/gpr/alloc.cc )
s.files += %w( src/core/lib/gpr/arena.cc )
s.files += %w( src/core/lib/gpr/atm.cc )
s.files += %w( src/core/lib/gpr/cpu_iphone.cc )
s.files += %w( src/core/lib/gpr/cpu_linux.cc )
@ -142,6 +142,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gpr/tmpfile_posix.cc )
s.files += %w( src/core/lib/gpr/tmpfile_windows.cc )
s.files += %w( src/core/lib/gpr/wrap_memcpy.cc )
s.files += %w( src/core/lib/gprpp/arena.cc )
s.files += %w( src/core/lib/gprpp/fork.cc )
s.files += %w( src/core/lib/gprpp/thd_posix.cc )
s.files += %w( src/core/lib/gprpp/thd_windows.cc )
@ -347,12 +348,15 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/block_annotate.h )
s.files += %w( src/core/lib/iomgr/buffer_list.h )
s.files += %w( src/core/lib/iomgr/call_combiner.h )
s.files += %w( src/core/lib/iomgr/cfstream_handle.h )
s.files += %w( src/core/lib/iomgr/closure.h )
s.files += %w( src/core/lib/iomgr/combiner.h )
s.files += %w( src/core/lib/iomgr/dynamic_annotations.h )
s.files += %w( src/core/lib/iomgr/endpoint.h )
s.files += %w( src/core/lib/iomgr/endpoint_cfstream.h )
s.files += %w( src/core/lib/iomgr/endpoint_pair.h )
s.files += %w( src/core/lib/iomgr/error.h )
s.files += %w( src/core/lib/iomgr/error_cfstream.h )
s.files += %w( src/core/lib/iomgr/error_internal.h )
s.files += %w( src/core/lib/iomgr/ev_epoll1_linux.h )
s.files += %w( src/core/lib/iomgr/ev_epollex_linux.h )
@ -495,12 +499,15 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/http/parser.cc )
s.files += %w( src/core/lib/iomgr/buffer_list.cc )
s.files += %w( src/core/lib/iomgr/call_combiner.cc )
s.files += %w( src/core/lib/iomgr/cfstream_handle.cc )
s.files += %w( src/core/lib/iomgr/combiner.cc )
s.files += %w( src/core/lib/iomgr/endpoint.cc )
s.files += %w( src/core/lib/iomgr/endpoint_cfstream.cc )
s.files += %w( src/core/lib/iomgr/endpoint_pair_posix.cc )
s.files += %w( src/core/lib/iomgr/endpoint_pair_uv.cc )
s.files += %w( src/core/lib/iomgr/endpoint_pair_windows.cc )
s.files += %w( src/core/lib/iomgr/error.cc )
s.files += %w( src/core/lib/iomgr/error_cfstream.cc )
s.files += %w( src/core/lib/iomgr/ev_epoll1_linux.cc )
s.files += %w( src/core/lib/iomgr/ev_epollex_linux.cc )
s.files += %w( src/core/lib/iomgr/ev_poll_posix.cc )
@ -521,6 +528,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/iomgr_custom.cc )
s.files += %w( src/core/lib/iomgr/iomgr_internal.cc )
s.files += %w( src/core/lib/iomgr/iomgr_posix.cc )
s.files += %w( src/core/lib/iomgr/iomgr_posix_cfstream.cc )
s.files += %w( src/core/lib/iomgr/iomgr_uv.cc )
s.files += %w( src/core/lib/iomgr/iomgr_windows.cc )
s.files += %w( src/core/lib/iomgr/is_epollexclusive_available.cc )
@ -549,6 +557,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/socket_utils_windows.cc )
s.files += %w( src/core/lib/iomgr/socket_windows.cc )
s.files += %w( src/core/lib/iomgr/tcp_client.cc )
s.files += %w( src/core/lib/iomgr/tcp_client_cfstream.cc )
s.files += %w( src/core/lib/iomgr/tcp_client_custom.cc )
s.files += %w( src/core/lib/iomgr/tcp_client_posix.cc )
s.files += %w( src/core/lib/iomgr/tcp_client_windows.cc )

@ -218,7 +218,6 @@
],
'sources': [
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/arena.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
@ -251,6 +250,7 @@
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/arena.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
@ -292,12 +292,15 @@
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_uv.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
@ -318,6 +321,7 @@
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
@ -346,6 +350,7 @@
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
@ -660,12 +665,15 @@
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_uv.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
@ -686,6 +694,7 @@
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
@ -714,6 +723,7 @@
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
@ -905,12 +915,15 @@
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_uv.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
@ -931,6 +944,7 @@
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
@ -959,6 +973,7 @@
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
@ -1126,12 +1141,15 @@
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_uv.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
@ -1152,6 +1170,7 @@
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
@ -1180,6 +1199,7 @@
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',

@ -315,11 +315,11 @@ typedef struct {
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_call_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the grpclb load
balancer before using fallback backend addresses from the resolver.
If 0, enter fallback mode immediately. Default value is 10000. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the xDS load
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. Default value is 10000. */
If 0, enter fallback mode immediately. Default value is 10000. */
#define GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS "grpc.xds_fallback_timeout_ms"
/** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
@ -494,7 +494,8 @@ typedef struct grpc_event {
field is guaranteed to be 0 */
int success;
/** The tag passed to grpc_call_start_batch etc to start this operation.
*Only* GRPC_OP_COMPLETE has a tag. For all other grpc_completion_type
values, tag is uninitialized. */
void* tag;
} grpc_event;

@ -104,6 +104,7 @@
<file baseinstalldir="/" name="src/core/lib/gpr/tmpfile.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/useful.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/abstract.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/arena.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/atomic.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/fork.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/manual_constructor.h" role="src" />
@ -114,7 +115,6 @@
<file baseinstalldir="/" name="src/core/lib/gprpp/thd.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/profiling/timers.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/alloc.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/arena.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/atm.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/cpu_iphone.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/cpu_linux.cc" role="src" />
@ -147,6 +147,7 @@
<file baseinstalldir="/" name="src/core/lib/gpr/tmpfile_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/tmpfile_windows.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/wrap_memcpy.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/arena.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/fork.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/thd_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/thd_windows.cc" role="src" />
@ -352,12 +353,15 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/block_annotate.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/buffer_list.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/call_combiner.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/cfstream_handle.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/closure.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/combiner.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/dynamic_annotations.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_cfstream.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error_cfstream.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error_internal.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll1_linux.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epollex_linux.h" role="src" />
@ -500,12 +504,15 @@
<file baseinstalldir="/" name="src/core/lib/http/parser.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/buffer_list.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/call_combiner.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/cfstream_handle.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/combiner.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_cfstream.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair_uv.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair_windows.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error_cfstream.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll1_linux.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epollex_linux.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_posix.cc" role="src" />
@ -526,6 +533,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_custom.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_internal.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_posix_cfstream.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_uv.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_windows.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/is_epollexclusive_available.cc" role="src" />
@ -554,6 +562,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils_windows.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_windows.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/tcp_client.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/tcp_client_cfstream.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/tcp_client_custom.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/tcp_client_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/tcp_client_windows.cc" role="src" />

@ -66,8 +66,7 @@
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/status_metadata.h"
using grpc_core::internal::ClientChannelMethodParams;
using grpc_core::internal::ClientChannelMethodParamsTable;
using grpc_core::internal::ClientChannelMethodParsedObject;
using grpc_core::internal::ProcessedResolverResult;
using grpc_core::internal::ServerRetryThrottleData;
@ -157,10 +156,8 @@ class ChannelData {
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data() const {
return retry_throttle_data_;
}
RefCountedPtr<ClientChannelMethodParams> GetMethodParams(
const grpc_slice& path) {
if (method_params_table_ == nullptr) return nullptr;
return ServiceConfig::MethodConfigTableLookup(*method_params_table_, path);
RefCountedPtr<ServiceConfig> service_config() const {
return service_config_;
}
grpc_connectivity_state CheckConnectivityState(bool try_to_connect);
@ -226,8 +223,8 @@ class ChannelData {
~ChannelData();
static bool ProcessResolverResultLocked(
void* arg, Resolver::Result* result, const char** lb_policy_name,
RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config);
void* arg, const Resolver::Result& result, const char** lb_policy_name,
RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config);
grpc_error* DoPingLocked(grpc_transport_op* op);
@ -243,6 +240,7 @@ class ChannelData {
const size_t per_rpc_retry_buffer_size_;
grpc_channel_stack* owning_stack_;
ClientChannelFactory* client_channel_factory_;
UniquePtr<char> server_name_;
// Initialized shortly after construction.
channelz::ClientChannelNode* channelz_node_ = nullptr;
@ -255,7 +253,7 @@ class ChannelData {
// Data from service config.
bool received_service_config_data_ = false;
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
RefCountedPtr<ClientChannelMethodParamsTable> method_params_table_;
RefCountedPtr<ServiceConfig> service_config_;
//
// Fields used in the control plane. Guarded by combiner.
@ -266,6 +264,7 @@ class ChannelData {
OrphanablePtr<LoadBalancingPolicy> resolving_lb_policy_;
grpc_connectivity_state_tracker state_tracker_;
ExternalConnectivityWatcher::WatcherList external_connectivity_watcher_list_;
UniquePtr<char> health_check_service_name_;
//
// Fields accessed from both data plane and control plane combiners.
@ -615,13 +614,14 @@ class CallData {
grpc_slice path_; // Request path.
gpr_timespec call_start_time_;
grpc_millis deadline_;
gpr_arena* arena_;
Arena* arena_;
grpc_call_stack* owning_call_;
grpc_call_combiner* call_combiner_;
CallCombiner* call_combiner_;
grpc_call_context_element* call_context_;
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
RefCountedPtr<ClientChannelMethodParams> method_params_;
ServiceConfig::CallData service_config_call_data_;
const ClientChannelMethodParsedObject* method_params_ = nullptr;
RefCountedPtr<SubchannelCall> subchannel_call_;
@ -764,11 +764,12 @@ class ChannelData::ServiceConfigSetter {
public:
ServiceConfigSetter(
ChannelData* chand,
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data,
RefCountedPtr<ClientChannelMethodParamsTable> method_params_table)
Optional<internal::ClientChannelGlobalParsedObject::RetryThrottling>
retry_throttle_data,
RefCountedPtr<ServiceConfig> service_config)
: chand_(chand),
retry_throttle_data_(std::move(retry_throttle_data)),
method_params_table_(std::move(method_params_table)) {
retry_throttle_data_(retry_throttle_data),
service_config_(std::move(service_config)) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack_, "ServiceConfigSetter");
GRPC_CLOSURE_INIT(&closure_, SetServiceConfigData, this,
grpc_combiner_scheduler(chand->data_plane_combiner_));
@ -781,8 +782,14 @@ class ChannelData::ServiceConfigSetter {
ChannelData* chand = self->chand_;
// Update channel state.
chand->received_service_config_data_ = true;
chand->retry_throttle_data_ = std::move(self->retry_throttle_data_);
chand->method_params_table_ = std::move(self->method_params_table_);
if (self->retry_throttle_data_.has_value()) {
chand->retry_throttle_data_ =
internal::ServerRetryThrottleMap::GetDataForServer(
chand->server_name_.get(),
self->retry_throttle_data_.value().max_milli_tokens,
self->retry_throttle_data_.value().milli_token_ratio);
}
chand->service_config_ = std::move(self->service_config_);
// Apply service config to queued picks.
for (QueuedPick* pick = chand->queued_picks_; pick != nullptr;
pick = pick->next) {
@ -796,8 +803,9 @@ class ChannelData::ServiceConfigSetter {
}
ChannelData* chand_;
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
RefCountedPtr<ClientChannelMethodParamsTable> method_params_table_;
Optional<internal::ClientChannelGlobalParsedObject::RetryThrottling>
retry_throttle_data_;
RefCountedPtr<ServiceConfig> service_config_;
grpc_closure closure_;
};
@ -934,10 +942,18 @@ class ChannelData::ClientChannelControlHelper
}
Subchannel* CreateSubchannel(const grpc_channel_args& args) override {
grpc_arg arg = SubchannelPoolInterface::CreateChannelArg(
grpc_arg args_to_add[2];
int num_args_to_add = 0;
if (chand_->health_check_service_name_ != nullptr) {
args_to_add[0] = grpc_channel_arg_string_create(
const_cast<char*>("grpc.temp.health_check"),
const_cast<char*>(chand_->health_check_service_name_.get()));
num_args_to_add++;
}
args_to_add[num_args_to_add++] = SubchannelPoolInterface::CreateChannelArg(
chand_->subchannel_pool_.get());
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add(&args, &arg, 1);
grpc_channel_args_copy_and_add(&args, args_to_add, num_args_to_add);
Subchannel* subchannel =
chand_->client_channel_factory_->CreateSubchannel(new_args);
grpc_channel_args_destroy(new_args);
@ -1050,6 +1066,12 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
"filter");
return;
}
grpc_uri* uri = grpc_uri_parse(server_uri, true);
if (uri != nullptr && uri->path[0] != '\0') {
server_name_.reset(
gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path));
}
grpc_uri_destroy(uri);
char* proxy_name = nullptr;
grpc_channel_args* new_args = nullptr;
grpc_proxy_mappers_map_name(server_uri, args->channel_args, &proxy_name,
@ -1109,19 +1131,21 @@ ChannelData::~ChannelData() {
// Synchronous callback from ResolvingLoadBalancingPolicy to process a
// resolver result update.
bool ChannelData::ProcessResolverResultLocked(
void* arg, Resolver::Result* result, const char** lb_policy_name,
RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config) {
void* arg, const Resolver::Result& result, const char** lb_policy_name,
RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config) {
ChannelData* chand = static_cast<ChannelData*>(arg);
ProcessedResolverResult resolver_result(result, chand->enable_retries_);
UniquePtr<char> service_config_json = resolver_result.service_config_json();
ProcessedResolverResult resolver_result(result);
char* service_config_json = gpr_strdup(resolver_result.service_config_json());
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
chand, service_config_json.get());
chand, service_config_json);
}
chand->health_check_service_name_.reset(
gpr_strdup(resolver_result.health_check_service_name()));
// Create service config setter to update channel state in the data
// plane combiner. Destroys itself when done.
New<ServiceConfigSetter>(chand, resolver_result.retry_throttle_data(),
resolver_result.method_params_table());
resolver_result.service_config());
// Swap out the data used by GetChannelInfo().
bool service_config_changed;
{
@ -1131,9 +1155,9 @@ bool ChannelData::ProcessResolverResultLocked(
((service_config_json == nullptr) !=
(chand->info_service_config_json_ == nullptr)) ||
(service_config_json != nullptr &&
strcmp(service_config_json.get(),
chand->info_service_config_json_.get()) != 0);
chand->info_service_config_json_ = std::move(service_config_json);
strcmp(service_config_json, chand->info_service_config_json_.get()) !=
0);
chand->info_service_config_json_.reset(service_config_json);
}
// Return results.
*lb_policy_name = chand->info_lb_policy_name_.get();
@ -1483,8 +1507,8 @@ void CallData::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
GPR_ASSERT(send_initial_metadata_storage_ == nullptr);
grpc_metadata_batch* send_initial_metadata =
batch->payload->send_initial_metadata.send_initial_metadata;
send_initial_metadata_storage_ = (grpc_linked_mdelem*)gpr_arena_alloc(
arena_, sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
send_initial_metadata_storage_ = (grpc_linked_mdelem*)arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
grpc_metadata_batch_copy(send_initial_metadata, &send_initial_metadata_,
send_initial_metadata_storage_);
send_initial_metadata_flags_ =
@ -1493,10 +1517,8 @@ void CallData::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
}
// Set up cache for send_message ops.
if (batch->send_message) {
ByteStreamCache* cache = static_cast<ByteStreamCache*>(
gpr_arena_alloc(arena_, sizeof(ByteStreamCache)));
new (cache)
ByteStreamCache(std::move(batch->payload->send_message.send_message));
ByteStreamCache* cache = arena_->New<ByteStreamCache>(
std::move(batch->payload->send_message.send_message));
send_messages_.push_back(cache);
}
// Save metadata batch for send_trailing_metadata ops.
@ -1505,8 +1527,7 @@ void CallData::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
GPR_ASSERT(send_trailing_metadata_storage_ == nullptr);
grpc_metadata_batch* send_trailing_metadata =
batch->payload->send_trailing_metadata.send_trailing_metadata;
send_trailing_metadata_storage_ = (grpc_linked_mdelem*)gpr_arena_alloc(
arena_,
send_trailing_metadata_storage_ = (grpc_linked_mdelem*)arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count);
grpc_metadata_batch_copy(send_trailing_metadata, &send_trailing_metadata_,
send_trailing_metadata_storage_);
@ -1843,8 +1864,7 @@ void CallData::DoRetry(grpc_call_element* elem,
grpc_millis server_pushback_ms) {
ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
GPR_ASSERT(method_params_ != nullptr);
const ClientChannelMethodParams::RetryPolicy* retry_policy =
method_params_->retry_policy();
const auto* retry_policy = method_params_->retry_policy();
GPR_ASSERT(retry_policy != nullptr);
// Reset subchannel call and connected subchannel.
subchannel_call_.reset();
@ -1852,7 +1872,7 @@ void CallData::DoRetry(grpc_call_element* elem,
// Compute backoff delay.
grpc_millis next_attempt_time;
if (server_pushback_ms >= 0) {
next_attempt_time = grpc_core::ExecCtx::Get()->Now() + server_pushback_ms;
next_attempt_time = ExecCtx::Get()->Now() + server_pushback_ms;
last_attempt_got_server_pushback_ = true;
} else {
if (num_attempts_completed_ == 1 || last_attempt_got_server_pushback_) {
@ -1869,7 +1889,7 @@ void CallData::DoRetry(grpc_call_element* elem,
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand,
this, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
this, next_attempt_time - ExecCtx::Get()->Now());
}
// Schedule retry after computed delay.
GRPC_CLOSURE_INIT(&pick_closure_, StartPickLocked, elem,
@ -1886,8 +1906,7 @@ bool CallData::MaybeRetry(grpc_call_element* elem,
ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
// Get retry policy.
if (method_params_ == nullptr) return false;
const ClientChannelMethodParams::RetryPolicy* retry_policy =
method_params_->retry_policy();
const auto* retry_policy = method_params_->retry_policy();
if (retry_policy == nullptr) return false;
// If we've already dispatched a retry from this call, return true.
// This catches the case where the batch has multiple callbacks
@ -1994,10 +2013,8 @@ bool CallData::MaybeRetry(grpc_call_element* elem,
CallData::SubchannelCallBatchData* CallData::SubchannelCallBatchData::Create(
grpc_call_element* elem, int refcount, bool set_on_complete) {
CallData* calld = static_cast<CallData*>(elem->call_data);
SubchannelCallBatchData* batch_data =
new (gpr_arena_alloc(calld->arena_, sizeof(*batch_data)))
SubchannelCallBatchData(elem, calld, refcount, set_on_complete);
return batch_data;
return calld->arena_->New<SubchannelCallBatchData>(elem, calld, refcount,
set_on_complete);
}
CallData::SubchannelCallBatchData::SubchannelCallBatchData(
@ -2589,10 +2606,10 @@ void CallData::AddRetriableSendInitialMetadataOp(
//
// If we've already completed one or more attempts, add the
// grpc-retry-attempts header.
retry_state->send_initial_metadata_storage = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(arena_, sizeof(grpc_linked_mdelem) *
(send_initial_metadata_.list.count +
(num_attempts_completed_ > 0))));
retry_state->send_initial_metadata_storage =
static_cast<grpc_linked_mdelem*>(arena_->Alloc(
sizeof(grpc_linked_mdelem) *
(send_initial_metadata_.list.count + (num_attempts_completed_ > 0))));
grpc_metadata_batch_copy(&send_initial_metadata_,
&retry_state->send_initial_metadata,
retry_state->send_initial_metadata_storage);
@ -2651,8 +2668,7 @@ void CallData::AddRetriableSendTrailingMetadataOp(
// the filters in the subchannel stack may modify this batch, and we don't
// want those modifications to be passed forward to subsequent attempts.
retry_state->send_trailing_metadata_storage =
static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
arena_,
static_cast<grpc_linked_mdelem*>(arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_trailing_metadata_.list.count));
grpc_metadata_batch_copy(&send_trailing_metadata_,
&retry_state->send_trailing_metadata,
@ -3017,7 +3033,7 @@ class CallData::QueuedPickCanceller {
GRPC_CALL_STACK_REF(calld->owning_call_, "QueuedPickCanceller");
GRPC_CLOSURE_INIT(&closure_, &CancelLocked, this,
grpc_combiner_scheduler(chand->data_plane_combiner()));
grpc_call_combiner_set_notify_on_cancel(calld->call_combiner_, &closure_);
calld->call_combiner_->SetNotifyOnCancel(&closure_);
}
private:
@ -3078,8 +3094,19 @@ void CallData::ApplyServiceConfigToCallLocked(grpc_call_element* elem) {
gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
chand, this);
}
// Store a ref to the service_config in service_config_call_data_. Also, save
// a pointer to this in the call_context so that all future filters can access
// it.
service_config_call_data_ =
ServiceConfig::CallData(chand->service_config(), path_);
if (service_config_call_data_.service_config() != nullptr) {
call_context_[GRPC_SERVICE_CONFIG_CALL_DATA].value =
&service_config_call_data_;
method_params_ = static_cast<ClientChannelMethodParsedObject*>(
service_config_call_data_.GetMethodParsedObject(
internal::ClientChannelServiceConfigParser::ParserIndex()));
}
retry_throttle_data_ = chand->retry_throttle_data();
method_params_ = chand->GetMethodParams(path_);
if (method_params_ != nullptr) {
// If the deadline from the service config is shorter than the one
// from the client API, reset the deadline timer.
@ -3097,12 +3124,10 @@ void CallData::ApplyServiceConfigToCallLocked(grpc_call_element* elem) {
uint32_t* send_initial_metadata_flags =
&pending_batches_[0]
.batch->payload->send_initial_metadata.send_initial_metadata_flags;
if (GPR_UNLIKELY(method_params_->wait_for_ready() !=
ClientChannelMethodParams::WAIT_FOR_READY_UNSET &&
!(*send_initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET))) {
if (method_params_->wait_for_ready() ==
ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
if (method_params_->wait_for_ready().has_value() &&
!(*send_initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET)) {
if (method_params_->wait_for_ready().value()) {
*send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
*send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;

@ -32,6 +32,7 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/lib/surface/channel_init.h"
@ -50,6 +51,7 @@ static bool append_filter(grpc_channel_stack_builder* builder, void* arg) {
void grpc_client_channel_init(void) {
grpc_core::ServiceConfig::Init();
grpc_core::internal::ClientChannelServiceConfigParser::Register();
grpc_core::LoadBalancingPolicyRegistry::Builder::InitRegistry();
grpc_core::ResolverRegistry::Builder::InitRegistry();
grpc_core::internal::ServerRetryThrottleMap::Init();

@ -37,11 +37,10 @@
#define HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS 120
#define HEALTH_CHECK_RECONNECT_JITTER 0.2
grpc_core::TraceFlag grpc_health_check_client_trace(false,
"health_check_client");
namespace grpc_core {
TraceFlag grpc_health_check_client_trace(false, "health_check_client");
//
// HealthCheckClient
//
@ -50,7 +49,7 @@ HealthCheckClient::HealthCheckClient(
const char* service_name,
RefCountedPtr<ConnectedSubchannel> connected_subchannel,
grpc_pollset_set* interested_parties,
grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode> channelz_node)
RefCountedPtr<channelz::SubchannelNode> channelz_node)
: InternallyRefCounted<HealthCheckClient>(&grpc_health_check_client_trace),
service_name_(service_name),
connected_subchannel_(std::move(connected_subchannel)),
@ -281,11 +280,9 @@ HealthCheckClient::CallState::CallState(
: InternallyRefCounted<CallState>(&grpc_health_check_client_trace),
health_check_client_(std::move(health_check_client)),
pollent_(grpc_polling_entity_create_from_pollset_set(interested_parties)),
arena_(gpr_arena_create(health_check_client_->connected_subchannel_
->GetInitialCallSizeEstimate(0))),
payload_(context_) {
grpc_call_combiner_init(&call_combiner_);
}
arena_(Arena::Create(health_check_client_->connected_subchannel_
->GetInitialCallSizeEstimate(0))),
payload_(context_) {}
HealthCheckClient::CallState::~CallState() {
if (grpc_health_check_client_trace.enabled()) {
@ -303,14 +300,13 @@ HealthCheckClient::CallState::~CallState() {
// holding to the call stack. Also flush the closures on exec_ctx so that
// filters that schedule cancel notification closures on exec_ctx do not
// need to take a ref of the call stack to guarantee closure liveness.
grpc_call_combiner_set_notify_on_cancel(&call_combiner_, nullptr);
grpc_core::ExecCtx::Get()->Flush();
grpc_call_combiner_destroy(&call_combiner_);
gpr_arena_destroy(arena_);
call_combiner_.SetNotifyOnCancel(nullptr);
ExecCtx::Get()->Flush();
arena_->Destroy();
}
void HealthCheckClient::CallState::Orphan() {
grpc_call_combiner_cancel(&call_combiner_, GRPC_ERROR_CANCELLED);
call_combiner_.Cancel(GRPC_ERROR_CANCELLED);
Cancel();
}

@ -27,7 +27,7 @@
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/atomic.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
@ -97,8 +97,8 @@ class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
RefCountedPtr<HealthCheckClient> health_check_client_;
grpc_polling_entity pollent_;
gpr_arena* arena_;
grpc_call_combiner call_combiner_;
Arena* arena_;
grpc_core::CallCombiner call_combiner_;
grpc_call_context_element context_[GRPC_CONTEXT_COUNT] = {};
// The streaming call to the backend. Always non-NULL.

@ -62,32 +62,6 @@ void LoadBalancingPolicy::ShutdownAndUnrefLocked(void* arg,
policy->Unref();
}
grpc_json* LoadBalancingPolicy::ParseLoadBalancingConfig(
const grpc_json* lb_config_array) {
if (lb_config_array == nullptr || lb_config_array->type != GRPC_JSON_ARRAY) {
return nullptr;
}
// Find the first LB policy that this client supports.
for (const grpc_json* lb_config = lb_config_array->child;
lb_config != nullptr; lb_config = lb_config->next) {
if (lb_config->type != GRPC_JSON_OBJECT) return nullptr;
grpc_json* policy = nullptr;
for (grpc_json* field = lb_config->child; field != nullptr;
field = field->next) {
if (field->key == nullptr || field->type != GRPC_JSON_OBJECT)
return nullptr;
if (policy != nullptr) return nullptr; // Violate "oneof" type.
policy = field;
}
if (policy == nullptr) return nullptr;
// If we support this policy, then select it.
if (LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(policy->key)) {
return policy;
}
}
return nullptr;
}
//
// LoadBalancingPolicy::UpdateArgs
//

@ -36,6 +36,18 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
namespace grpc_core {
/// Interface for parsed forms of load balancing configs found in a service
/// config.
class ParsedLoadBalancingConfig : public RefCounted<ParsedLoadBalancingConfig> {
public:
virtual ~ParsedLoadBalancingConfig() = default;
// Returns the load balancing policy name
virtual const char* name() const GRPC_ABSTRACT;
GRPC_ABSTRACT_BASE_CLASS;
};
/// Interface for load balancing policies.
///
/// The following concepts are used here:
@ -167,6 +179,9 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
/// A proxy object used by the LB policy to communicate with the client
/// channel.
// TODO(juanlishen): Consider adding a mid-layer subclass that helps handle
// things like swapping in pending policy when it's ready. Currently, we are
// duplicating the logic in many subclasses.
class ChannelControlHelper {
public:
ChannelControlHelper() = default;
@ -193,30 +208,11 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
GRPC_ABSTRACT_BASE_CLASS
};
/// Configuration for an LB policy instance.
// TODO(roth): Find a better JSON representation for this API.
class Config : public RefCounted<Config> {
public:
Config(const grpc_json* lb_config,
RefCountedPtr<ServiceConfig> service_config)
: json_(lb_config), service_config_(std::move(service_config)) {}
const char* name() const { return json_->key; }
const grpc_json* config() const { return json_->child; }
RefCountedPtr<ServiceConfig> service_config() const {
return service_config_;
}
private:
const grpc_json* json_;
RefCountedPtr<ServiceConfig> service_config_;
};
/// Data passed to the UpdateLocked() method when new addresses and
/// config are available.
struct UpdateArgs {
ServerAddressList addresses;
RefCountedPtr<Config> config;
RefCountedPtr<ParsedLoadBalancingConfig> config;
const grpc_channel_args* args = nullptr;
// TODO(roth): Remove everything below once channel args is
@ -287,10 +283,6 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
void Orphan() override;
/// Returns the JSON node of policy (with both policy name and config content)
/// given the JSON node of a LoadBalancingConfig array.
static grpc_json* ParseLoadBalancingConfig(const grpc_json* lb_config_array);
// A picker that returns PICK_QUEUE for all picks.
// Also calls the parent LB policy's ExitIdleLocked() method when the
// first pick is seen.

@ -118,6 +118,21 @@ namespace {
constexpr char kGrpclb[] = "grpclb";
class ParsedGrpcLbConfig : public ParsedLoadBalancingConfig {
public:
explicit ParsedGrpcLbConfig(
RefCountedPtr<ParsedLoadBalancingConfig> child_policy)
: child_policy_(std::move(child_policy)) {}
const char* name() const override { return kGrpclb; }
RefCountedPtr<ParsedLoadBalancingConfig> child_policy() const {
return child_policy_;
}
private:
RefCountedPtr<ParsedLoadBalancingConfig> child_policy_;
};
class GrpcLb : public LoadBalancingPolicy {
public:
explicit GrpcLb(Args args);
@ -302,7 +317,6 @@ class GrpcLb : public LoadBalancingPolicy {
// Helper functions used in UpdateLocked().
void ProcessAddressesAndChannelArgsLocked(const ServerAddressList& addresses,
const grpc_channel_args& args);
void ParseLbConfig(Config* grpclb_config);
static void OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error);
void CancelBalancerChannelConnectivityWatchLocked();
@ -380,7 +394,7 @@ class GrpcLb : public LoadBalancingPolicy {
// until it reports READY, at which point it will be moved to child_policy_.
OrphanablePtr<LoadBalancingPolicy> pending_child_policy_;
// The child policy config.
RefCountedPtr<Config> child_policy_config_;
RefCountedPtr<ParsedLoadBalancingConfig> child_policy_config_;
// Child policy in state READY.
bool child_policy_ready_ = false;
};
@ -1129,13 +1143,13 @@ void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
// we want to retry connecting. Otherwise, we have deliberately ended this
// call and no further action is required.
if (lb_calld == grpclb_policy->lb_calld_.get()) {
// If we did not receive a serverlist and the fallback-at-startup checks
// are pending, go into fallback mode immediately. This short-circuits
// the timeout for the fallback-at-startup case.
if (!lb_calld->seen_serverlist_ &&
grpclb_policy->fallback_at_startup_checks_pending_) {
// If the fallback-at-startup checks are pending, go into fallback mode
// immediately. This short-circuits the timeout for the fallback-at-startup
// case.
if (grpclb_policy->fallback_at_startup_checks_pending_) {
GPR_ASSERT(!lb_calld->seen_serverlist_);
gpr_log(GPR_INFO,
"[grpclb %p] balancer call finished without receiving "
"[grpclb %p] Balancer call finished without receiving "
"serverlist; entering fallback mode",
grpclb_policy);
grpclb_policy->fallback_at_startup_checks_pending_ = false;
@ -1373,7 +1387,13 @@ void GrpcLb::FillChildRefsForChannelz(
void GrpcLb::UpdateLocked(UpdateArgs args) {
const bool is_initial_update = lb_channel_ == nullptr;
ParseLbConfig(args.config.get());
auto* grpclb_config =
static_cast<const ParsedGrpcLbConfig*>(args.config.get());
if (grpclb_config != nullptr) {
child_policy_config_ = grpclb_config->child_policy();
} else {
child_policy_config_ = nullptr;
}
ProcessAddressesAndChannelArgsLocked(args.addresses, *args.args);
// Update the existing child policy.
if (child_policy_ != nullptr) CreateOrUpdateChildPolicyLocked();
@ -1462,27 +1482,6 @@ void GrpcLb::ProcessAddressesAndChannelArgsLocked(
response_generator_->SetResponse(std::move(result));
}
void GrpcLb::ParseLbConfig(Config* grpclb_config) {
const grpc_json* child_policy = nullptr;
if (grpclb_config != nullptr) {
const grpc_json* grpclb_config_json = grpclb_config->config();
for (const grpc_json* field = grpclb_config_json; field != nullptr;
field = field->next) {
if (field->key == nullptr) return;
if (strcmp(field->key, "childPolicy") == 0) {
if (child_policy != nullptr) return; // Duplicate.
child_policy = ParseLoadBalancingConfig(field);
}
}
}
if (child_policy != nullptr) {
child_policy_config_ =
MakeRefCounted<Config>(child_policy, grpclb_config->service_config());
} else {
child_policy_config_.reset();
}
}
void GrpcLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error) {
GrpcLb* self = static_cast<GrpcLb*>(arg);
@ -1628,20 +1627,16 @@ void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
grpc_channel_args* GrpcLb::CreateChildPolicyArgsLocked(
bool is_backend_from_grpclb_load_balancer) {
grpc_arg args_to_add[2] = {
// A channel arg indicating if the target is a backend inferred from a
// grpclb load balancer.
grpc_channel_arg_integer_create(
const_cast<char*>(
GRPC_ARG_ADDRESS_IS_BACKEND_FROM_GRPCLB_LOAD_BALANCER),
is_backend_from_grpclb_load_balancer),
};
size_t num_args_to_add = 1;
InlinedVector<grpc_arg, 2> args_to_add;
args_to_add.emplace_back(grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_BACKEND_FROM_GRPCLB_LOAD_BALANCER),
is_backend_from_grpclb_load_balancer));
if (is_backend_from_grpclb_load_balancer) {
args_to_add[num_args_to_add++] = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1);
args_to_add.emplace_back(grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1));
}
return grpc_channel_args_copy_and_add(args_, args_to_add, num_args_to_add);
return grpc_channel_args_copy_and_add(args_, args_to_add.data(),
args_to_add.size());
}
OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
@ -1804,6 +1799,40 @@ class GrpcLbFactory : public LoadBalancingPolicyFactory {
}
const char* name() const override { return kGrpclb; }
RefCountedPtr<ParsedLoadBalancingConfig> ParseLoadBalancingConfig(
const grpc_json* json, grpc_error** error) const override {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
if (json == nullptr) {
return RefCountedPtr<ParsedLoadBalancingConfig>(
New<ParsedGrpcLbConfig>(nullptr));
}
InlinedVector<grpc_error*, 2> error_list;
RefCountedPtr<ParsedLoadBalancingConfig> child_policy;
for (const grpc_json* field = json->child; field != nullptr;
field = field->next) {
if (field->key == nullptr) continue;
if (strcmp(field->key, "childPolicy") == 0) {
if (child_policy != nullptr) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:childPolicy error:Duplicate entry"));
}
grpc_error* parse_error = GRPC_ERROR_NONE;
child_policy = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
field, &parse_error);
if (parse_error != GRPC_ERROR_NONE) {
error_list.push_back(parse_error);
}
}
}
if (error_list.empty()) {
return RefCountedPtr<ParsedLoadBalancingConfig>(
New<ParsedGrpcLbConfig>(std::move(child_policy)));
} else {
*error = GRPC_ERROR_CREATE_FROM_VECTOR("GrpcLb Parser", &error_list);
return nullptr;
}
}
};
} // namespace

@ -526,6 +526,11 @@ void PickFirst::PickFirstSubchannelData::
}
}
class ParsedPickFirstConfig : public ParsedLoadBalancingConfig {
public:
const char* name() const override { return kPickFirst; }
};
//
// factory
//
@ -538,6 +543,15 @@ class PickFirstFactory : public LoadBalancingPolicyFactory {
}
const char* name() const override { return kPickFirst; }
RefCountedPtr<ParsedLoadBalancingConfig> ParseLoadBalancingConfig(
const grpc_json* json, grpc_error** error) const override {
if (json != nullptr) {
GPR_DEBUG_ASSERT(strcmp(json->key, name()) == 0);
}
return RefCountedPtr<ParsedLoadBalancingConfig>(
New<ParsedPickFirstConfig>());
}
};
} // namespace

@ -503,6 +503,11 @@ void RoundRobin::UpdateLocked(UpdateArgs args) {
}
}
class ParsedRoundRobinConfig : public ParsedLoadBalancingConfig {
public:
const char* name() const override { return kRoundRobin; }
};
//
// factory
//
@ -515,6 +520,15 @@ class RoundRobinFactory : public LoadBalancingPolicyFactory {
}
const char* name() const override { return kRoundRobin; }
RefCountedPtr<ParsedLoadBalancingConfig> ParseLoadBalancingConfig(
const grpc_json* json, grpc_error** error) const override {
if (json != nullptr) {
GPR_DEBUG_ASSERT(strcmp(json->key, name()) == 0);
}
return RefCountedPtr<ParsedLoadBalancingConfig>(
New<ParsedRoundRobinConfig>());
}
};
} // namespace

File diff suppressed because it is too large Load Diff

@ -37,9 +37,12 @@ class LoadBalancingPolicyFactory {
/// Caller does NOT take ownership of result.
virtual const char* name() const GRPC_ABSTRACT;
virtual RefCountedPtr<ParsedLoadBalancingConfig> ParseLoadBalancingConfig(
const grpc_json* json, grpc_error** error) const GRPC_ABSTRACT;
virtual ~LoadBalancingPolicyFactory() {}
GRPC_ABSTRACT_BASE_CLASS
GRPC_ABSTRACT_BASE_CLASS;
};
} // namespace grpc_core

@ -94,9 +94,112 @@ LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
return factory->CreateLoadBalancingPolicy(std::move(args));
}
bool LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(const char* name) {
bool LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(
const char* name, bool* requires_config) {
GPR_ASSERT(g_state != nullptr);
return g_state->GetLoadBalancingPolicyFactory(name) != nullptr;
auto* factory = g_state->GetLoadBalancingPolicyFactory(name);
if (factory == nullptr) {
return false;
}
if (requires_config != nullptr) {
grpc_error* error = GRPC_ERROR_NONE;
// Check if the load balancing policy allows an empty config
*requires_config =
factory->ParseLoadBalancingConfig(nullptr, &error) == nullptr;
GRPC_ERROR_UNREF(error);
}
return true;
}
namespace {
// Returns the JSON node of policy (with both policy name and config content)
// given the JSON node of a LoadBalancingConfig array.
grpc_json* ParseLoadBalancingConfigHelper(const grpc_json* lb_config_array,
grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
char* error_msg;
if (lb_config_array == nullptr || lb_config_array->type != GRPC_JSON_ARRAY) {
gpr_asprintf(&error_msg, "field:%s error:type should be array",
lb_config_array->key);
*error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
return nullptr;
}
const char* field_name = lb_config_array->key;
// Find the first LB policy that this client supports.
for (const grpc_json* lb_config = lb_config_array->child;
lb_config != nullptr; lb_config = lb_config->next) {
if (lb_config->type != GRPC_JSON_OBJECT) {
gpr_asprintf(&error_msg,
"field:%s error:child entry should be of type object",
field_name);
*error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
return nullptr;
}
grpc_json* policy = nullptr;
for (grpc_json* field = lb_config->child; field != nullptr;
field = field->next) {
if (field->key == nullptr || field->type != GRPC_JSON_OBJECT) {
gpr_asprintf(&error_msg,
"field:%s error:child entry should be of type object",
field_name);
*error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
return nullptr;
}
if (policy != nullptr) {
gpr_asprintf(&error_msg, "field:%s error:oneOf violation", field_name);
*error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
return nullptr;
} // Violate "oneof" type.
policy = field;
}
if (policy == nullptr) {
gpr_asprintf(&error_msg, "field:%s error:no policy found in child entry",
field_name);
*error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
return nullptr;
}
// If we support this policy, then select it.
if (LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(policy->key,
nullptr)) {
return policy;
}
}
gpr_asprintf(&error_msg, "field:%s error:No known policy", field_name);
*error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
return nullptr;
}
} // namespace
RefCountedPtr<ParsedLoadBalancingConfig>
LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(const grpc_json* json,
grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
GPR_ASSERT(g_state != nullptr);
const grpc_json* policy = ParseLoadBalancingConfigHelper(json, error);
if (policy == nullptr) {
return nullptr;
} else {
GPR_DEBUG_ASSERT(*error == GRPC_ERROR_NONE && json != nullptr);
// Find factory.
LoadBalancingPolicyFactory* factory =
g_state->GetLoadBalancingPolicyFactory(policy->key);
if (factory == nullptr) {
char* msg;
gpr_asprintf(&msg, "field:%s error:Factory not found to create policy",
json->key);
*error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return nullptr;
}
// Parse load balancing config via factory.
return factory->ParseLoadBalancingConfig(policy, error);
}
}
} // namespace grpc_core

@ -49,8 +49,15 @@ class LoadBalancingPolicyRegistry {
const char* name, LoadBalancingPolicy::Args args);
/// Returns true if the LB policy factory specified by \a name exists in this
/// registry.
static bool LoadBalancingPolicyExists(const char* name);
/// registry. If the load balancing policy requires a config to be specified
/// then sets \a requires_config to true.
static bool LoadBalancingPolicyExists(const char* name,
bool* requires_config);
/// Returns a parsed object of the load balancing policy to be used from a
/// LoadBalancingConfig array \a json.
static RefCountedPtr<ParsedLoadBalancingConfig> ParseLoadBalancingConfig(
const grpc_json* json, grpc_error** error);
};
} // namespace grpc_core

@ -445,7 +445,8 @@ class SockToPolledFdMap {
*/
static ares_socket_t Socket(int af, int type, int protocol, void* user_data) {
SockToPolledFdMap* map = static_cast<SockToPolledFdMap*>(user_data);
SOCKET s = WSASocket(af, type, protocol, nullptr, 0, WSA_FLAG_OVERLAPPED);
SOCKET s = WSASocket(af, type, protocol, nullptr, 0,
grpc_get_default_wsa_socket_flags());
if (s == INVALID_SOCKET) {
return s;
}

@ -35,6 +35,7 @@
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/optional.h"
#include "src/core/lib/uri/uri_parser.h"
// As per the retry design, we do not allow more than 5 retry attempts.
@ -43,80 +44,68 @@
namespace grpc_core {
namespace internal {
namespace {
size_t g_client_channel_service_config_parser_index;
}
size_t ClientChannelServiceConfigParser::ParserIndex() {
return g_client_channel_service_config_parser_index;
}
void ClientChannelServiceConfigParser::Register() {
g_client_channel_service_config_parser_index =
ServiceConfig::RegisterParser(UniquePtr<ServiceConfig::Parser>(
New<ClientChannelServiceConfigParser>()));
}
ProcessedResolverResult::ProcessedResolverResult(
Resolver::Result* resolver_result, bool parse_retry)
: service_config_(resolver_result->service_config) {
const Resolver::Result& resolver_result)
: service_config_(resolver_result.service_config) {
// If resolver did not return a service config, use the default
// specified via the client API.
if (service_config_ == nullptr) {
const char* service_config_json = grpc_channel_arg_get_string(
grpc_channel_args_find(resolver_result->args, GRPC_ARG_SERVICE_CONFIG));
grpc_channel_args_find(resolver_result.args, GRPC_ARG_SERVICE_CONFIG));
if (service_config_json != nullptr) {
grpc_error* error = GRPC_ERROR_NONE;
service_config_ = ServiceConfig::Create(service_config_json, &error);
// Error is currently unused.
GRPC_ERROR_UNREF(error);
}
} else {
// Add the service config JSON to channel args so that it's
// accessible in the subchannel.
// TODO(roth): Consider whether there's a better way to pass the
// service config down into the subchannel stack, such as maybe via
// call context or metadata. This would avoid the problem of having
// to recreate all subchannels whenever the service config changes.
// It would also avoid the need to pass in the resolver result in
// mutable form, both here and in
// ResolvingLoadBalancingPolicy::ProcessResolverResultCallback().
grpc_arg arg = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_SERVICE_CONFIG),
const_cast<char*>(service_config_->service_config_json()));
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add(resolver_result->args, &arg, 1);
grpc_channel_args_destroy(resolver_result->args);
resolver_result->args = new_args;
}
// Process service config.
ProcessServiceConfig(*resolver_result, parse_retry);
// If no LB config was found above, just find the LB policy name then.
if (lb_policy_name_ == nullptr) ProcessLbPolicyName(*resolver_result);
const ClientChannelGlobalParsedObject* parsed_object = nullptr;
if (service_config_ != nullptr) {
parsed_object = static_cast<const ClientChannelGlobalParsedObject*>(
service_config_->GetParsedGlobalServiceConfigObject(
ClientChannelServiceConfigParser::ParserIndex()));
ProcessServiceConfig(resolver_result, parsed_object);
}
ProcessLbPolicy(resolver_result, parsed_object);
}
void ProcessedResolverResult::ProcessServiceConfig(
const Resolver::Result& resolver_result, bool parse_retry) {
if (service_config_ == nullptr) return;
service_config_json_ =
UniquePtr<char>(gpr_strdup(service_config_->service_config_json()));
if (parse_retry) {
const grpc_arg* channel_arg =
grpc_channel_args_find(resolver_result.args, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(channel_arg);
GPR_ASSERT(server_uri != nullptr);
grpc_uri* uri = grpc_uri_parse(server_uri, true);
GPR_ASSERT(uri->path[0] != '\0');
server_name_ = uri->path[0] == '/' ? uri->path + 1 : uri->path;
service_config_->ParseGlobalParams(ParseServiceConfig, this);
grpc_uri_destroy(uri);
} else {
service_config_->ParseGlobalParams(ParseServiceConfig, this);
const Resolver::Result& resolver_result,
const ClientChannelGlobalParsedObject* parsed_object) {
health_check_service_name_ = parsed_object->health_check_service_name();
service_config_json_ = service_config_->service_config_json();
if (parsed_object != nullptr) {
retry_throttle_data_ = parsed_object->retry_throttling();
}
method_params_table_ = service_config_->CreateMethodConfigTable(
ClientChannelMethodParams::CreateFromJson);
}
void ProcessedResolverResult::ProcessLbPolicyName(
const Resolver::Result& resolver_result) {
// Prefer the LB policy name found in the service config. Note that this is
// checking the deprecated loadBalancingPolicy field, rather than the new
// loadBalancingConfig field.
if (service_config_ != nullptr) {
lb_policy_name_.reset(
gpr_strdup(service_config_->GetLoadBalancingPolicyName()));
// Convert to lower-case.
if (lb_policy_name_ != nullptr) {
char* lb_policy_name = lb_policy_name_.get();
for (size_t i = 0; i < strlen(lb_policy_name); ++i) {
lb_policy_name[i] = tolower(lb_policy_name[i]);
}
void ProcessedResolverResult::ProcessLbPolicy(
const Resolver::Result& resolver_result,
const ClientChannelGlobalParsedObject* parsed_object) {
// Prefer the LB policy name found in the service config.
if (parsed_object != nullptr) {
if (parsed_object->parsed_lb_config() != nullptr) {
lb_policy_name_.reset(
gpr_strdup(parsed_object->parsed_lb_config()->name()));
lb_policy_config_ = parsed_object->parsed_lb_config();
} else {
lb_policy_name_.reset(
gpr_strdup(parsed_object->parsed_deprecated_lb_policy()));
}
}
// Otherwise, find the LB policy name set by the client API.
@ -152,97 +141,8 @@ void ProcessedResolverResult::ProcessLbPolicyName(
}
}
void ProcessedResolverResult::ParseServiceConfig(
const grpc_json* field, ProcessedResolverResult* parsing_state) {
parsing_state->ParseLbConfigFromServiceConfig(field);
if (parsing_state->server_name_ != nullptr) {
parsing_state->ParseRetryThrottleParamsFromServiceConfig(field);
}
}
void ProcessedResolverResult::ParseLbConfigFromServiceConfig(
const grpc_json* field) {
if (lb_policy_config_ != nullptr) return; // Already found.
if (field->key == nullptr || strcmp(field->key, "loadBalancingConfig") != 0) {
return; // Not the LB config global parameter.
}
const grpc_json* policy =
LoadBalancingPolicy::ParseLoadBalancingConfig(field);
if (policy != nullptr) {
lb_policy_name_.reset(gpr_strdup(policy->key));
lb_policy_config_ =
MakeRefCounted<LoadBalancingPolicy::Config>(policy, service_config_);
}
}
void ProcessedResolverResult::ParseRetryThrottleParamsFromServiceConfig(
const grpc_json* field) {
if (strcmp(field->key, "retryThrottling") == 0) {
if (retry_throttle_data_ != nullptr) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
int max_milli_tokens = 0;
int milli_token_ratio = 0;
for (grpc_json* sub_field = field->child; sub_field != nullptr;
sub_field = sub_field->next) {
if (sub_field->key == nullptr) return;
if (strcmp(sub_field->key, "maxTokens") == 0) {
if (max_milli_tokens != 0) return; // Duplicate.
if (sub_field->type != GRPC_JSON_NUMBER) return;
max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
if (max_milli_tokens == -1) return;
max_milli_tokens *= 1000;
} else if (strcmp(sub_field->key, "tokenRatio") == 0) {
if (milli_token_ratio != 0) return; // Duplicate.
if (sub_field->type != GRPC_JSON_NUMBER) return;
// We support up to 3 decimal digits.
size_t whole_len = strlen(sub_field->value);
uint32_t multiplier = 1;
uint32_t decimal_value = 0;
const char* decimal_point = strchr(sub_field->value, '.');
if (decimal_point != nullptr) {
whole_len = static_cast<size_t>(decimal_point - sub_field->value);
multiplier = 1000;
size_t decimal_len = strlen(decimal_point + 1);
if (decimal_len > 3) decimal_len = 3;
if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
&decimal_value)) {
return;
}
uint32_t decimal_multiplier = 1;
for (size_t i = 0; i < (3 - decimal_len); ++i) {
decimal_multiplier *= 10;
}
decimal_value *= decimal_multiplier;
}
uint32_t whole_value;
if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
&whole_value)) {
return;
}
milli_token_ratio =
static_cast<int>((whole_value * multiplier) + decimal_value);
if (milli_token_ratio <= 0) return;
}
}
retry_throttle_data_ =
grpc_core::internal::ServerRetryThrottleMap::GetDataForServer(
server_name_, max_milli_tokens, milli_token_ratio);
}
}
namespace {
bool ParseWaitForReady(
grpc_json* field, ClientChannelMethodParams::WaitForReady* wait_for_ready) {
if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
return false;
}
*wait_for_ready = field->type == GRPC_JSON_TRUE
? ClientChannelMethodParams::WAIT_FOR_READY_TRUE
: ClientChannelMethodParams::WAIT_FOR_READY_FALSE;
return true;
}
// Parses a JSON field of the form generated for a google.proto.Duration
// proto message, as per:
// https://developers.google.com/protocol-buffers/docs/proto3#json
@ -275,18 +175,36 @@ bool ParseDuration(grpc_json* field, grpc_millis* duration) {
return true;
}
UniquePtr<ClientChannelMethodParams::RetryPolicy> ParseRetryPolicy(
grpc_json* field) {
auto retry_policy = MakeUnique<ClientChannelMethodParams::RetryPolicy>();
if (field->type != GRPC_JSON_OBJECT) return nullptr;
UniquePtr<ClientChannelMethodParsedObject::RetryPolicy> ParseRetryPolicy(
grpc_json* field, grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
auto retry_policy =
MakeUnique<ClientChannelMethodParsedObject::RetryPolicy>();
if (field->type != GRPC_JSON_OBJECT) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryPolicy error:should be of type object");
return nullptr;
}
InlinedVector<grpc_error*, 4> error_list;
for (grpc_json* sub_field = field->child; sub_field != nullptr;
sub_field = sub_field->next) {
if (sub_field->key == nullptr) return nullptr;
if (sub_field->key == nullptr) continue;
if (strcmp(sub_field->key, "maxAttempts") == 0) {
if (retry_policy->max_attempts != 0) return nullptr; // Duplicate.
if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
if (retry_policy->max_attempts != 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxAttempts error:Duplicate entry"));
} // Duplicate. Continue Parsing
if (sub_field->type != GRPC_JSON_NUMBER) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxAttempts error:should be of type number"));
continue;
}
retry_policy->max_attempts = gpr_parse_nonnegative_int(sub_field->value);
if (retry_policy->max_attempts <= 1) return nullptr;
if (retry_policy->max_attempts <= 1) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxAttempts error:should be at least 2"));
continue;
}
if (retry_policy->max_attempts > MAX_MAX_RETRY_ATTEMPTS) {
gpr_log(GPR_ERROR,
"service config: clamped retryPolicy.maxAttempts at %d",
@ -294,78 +212,375 @@ UniquePtr<ClientChannelMethodParams::RetryPolicy> ParseRetryPolicy(
retry_policy->max_attempts = MAX_MAX_RETRY_ATTEMPTS;
}
} else if (strcmp(sub_field->key, "initialBackoff") == 0) {
if (retry_policy->initial_backoff > 0) return nullptr; // Duplicate.
if (retry_policy->initial_backoff > 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:initialBackoff error:Duplicate entry"));
} // Duplicate, continue parsing.
if (!ParseDuration(sub_field, &retry_policy->initial_backoff)) {
return nullptr;
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:initialBackoff error:Failed to parse"));
continue;
}
if (retry_policy->initial_backoff == 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:initialBackoff error:must be greater than 0"));
}
if (retry_policy->initial_backoff == 0) return nullptr;
} else if (strcmp(sub_field->key, "maxBackoff") == 0) {
if (retry_policy->max_backoff > 0) return nullptr; // Duplicate.
if (retry_policy->max_backoff > 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxBackoff error:Duplicate entry"));
} // Duplicate, continue parsing.
if (!ParseDuration(sub_field, &retry_policy->max_backoff)) {
return nullptr;
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxBackoff error:failed to parse"));
continue;
}
if (retry_policy->max_backoff == 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxBackoff error:should be greater than 0"));
}
if (retry_policy->max_backoff == 0) return nullptr;
} else if (strcmp(sub_field->key, "backoffMultiplier") == 0) {
if (retry_policy->backoff_multiplier != 0) return nullptr; // Duplicate.
if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
if (retry_policy->backoff_multiplier != 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:backoffMultiplier error:Duplicate entry"));
} // Duplicate, continue parsing.
if (sub_field->type != GRPC_JSON_NUMBER) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:backoffMultiplier error:should be of type number"));
continue;
}
if (sscanf(sub_field->value, "%f", &retry_policy->backoff_multiplier) !=
1) {
return nullptr;
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:backoffMultiplier error:failed to parse"));
continue;
}
if (retry_policy->backoff_multiplier <= 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:backoffMultiplier error:should be greater than 0"));
}
if (retry_policy->backoff_multiplier <= 0) return nullptr;
} else if (strcmp(sub_field->key, "retryableStatusCodes") == 0) {
if (!retry_policy->retryable_status_codes.Empty()) {
return nullptr; // Duplicate.
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryableStatusCodes error:Duplicate entry"));
} // Duplicate, continue parsing.
if (sub_field->type != GRPC_JSON_ARRAY) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryableStatusCodes error:should be of type array"));
continue;
}
if (sub_field->type != GRPC_JSON_ARRAY) return nullptr;
for (grpc_json* element = sub_field->child; element != nullptr;
element = element->next) {
if (element->type != GRPC_JSON_STRING) return nullptr;
if (element->type != GRPC_JSON_STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryableStatusCodes error:status codes should be of type "
"string"));
continue;
}
grpc_status_code status;
if (!grpc_status_code_from_string(element->value, &status)) {
return nullptr;
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryableStatusCodes error:failed to parse status code"));
continue;
}
retry_policy->retryable_status_codes.Add(status);
}
if (retry_policy->retryable_status_codes.Empty()) return nullptr;
if (retry_policy->retryable_status_codes.Empty()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryableStatusCodes error:should be non-empty"));
};
}
}
// Make sure required fields are set.
if (retry_policy->max_attempts == 0 || retry_policy->initial_backoff == 0 ||
retry_policy->max_backoff == 0 || retry_policy->backoff_multiplier == 0 ||
retry_policy->retryable_status_codes.Empty()) {
if (error_list.empty()) {
if (retry_policy->max_attempts == 0 || retry_policy->initial_backoff == 0 ||
retry_policy->max_backoff == 0 ||
retry_policy->backoff_multiplier == 0 ||
retry_policy->retryable_status_codes.Empty()) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryPolicy error:Missing required field(s)");
return nullptr;
}
}
*error = GRPC_ERROR_CREATE_FROM_VECTOR("retryPolicy", &error_list);
return *error == GRPC_ERROR_NONE ? std::move(retry_policy) : nullptr;
}
const char* ParseHealthCheckConfig(const grpc_json* field, grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
const char* service_name = nullptr;
GPR_DEBUG_ASSERT(strcmp(field->key, "healthCheckConfig") == 0);
if (field->type != GRPC_JSON_OBJECT) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:healthCheckConfig error:should be of type object");
return nullptr;
}
InlinedVector<grpc_error*, 2> error_list;
for (grpc_json* sub_field = field->child; sub_field != nullptr;
sub_field = sub_field->next) {
if (sub_field->key == nullptr) {
GPR_DEBUG_ASSERT(false);
continue;
}
if (strcmp(sub_field->key, "serviceName") == 0) {
if (service_name != nullptr) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:serviceName error:Duplicate "
"entry"));
} // Duplicate. Continue parsing
if (sub_field->type != GRPC_JSON_STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:serviceName error:should be of type string"));
continue;
}
service_name = sub_field->value;
}
}
if (!error_list.empty()) {
return nullptr;
}
return retry_policy;
*error =
GRPC_ERROR_CREATE_FROM_VECTOR("field:healthCheckConfig", &error_list);
return service_name;
}
} // namespace
RefCountedPtr<ClientChannelMethodParams>
ClientChannelMethodParams::CreateFromJson(const grpc_json* json) {
RefCountedPtr<ClientChannelMethodParams> method_params =
MakeRefCounted<ClientChannelMethodParams>();
UniquePtr<ServiceConfig::ParsedConfig>
ClientChannelServiceConfigParser::ParseGlobalParams(const grpc_json* json,
grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
InlinedVector<grpc_error*, 4> error_list;
RefCountedPtr<ParsedLoadBalancingConfig> parsed_lb_config;
UniquePtr<char> lb_policy_name;
Optional<ClientChannelGlobalParsedObject::RetryThrottling> retry_throttling;
const char* health_check_service_name = nullptr;
for (grpc_json* field = json->child; field != nullptr; field = field->next) {
if (field->key == nullptr) {
continue; // Not the LB config global parameter
}
// Parsed Load balancing config
if (strcmp(field->key, "loadBalancingConfig") == 0) {
if (parsed_lb_config != nullptr) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingConfig error:Duplicate entry"));
} // Duplicate, continue parsing.
grpc_error* parse_error = GRPC_ERROR_NONE;
parsed_lb_config = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
field, &parse_error);
if (parsed_lb_config == nullptr) {
error_list.push_back(parse_error);
}
}
// Parse deprecated loadBalancingPolicy
if (strcmp(field->key, "loadBalancingPolicy") == 0) {
if (lb_policy_name != nullptr) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingPolicy error:Duplicate entry"));
} // Duplicate, continue parsing.
if (field->type != GRPC_JSON_STRING) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingPolicy error:type should be string"));
continue;
}
lb_policy_name.reset(gpr_strdup(field->value));
char* lb_policy = lb_policy_name.get();
if (lb_policy != nullptr) {
for (size_t i = 0; i < strlen(lb_policy); ++i) {
lb_policy[i] = tolower(lb_policy[i]);
}
}
bool requires_config = false;
if (!LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(
lb_policy, &requires_config)) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingPolicy error:Unknown lb policy"));
} else if (requires_config) {
char* error_msg;
gpr_asprintf(&error_msg,
"field:loadBalancingPolicy error:%s requires a config. "
"Please use loadBalancingConfig instead.",
lb_policy);
error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg));
gpr_free(error_msg);
}
}
// Parse retry throttling
if (strcmp(field->key, "retryThrottling") == 0) {
if (retry_throttling.has_value()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling error:Duplicate entry"));
} // Duplicate, continue parsing.
if (field->type != GRPC_JSON_OBJECT) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling error:Type should be object"));
continue;
}
Optional<int> max_milli_tokens;
Optional<int> milli_token_ratio;
for (grpc_json* sub_field = field->child; sub_field != nullptr;
sub_field = sub_field->next) {
if (sub_field->key == nullptr) continue;
if (strcmp(sub_field->key, "maxTokens") == 0) {
if (max_milli_tokens.has_value()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:maxTokens error:Duplicate "
"entry"));
} // Duplicate, continue parsing.
if (sub_field->type != GRPC_JSON_NUMBER) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:maxTokens error:Type should be "
"number"));
} else {
max_milli_tokens.set(gpr_parse_nonnegative_int(sub_field->value) *
1000);
if (max_milli_tokens.value() <= 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:maxTokens error:should be "
"greater than zero"));
}
}
} else if (strcmp(sub_field->key, "tokenRatio") == 0) {
if (milli_token_ratio.has_value()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:tokenRatio error:Duplicate "
"entry"));
} // Duplicate, continue parsing.
if (sub_field->type != GRPC_JSON_NUMBER) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:tokenRatio error:type should be "
"number"));
} else {
// We support up to 3 decimal digits.
size_t whole_len = strlen(sub_field->value);
uint32_t multiplier = 1;
uint32_t decimal_value = 0;
const char* decimal_point = strchr(sub_field->value, '.');
if (decimal_point != nullptr) {
whole_len = static_cast<size_t>(decimal_point - sub_field->value);
multiplier = 1000;
size_t decimal_len = strlen(decimal_point + 1);
if (decimal_len > 3) decimal_len = 3;
if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
&decimal_value)) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:tokenRatio error:Failed "
"parsing"));
continue;
}
uint32_t decimal_multiplier = 1;
for (size_t i = 0; i < (3 - decimal_len); ++i) {
decimal_multiplier *= 10;
}
decimal_value *= decimal_multiplier;
}
uint32_t whole_value;
if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
&whole_value)) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:tokenRatio error:Failed "
"parsing"));
continue;
}
milli_token_ratio.set(
static_cast<int>((whole_value * multiplier) + decimal_value));
if (milli_token_ratio.value() <= 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:tokenRatio error:value should "
"be greater than 0"));
}
}
}
}
ClientChannelGlobalParsedObject::RetryThrottling data;
if (!max_milli_tokens.has_value()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:maxTokens error:Not found"));
} else {
data.max_milli_tokens = max_milli_tokens.value();
}
if (!milli_token_ratio.has_value()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryThrottling field:tokenRatio error:Not found"));
} else {
data.milli_token_ratio = milli_token_ratio.value();
}
retry_throttling.set(data);
}
if (strcmp(field->key, "healthCheckConfig") == 0) {
if (health_check_service_name != nullptr) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:healthCheckConfig error:Duplicate entry"));
} // Duplicate continue parsing
grpc_error* parsing_error = GRPC_ERROR_NONE;
health_check_service_name = ParseHealthCheckConfig(field, &parsing_error);
if (parsing_error != GRPC_ERROR_NONE) {
error_list.push_back(parsing_error);
}
}
}
*error = GRPC_ERROR_CREATE_FROM_VECTOR("Client channel global parser",
&error_list);
if (*error == GRPC_ERROR_NONE) {
return UniquePtr<ServiceConfig::ParsedConfig>(
New<ClientChannelGlobalParsedObject>(
std::move(parsed_lb_config), std::move(lb_policy_name),
retry_throttling, health_check_service_name));
}
return nullptr;
}
UniquePtr<ServiceConfig::ParsedConfig>
ClientChannelServiceConfigParser::ParsePerMethodParams(const grpc_json* json,
grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
InlinedVector<grpc_error*, 4> error_list;
Optional<bool> wait_for_ready;
grpc_millis timeout = 0;
UniquePtr<ClientChannelMethodParsedObject::RetryPolicy> retry_policy;
for (grpc_json* field = json->child; field != nullptr; field = field->next) {
if (field->key == nullptr) continue;
if (strcmp(field->key, "waitForReady") == 0) {
if (method_params->wait_for_ready_ != WAIT_FOR_READY_UNSET) {
return nullptr; // Duplicate.
}
if (!ParseWaitForReady(field, &method_params->wait_for_ready_)) {
return nullptr;
if (wait_for_ready.has_value()) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:waitForReady error:Duplicate entry"));
} // Duplicate, continue parsing.
if (field->type == GRPC_JSON_TRUE) {
wait_for_ready.set(true);
} else if (field->type == GRPC_JSON_FALSE) {
wait_for_ready.set(false);
} else {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:waitForReady error:Type should be true/false"));
}
} else if (strcmp(field->key, "timeout") == 0) {
if (method_params->timeout_ > 0) return nullptr; // Duplicate.
if (!ParseDuration(field, &method_params->timeout_)) return nullptr;
if (timeout > 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:timeout error:Duplicate entry"));
} // Duplicate, continue parsing.
if (!ParseDuration(field, &timeout)) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:timeout error:Failed parsing"));
};
} else if (strcmp(field->key, "retryPolicy") == 0) {
if (method_params->retry_policy_ != nullptr) {
return nullptr; // Duplicate.
if (retry_policy != nullptr) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:retryPolicy error:Duplicate entry"));
} // Duplicate, continue parsing.
grpc_error* error = GRPC_ERROR_NONE;
retry_policy = ParseRetryPolicy(field, &error);
if (retry_policy == nullptr) {
error_list.push_back(error);
}
method_params->retry_policy_ = ParseRetryPolicy(field);
if (method_params->retry_policy_ == nullptr) return nullptr;
}
}
return method_params;
*error = GRPC_ERROR_CREATE_FROM_VECTOR("Client channel parser", &error_list);
if (*error == GRPC_ERROR_NONE) {
return UniquePtr<ServiceConfig::ParsedConfig>(
New<ClientChannelMethodParsedObject>(timeout, wait_for_ready,
std::move(retry_policy)));
}
return nullptr;
}
} // namespace internal

@ -22,10 +22,12 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/service_config.h"
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/gprpp/optional.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/exec_ctx.h" // for grpc_millis
@ -35,44 +37,123 @@
namespace grpc_core {
namespace internal {
class ClientChannelMethodParams;
class ClientChannelGlobalParsedObject : public ServiceConfig::ParsedConfig {
public:
struct RetryThrottling {
intptr_t max_milli_tokens = 0;
intptr_t milli_token_ratio = 0;
};
ClientChannelGlobalParsedObject(
RefCountedPtr<ParsedLoadBalancingConfig> parsed_lb_config,
UniquePtr<char> parsed_deprecated_lb_policy,
const Optional<RetryThrottling>& retry_throttling,
const char* health_check_service_name)
: parsed_lb_config_(std::move(parsed_lb_config)),
parsed_deprecated_lb_policy_(std::move(parsed_deprecated_lb_policy)),
retry_throttling_(retry_throttling),
health_check_service_name_(health_check_service_name) {}
Optional<RetryThrottling> retry_throttling() const {
return retry_throttling_;
}
RefCountedPtr<ParsedLoadBalancingConfig> parsed_lb_config() const {
return parsed_lb_config_;
}
const char* parsed_deprecated_lb_policy() const {
return parsed_deprecated_lb_policy_.get();
}
const char* health_check_service_name() const {
return health_check_service_name_;
}
private:
RefCountedPtr<ParsedLoadBalancingConfig> parsed_lb_config_;
UniquePtr<char> parsed_deprecated_lb_policy_;
Optional<RetryThrottling> retry_throttling_;
const char* health_check_service_name_;
};
class ClientChannelMethodParsedObject : public ServiceConfig::ParsedConfig {
public:
struct RetryPolicy {
int max_attempts = 0;
grpc_millis initial_backoff = 0;
grpc_millis max_backoff = 0;
float backoff_multiplier = 0;
StatusCodeSet retryable_status_codes;
};
ClientChannelMethodParsedObject(grpc_millis timeout,
const Optional<bool>& wait_for_ready,
UniquePtr<RetryPolicy> retry_policy)
: timeout_(timeout),
wait_for_ready_(wait_for_ready),
retry_policy_(std::move(retry_policy)) {}
grpc_millis timeout() const { return timeout_; }
Optional<bool> wait_for_ready() const { return wait_for_ready_; }
const RetryPolicy* retry_policy() const { return retry_policy_.get(); }
private:
grpc_millis timeout_ = 0;
Optional<bool> wait_for_ready_;
UniquePtr<RetryPolicy> retry_policy_;
};
class ClientChannelServiceConfigParser : public ServiceConfig::Parser {
public:
UniquePtr<ServiceConfig::ParsedConfig> ParseGlobalParams(
const grpc_json* json, grpc_error** error) override;
UniquePtr<ServiceConfig::ParsedConfig> ParsePerMethodParams(
const grpc_json* json, grpc_error** error) override;
// A table mapping from a method name to its method parameters.
typedef SliceHashTable<RefCountedPtr<ClientChannelMethodParams>>
ClientChannelMethodParamsTable;
static size_t ParserIndex();
static void Register();
};
// A container of processed fields from the resolver result. Simplifies the
// usage of resolver result.
// TODO(yashykt): It would be cleaner to move this logic to the client_channel
// code. A container of processed fields from the resolver result. Simplifies
// the usage of resolver result.
class ProcessedResolverResult {
public:
// Processes the resolver result and populates the relative members
// for later consumption. Tries to parse retry parameters only if parse_retry
// is true.
ProcessedResolverResult(Resolver::Result* resolver_result, bool parse_retry);
// for later consumption.
ProcessedResolverResult(const Resolver::Result& resolver_result);
// Getters. Any managed object's ownership is transferred.
UniquePtr<char> service_config_json() {
return std::move(service_config_json_);
}
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data() {
return std::move(retry_throttle_data_);
}
RefCountedPtr<ClientChannelMethodParamsTable> method_params_table() {
return std::move(method_params_table_);
}
const char* service_config_json() { return service_config_json_; }
RefCountedPtr<ServiceConfig> service_config() { return service_config_; }
UniquePtr<char> lb_policy_name() { return std::move(lb_policy_name_); }
RefCountedPtr<LoadBalancingPolicy::Config> lb_policy_config() {
return std::move(lb_policy_config_);
RefCountedPtr<ParsedLoadBalancingConfig> lb_policy_config() {
return lb_policy_config_;
}
Optional<ClientChannelGlobalParsedObject::RetryThrottling>
retry_throttle_data() {
return retry_throttle_data_;
}
const char* health_check_service_name() { return health_check_service_name_; }
private:
// Finds the service config; extracts LB config and (maybe) retry throttle
// params from it.
void ProcessServiceConfig(const Resolver::Result& resolver_result,
bool parse_retry);
void ProcessServiceConfig(
const Resolver::Result& resolver_result,
const ClientChannelGlobalParsedObject* parsed_object);
// Finds the LB policy name (when no LB config was found).
void ProcessLbPolicyName(const Resolver::Result& resolver_result);
// Extracts the LB policy.
void ProcessLbPolicy(const Resolver::Result& resolver_result,
const ClientChannelGlobalParsedObject* parsed_object);
// Parses the service config. Intended to be used by
// ServiceConfig::ParseGlobalParams.
@ -84,59 +165,15 @@ class ProcessedResolverResult {
void ParseRetryThrottleParamsFromServiceConfig(const grpc_json* field);
// Service config.
UniquePtr<char> service_config_json_;
const char* service_config_json_ = nullptr;
RefCountedPtr<ServiceConfig> service_config_;
// LB policy.
UniquePtr<char> lb_policy_name_;
RefCountedPtr<LoadBalancingPolicy::Config> lb_policy_config_;
RefCountedPtr<ParsedLoadBalancingConfig> lb_policy_config_;
// Retry throttle data.
char* server_name_ = nullptr;
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
// Method params table.
RefCountedPtr<ClientChannelMethodParamsTable> method_params_table_;
};
// The parameters of a method.
class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
public:
enum WaitForReady {
WAIT_FOR_READY_UNSET = 0,
WAIT_FOR_READY_FALSE,
WAIT_FOR_READY_TRUE
};
struct RetryPolicy {
int max_attempts = 0;
grpc_millis initial_backoff = 0;
grpc_millis max_backoff = 0;
float backoff_multiplier = 0;
StatusCodeSet retryable_status_codes;
};
/// Creates a method_parameters object from \a json.
/// Intended for use with ServiceConfig::CreateMethodConfigTable().
static RefCountedPtr<ClientChannelMethodParams> CreateFromJson(
const grpc_json* json);
grpc_millis timeout() const { return timeout_; }
WaitForReady wait_for_ready() const { return wait_for_ready_; }
const RetryPolicy* retry_policy() const { return retry_policy_.get(); }
private:
// So New() can call our private ctor.
template <typename T, typename... Args>
friend T* grpc_core::New(Args&&... args);
// So Delete() can call our private dtor.
template <typename T>
friend void grpc_core::Delete(T*);
ClientChannelMethodParams() {}
virtual ~ClientChannelMethodParams() {}
grpc_millis timeout_ = 0;
WaitForReady wait_for_ready_ = WAIT_FOR_READY_UNSET;
UniquePtr<RetryPolicy> retry_policy_;
Optional<ClientChannelGlobalParsedObject::RetryThrottling>
retry_throttle_data_;
const char* health_check_service_name_ = nullptr;
};
} // namespace internal

@ -183,7 +183,8 @@ class ResolvingLoadBalancingPolicy::ResolvingControlHelper
ResolvingLoadBalancingPolicy::ResolvingLoadBalancingPolicy(
Args args, TraceFlag* tracer, UniquePtr<char> target_uri,
UniquePtr<char> child_policy_name, RefCountedPtr<Config> child_lb_config,
UniquePtr<char> child_policy_name,
RefCountedPtr<ParsedLoadBalancingConfig> child_lb_config,
grpc_error** error)
: LoadBalancingPolicy(std::move(args)),
tracer_(tracer),
@ -331,7 +332,8 @@ void ResolvingLoadBalancingPolicy::OnResolverError(grpc_error* error) {
}
void ResolvingLoadBalancingPolicy::CreateOrUpdateLbPolicyLocked(
const char* lb_policy_name, RefCountedPtr<Config> lb_policy_config,
const char* lb_policy_name,
RefCountedPtr<ParsedLoadBalancingConfig> lb_policy_config,
Resolver::Result result, TraceStringVector* trace_strings) {
// If the child policy name changes, we need to create a new child
// policy. When this happens, we leave child_policy_ as-is and store
@ -528,11 +530,11 @@ void ResolvingLoadBalancingPolicy::OnResolverResultChangedLocked(
const bool resolution_contains_addresses = result.addresses.size() > 0;
// Process the resolver result.
const char* lb_policy_name = nullptr;
RefCountedPtr<Config> lb_policy_config;
RefCountedPtr<ParsedLoadBalancingConfig> lb_policy_config;
bool service_config_changed = false;
if (process_resolver_result_ != nullptr) {
service_config_changed =
process_resolver_result_(process_resolver_result_user_data_, &result,
process_resolver_result_(process_resolver_result_user_data_, result,
&lb_policy_name, &lb_policy_config);
} else {
lb_policy_name = child_policy_name_.get();
@ -540,7 +542,7 @@ void ResolvingLoadBalancingPolicy::OnResolverResultChangedLocked(
}
GPR_ASSERT(lb_policy_name != nullptr);
// Create or update LB policy, as needed.
CreateOrUpdateLbPolicyLocked(lb_policy_name, std::move(lb_policy_config),
CreateOrUpdateLbPolicyLocked(lb_policy_name, lb_policy_config,
std::move(result), &trace_strings);
// Add channel trace event.
if (channelz_node() != nullptr) {

@ -23,6 +23,7 @@
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
@ -53,11 +54,11 @@ class ResolvingLoadBalancingPolicy : public LoadBalancingPolicy {
public:
// If error is set when this returns, then construction failed, and
// the caller may not use the new object.
ResolvingLoadBalancingPolicy(Args args, TraceFlag* tracer,
UniquePtr<char> target_uri,
UniquePtr<char> child_policy_name,
RefCountedPtr<Config> child_lb_config,
grpc_error** error);
ResolvingLoadBalancingPolicy(
Args args, TraceFlag* tracer, UniquePtr<char> target_uri,
UniquePtr<char> child_policy_name,
RefCountedPtr<ParsedLoadBalancingConfig> child_lb_config,
grpc_error** error);
// Private ctor, to be used by client_channel only!
//
@ -65,8 +66,9 @@ class ResolvingLoadBalancingPolicy : public LoadBalancingPolicy {
// lb_policy_name and lb_policy_config to point to the right data.
// Returns true if the service config has changed since the last result.
typedef bool (*ProcessResolverResultCallback)(
void* user_data, Resolver::Result* result, const char** lb_policy_name,
RefCountedPtr<Config>* lb_policy_config);
void* user_data, const Resolver::Result& result,
const char** lb_policy_name,
RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config);
// If error is set when this returns, then construction failed, and
// the caller may not use the new object.
ResolvingLoadBalancingPolicy(
@ -102,10 +104,10 @@ class ResolvingLoadBalancingPolicy : public LoadBalancingPolicy {
void StartResolvingLocked();
void OnResolverError(grpc_error* error);
void CreateOrUpdateLbPolicyLocked(const char* lb_policy_name,
RefCountedPtr<Config> lb_policy_config,
Resolver::Result result,
TraceStringVector* trace_strings);
void CreateOrUpdateLbPolicyLocked(
const char* lb_policy_name,
RefCountedPtr<ParsedLoadBalancingConfig> lb_policy_config,
Resolver::Result result, TraceStringVector* trace_strings);
OrphanablePtr<LoadBalancingPolicy> CreateLbPolicyLocked(
const char* lb_policy_name, const grpc_channel_args& args,
TraceStringVector* trace_strings);
@ -121,7 +123,7 @@ class ResolvingLoadBalancingPolicy : public LoadBalancingPolicy {
ProcessResolverResultCallback process_resolver_result_ = nullptr;
void* process_resolver_result_user_data_ = nullptr;
UniquePtr<char> child_policy_name_;
RefCountedPtr<Config> child_lb_config_;
RefCountedPtr<ParsedLoadBalancingConfig> child_lb_config_;
// Resolver and associated state.
OrphanablePtr<Resolver> resolver_;

@ -34,28 +34,10 @@
namespace grpc_core {
namespace {
typedef InlinedVector<UniquePtr<ServiceConfigParser>,
typedef InlinedVector<UniquePtr<ServiceConfig::Parser>,
ServiceConfig::kNumPreallocatedParsers>
ServiceConfigParserList;
ServiceConfigParserList* registered_parsers;
// Consumes all the errors in the vector and forms a referencing error from
// them. If the vector is empty, return GRPC_ERROR_NONE.
template <size_t N>
grpc_error* CreateErrorFromVector(const char* desc,
InlinedVector<grpc_error*, N>* error_list) {
grpc_error* error = GRPC_ERROR_NONE;
if (error_list->size() != 0) {
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
desc, error_list->data(), error_list->size());
// Remove refs to all errors in error_list.
for (size_t i = 0; i < error_list->size(); i++) {
GRPC_ERROR_UNREF((*error_list)[i]);
}
error_list->clear();
}
return error;
}
ServiceConfigParserList* g_registered_parsers;
} // namespace
RefCountedPtr<ServiceConfig> ServiceConfig::Create(const char* json,
@ -107,16 +89,16 @@ grpc_error* ServiceConfig::ParseGlobalParams(const grpc_json* json_tree) {
GPR_DEBUG_ASSERT(json_tree_->type == GRPC_JSON_OBJECT);
GPR_DEBUG_ASSERT(json_tree_->key == nullptr);
InlinedVector<grpc_error*, 4> error_list;
for (size_t i = 0; i < registered_parsers->size(); i++) {
for (size_t i = 0; i < g_registered_parsers->size(); i++) {
grpc_error* parser_error = GRPC_ERROR_NONE;
auto parsed_obj =
(*registered_parsers)[i]->ParseGlobalParams(json_tree, &parser_error);
(*g_registered_parsers)[i]->ParseGlobalParams(json_tree, &parser_error);
if (parser_error != GRPC_ERROR_NONE) {
error_list.push_back(parser_error);
}
parsed_global_service_config_objects_.push_back(std::move(parsed_obj));
}
return CreateErrorFromVector("Global Params", &error_list);
return GRPC_ERROR_CREATE_FROM_VECTOR("Global Params", &error_list);
}
grpc_error* ServiceConfig::ParseJsonMethodConfigToServiceConfigObjectsTable(
@ -125,17 +107,20 @@ grpc_error* ServiceConfig::ParseJsonMethodConfigToServiceConfigObjectsTable(
size_t* idx) {
auto objs_vector = MakeUnique<ServiceConfigObjectsVector>();
InlinedVector<grpc_error*, 4> error_list;
for (size_t i = 0; i < registered_parsers->size(); i++) {
for (size_t i = 0; i < g_registered_parsers->size(); i++) {
grpc_error* parser_error = GRPC_ERROR_NONE;
auto parsed_obj =
(*registered_parsers)[i]->ParsePerMethodParams(json, &parser_error);
(*g_registered_parsers)[i]->ParsePerMethodParams(json, &parser_error);
if (parser_error != GRPC_ERROR_NONE) {
error_list.push_back(parser_error);
}
objs_vector->push_back(std::move(parsed_obj));
}
const auto* vector_ptr = objs_vector.get();
service_config_objects_vectors_storage_.push_back(std::move(objs_vector));
const auto* vector_ptr =
service_config_objects_vectors_storage_
[service_config_objects_vectors_storage_.size() - 1]
.get();
// Construct list of paths.
InlinedVector<UniquePtr<char>, 10> paths;
for (grpc_json* child = json->child; child != nullptr; child = child->next) {
@ -169,7 +154,7 @@ grpc_error* ServiceConfig::ParseJsonMethodConfigToServiceConfigObjectsTable(
++*idx;
}
wrap_error:
return CreateErrorFromVector("methodConfig", &error_list);
return GRPC_ERROR_CREATE_FROM_VECTOR("methodConfig", &error_list);
}
grpc_error* ServiceConfig::ParsePerMethodParams(const grpc_json* json_tree) {
@ -226,28 +211,11 @@ grpc_error* ServiceConfig::ParsePerMethodParams(const grpc_json* json_tree) {
num_entries, entries, nullptr);
gpr_free(entries);
}
return CreateErrorFromVector("Method Params", &error_list);
return GRPC_ERROR_CREATE_FROM_VECTOR("Method Params", &error_list);
}
ServiceConfig::~ServiceConfig() { grpc_json_destroy(json_tree_); }
const char* ServiceConfig::GetLoadBalancingPolicyName() const {
if (json_tree_->type != GRPC_JSON_OBJECT || json_tree_->key != nullptr) {
return nullptr;
}
const char* lb_policy_name = nullptr;
for (grpc_json* field = json_tree_->child; field != nullptr;
field = field->next) {
if (field->key == nullptr) return nullptr;
if (strcmp(field->key, "loadBalancingPolicy") == 0) {
if (lb_policy_name != nullptr) return nullptr; // Duplicate.
if (field->type != GRPC_JSON_STRING) return nullptr;
lb_policy_name = field->value;
}
}
return lb_policy_name;
}
int ServiceConfig::CountNamesInMethodConfig(grpc_json* json) {
int num_names = 0;
for (grpc_json* field = json->child; field != nullptr; field = field->next) {
@ -319,8 +287,11 @@ UniquePtr<char> ServiceConfig::ParseJsonMethodName(grpc_json* json,
return UniquePtr<char>(path);
}
const ServiceConfig::ServiceConfigObjectsVector* const*
const ServiceConfig::ServiceConfigObjectsVector*
ServiceConfig::GetMethodServiceConfigObjectsVector(const grpc_slice& path) {
if (parsed_method_service_config_objects_table_.get() == nullptr) {
return nullptr;
}
const auto* value = parsed_method_service_config_objects_table_->Get(path);
// If we didn't find a match for the path, try looking for a wildcard
// entry (i.e., change "/service/method" to "/service/*").
@ -339,22 +310,22 @@ ServiceConfig::GetMethodServiceConfigObjectsVector(const grpc_slice& path) {
gpr_free(path_str);
if (value == nullptr) return nullptr;
}
return value;
return *value;
}
size_t ServiceConfig::RegisterParser(UniquePtr<ServiceConfigParser> parser) {
registered_parsers->push_back(std::move(parser));
return registered_parsers->size() - 1;
size_t ServiceConfig::RegisterParser(UniquePtr<Parser> parser) {
g_registered_parsers->push_back(std::move(parser));
return g_registered_parsers->size() - 1;
}
void ServiceConfig::Init() {
GPR_ASSERT(registered_parsers == nullptr);
registered_parsers = New<ServiceConfigParserList>();
GPR_ASSERT(g_registered_parsers == nullptr);
g_registered_parsers = New<ServiceConfigParserList>();
}
void ServiceConfig::Shutdown() {
Delete(registered_parsers);
registered_parsers = nullptr;
Delete(g_registered_parsers);
g_registered_parsers = nullptr;
}
} // namespace grpc_core

@ -55,41 +55,73 @@
namespace grpc_core {
/// This is the base class that all service config parsers MUST use to store
/// parsed service config data.
class ServiceConfigParsedObject {
class ServiceConfig : public RefCounted<ServiceConfig> {
public:
virtual ~ServiceConfigParsedObject() = default;
/// This is the base class that all service config parsers MUST use to store
/// parsed service config data.
class ParsedConfig {
public:
virtual ~ParsedConfig() = default;
GRPC_ABSTRACT_BASE_CLASS;
};
/// This is the base class that all service config parsers should derive from.
class Parser {
public:
virtual ~Parser() = default;
virtual UniquePtr<ParsedConfig> ParseGlobalParams(const grpc_json* json,
grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr);
return nullptr;
}
GRPC_ABSTRACT_BASE_CLASS;
};
virtual UniquePtr<ParsedConfig> ParsePerMethodParams(const grpc_json* json,
grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr);
return nullptr;
}
/// This is the base class that all service config parsers should derive from.
class ServiceConfigParser {
public:
virtual ~ServiceConfigParser() = default;
GRPC_ABSTRACT_BASE_CLASS;
};
virtual UniquePtr<ServiceConfigParsedObject> ParseGlobalParams(
const grpc_json* json, grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr);
return nullptr;
}
static constexpr int kNumPreallocatedParsers = 4;
typedef InlinedVector<UniquePtr<ParsedConfig>, kNumPreallocatedParsers>
ServiceConfigObjectsVector;
virtual UniquePtr<ServiceConfigParsedObject> ParsePerMethodParams(
const grpc_json* json, grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr);
return nullptr;
}
/// When a service config is applied to a call in the client_channel_filter,
/// we create an instance of this object and store it in the call_data for
/// client_channel. A pointer to this object is also stored in the
/// call_context, so that future filters can easily access method and global
/// parameters for the call.
class CallData {
public:
CallData() = default;
CallData(RefCountedPtr<ServiceConfig> svc_cfg, const grpc_slice& path)
: service_config_(std::move(svc_cfg)) {
if (service_config_ != nullptr) {
method_params_vector_ =
service_config_->GetMethodServiceConfigObjectsVector(path);
}
}
GRPC_ABSTRACT_BASE_CLASS;
};
ServiceConfig* service_config() { return service_config_.get(); }
class ServiceConfig : public RefCounted<ServiceConfig> {
public:
static constexpr int kNumPreallocatedParsers = 4;
typedef InlinedVector<UniquePtr<ServiceConfigParsedObject>,
kNumPreallocatedParsers>
ServiceConfigObjectsVector;
ParsedConfig* GetMethodParsedObject(size_t index) const {
return method_params_vector_ != nullptr
? (*method_params_vector_)[index].get()
: nullptr;
}
ParsedConfig* GetGlobalParsedObject(size_t index) const {
return service_config_->GetParsedGlobalServiceConfigObject(index);
}
private:
RefCountedPtr<ServiceConfig> service_config_;
const ServiceConfigObjectsVector* method_params_vector_ = nullptr;
};
/// Creates a new service config from parsing \a json_string.
/// Returns null on parse error.
@ -100,48 +132,18 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
const char* service_config_json() const { return service_config_json_.get(); }
/// Invokes \a process_json() for each global parameter in the service
/// config. \a arg is passed as the second argument to \a process_json().
template <typename T>
using ProcessJson = void (*)(const grpc_json*, T*);
template <typename T>
void ParseGlobalParams(ProcessJson<T> process_json, T* arg) const;
/// Gets the LB policy name from \a service_config.
/// Returns NULL if no LB policy name was specified.
/// Caller does NOT take ownership.
const char* GetLoadBalancingPolicyName() const;
/// Creates a method config table based on the data in \a json.
/// The table's keys are request paths. The table's value type is
/// returned by \a create_value(), based on data parsed from the JSON tree.
/// Returns null on error.
template <typename T>
using CreateValue = RefCountedPtr<T> (*)(const grpc_json* method_config_json);
template <typename T>
RefCountedPtr<SliceHashTable<RefCountedPtr<T>>> CreateMethodConfigTable(
CreateValue<T> create_value) const;
/// A helper function for looking up values in the table returned by
/// \a CreateMethodConfigTable().
/// Gets the method config for the specified \a path, which should be of
/// the form "/service/method".
/// Returns null if the method has no config.
/// Caller does NOT own a reference to the result.
template <typename T>
static RefCountedPtr<T> MethodConfigTableLookup(
const SliceHashTable<RefCountedPtr<T>>& table, const grpc_slice& path);
/// Retrieves the parsed global service config object at index \a index.
ServiceConfigParsedObject* GetParsedGlobalServiceConfigObject(int index) {
GPR_DEBUG_ASSERT(
index < static_cast<int>(parsed_global_service_config_objects_.size()));
/// Retrieves the parsed global service config object at index \a index. The
/// lifetime of the returned object is tied to the lifetime of the
/// ServiceConfig object.
ParsedConfig* GetParsedGlobalServiceConfigObject(size_t index) {
GPR_DEBUG_ASSERT(index < parsed_global_service_config_objects_.size());
return parsed_global_service_config_objects_[index].get();
}
/// Retrieves the vector of method service config objects for a given path \a
/// path.
const ServiceConfigObjectsVector* const* GetMethodServiceConfigObjectsVector(
/// path. The lifetime of the returned vector and contained objects is tied to
/// the lifetime of the ServiceConfig object.
const ServiceConfigObjectsVector* GetMethodServiceConfigObjectsVector(
const grpc_slice& path);
/// Globally register a service config parser. On successful registration, it
@ -150,7 +152,7 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
/// registered parser. Each parser is responsible for reading the service
/// config json and returning a parsed object. This parsed object can later be
/// retrieved using the same index that was returned at registration time.
static size_t RegisterParser(UniquePtr<ServiceConfigParser> parser);
static size_t RegisterParser(UniquePtr<Parser> parser);
static void Init();
@ -178,14 +180,6 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
static UniquePtr<char> ParseJsonMethodName(grpc_json* json,
grpc_error** error);
// Parses the method config from \a json. Adds an entry to \a entries for
// each name found, incrementing \a idx for each entry added.
// Returns false on error.
template <typename T>
static bool ParseJsonMethodConfig(
grpc_json* json, CreateValue<T> create_value,
typename SliceHashTable<RefCountedPtr<T>>::Entry* entries, size_t* idx);
grpc_error* ParseJsonMethodConfigToServiceConfigObjectsTable(
const grpc_json* json,
SliceHashTable<const ServiceConfigObjectsVector*>::Entry* entries,
@ -195,7 +189,7 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
UniquePtr<char> json_string_; // Underlying storage for json_tree.
grpc_json* json_tree_;
InlinedVector<UniquePtr<ServiceConfigParsedObject>, kNumPreallocatedParsers>
InlinedVector<UniquePtr<ParsedConfig>, kNumPreallocatedParsers>
parsed_global_service_config_objects_;
// A map from the method name to the service config objects vector. Note that
// we are using a raw pointer and not a unique pointer so that we can use the
@ -208,133 +202,6 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
service_config_objects_vectors_storage_;
};
//
// implementation -- no user-serviceable parts below
//
template <typename T>
void ServiceConfig::ParseGlobalParams(ProcessJson<T> process_json,
T* arg) const {
if (json_tree_->type != GRPC_JSON_OBJECT || json_tree_->key != nullptr) {
return;
}
for (grpc_json* field = json_tree_->child; field != nullptr;
field = field->next) {
if (field->key == nullptr) return;
if (strcmp(field->key, "methodConfig") == 0) continue;
process_json(field, arg);
}
}
template <typename T>
bool ServiceConfig::ParseJsonMethodConfig(
grpc_json* json, CreateValue<T> create_value,
typename SliceHashTable<RefCountedPtr<T>>::Entry* entries, size_t* idx) {
// Construct value.
RefCountedPtr<T> method_config = create_value(json);
if (method_config == nullptr) return false;
// Construct list of paths.
InlinedVector<UniquePtr<char>, 10> paths;
for (grpc_json* child = json->child; child != nullptr; child = child->next) {
if (child->key == nullptr) continue;
if (strcmp(child->key, "name") == 0) {
if (child->type != GRPC_JSON_ARRAY) return false;
for (grpc_json* name = child->child; name != nullptr; name = name->next) {
grpc_error* error = GRPC_ERROR_NONE;
UniquePtr<char> path = ParseJsonMethodName(name, &error);
// We are not reporting the error here.
GRPC_ERROR_UNREF(error);
if (path == nullptr) return false;
paths.push_back(std::move(path));
}
}
}
if (paths.size() == 0) return false; // No names specified.
// Add entry for each path.
for (size_t i = 0; i < paths.size(); ++i) {
entries[*idx].key = grpc_slice_from_copied_string(paths[i].get());
entries[*idx].value = method_config; // Takes a new ref.
++*idx;
}
// Success.
return true;
}
template <typename T>
RefCountedPtr<SliceHashTable<RefCountedPtr<T>>>
ServiceConfig::CreateMethodConfigTable(CreateValue<T> create_value) const {
// Traverse parsed JSON tree.
if (json_tree_->type != GRPC_JSON_OBJECT || json_tree_->key != nullptr) {
return nullptr;
}
size_t num_entries = 0;
typename SliceHashTable<RefCountedPtr<T>>::Entry* entries = nullptr;
for (grpc_json* field = json_tree_->child; field != nullptr;
field = field->next) {
if (field->key == nullptr) return nullptr;
if (strcmp(field->key, "methodConfig") == 0) {
if (entries != nullptr) return nullptr; // Duplicate.
if (field->type != GRPC_JSON_ARRAY) return nullptr;
// Find number of entries.
for (grpc_json* method = field->child; method != nullptr;
method = method->next) {
int count = CountNamesInMethodConfig(method);
if (count <= 0) return nullptr;
num_entries += static_cast<size_t>(count);
}
// Populate method config table entries.
entries = static_cast<typename SliceHashTable<RefCountedPtr<T>>::Entry*>(
gpr_zalloc(num_entries *
sizeof(typename SliceHashTable<RefCountedPtr<T>>::Entry)));
size_t idx = 0;
for (grpc_json* method = field->child; method != nullptr;
method = method->next) {
if (!ParseJsonMethodConfig(method, create_value, entries, &idx)) {
for (size_t i = 0; i < idx; ++i) {
grpc_slice_unref_internal(entries[i].key);
entries[i].value.reset();
}
gpr_free(entries);
return nullptr;
}
}
GPR_ASSERT(idx == num_entries);
}
}
// Instantiate method config table.
RefCountedPtr<SliceHashTable<RefCountedPtr<T>>> method_config_table;
if (entries != nullptr) {
method_config_table =
SliceHashTable<RefCountedPtr<T>>::Create(num_entries, entries, nullptr);
gpr_free(entries);
}
return method_config_table;
}
template <typename T>
RefCountedPtr<T> ServiceConfig::MethodConfigTableLookup(
const SliceHashTable<RefCountedPtr<T>>& table, const grpc_slice& path) {
const RefCountedPtr<T>* value = table.Get(path);
// If we didn't find a match for the path, try looking for a wildcard
// entry (i.e., change "/service/method" to "/service/*").
if (value == nullptr) {
char* path_str = grpc_slice_to_c_string(path);
const char* sep = strrchr(path_str, '/') + 1;
const size_t len = (size_t)(sep - path_str);
char* buf = (char*)gpr_malloc(len + 2); // '*' and NUL
memcpy(buf, path_str, len);
buf[len] = '*';
buf[len + 1] = '\0';
grpc_slice wildcard_path = grpc_slice_from_copied_string(buf);
gpr_free(buf);
value = table.Get(wildcard_path);
grpc_slice_unref_internal(wildcard_path);
gpr_free(path_str);
if (value == nullptr) return nullptr;
}
return RefCountedPtr<T>(*value);
}
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SERVICE_CONFIG_H */

@ -121,7 +121,7 @@ RefCountedPtr<SubchannelCall> ConnectedSubchannel::CreateCall(
const size_t allocation_size =
GetInitialCallSizeEstimate(args.parent_data_size);
RefCountedPtr<SubchannelCall> call(
new (gpr_arena_alloc(args.arena, allocation_size))
new (args.arena->Alloc(allocation_size))
SubchannelCall(Ref(DEBUG_LOCATION, "subchannel_call"), args));
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(call.get());
const grpc_call_element_args call_args = {
@ -529,25 +529,6 @@ BackOff::Options ParseArgsForBackoffValues(
.set_max_backoff(max_backoff_ms);
}
struct HealthCheckParams {
UniquePtr<char> service_name;
static void Parse(const grpc_json* field, HealthCheckParams* params) {
if (strcmp(field->key, "healthCheckConfig") == 0) {
if (field->type != GRPC_JSON_OBJECT) return;
for (grpc_json* sub_field = field->child; sub_field != nullptr;
sub_field = sub_field->next) {
if (sub_field->key == nullptr) return;
if (strcmp(sub_field->key, "serviceName") == 0) {
if (params->service_name != nullptr) return; // Duplicate.
if (sub_field->type != GRPC_JSON_STRING) return;
params->service_name.reset(gpr_strdup(sub_field->value));
}
}
}
}
};
} // namespace
Subchannel::Subchannel(SubchannelKey* key, grpc_connector* connector,
@ -583,21 +564,9 @@ Subchannel::Subchannel(SubchannelKey* key, grpc_connector* connector,
"subchannel");
grpc_connectivity_state_init(&state_and_health_tracker_, GRPC_CHANNEL_IDLE,
"subchannel");
// Check whether we should enable health checking.
const char* service_config_json = grpc_channel_arg_get_string(
grpc_channel_args_find(args_, GRPC_ARG_SERVICE_CONFIG));
if (service_config_json != nullptr) {
grpc_error* service_config_error = GRPC_ERROR_NONE;
RefCountedPtr<ServiceConfig> service_config =
ServiceConfig::Create(service_config_json, &service_config_error);
// service_config_error is currently unused.
GRPC_ERROR_UNREF(service_config_error);
if (service_config != nullptr) {
HealthCheckParams params;
service_config->ParseGlobalParams(HealthCheckParams::Parse, &params);
health_check_service_name_ = std::move(params.service_name);
}
}
health_check_service_name_ =
UniquePtr<char>(gpr_strdup(grpc_channel_arg_get_string(
grpc_channel_args_find(args_, "grpc.temp.health_check"))));
const grpc_arg* arg = grpc_channel_args_find(args_, GRPC_ARG_ENABLE_CHANNELZ);
const bool channelz_enabled =
grpc_channel_arg_get_bool(arg, GRPC_ENABLE_CHANNELZ_DEFAULT);

@ -26,7 +26,7 @@
#include "src/core/ext/filters/client_channel/subchannel_pool_interface.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
@ -75,9 +75,9 @@ class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
grpc_slice path;
gpr_timespec start_time;
grpc_millis deadline;
gpr_arena* arena;
Arena* arena;
grpc_call_context_element* context;
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
size_t parent_data_size;
};

@ -68,8 +68,7 @@ static void timer_callback(void* arg, grpc_error* error) {
error = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
grpc_call_combiner_cancel(deadline_state->call_combiner,
GRPC_ERROR_REF(error));
deadline_state->call_combiner->Cancel(GRPC_ERROR_REF(error));
GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
send_cancel_op_in_call_combiner, elem,
grpc_schedule_on_exec_ctx);
@ -183,7 +182,7 @@ static void start_timer_after_init(void* arg, grpc_error* error) {
grpc_deadline_state::grpc_deadline_state(grpc_call_element* elem,
grpc_call_stack* call_stack,
grpc_call_combiner* call_combiner,
grpc_core::CallCombiner* call_combiner,
grpc_millis deadline)
: call_stack(call_stack), call_combiner(call_combiner) {
// Deadline will always be infinite on servers, so the timer will only be

@ -32,12 +32,13 @@ enum grpc_deadline_timer_state {
// Must be the first field in the filter's call_data.
struct grpc_deadline_state {
grpc_deadline_state(grpc_call_element* elem, grpc_call_stack* call_stack,
grpc_call_combiner* call_combiner, grpc_millis deadline);
grpc_core::CallCombiner* call_combiner,
grpc_millis deadline);
~grpc_deadline_state();
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
grpc_deadline_timer_state timer_state = GRPC_DEADLINE_STATE_INITIAL;
grpc_timer timer;
grpc_closure timer_callback;

@ -62,7 +62,7 @@ struct call_data {
~call_data() { GRPC_ERROR_UNREF(recv_initial_metadata_error); }
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;

@ -40,7 +40,7 @@ namespace {
struct call_data {
grpc_linked_mdelem authority_storage;
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
};
struct channel_data {

@ -72,7 +72,7 @@ struct call_data {
GRPC_ERROR_UNREF(cancel_error);
}
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;

@ -61,7 +61,7 @@ struct call_data {
}
}
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
// Outgoing headers to add to send_initial_metadata.
grpc_linked_mdelem status;

@ -32,75 +32,78 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
typedef struct {
int max_send_size;
int max_recv_size;
} message_size_limits;
static void recv_message_ready(void* user_data, grpc_error* error);
static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
namespace grpc_core {
namespace {
class MessageSizeLimits : public RefCounted<MessageSizeLimits> {
public:
static RefCountedPtr<MessageSizeLimits> CreateFromJson(const grpc_json* json);
const message_size_limits& limits() const { return limits_; }
private:
// So New() can call our private ctor.
template <typename T, typename... Args>
friend T* grpc_core::New(Args&&... args);
MessageSizeLimits(int max_send_size, int max_recv_size) {
limits_.max_send_size = max_send_size;
limits_.max_recv_size = max_recv_size;
}
message_size_limits limits_;
};
namespace {
size_t g_message_size_parser_index;
} // namespace
RefCountedPtr<MessageSizeLimits> MessageSizeLimits::CreateFromJson(
const grpc_json* json) {
UniquePtr<ServiceConfig::ParsedConfig> MessageSizeParser::ParsePerMethodParams(
const grpc_json* json, grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
int max_request_message_bytes = -1;
int max_response_message_bytes = -1;
InlinedVector<grpc_error*, 4> error_list;
for (grpc_json* field = json->child; field != nullptr; field = field->next) {
if (field->key == nullptr) continue;
if (strcmp(field->key, "maxRequestMessageBytes") == 0) {
if (max_request_message_bytes >= 0) return nullptr; // Duplicate.
if (max_request_message_bytes >= 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxRequestMessageBytes error:Duplicate entry"));
} // Duplicate, continue parsing.
if (field->type != GRPC_JSON_STRING && field->type != GRPC_JSON_NUMBER) {
return nullptr;
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxRequestMessageBytes error:should be of type number"));
} else {
max_request_message_bytes = gpr_parse_nonnegative_int(field->value);
if (max_request_message_bytes == -1) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxRequestMessageBytes error:should be non-negative"));
}
}
max_request_message_bytes = gpr_parse_nonnegative_int(field->value);
if (max_request_message_bytes == -1) return nullptr;
} else if (strcmp(field->key, "maxResponseMessageBytes") == 0) {
if (max_response_message_bytes >= 0) return nullptr; // Duplicate.
if (max_response_message_bytes >= 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxResponseMessageBytes error:Duplicate entry"));
} // Duplicate, continue parsing
if (field->type != GRPC_JSON_STRING && field->type != GRPC_JSON_NUMBER) {
return nullptr;
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxResponseMessageBytes error:should be of type number"));
} else {
max_response_message_bytes = gpr_parse_nonnegative_int(field->value);
if (max_response_message_bytes == -1) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:maxResponseMessageBytes error:should be non-negative"));
}
}
max_response_message_bytes = gpr_parse_nonnegative_int(field->value);
if (max_response_message_bytes == -1) return nullptr;
}
}
return MakeRefCounted<MessageSizeLimits>(max_request_message_bytes,
max_response_message_bytes);
if (!error_list.empty()) {
*error = GRPC_ERROR_CREATE_FROM_VECTOR("Message size parser", &error_list);
return nullptr;
}
return UniquePtr<ServiceConfig::ParsedConfig>(New<MessageSizeParsedObject>(
max_request_message_bytes, max_response_message_bytes));
}
} // namespace
} // namespace grpc_core
void MessageSizeParser::Register() {
g_message_size_parser_index = ServiceConfig::RegisterParser(
UniquePtr<ServiceConfig::Parser>(New<MessageSizeParser>()));
}
static void recv_message_ready(void* user_data, grpc_error* error);
static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
size_t MessageSizeParser::ParserIndex() { return g_message_size_parser_index; }
} // namespace grpc_core
namespace {
struct channel_data {
message_size_limits limits;
// Maps path names to refcounted_message_size_limits structs.
grpc_core::RefCountedPtr<grpc_core::SliceHashTable<
grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits>>>
method_limit_table;
grpc_core::MessageSizeParsedObject::message_size_limits limits;
grpc_core::RefCountedPtr<grpc_core::ServiceConfig> svc_cfg;
};
struct call_data {
@ -116,29 +119,42 @@ struct call_data {
// Note: Per-method config is only available on the client, so we
// apply the max request size to the send limit and the max response
// size to the receive limit.
if (chand.method_limit_table != nullptr) {
grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits> limits =
grpc_core::ServiceConfig::MethodConfigTableLookup(
*chand.method_limit_table, args.path);
if (limits != nullptr) {
if (limits->limits().max_send_size >= 0 &&
(limits->limits().max_send_size < this->limits.max_send_size ||
this->limits.max_send_size < 0)) {
this->limits.max_send_size = limits->limits().max_send_size;
}
if (limits->limits().max_recv_size >= 0 &&
(limits->limits().max_recv_size < this->limits.max_recv_size ||
this->limits.max_recv_size < 0)) {
this->limits.max_recv_size = limits->limits().max_recv_size;
}
const grpc_core::MessageSizeParsedObject* limits = nullptr;
grpc_core::ServiceConfig::CallData* svc_cfg_call_data = nullptr;
if (args.context != nullptr) {
svc_cfg_call_data = static_cast<grpc_core::ServiceConfig::CallData*>(
args.context[GRPC_SERVICE_CONFIG_CALL_DATA].value);
}
if (svc_cfg_call_data != nullptr) {
limits = static_cast<const grpc_core::MessageSizeParsedObject*>(
svc_cfg_call_data->GetMethodParsedObject(
grpc_core::MessageSizeParser::ParserIndex()));
} else if (chand.svc_cfg != nullptr) {
const auto* objs_vector =
chand.svc_cfg->GetMethodServiceConfigObjectsVector(args.path);
if (objs_vector != nullptr) {
limits = static_cast<const grpc_core::MessageSizeParsedObject*>(
(*objs_vector)[grpc_core::MessageSizeParser::ParserIndex()].get());
}
}
if (limits != nullptr) {
if (limits->limits().max_send_size >= 0 &&
(limits->limits().max_send_size < this->limits.max_send_size ||
this->limits.max_send_size < 0)) {
this->limits.max_send_size = limits->limits().max_send_size;
}
if (limits->limits().max_recv_size >= 0 &&
(limits->limits().max_recv_size < this->limits.max_recv_size ||
this->limits.max_recv_size < 0)) {
this->limits.max_recv_size = limits->limits().max_recv_size;
}
}
}
~call_data() { GRPC_ERROR_UNREF(error); }
grpc_call_combiner* call_combiner;
message_size_limits limits;
grpc_core::CallCombiner* call_combiner;
grpc_core::MessageSizeParsedObject::message_size_limits limits;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
// call our next_recv_message_ready member after handling it.
@ -284,9 +300,9 @@ static int default_size(const grpc_channel_args* args,
return without_minimal_stack;
}
message_size_limits get_message_size_limits(
grpc_core::MessageSizeParsedObject::message_size_limits get_message_size_limits(
const grpc_channel_args* channel_args) {
message_size_limits lim;
grpc_core::MessageSizeParsedObject::message_size_limits lim;
lim.max_send_size =
default_size(channel_args, GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH);
lim.max_recv_size =
@ -313,21 +329,27 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
new (chand) channel_data();
chand->limits = get_message_size_limits(args->channel_args);
// Get method config table from channel args.
// TODO(yashykt): We only need to read GRPC_ARG_SERVICE_CONFIG in the case of
// direct channels. (Service config is otherwise stored in the call_context by
// client_channel filter.) If we ever need a second filter that also needs to
// parse GRPC_ARG_SERVICE_CONFIG, we should refactor this code and add a
// separate filter that reads GRPC_ARG_SERVICE_CONFIG and saves the parsed
// config in the call_context.
const grpc_arg* channel_arg =
grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG);
const char* service_config_str = grpc_channel_arg_get_string(channel_arg);
if (service_config_str != nullptr) {
grpc_error* service_config_error = GRPC_ERROR_NONE;
grpc_core::RefCountedPtr<grpc_core::ServiceConfig> service_config =
grpc_core::ServiceConfig::Create(service_config_str,
&service_config_error);
GRPC_ERROR_UNREF(service_config_error);
if (service_config != nullptr) {
chand->method_limit_table = service_config->CreateMethodConfigTable(
grpc_core::MessageSizeLimits::CreateFromJson);
auto svc_cfg = grpc_core::ServiceConfig::Create(service_config_str,
&service_config_error);
if (service_config_error == GRPC_ERROR_NONE) {
chand->svc_cfg = std::move(svc_cfg);
} else {
gpr_log(GPR_ERROR, "%s", grpc_error_string(service_config_error));
}
GRPC_ERROR_UNREF(service_config_error);
}
return GRPC_ERROR_NONE;
}
@ -335,7 +357,7 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
// Destructor for channel_data.
static void destroy_channel_elem(grpc_channel_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
chand->method_limit_table.reset();
chand->~channel_data();
}
const grpc_channel_filter grpc_message_size_filter = {
@ -351,18 +373,34 @@ const grpc_channel_filter grpc_message_size_filter = {
grpc_channel_next_get_info,
"message_size"};
// Used for GRPC_CLIENT_SUBCHANNEL
static bool maybe_add_message_size_filter_subchannel(
grpc_channel_stack_builder* builder, void* arg) {
const grpc_channel_args* channel_args =
grpc_channel_stack_builder_get_channel_arguments(builder);
if (grpc_channel_args_want_minimal_stack(channel_args)) {
return true;
}
return grpc_channel_stack_builder_prepend_filter(
builder, &grpc_message_size_filter, nullptr, nullptr);
}
// Used for GRPC_CLIENT_DIRECT_CHANNEL and GRPC_SERVER_CHANNEL. Adds the filter
// only if message size limits or service config is specified.
static bool maybe_add_message_size_filter(grpc_channel_stack_builder* builder,
void* arg) {
const grpc_channel_args* channel_args =
grpc_channel_stack_builder_get_channel_arguments(builder);
bool enable = false;
message_size_limits lim = get_message_size_limits(channel_args);
grpc_core::MessageSizeParsedObject::message_size_limits lim =
get_message_size_limits(channel_args);
if (lim.max_send_size != -1 || lim.max_recv_size != -1) {
enable = true;
}
const grpc_arg* a =
grpc_channel_args_find(channel_args, GRPC_ARG_SERVICE_CONFIG);
if (a != nullptr) {
const char* svc_cfg_str = grpc_channel_arg_get_string(a);
if (svc_cfg_str != nullptr) {
enable = true;
}
if (enable) {
@ -374,15 +412,16 @@ static bool maybe_add_message_size_filter(grpc_channel_stack_builder* builder,
}
void grpc_message_size_filter_init(void) {
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_message_size_filter, nullptr);
grpc_channel_init_register_stage(
GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_message_size_filter_subchannel, nullptr);
grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_message_size_filter, nullptr);
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_message_size_filter, nullptr);
grpc_core::MessageSizeParser::Register();
}
void grpc_message_size_filter_shutdown(void) {}

@ -19,8 +19,41 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/service_config.h"
#include "src/core/lib/channel/channel_stack.h"
extern const grpc_channel_filter grpc_message_size_filter;
namespace grpc_core {
class MessageSizeParsedObject : public ServiceConfig::ParsedConfig {
public:
struct message_size_limits {
int max_send_size;
int max_recv_size;
};
MessageSizeParsedObject(int max_send_size, int max_recv_size) {
limits_.max_send_size = max_send_size;
limits_.max_recv_size = max_recv_size;
}
const message_size_limits& limits() const { return limits_; }
private:
message_size_limits limits_;
};
class MessageSizeParser : public ServiceConfig::Parser {
public:
UniquePtr<ServiceConfig::ParsedConfig> ParsePerMethodParams(
const grpc_json* json, grpc_error** error) override;
static void Register();
static size_t ParserIndex();
};
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_MESSAGE_SIZE_MESSAGE_SIZE_FILTER_H */

@ -655,11 +655,12 @@ grpc_chttp2_stream::Reffer::Reffer(grpc_chttp2_stream* s) {
grpc_chttp2_stream::grpc_chttp2_stream(grpc_chttp2_transport* t,
grpc_stream_refcount* refcount,
const void* server_data,
gpr_arena* arena)
grpc_core::Arena* arena)
: t(t),
refcount(refcount),
reffer(this),
metadata_buffer{{arena}, {arena}} {
metadata_buffer{grpc_chttp2_incoming_metadata_buffer(arena),
grpc_chttp2_incoming_metadata_buffer(arena)} {
if (server_data) {
id = static_cast<uint32_t>((uintptr_t)server_data);
*t->accepting_stream = this;
@ -740,7 +741,7 @@ grpc_chttp2_stream::~grpc_chttp2_stream() {
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
grpc_core::Arena* arena) {
GPR_TIMER_SCOPE("init_stream", 0);
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
new (gs) grpc_chttp2_stream(t, refcount, server_data, arena);

@ -36,7 +36,7 @@ grpc_error* grpc_chttp2_incoming_metadata_buffer_add(
buffer->count++;
} else {
storage = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(buffer->arena, sizeof(grpc_linked_mdelem)));
buffer->arena->Alloc(sizeof(grpc_linked_mdelem)));
}
return grpc_metadata_batch_add_tail(&buffer->batch, storage, elem);
}

@ -24,7 +24,8 @@
#include "src/core/lib/transport/transport.h"
struct grpc_chttp2_incoming_metadata_buffer {
grpc_chttp2_incoming_metadata_buffer(gpr_arena* arena) : arena(arena) {
explicit grpc_chttp2_incoming_metadata_buffer(grpc_core::Arena* arena)
: arena(arena) {
grpc_metadata_batch_init(&batch);
batch.deadline = GRPC_MILLIS_INF_FUTURE;
}
@ -34,7 +35,7 @@ struct grpc_chttp2_incoming_metadata_buffer {
static constexpr size_t kPreallocatedMDElem = 10;
gpr_arena* arena;
grpc_core::Arena* arena;
size_t size = 0; // total size of metadata.
size_t count = 0; // minimum of count of metadata and kPreallocatedMDElem.
// These preallocated mdelems are used while count < kPreallocatedMDElem.

@ -504,7 +504,7 @@ typedef enum {
struct grpc_chttp2_stream {
grpc_chttp2_stream(grpc_chttp2_transport* t, grpc_stream_refcount* refcount,
const void* server_data, gpr_arena* arena);
const void* server_data, grpc_core::Arena* arena);
~grpc_chttp2_stream();
void* context;
@ -633,7 +633,7 @@ struct grpc_chttp2_stream {
GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
/* Stream decompression method to be used. */
grpc_stream_compression_method stream_decompression_method =
GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
/** Stream compression decompress context */
grpc_stream_compression_context* stream_decompression_ctx = nullptr;
/** Stream compression compress context */

@ -110,7 +110,7 @@ typedef struct grpc_cronet_transport grpc_cronet_transport;
/* TODO (makdharma): reorder structure for memory efficiency per
http://www.catb.org/esr/structure-packing/#_structure_reordering: */
struct read_state {
read_state(gpr_arena* arena)
read_state(grpc_core::Arena* arena)
: trailing_metadata(arena), initial_metadata(arena) {
grpc_slice_buffer_init(&read_slice_buffer);
}
@ -144,7 +144,7 @@ struct write_state {
/* track state of one stream op */
struct op_state {
op_state(gpr_arena* arena) : rs(arena) {}
op_state(grpc_core::Arena* arena) : rs(arena) {}
bool state_op_done[OP_NUM_OPS] = {};
bool state_callback_received[OP_NUM_OPS] = {};
@ -186,10 +186,10 @@ struct op_storage {
struct stream_obj {
stream_obj(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, gpr_arena* arena);
grpc_stream_refcount* refcount, grpc_core::Arena* arena);
~stream_obj();
gpr_arena* arena;
grpc_core::Arena* arena;
struct op_and_state* oas = nullptr;
grpc_transport_stream_op_batch* curr_op = nullptr;
grpc_cronet_transport* curr_ct;
@ -1368,7 +1368,8 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
*/
inline stream_obj::stream_obj(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, gpr_arena* arena)
grpc_stream_refcount* refcount,
grpc_core::Arena* arena)
: arena(arena),
curr_ct(reinterpret_cast<grpc_cronet_transport*>(gt)),
curr_gs(gs),
@ -1387,7 +1388,7 @@ inline stream_obj::~stream_obj() {
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
grpc_core::Arena* arena) {
new (gs) stream_obj(gt, gs, refcount, arena);
return 0;
}

@ -120,7 +120,7 @@ struct inproc_transport {
struct inproc_stream {
inproc_stream(inproc_transport* t, grpc_stream_refcount* refcount,
const void* server_data, gpr_arena* arena)
const void* server_data, grpc_core::Arena* arena)
: t(t), refs(refcount), arena(arena) {
// Ref this stream right now for ctor and list.
ref("inproc_init_stream:init");
@ -250,7 +250,7 @@ struct inproc_stream {
grpc_stream_refcount* refs;
grpc_closure* closure_at_destroy = nullptr;
gpr_arena* arena;
grpc_core::Arena* arena;
grpc_transport_stream_op_batch* send_message_op = nullptr;
grpc_transport_stream_op_batch* send_trailing_md_op = nullptr;
@ -309,8 +309,8 @@ grpc_error* fill_in_metadata(inproc_stream* s,
grpc_error* error = GRPC_ERROR_NONE;
for (grpc_linked_mdelem* elem = metadata->list.head;
(elem != nullptr) && (error == GRPC_ERROR_NONE); elem = elem->next) {
grpc_linked_mdelem* nelem = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(s->arena, sizeof(*nelem)));
grpc_linked_mdelem* nelem =
static_cast<grpc_linked_mdelem*>(s->arena->Alloc(sizeof(*nelem)));
nelem->md =
grpc_mdelem_from_slices(grpc_slice_intern(GRPC_MDKEY(elem->md)),
grpc_slice_intern(GRPC_MDVALUE(elem->md)));
@ -322,7 +322,7 @@ grpc_error* fill_in_metadata(inproc_stream* s,
int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
grpc_core::Arena* arena) {
INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data);
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
new (gs) inproc_stream(t, refcount, server_data, arena);
@ -436,13 +436,13 @@ void fail_helper_locked(inproc_stream* s, grpc_error* error) {
// since it expects that as well as no error yet
grpc_metadata_batch fake_md;
grpc_metadata_batch_init(&fake_md);
grpc_linked_mdelem* path_md = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(s->arena, sizeof(*path_md)));
grpc_linked_mdelem* path_md =
static_cast<grpc_linked_mdelem*>(s->arena->Alloc(sizeof(*path_md)));
path_md->md = grpc_mdelem_from_slices(g_fake_path_key, g_fake_path_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(&fake_md, path_md) ==
GRPC_ERROR_NONE);
grpc_linked_mdelem* auth_md = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(s->arena, sizeof(*auth_md)));
grpc_linked_mdelem* auth_md =
static_cast<grpc_linked_mdelem*>(s->arena->Alloc(sizeof(*auth_md)));
auth_md->md = grpc_mdelem_from_slices(g_fake_auth_key, g_fake_auth_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(&fake_md, auth_md) ==
GRPC_ERROR_NONE);

@ -42,7 +42,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/transport.h"
@ -69,8 +69,8 @@ typedef struct {
const grpc_slice& path;
gpr_timespec start_time;
grpc_millis deadline;
gpr_arena* arena;
grpc_call_combiner* call_combiner;
grpc_core::Arena* arena;
grpc_core::CallCombiner* call_combiner;
} grpc_call_element_args;
typedef struct {

@ -41,12 +41,12 @@ typedef struct connected_channel_channel_data {
typedef struct {
grpc_closure closure;
grpc_closure* original_closure;
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
const char* reason;
} callback_state;
typedef struct connected_channel_call_data {
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
// Closures used for returning results on the call combiner.
callback_state on_complete[6]; // Max number of pending batches.
callback_state recv_initial_metadata_ready;

@ -35,6 +35,9 @@ typedef enum {
/// Reserved for traffic_class_context.
GRPC_CONTEXT_TRAFFIC,
/// Holds a pointer to ServiceConfig::CallData associated with this call.
GRPC_SERVICE_CONFIG_CALL_DATA,
GRPC_CONTEXT_COUNT
} grpc_context_index;

@ -1,152 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/gpr/arena.h"
#include <string.h>
#include <new>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/memory.h"
// Raw allocation helper: every block backing an arena is aligned to
// GPR_MAX_ALIGNMENT so any scalar type may live at a returned address.
static void* gpr_arena_malloc(size_t size) {
  return gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
}
// Uncomment this to use a simple arena that simply allocates the
// requested amount of memory for each call to gpr_arena_alloc(). This
// effectively eliminates the efficiency gain of using an arena, but it
// may be useful for debugging purposes.
//#define SIMPLE_ARENA_FOR_DEBUGGING
#ifdef SIMPLE_ARENA_FOR_DEBUGGING
// Debug-only arena: a mutex-guarded flat list of individually malloc'd
// blocks. Keeping each allocation separate lets memory tools bound-check
// them, at the cost of the arena's usual bump-allocation speed.
struct gpr_arena {
  gpr_arena() { gpr_mu_init(&mu); }
  ~gpr_arena() {
    gpr_mu_destroy(&mu);
    // Release every block handed out by gpr_arena_alloc(), then the
    // bookkeeping array itself.
    for (size_t i = 0; i < num_ptrs; ++i) {
      gpr_free_aligned(ptrs[i]);
    }
    gpr_free(ptrs);
  }
  gpr_mu mu;              // guards ptrs / num_ptrs
  void** ptrs = nullptr;  // every block handed out so far
  size_t num_ptrs = 0;    // number of valid entries in ptrs
};
// Creates a debugging arena. initial_size is ignored because this variant
// malloc's each allocation individually instead of pre-reserving a zone.
gpr_arena* gpr_arena_create(size_t ignored_initial_size) {
  return grpc_core::New<gpr_arena>();
}
// Destroys the arena and frees every block it handed out. This debug
// variant does not track a byte count, so the return value is a dummy.
size_t gpr_arena_destroy(gpr_arena* arena) {
  grpc_core::Delete(arena);
  return 1;  // Value doesn't matter, since it won't be used.
}
// Allocates \a size bytes from the debugging arena. Each request gets its
// own aligned malloc'd block, recorded under the mutex so the destructor
// can free everything.
void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
  gpr_mu_lock(&arena->mu);
  arena->ptrs = static_cast<void**>(
      gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1)));
  void* block = gpr_arena_malloc(size);
  arena->ptrs[arena->num_ptrs] = block;
  ++arena->num_ptrs;
  gpr_mu_unlock(&arena->mu);
  return block;
}
#else // SIMPLE_ARENA_FOR_DEBUGGING
// TODO(roth): We currently assume that all callers need alignment of 16
// bytes, which may be wrong in some cases. As part of converting the
// arena API to C++, we should consider replacing gpr_arena_alloc() with a
// template that takes the type of the value being allocated, which
// would allow us to use the alignment actually needed by the caller.
// Singly-linked list node heading each overflow allocation ("zone").
// The usable bytes follow the zone header inside the same malloc'd block.
typedef struct zone {
  zone* next = nullptr;
} zone;
// Production arena: zone 0 (initial_zone_size bytes) is carved out of the
// same allocation as the gpr_arena header itself; allocations that overflow
// zone 0 each get a dedicated zone appended to a tail-linked list.
struct gpr_arena {
  gpr_arena(size_t initial_size)
      : initial_zone_size(initial_size), last_zone(&initial_zone) {
    gpr_mu_init(&arena_growth_mutex);
  }
  ~gpr_arena() {
    gpr_mu_destroy(&arena_growth_mutex);
    // Free every overflow zone; zone 0 lives inside this object's own
    // allocation and is released by gpr_arena_destroy().
    zone* z = initial_zone.next;
    while (z) {
      zone* next_z = z->next;
      z->~zone();
      gpr_free_aligned(z);
      z = next_z;
    }
  }
  // Keep track of the total used size. We use this in our call sizing
  // hysteresis.
  gpr_atm total_used = 0;
  size_t initial_zone_size;  // usable bytes inline in zone 0
  zone initial_zone;         // zone 0 header; data follows this struct
  zone* last_zone;           // tail of the zone list
  gpr_mu arena_growth_mutex;  // guards the zone list (not total_used)
};
// Creates an arena whose zone 0 holds \a initial_size usable bytes,
// allocated in a single aligned block together with the gpr_arena header.
gpr_arena* gpr_arena_create(size_t initial_size) {
  initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
  const size_t total_bytes =
      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size;
  void* storage = gpr_arena_malloc(total_bytes);
  return new (storage) gpr_arena(initial_size);
}
// Destroys the arena, returning the total number of bytes that were
// allocated from it (used by callers for future arena sizing).
size_t gpr_arena_destroy(gpr_arena* arena) {
  // Snapshot the counter before the destructor runs.
  const gpr_atm bytes_used = gpr_atm_no_barrier_load(&arena->total_used);
  arena->~gpr_arena();
  gpr_free_aligned(arena);
  return static_cast<size_t>(bytes_used);
}
// Allocates \a size bytes from the arena. Fast path: bump-allocate out of
// zone 0 with a single atomic fetch-add. Slow path: give the request its
// own zone.
void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
  size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size);
  const size_t begin =
      gpr_atm_no_barrier_fetch_add(&arena->total_used, size);
  if (begin + size <= arena->initial_zone_size) {
    // Fast path: the allocation fits in the inline region after the header.
    return reinterpret_cast<char*>(arena) +
           GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + begin;
  }
  // If the allocation isn't able to end in the initial zone, create a new
  // zone for this allocation, and any unused space in the initial zone is
  // wasted. This overflowing and wasting is uncommon because of our arena
  // sizing hysteresis (that is, most calls should have a large enough initial
  // zone and will not need to grow the arena).
  gpr_mu_lock(&arena->arena_growth_mutex);
  zone* z = new (gpr_arena_malloc(
      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size)) zone();
  arena->last_zone->next = z;
  arena->last_zone = z;
  gpr_mu_unlock(&arena->arena_growth_mutex);
  return reinterpret_cast<char*>(z) +
         GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone));
}
#endif // SIMPLE_ARENA_FOR_DEBUGGING

@ -21,21 +21,27 @@
// the arena as a whole is freed
// Tracks the total memory allocated against it, so that future arenas can
// pre-allocate the right amount of memory
// This transitional API is deprecated and will be removed soon in favour of
// src/core/lib/gprpp/arena.h .
#ifndef GRPC_CORE_LIB_GPR_ARENA_H
#define GRPC_CORE_LIB_GPR_ARENA_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
typedef struct gpr_arena gpr_arena;
#include "src/core/lib/gprpp/arena.h"
// TODO(arjunroy) : Remove deprecated gpr_arena API once all callers are gone.
typedef class grpc_core::Arena gpr_arena;
// Create an arena, with \a initial_size bytes in the first allocated buffer
gpr_arena* gpr_arena_create(size_t initial_size);
// Allocate \a size bytes from the arena
void* gpr_arena_alloc(gpr_arena* arena, size_t size);
inline gpr_arena* gpr_arena_create(size_t initial_size) {
return grpc_core::Arena::Create(initial_size);
}
// Destroy an arena, returning the total number of bytes allocated
size_t gpr_arena_destroy(gpr_arena* arena);
inline size_t gpr_arena_destroy(gpr_arena* arena) { return arena->Destroy(); }
// Allocate \a size bytes from the arena
inline void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
return arena->Alloc(size);
}
#endif /* GRPC_CORE_LIB_GPR_ARENA_H */

@ -0,0 +1,103 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/arena.h"
#include <string.h>
#include <new>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/memory.h"
namespace {

// Returns raw memory big enough for an Arena header plus an inline initial
// zone of \a initial_size bytes, aligned to the stricter of the cacheline
// size and the maximum scalar alignment (when the former is a multiple of
// the latter).
void* ArenaStorage(size_t initial_size) {
  static constexpr size_t arena_header_bytes =
      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_core::Arena));
  static constexpr size_t alignment =
      (GPR_CACHELINE_SIZE > GPR_MAX_ALIGNMENT &&
       GPR_CACHELINE_SIZE % GPR_MAX_ALIGNMENT == 0)
          ? GPR_CACHELINE_SIZE
          : GPR_MAX_ALIGNMENT;
  const size_t total_bytes =
      arena_header_bytes + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
  return gpr_malloc_aligned(total_bytes, alignment);
}

}  // namespace
namespace grpc_core {
// Tears down every overflow zone, newest first. The arena's own storage
// (which contains zone 0) is released separately in Destroy().
Arena::~Arena() {
  for (Zone* z = last_zone_; z != nullptr;) {
    Zone* const older = z->prev;
    z->~Zone();
    gpr_free_aligned(z);
    z = older;
  }
}
// Creates an arena with \a initial_size inline bytes available in zone 0.
Arena* Arena::Create(size_t initial_size) {
  void* storage = ArenaStorage(initial_size);
  return new (storage) Arena(initial_size);
}
// Creates an arena and simultaneously performs its first allocation of
// \a alloc_size bytes, returning both the arena and that allocation.
Pair<Arena*, void*> Arena::CreateWithAlloc(size_t initial_size,
                                           size_t alloc_size) {
  static constexpr size_t header_bytes =
      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
  Arena* arena =
      new (ArenaStorage(initial_size)) Arena(initial_size, alloc_size);
  // The first allocation lives immediately after the arena header.
  void* first_alloc = reinterpret_cast<char*>(arena) + header_bytes;
  return MakePair(arena, first_alloc);
}
// Destroys the arena, returning the total number of bytes that were
// allocated from it over its lifetime.
size_t Arena::Destroy() {
  // Snapshot the counter before the destructor tears the object down.
  const size_t bytes_used = total_used_.Load(MemoryOrder::RELAXED);
  this->~Arena();
  gpr_free_aligned(this);
  return bytes_used;
}
void* Arena::AllocZone(size_t size) {
// If the allocation isn't able to end in the initial zone, create a new
// zone for this allocation, and any unused space in the initial zone is
// wasted. This overflowing and wasting is uncommon because of our arena
// sizing hysteresis (that is, most calls should have a large enough initial
// zone and will not need to grow the arena).
static constexpr size_t zone_base_size =
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Zone));
size_t alloc_size = zone_base_size + size;
Zone* z = new (gpr_malloc_aligned(alloc_size, GPR_MAX_ALIGNMENT)) Zone();
{
gpr_spinlock_lock(&arena_growth_spinlock_);
z->prev = last_zone_;
last_zone_ = z;
gpr_spinlock_unlock(&arena_growth_spinlock_);
}
return reinterpret_cast<char*>(z) + zone_base_size;
}
} // namespace grpc_core

@ -0,0 +1,121 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// \file Arena based allocator
// Allows very fast allocation of memory, but that memory cannot be freed until
// the arena as a whole is freed
// Tracks the total memory allocated against it, so that future arenas can
// pre-allocate the right amount of memory
#ifndef GRPC_CORE_LIB_GPRPP_ARENA_H
#define GRPC_CORE_LIB_GPRPP_ARENA_H
#include <grpc/support/port_platform.h>
#include <new>
#include <utility>
#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gprpp/atomic.h"
#include "src/core/lib/gprpp/pair.h"
#include <stddef.h>
namespace grpc_core {
// Arena-based allocator.  Memory obtained from an Arena cannot be freed
// individually; it is all released when Destroy() is called.  The total
// number of bytes requested is tracked so callers can size future arenas.
class Arena {
 public:
  // Create an arena, with \a initial_size bytes in the first allocated buffer.
  static Arena* Create(size_t initial_size);
  // Create an arena, with \a initial_size bytes in the first allocated buffer,
  // and return both a void pointer to the returned arena and a void* with the
  // first allocation.
  static Pair<Arena*, void*> CreateWithAlloc(size_t initial_size,
                                             size_t alloc_size);
  // Destroy an arena, returning the total number of bytes allocated.
  size_t Destroy();
  // Allocate \a size bytes from the arena.
  // Fast path: bump-allocates out of the initial zone (which lives directly
  // after the Arena object itself) with a single relaxed fetch-add.  If the
  // rounded-up request does not fit, falls through to AllocZone().
  void* Alloc(size_t size) {
    static constexpr size_t base_size =
        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
    size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size);
    // Reserve the byte range [begin, begin + size) of the logical arena.
    size_t begin = total_used_.FetchAdd(size, MemoryOrder::RELAXED);
    if (begin + size <= initial_zone_size_) {
      // The reservation fits in zone 0: its storage starts right after the
      // Arena header within the same gpr_malloc_aligned() block.
      return reinterpret_cast<char*>(this) + base_size + begin;
    } else {
      return AllocZone(size);
    }
  }
  // TODO(roth): We currently assume that all callers need alignment of 16
  // bytes, which may be wrong in some cases. When we have time, we should
  // change this to instead use the alignment of the type being allocated by
  // this method.
  // Allocate storage for a T from the arena and construct it in place.
  // NOTE(review): the object's destructor is never run by the arena —
  // callers must invoke it manually if T is not trivially destructible.
  template <typename T, typename... Args>
  T* New(Args&&... args) {
    T* t = static_cast<T*>(Alloc(sizeof(T)));
    new (t) T(std::forward<Args>(args)...);
    return t;
  }

 private:
  // Header for an overflow zone; the zone's usable bytes follow it.
  struct Zone {
    Zone* prev;
  };
  // Initialize an arena.
  // Parameters:
  //   initial_size: The initial size of the whole arena in bytes. These bytes
  //   are contained within 'zone 0'. If the arena user ends up requiring more
  //   memory than the arena contains in zone 0, subsequent zones are allocated
  //   on demand and maintained in a tail-linked list.
  //
  //   initial_alloc: Optionally, construct the arena as though a call to
  //   Alloc() had already been made for initial_alloc bytes. This provides a
  //   quick optimization (avoiding an atomic fetch-add) for the common case
  //   where we wish to create an arena and then perform an immediate
  //   allocation.
  explicit Arena(size_t initial_size, size_t initial_alloc = 0)
      : total_used_(initial_alloc), initial_zone_size_(initial_size) {}
  ~Arena();
  // Slow path of Alloc(): allocates a dedicated zone for an oversized request.
  void* AllocZone(size_t size);
  // Keep track of the total used size. We use this in our call sizing
  // hysteresis.
  Atomic<size_t> total_used_;
  size_t initial_zone_size_;
  // Guards mutation of the zone list in AllocZone().
  gpr_spinlock arena_growth_spinlock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
  // If the initial arena allocation wasn't enough, we allocate additional zones
  // in a reverse linked list. Each additional zone consists of (1) a pointer to
  // the zone added before this zone (null if this is the first additional zone)
  // and (2) the allocated memory. The arena itself maintains a pointer to the
  // last zone; the zone list is reverse-walked during arena destruction only.
  Zone* last_zone_ = nullptr;
};
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_GPRPP_ARENA_H */

@ -26,6 +26,7 @@ namespace grpc_core {
template <typename T>
class Optional {
public:
Optional() : value_() {}
void set(const T& val) {
value_ = val;
set_ = true;

@ -26,23 +26,43 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/profiling/timers.h"
grpc_core::TraceFlag grpc_call_combiner_trace(false, "call_combiner");
namespace grpc_core {
static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) {
TraceFlag grpc_call_combiner_trace(false, "call_combiner");
namespace {
// Decodes the tagged cancel_state word: when the low bit is set, the
// remaining bits hold a grpc_error*; otherwise no cancellation error has
// been recorded and GRPC_ERROR_NONE is returned.
grpc_error* DecodeCancelStateError(gpr_atm cancel_state) {
  if (cancel_state & 1) {
    // Strip the tag bit before reinterpreting the value as a pointer.
    // (reinterpret_cast replaces the previous C-style cast for consistency
    // with the surrounding static_cast usage.)
    return reinterpret_cast<grpc_error*>(cancel_state &
                                         ~static_cast<gpr_atm>(1));
  }
  return GRPC_ERROR_NONE;
}
static gpr_atm encode_cancel_state_error(grpc_error* error) {
// Encodes a grpc_error* into the cancel_state word by setting the low bit
// as a tag.  NOTE(review): assumes grpc_error* values are at least 2-byte
// aligned so the low bit is free — holds for heap-allocated errors.
// (reinterpret_cast replaces the previous C-style cast for consistency.)
gpr_atm EncodeCancelStateError(grpc_error* error) {
  return static_cast<gpr_atm>(1) | reinterpret_cast<gpr_atm>(error);
}
} // namespace
// Initializes an empty combiner: no closures queued, no cancellation
// state recorded, queue ready for MPSC pushes.
CallCombiner::CallCombiner() {
  gpr_atm_no_barrier_store(&cancel_state_, 0);
  gpr_atm_no_barrier_store(&size_, 0);
  gpr_mpscq_init(&queue_);
#ifdef GRPC_TSAN_ENABLED
  // Pre-built wrapper closure: under TSAN every scheduled closure is run
  // through TsanClosure so lock annotations bracket each callback.
  GRPC_CLOSURE_INIT(&tsan_closure_, TsanClosure, this,
                    grpc_schedule_on_exec_ctx);
#endif
}
CallCombiner::~CallCombiner() {
  gpr_mpscq_destroy(&queue_);
  // If cancel_state_ still holds a cancellation error (tagged low bit),
  // release our ref on it; DecodeCancelStateError returns GRPC_ERROR_NONE
  // otherwise, making the unref a no-op.
  GRPC_ERROR_UNREF(DecodeCancelStateError(cancel_state_));
}
#ifdef GRPC_TSAN_ENABLED
static void tsan_closure(void* user_data, grpc_error* error) {
grpc_call_combiner* call_combiner =
static_cast<grpc_call_combiner*>(user_data);
void CallCombiner::TsanClosure(void* arg, grpc_error* error) {
CallCombiner* self = static_cast<CallCombiner*>(arg);
// We ref-count the lock, and check if it's already taken.
// If it was taken, we should do nothing. Otherwise, we will mark it as
// locked. Note that if two different threads try to do this, only one of
@ -51,18 +71,18 @@ static void tsan_closure(void* user_data, grpc_error* error) {
// TSAN will correctly produce an error.
//
// TODO(soheil): This only covers the callbacks scheduled by
// grpc_call_combiner_(start|finish). If in the future, a
// callback gets scheduled using other mechanisms, we will need
// to add APIs to externally lock call combiners.
grpc_core::RefCountedPtr<grpc_call_combiner::TsanLock> lock =
call_combiner->tsan_lock;
// CallCombiner::Start() and CallCombiner::Stop().
// If in the future, a callback gets scheduled using other
// mechanisms, we will need to add APIs to externally lock
// call combiners.
RefCountedPtr<TsanLock> lock = self->tsan_lock_;
bool prev = false;
if (lock->taken.compare_exchange_strong(prev, true)) {
TSAN_ANNOTATE_RWLOCK_ACQUIRED(&lock->taken, true);
} else {
lock.reset();
}
GRPC_CLOSURE_RUN(call_combiner->original_closure, GRPC_ERROR_REF(error));
GRPC_CLOSURE_RUN(self->original_closure_, GRPC_ERROR_REF(error));
if (lock != nullptr) {
TSAN_ANNOTATE_RWLOCK_RELEASED(&lock->taken, true);
bool prev = true;
@ -71,34 +91,17 @@ static void tsan_closure(void* user_data, grpc_error* error) {
}
#endif
static void call_combiner_sched_closure(grpc_call_combiner* call_combiner,
grpc_closure* closure,
grpc_error* error) {
void CallCombiner::ScheduleClosure(grpc_closure* closure, grpc_error* error) {
#ifdef GRPC_TSAN_ENABLED
call_combiner->original_closure = closure;
GRPC_CLOSURE_SCHED(&call_combiner->tsan_closure, error);
original_closure_ = closure;
GRPC_CLOSURE_SCHED(&tsan_closure_, error);
#else
GRPC_CLOSURE_SCHED(closure, error);
#endif
}
void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
gpr_atm_no_barrier_store(&call_combiner->cancel_state, 0);
gpr_atm_no_barrier_store(&call_combiner->size, 0);
gpr_mpscq_init(&call_combiner->queue);
#ifdef GRPC_TSAN_ENABLED
GRPC_CLOSURE_INIT(&call_combiner->tsan_closure, tsan_closure, call_combiner,
grpc_schedule_on_exec_ctx);
#endif
}
void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
gpr_mpscq_destroy(&call_combiner->queue);
GRPC_ERROR_UNREF(decode_cancel_state_error(call_combiner->cancel_state));
}
#ifndef NDEBUG
#define DEBUG_ARGS , const char *file, int line
#define DEBUG_ARGS const char *file, int line,
#define DEBUG_FMT_STR "%s:%d: "
#define DEBUG_FMT_ARGS , file, line
#else
@ -107,20 +110,17 @@ void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
#define DEBUG_FMT_ARGS
#endif
void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
grpc_closure* closure,
grpc_error* error DEBUG_ARGS,
const char* reason) {
GPR_TIMER_SCOPE("call_combiner_start", 0);
void CallCombiner::Start(grpc_closure* closure, grpc_error* error,
DEBUG_ARGS const char* reason) {
GPR_TIMER_SCOPE("CallCombiner::Start", 0);
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO,
"==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
"==> CallCombiner::Start() [%p] closure=%p [" DEBUG_FMT_STR
"%s] error=%s",
call_combiner, closure DEBUG_FMT_ARGS, reason,
grpc_error_string(error));
this, closure DEBUG_FMT_ARGS, reason, grpc_error_string(error));
}
size_t prev_size = static_cast<size_t>(
gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1));
size_t prev_size =
static_cast<size_t>(gpr_atm_full_fetch_add(&size_, (gpr_atm)1));
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1);
@ -128,34 +128,30 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS();
if (prev_size == 0) {
GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED();
GPR_TIMER_MARK("call_combiner_initiate", 0);
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
}
// Queue was empty, so execute this closure immediately.
call_combiner_sched_closure(call_combiner, closure, error);
ScheduleClosure(closure, error);
} else {
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " QUEUING");
}
// Queue was not empty, so add closure to queue.
closure->error_data.error = error;
gpr_mpscq_push(&call_combiner->queue,
reinterpret_cast<gpr_mpscq_node*>(closure));
gpr_mpscq_push(&queue_, reinterpret_cast<gpr_mpscq_node*>(closure));
}
}
void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
const char* reason) {
GPR_TIMER_SCOPE("call_combiner_stop", 0);
void CallCombiner::Stop(DEBUG_ARGS const char* reason) {
GPR_TIMER_SCOPE("CallCombiner::Stop", 0);
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO,
"==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
call_combiner DEBUG_FMT_ARGS, reason);
gpr_log(GPR_INFO, "==> CallCombiner::Stop() [%p] [" DEBUG_FMT_STR "%s]",
this DEBUG_FMT_ARGS, reason);
}
size_t prev_size = static_cast<size_t>(
gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1));
size_t prev_size =
static_cast<size_t>(gpr_atm_full_fetch_add(&size_, (gpr_atm)-1));
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size - 1);
@ -168,10 +164,10 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
}
bool empty;
grpc_closure* closure = reinterpret_cast<grpc_closure*>(
gpr_mpscq_pop_and_check_end(&call_combiner->queue, &empty));
gpr_mpscq_pop_and_check_end(&queue_, &empty));
if (closure == nullptr) {
// This can happen either due to a race condition within the mpscq
// code or because of a race with grpc_call_combiner_start().
// code or because of a race with Start().
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " queue returned no result; checking again");
}
@ -181,8 +177,7 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure, grpc_error_string(closure->error_data.error));
}
call_combiner_sched_closure(call_combiner, closure,
closure->error_data.error);
ScheduleClosure(closure, closure->error_data.error);
break;
}
} else if (grpc_call_combiner_trace.enabled()) {
@ -190,13 +185,12 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
}
}
void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
grpc_closure* closure) {
void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL();
while (true) {
// Decode original state.
gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
grpc_error* original_error = decode_cancel_state_error(original_state);
gpr_atm original_state = gpr_atm_acq_load(&cancel_state_);
grpc_error* original_error = DecodeCancelStateError(original_state);
// If error is set, invoke the cancellation closure immediately.
// Otherwise, store the new closure.
if (original_error != GRPC_ERROR_NONE) {
@ -204,16 +198,15 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p "
"for pre-existing cancellation",
call_combiner, closure);
this, closure);
}
GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_REF(original_error));
break;
} else {
if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
(gpr_atm)closure)) {
if (gpr_atm_full_cas(&cancel_state_, original_state, (gpr_atm)closure)) {
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p",
call_combiner, closure);
this, closure);
}
// If we replaced an earlier closure, invoke the original
// closure with GRPC_ERROR_NONE. This allows callers to clean
@ -222,8 +215,8 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
closure = (grpc_closure*)original_state;
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling old cancel callback=%p",
call_combiner, closure);
"call_combiner=%p: scheduling old cancel callback=%p", this,
closure);
}
GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
}
@ -234,24 +227,23 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
}
}
void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
grpc_error* error) {
void CallCombiner::Cancel(grpc_error* error) {
GRPC_STATS_INC_CALL_COMBINER_CANCELLED();
while (true) {
gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
grpc_error* original_error = decode_cancel_state_error(original_state);
gpr_atm original_state = gpr_atm_acq_load(&cancel_state_);
grpc_error* original_error = DecodeCancelStateError(original_state);
if (original_error != GRPC_ERROR_NONE) {
GRPC_ERROR_UNREF(error);
break;
}
if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
encode_cancel_state_error(error))) {
if (gpr_atm_full_cas(&cancel_state_, original_state,
EncodeCancelStateError(error))) {
if (original_state != 0) {
grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p",
call_combiner, notify_on_cancel);
this, notify_on_cancel);
}
GRPC_CLOSURE_SCHED(notify_on_cancel, GRPC_ERROR_REF(error));
}
@ -260,3 +252,5 @@ void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
// cas failed, try again.
}
}
} // namespace grpc_core

@ -41,15 +41,78 @@
// when it is done with the action that was kicked off by the original
// callback.
extern grpc_core::TraceFlag grpc_call_combiner_trace;
namespace grpc_core {
extern TraceFlag grpc_call_combiner_trace;
class CallCombiner {
public:
CallCombiner();
~CallCombiner();
#ifndef NDEBUG
#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \
(call_combiner)->Start((closure), (error), __FILE__, __LINE__, (reason))
#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \
(call_combiner)->Stop(__FILE__, __LINE__, (reason))
/// Starts processing \a closure.
void Start(grpc_closure* closure, grpc_error* error, const char* file,
int line, const char* reason);
/// Yields the call combiner to the next closure in the queue, if any.
void Stop(const char* file, int line, const char* reason);
#else
#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \
(call_combiner)->Start((closure), (error), (reason))
#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \
(call_combiner)->Stop((reason))
/// Starts processing \a closure.
void Start(grpc_closure* closure, grpc_error* error, const char* reason);
/// Yields the call combiner to the next closure in the queue, if any.
void Stop(const char* reason);
#endif
/// Registers \a closure to be invoked when Cancel() is called.
///
/// Once a closure is registered, it will always be scheduled exactly
/// once; this allows the closure to hold references that will be freed
/// regardless of whether or not the call was cancelled. If a cancellation
/// does occur, the closure will be scheduled with the cancellation error;
/// otherwise, it will be scheduled with GRPC_ERROR_NONE.
///
/// The closure will be scheduled in the following cases:
/// - If Cancel() was called prior to registering the closure, it will be
/// scheduled immediately with the cancelation error.
/// - If Cancel() is called after registering the closure, the closure will
/// be scheduled with the cancellation error.
/// - If SetNotifyOnCancel() is called again to register a new cancellation
/// closure, the previous cancellation closure will be scheduled with
/// GRPC_ERROR_NONE.
///
/// If \a closure is NULL, then no closure will be invoked on
/// cancellation; this effectively unregisters the previously set closure.
/// However, most filters will not need to explicitly unregister their
/// callbacks, as this is done automatically when the call is destroyed.
/// Filters that schedule the cancellation closure on ExecCtx do not need
/// to take a ref on the call stack to guarantee closure liveness. This is
/// done by explicitly flushing ExecCtx after the unregistration during
/// call destruction.
void SetNotifyOnCancel(grpc_closure* closure);
/// Indicates that the call has been cancelled.
void Cancel(grpc_error* error);
private:
void ScheduleClosure(grpc_closure* closure, grpc_error* error);
#ifdef GRPC_TSAN_ENABLED
static void TsanClosure(void* arg, grpc_error* error);
#endif
struct grpc_call_combiner {
gpr_atm size = 0; // size_t, num closures in queue or currently executing
gpr_mpscq queue;
gpr_atm size_ = 0; // size_t, num closures in queue or currently executing
gpr_mpscq queue_;
// Either 0 (if not cancelled and no cancellation closure set),
// a grpc_closure* (if the lowest bit is 0),
// or a grpc_error* (if the lowest bit is 1).
gpr_atm cancel_state = 0;
gpr_atm cancel_state_ = 0;
#ifdef GRPC_TSAN_ENABLED
// A fake ref-counted lock that is kept alive after the destruction of
// grpc_call_combiner, when we are running the original closure.
@ -58,90 +121,20 @@ struct grpc_call_combiner {
// callback is called. However, original_closure is free to trigger
// anything on the call combiner (including destruction of grpc_call).
// Thus, we need a ref-counted structure that can outlive the call combiner.
struct TsanLock
: public grpc_core::RefCounted<TsanLock,
grpc_core::NonPolymorphicRefCount> {
struct TsanLock : public RefCounted<TsanLock, NonPolymorphicRefCount> {
TsanLock() { TSAN_ANNOTATE_RWLOCK_CREATE(&taken); }
~TsanLock() { TSAN_ANNOTATE_RWLOCK_DESTROY(&taken); }
// To avoid double-locking by the same thread, we should acquire/release
// the lock only when taken is false. On each acquire taken must be set to
// true.
std::atomic<bool> taken{false};
};
grpc_core::RefCountedPtr<TsanLock> tsan_lock =
grpc_core::MakeRefCounted<TsanLock>();
grpc_closure tsan_closure;
grpc_closure* original_closure;
RefCountedPtr<TsanLock> tsan_lock_ = MakeRefCounted<TsanLock>();
grpc_closure tsan_closure_;
grpc_closure* original_closure_;
#endif
};
// Assumes memory was initialized to zero.
void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner);
#ifndef NDEBUG
#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \
grpc_call_combiner_start((call_combiner), (closure), (error), __FILE__, \
__LINE__, (reason))
#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \
grpc_call_combiner_stop((call_combiner), __FILE__, __LINE__, (reason))
/// Starts processing \a closure on \a call_combiner.
void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
grpc_closure* closure, grpc_error* error,
const char* file, int line, const char* reason);
/// Yields the call combiner to the next closure in the queue, if any.
void grpc_call_combiner_stop(grpc_call_combiner* call_combiner,
const char* file, int line, const char* reason);
#else
#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \
grpc_call_combiner_start((call_combiner), (closure), (error), (reason))
#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \
grpc_call_combiner_stop((call_combiner), (reason))
/// Starts processing \a closure on \a call_combiner.
void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
grpc_closure* closure, grpc_error* error,
const char* reason);
/// Yields the call combiner to the next closure in the queue, if any.
void grpc_call_combiner_stop(grpc_call_combiner* call_combiner,
const char* reason);
#endif
/// Registers \a closure to be invoked by \a call_combiner when
/// grpc_call_combiner_cancel() is called.
///
/// Once a closure is registered, it will always be scheduled exactly
/// once; this allows the closure to hold references that will be freed
/// regardless of whether or not the call was cancelled. If a cancellation
/// does occur, the closure will be scheduled with the cancellation error;
/// otherwise, it will be scheduled with GRPC_ERROR_NONE.
///
/// The closure will be scheduled in the following cases:
/// - If grpc_call_combiner_cancel() was called prior to registering the
/// closure, it will be scheduled immediately with the cancelation error.
/// - If grpc_call_combiner_cancel() is called after registering the
/// closure, the closure will be scheduled with the cancellation error.
/// - If grpc_call_combiner_set_notify_on_cancel() is called again to
/// register a new cancellation closure, the previous cancellation
/// closure will be scheduled with GRPC_ERROR_NONE.
///
/// If \a closure is NULL, then no closure will be invoked on
/// cancellation; this effectively unregisters the previously set closure.
/// However, most filters will not need to explicitly unregister their
/// callbacks, as this is done automatically when the call is destroyed. Filters
/// that schedule the cancellation closure on ExecCtx do not need to take a ref
/// on the call stack to guarantee closure liveness. This is done by explicitly
/// flushing ExecCtx after the unregistration during call destruction.
void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
grpc_closure* closure);
/// Indicates that the call has been cancelled.
void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
grpc_error* error);
namespace grpc_core {
// Helper for running a list of closures in a call combiner.
//
// Each callback running in the call combiner will eventually be
@ -166,7 +159,7 @@ class CallCombinerClosureList {
// scheduled via GRPC_CLOSURE_SCHED(), which will eventually result in
// yielding the call combiner. If the list is empty, then the call
// combiner will be yielded immediately.
void RunClosures(grpc_call_combiner* call_combiner) {
void RunClosures(CallCombiner* call_combiner) {
if (closures_.empty()) {
GRPC_CALL_COMBINER_STOP(call_combiner, "no closures to schedule");
return;
@ -190,7 +183,7 @@ class CallCombinerClosureList {
// Runs all closures in the call combiner, but does NOT yield the call
// combiner. All closures will be scheduled via GRPC_CALL_COMBINER_START().
void RunClosuresWithoutYielding(grpc_call_combiner* call_combiner) {
void RunClosuresWithoutYielding(CallCombiner* call_combiner) {
for (size_t i = 0; i < closures_.size(); ++i) {
auto& closure = closures_[i];
GRPC_CALL_COMBINER_START(call_combiner, closure.closure, closure.error,

@ -41,7 +41,7 @@ static void create_sockets(SOCKET sv[2]) {
int addr_len = sizeof(addr);
lst_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
grpc_get_default_wsa_socket_flags());
GPR_ASSERT(lst_sock != INVALID_SOCKET);
memset(&addr, 0, sizeof(addr));
@ -54,7 +54,7 @@ static void create_sockets(SOCKET sv[2]) {
SOCKET_ERROR);
cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
grpc_get_default_wsa_socket_flags());
GPR_ASSERT(cli_sock != INVALID_SOCKET);
GPR_ASSERT(WSAConnect(cli_sock, (grpc_sockaddr*)&addr, addr_len, NULL, NULL,

@ -30,6 +30,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/inlined_vector.h"
/// Opaque representation of an error.
/// See https://github.com/grpc/grpc/blob/master/doc/core/grpc-error.md for a
@ -165,6 +166,9 @@ grpc_error* grpc_error_create(const char* file, int line,
grpc_error_create(__FILE__, __LINE__, grpc_slice_from_copied_string(desc), \
errs, count)
#define GRPC_ERROR_CREATE_FROM_VECTOR(desc, error_list) \
grpc_error_create_from_vector(__FILE__, __LINE__, desc, error_list)
#ifndef NDEBUG
grpc_error* grpc_error_do_ref(grpc_error* err, const char* file, int line);
void grpc_error_do_unref(grpc_error* err, const char* file, int line);
@ -193,6 +197,25 @@ inline void grpc_error_unref(grpc_error* err) {
#define GRPC_ERROR_UNREF(err) grpc_error_unref(err)
#endif
// Consumes all the errors in the vector and forms a referencing error from
// them. If the vector is empty, return GRPC_ERROR_NONE.
template <size_t N>
static grpc_error* grpc_error_create_from_vector(
const char* file, int line, const char* desc,
grpc_core::InlinedVector<grpc_error*, N>* error_list) {
grpc_error* error = GRPC_ERROR_NONE;
if (error_list->size() != 0) {
error = grpc_error_create(file, line, grpc_slice_from_static_string(desc),
error_list->data(), error_list->size());
// Remove refs to all errors in error_list.
for (size_t i = 0; i < error_list->size(); i++) {
GRPC_ERROR_UNREF((*error_list)[i]);
}
error_list->clear();
}
return error;
}
grpc_error* grpc_error_set_int(grpc_error* src, grpc_error_ints which,
intptr_t value) GRPC_MUST_USE_RESULT;
/// It is an error to pass nullptr as `p`. Caller should allocate a dummy

@ -36,7 +36,7 @@ static bool errqueue_supported = false;
bool kernel_supports_errqueue() { return errqueue_supported; }
void grpc_errqueue_init() {
/* Both-compile time and run-time linux kernel versions should be atleast 4.0.0
/* Both-compile time and run-time linux kernel versions should be at least 4.0.0
*/
#ifdef GRPC_LINUX_ERRQUEUE
struct utsname buffer;

@ -61,6 +61,7 @@ static void iomgr_platform_init(void) {
winsock_init();
grpc_iocp_init();
grpc_pollset_global_init();
grpc_wsa_socket_flags_init();
}
static void iomgr_platform_flush(void) { grpc_iocp_flush(); }

@ -39,6 +39,8 @@
#include "src/core/lib/iomgr/sockaddr_windows.h"
#include "src/core/lib/iomgr/socket_windows.h"
static DWORD s_wsa_socket_flags;
grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name) {
char* final_name;
grpc_winsocket* r = (grpc_winsocket*)gpr_malloc(sizeof(grpc_winsocket));
@ -181,4 +183,21 @@ int grpc_ipv6_loopback_available(void) {
return g_ipv6_loopback_available;
}
// Returns the WSASocket() flags computed once at iomgr init by
// grpc_wsa_socket_flags_init(); shared by all socket-creation sites.
DWORD grpc_get_default_wsa_socket_flags() { return s_wsa_socket_flags; }
// Probes, once at startup, which WSASocket() flags this Windows version
// supports and caches the result in s_wsa_socket_flags.
void grpc_wsa_socket_flags_init() {
  s_wsa_socket_flags = WSA_FLAG_OVERLAPPED;
  /* WSA_FLAG_NO_HANDLE_INHERIT may be not supported on the older Windows
     versions, see
     https://msdn.microsoft.com/en-us/library/windows/desktop/ms742212(v=vs.85).aspx
     for details. */
  // Feature-detect by attempting to create a throwaway socket with the flag;
  // success means the OS accepts it and we can use it for all sockets.
  SOCKET sock = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
                          s_wsa_socket_flags | WSA_FLAG_NO_HANDLE_INHERIT);
  if (sock != INVALID_SOCKET) {
    /* Windows 7, Windows 2008 R2 with SP1 or later */
    s_wsa_socket_flags |= WSA_FLAG_NO_HANDLE_INHERIT;
    closesocket(sock);
  }
}
#endif /* GRPC_WINSOCK_SOCKET */

@ -32,6 +32,10 @@
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#ifndef WSA_FLAG_NO_HANDLE_INHERIT
#define WSA_FLAG_NO_HANDLE_INHERIT 0x80
#endif
/* This holds the data for an outstanding read or write on a socket.
The mutex to protect the concurrent access to that data is the one
inside the winsocket wrapper. */
@ -114,6 +118,10 @@ void grpc_socket_become_ready(grpc_winsocket* winsocket,
The value is probed once, and cached for the life of the process. */
int grpc_ipv6_loopback_available(void);
void grpc_wsa_socket_flags_init();
DWORD grpc_get_default_wsa_socket_flags();
#endif
#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_WINDOWS_H */

@ -148,7 +148,7 @@ static void tcp_connect(grpc_closure* on_done, grpc_endpoint** endpoint,
}
sock = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
grpc_get_default_wsa_socket_flags());
if (sock == INVALID_SOCKET) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "WSASocket");
goto failure;

@ -255,7 +255,7 @@ static grpc_error* start_accept_locked(grpc_tcp_listener* port) {
}
sock = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
grpc_get_default_wsa_socket_flags());
if (sock == INVALID_SOCKET) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "WSASocket");
goto failure;
@ -493,7 +493,7 @@ static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
}
sock = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
grpc_get_default_wsa_socket_flags());
if (sock == INVALID_SOCKET) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "WSASocket");
goto done;

@ -74,20 +74,6 @@ static grpc_error* set_dualstack(SOCKET sock) {
: GRPC_WSA_ERROR(WSAGetLastError(), "setsockopt(IPV6_V6ONLY)");
}
static grpc_error* enable_loopback_fast_path(SOCKET sock) {
int status;
uint32_t param = 1;
DWORD ret;
status = WSAIoctl(sock, /*SIO_LOOPBACK_FAST_PATH==*/_WSAIOW(IOC_VENDOR, 16),
&param, sizeof(param), NULL, 0, &ret, 0, 0);
if (status == SOCKET_ERROR) {
status = WSAGetLastError();
}
return status == 0 || status == WSAEOPNOTSUPP
? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(status, "WSAIoctl(SIO_LOOPBACK_FAST_PATH)");
}
static grpc_error* enable_socket_low_latency(SOCKET sock) {
int status;
BOOL param = TRUE;
@ -106,8 +92,6 @@ grpc_error* grpc_tcp_prepare_socket(SOCKET sock) {
if (err != GRPC_ERROR_NONE) return err;
err = set_dualstack(sock);
if (err != GRPC_ERROR_NONE) return err;
err = enable_loopback_fast_path(sock);
if (err != GRPC_ERROR_NONE) return err;
err = enable_socket_low_latency(sock);
if (err != GRPC_ERROR_NONE) return err;
return GRPC_ERROR_NONE;

@ -487,7 +487,7 @@ static void timer_cancel(grpc_timer* timer) {
/* Rebalances the timer shard by computing a new 'queue_deadline_cap' and moving
all relevant timers in shard->list (i.e timers with deadlines earlier than
'queue_deadline_cap') into into shard->heap.
Returns 'true' if shard->heap has atleast ONE element
Returns 'true' if shard->heap has at least ONE element
REQUIRES: shard->mu locked */
static bool refill_heap(timer_shard* shard, grpc_millis now) {
/* Compute the new queue window width and bound by the limits: */

@ -21,8 +21,8 @@
#include <string.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/security/context/security_context.h"
@ -102,9 +102,9 @@ grpc_client_security_context::~grpc_client_security_context() {
}
grpc_client_security_context* grpc_client_security_context_create(
gpr_arena* arena, grpc_call_credentials* creds) {
return new (gpr_arena_alloc(arena, sizeof(grpc_client_security_context)))
grpc_client_security_context(creds != nullptr ? creds->Ref() : nullptr);
grpc_core::Arena* arena, grpc_call_credentials* creds) {
return arena->New<grpc_client_security_context>(
creds != nullptr ? creds->Ref() : nullptr);
}
void grpc_client_security_context_destroy(void* ctx) {
@ -123,9 +123,8 @@ grpc_server_security_context::~grpc_server_security_context() {
}
grpc_server_security_context* grpc_server_security_context_create(
gpr_arena* arena) {
return new (gpr_arena_alloc(arena, sizeof(grpc_server_security_context)))
grpc_server_security_context();
grpc_core::Arena* arena) {
return arena->New<grpc_server_security_context>();
}
void grpc_server_security_context_destroy(void* ctx) {

@ -21,6 +21,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/pollset.h"
@ -28,8 +29,6 @@
extern grpc_core::DebugOnlyTraceFlag grpc_trace_auth_context_refcount;
struct gpr_arena;
/* --- grpc_auth_context ---
High level authentication context object. Can optionally be chained. */
@ -121,7 +120,7 @@ struct grpc_client_security_context {
};
grpc_client_security_context* grpc_client_security_context_create(
gpr_arena* arena, grpc_call_credentials* creds);
grpc_core::Arena* arena, grpc_call_credentials* creds);
void grpc_client_security_context_destroy(void* ctx);
/* --- grpc_server_security_context ---
@ -137,7 +136,7 @@ struct grpc_server_security_context {
};
grpc_server_security_context* grpc_server_security_context_create(
gpr_arena* arena);
grpc_core::Arena* arena);
void grpc_server_security_context_destroy(void* ctx);
/* --- Channel args for auth context --- */

@ -92,7 +92,7 @@ struct call_data {
}
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
grpc_core::RefCountedPtr<grpc_call_credentials> creds;
grpc_slice host = grpc_empty_slice();
grpc_slice method = grpc_empty_slice();
@ -270,11 +270,9 @@ static void send_security_metadata(grpc_call_element* elem,
GRPC_ERROR_UNREF(error);
} else {
// Async return; register cancellation closure with call combiner.
grpc_call_combiner_set_notify_on_cancel(
calld->call_combiner,
GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure,
cancel_get_request_metadata, elem,
grpc_schedule_on_exec_ctx));
calld->call_combiner->SetNotifyOnCancel(GRPC_CLOSURE_INIT(
&calld->get_request_metadata_cancel_closure,
cancel_get_request_metadata, elem, grpc_schedule_on_exec_ctx));
}
}
@ -345,11 +343,9 @@ static void auth_start_transport_stream_op_batch(
GRPC_ERROR_UNREF(error);
} else {
// Async return; register cancellation closure with call combiner.
grpc_call_combiner_set_notify_on_cancel(
calld->call_combiner,
GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure,
cancel_check_call_host, elem,
grpc_schedule_on_exec_ctx));
calld->call_combiner->SetNotifyOnCancel(GRPC_CLOSURE_INIT(
&calld->check_call_host_cancel_closure, cancel_check_call_host,
elem, grpc_schedule_on_exec_ctx));
}
gpr_free(call_host);
return; /* early exit */

@ -74,7 +74,7 @@ struct call_data {
~call_data() { GRPC_ERROR_UNREF(recv_initial_metadata_error); }
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
grpc_call_stack* owning_call;
grpc_transport_stream_op_batch* recv_initial_metadata_batch;
grpc_closure* original_recv_initial_metadata_ready;
@ -219,8 +219,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
// to drop the call combiner early if we get cancelled.
GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem,
grpc_schedule_on_exec_ctx);
grpc_call_combiner_set_notify_on_cancel(calld->call_combiner,
&calld->cancel_closure);
calld->call_combiner->SetNotifyOnCancel(&calld->cancel_closure);
GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata");
calld->md = metadata_batch_to_md_array(
batch->payload->recv_initial_metadata.recv_initial_metadata);

@ -35,9 +35,9 @@
#include "src/core/lib/compression/algorithm_metadata.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
@ -124,14 +124,13 @@ struct child_call {
#define RECV_INITIAL_METADATA_FIRST ((gpr_atm)1)
struct grpc_call {
grpc_call(gpr_arena* arena, const grpc_call_create_args& args)
grpc_call(grpc_core::Arena* arena, const grpc_call_create_args& args)
: arena(arena),
cq(args.cq),
channel(args.channel),
is_client(args.server_transport_data == nullptr),
stream_op_payload(context) {
gpr_ref_init(&ext_ref, 1);
grpc_call_combiner_init(&call_combiner);
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE;
@ -141,12 +140,11 @@ struct grpc_call {
~grpc_call() {
gpr_free(static_cast<void*>(const_cast<char*>(final_info.error_string)));
grpc_call_combiner_destroy(&call_combiner);
}
gpr_refcount ext_ref;
gpr_arena* arena;
grpc_call_combiner call_combiner;
grpc_core::Arena* arena;
grpc_core::CallCombiner call_combiner;
grpc_completion_queue* cq;
grpc_polling_entity pollent;
grpc_channel* channel;
@ -292,13 +290,13 @@ static void add_init_error(grpc_error** composite, grpc_error* new_err) {
}
void* grpc_call_arena_alloc(grpc_call* call, size_t size) {
return gpr_arena_alloc(call->arena, size);
return call->arena->Alloc(size);
}
static parent_call* get_or_create_parent_call(grpc_call* call) {
parent_call* p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
if (p == nullptr) {
p = new (gpr_arena_alloc(call->arena, sizeof(*p))) parent_call();
p = call->arena->New<parent_call>();
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm) nullptr,
(gpr_atm)p)) {
p->~parent_call();
@ -323,16 +321,23 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
grpc_core::Arena* arena;
grpc_call* call;
grpc_error* error = GRPC_ERROR_NONE;
grpc_channel_stack* channel_stack =
grpc_channel_get_channel_stack(args->channel);
grpc_call* call;
size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size);
gpr_arena* arena = gpr_arena_create(initial_size);
call = new (gpr_arena_alloc(
arena, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
channel_stack->call_stack_size)) grpc_call(arena, *args);
size_t call_and_stack_size =
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
channel_stack->call_stack_size;
size_t call_alloc_size =
call_and_stack_size + (args->parent ? sizeof(child_call) : 0);
std::pair<grpc_core::Arena*, void*> arena_with_call =
grpc_core::Arena::CreateWithAlloc(initial_size, call_alloc_size);
arena = arena_with_call.first;
call = new (arena_with_call.second) grpc_call(arena, *args);
*out_call = call;
grpc_slice path = grpc_empty_slice();
if (call->is_client) {
@ -364,8 +369,8 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
bool immediately_cancel = false;
if (args->parent != nullptr) {
call->child = new (gpr_arena_alloc(arena, sizeof(child_call)))
child_call(args->parent);
call->child = new (reinterpret_cast<char*>(arena_with_call.second) +
call_and_stack_size) child_call(args->parent);
GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
@ -502,9 +507,9 @@ void grpc_call_internal_unref(grpc_call* c REF_ARG) {
static void release_call(void* call, grpc_error* error) {
grpc_call* c = static_cast<grpc_call*>(call);
grpc_channel* channel = c->channel;
gpr_arena* arena = c->arena;
grpc_core::Arena* arena = c->arena;
c->~grpc_call();
grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(arena));
grpc_channel_update_call_size_estimate(channel, arena->Destroy());
GRPC_CHANNEL_INTERNAL_UNREF(channel, "call");
}
@ -589,7 +594,7 @@ void grpc_call_unref(grpc_call* c) {
// holding to the call stack. Also flush the closures on exec_ctx so that
// filters that schedule cancel notification closures on exec_ctx do not
// need to take a ref of the call stack to guarantee closure liveness.
grpc_call_combiner_set_notify_on_cancel(&c->call_combiner, nullptr);
c->call_combiner.SetNotifyOnCancel(nullptr);
grpc_core::ExecCtx::Get()->Flush();
}
GRPC_CALL_INTERNAL_UNREF(c, "destroy");
@ -685,7 +690,7 @@ static void cancel_with_error(grpc_call* c, grpc_error* error) {
// any in-flight asynchronous actions that may be holding the call
// combiner. This ensures that the cancel_stream batch can be sent
// down the filter stack in a timely manner.
grpc_call_combiner_cancel(&c->call_combiner, GRPC_ERROR_REF(error));
c->call_combiner.Cancel(GRPC_ERROR_REF(error));
cancel_state* state = static_cast<cancel_state*>(gpr_malloc(sizeof(*state)));
state->call = c;
GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state,
@ -1069,7 +1074,7 @@ static void recv_trailing_filter(void* args, grpc_metadata_batch* b,
publish_app_metadata(call, b, true);
}
gpr_arena* grpc_call_get_arena(grpc_call* call) { return call->arena; }
grpc_core::Arena* grpc_call_get_arena(grpc_call* call) { return call->arena; }
grpc_call_stack* grpc_call_get_call_stack(grpc_call* call) {
return CALL_STACK_FROM_CALL(call);
@ -1130,8 +1135,7 @@ static batch_control* reuse_or_allocate_batch_control(grpc_call* call,
bctl->~batch_control();
bctl->op = {};
} else {
bctl = new (gpr_arena_alloc(call->arena, sizeof(batch_control)))
batch_control();
bctl = call->arena->New<batch_control>();
*pslot = bctl;
}
bctl->call = call;
@ -1559,7 +1563,10 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
goto done_with_error;
}
/* process compression level */
memset(&call->compression_md, 0, sizeof(call->compression_md));
grpc_metadata& compression_md = call->compression_md;
compression_md.key = grpc_empty_slice();
compression_md.value = grpc_empty_slice();
compression_md.flags = 0;
size_t additional_metadata_count = 0;
grpc_compression_level effective_compression_level =
GRPC_COMPRESS_LEVEL_NONE;
@ -1582,8 +1589,8 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
call, effective_compression_level);
/* the following will be picked up by the compress filter and used
* as the call's compression algorithm. */
call->compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
call->compression_md.value = grpc_compression_algorithm_slice(calgo);
compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
compression_md.value = grpc_compression_algorithm_slice(calgo);
additional_metadata_count++;
}
@ -1597,8 +1604,7 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
if (!prepare_application_metadata(
call, static_cast<int>(op->data.send_initial_metadata.count),
op->data.send_initial_metadata.metadata, 0, call->is_client,
&call->compression_md,
static_cast<int>(additional_metadata_count))) {
&compression_md, static_cast<int>(additional_metadata_count))) {
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}

@ -23,6 +23,7 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/context.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/surface/api_trace.h"
#include <grpc/grpc.h>
@ -72,7 +73,7 @@ void grpc_call_internal_unref(grpc_call* call);
#define GRPC_CALL_INTERNAL_UNREF(call, reason) grpc_call_internal_unref(call)
#endif
gpr_arena* grpc_call_get_arena(grpc_call* call);
grpc_core::Arena* grpc_call_get_arena(grpc_call* call);
grpc_call_stack* grpc_call_get_call_stack(grpc_call* call);

@ -29,7 +29,6 @@
void grpc_call_details_init(grpc_call_details* cd) {
GRPC_API_TRACE("grpc_call_details_init(cd=%p)", 1, (cd));
memset(cd, 0, sizeof(*cd));
cd->method = grpc_empty_slice();
cd->host = grpc_empty_slice();
}

@ -1002,15 +1002,15 @@ static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
continue;
}
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_SHUTDOWN;
ret.success = 0;
break;
}
if (!is_finished_arg.first_loop &&
grpc_core::ExecCtx::Get()->Now() >= deadline_millis) {
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
ret.success = 0;
dump_pending_tags(cq);
break;
}
@ -1027,8 +1027,8 @@ static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
gpr_log(GPR_ERROR, "Completion queue next failed: %s", msg);
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
ret.success = 0;
dump_pending_tags(cq);
break;
}
@ -1234,8 +1234,8 @@ static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
}
if (cqd->shutdown.Load(grpc_core::MemoryOrder::RELAXED)) {
gpr_mu_unlock(cq->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_SHUTDOWN;
ret.success = 0;
break;
}
if (!add_plucker(cq, tag, &worker)) {
@ -1244,9 +1244,9 @@ static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
"is %d",
GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
gpr_mu_unlock(cq->mu);
memset(&ret, 0, sizeof(ret));
/* TODO(ctiller): should we use a different result here */
ret.type = GRPC_QUEUE_TIMEOUT;
ret.success = 0;
dump_pending_tags(cq);
break;
}
@ -1254,8 +1254,8 @@ static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
grpc_core::ExecCtx::Get()->Now() >= deadline_millis) {
del_plucker(cq, tag, &worker);
gpr_mu_unlock(cq->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
ret.success = 0;
dump_pending_tags(cq);
break;
}
@ -1269,8 +1269,8 @@ static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
gpr_log(GPR_ERROR, "Completion queue pluck failed: %s", msg);
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
ret.success = 0;
dump_pending_tags(cq);
break;
}

@ -39,7 +39,7 @@ namespace grpc_core {
namespace {
struct CallData {
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
grpc_core::Atomic<bool> filled_metadata;

@ -190,7 +190,7 @@ struct call_data {
grpc_closure publish;
call_data* pending_next = nullptr;
grpc_call_combiner* call_combiner;
grpc_core::CallCombiner* call_combiner;
};
struct request_matcher {
@ -347,8 +347,8 @@ static void channel_broadcaster_shutdown(channel_broadcaster* cb,
*/
static void request_matcher_init(request_matcher* rm, grpc_server* server) {
memset(rm, 0, sizeof(*rm));
rm->server = server;
rm->pending_head = rm->pending_tail = nullptr;
rm->requests_per_cq = static_cast<gpr_locked_mpscq*>(
gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count));
for (size_t i = 0; i < server->cq_count; i++) {
@ -601,8 +601,9 @@ static void finish_start_new_rpc(
break;
case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER: {
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_MESSAGE;
op.flags = 0;
op.reserved = nullptr;
op.data.recv_message.recv_message = &calld->payload;
GRPC_CLOSURE_INIT(&calld->publish, publish_new_rpc, elem,
grpc_schedule_on_exec_ctx);
@ -1098,20 +1099,6 @@ void* grpc_server_register_method(
return m;
}
static void start_listeners(void* s, grpc_error* error) {
grpc_server* server = static_cast<grpc_server*>(s);
for (listener* l = server->listeners; l; l = l->next) {
l->start(server, l->arg, server->pollsets, server->pollset_count);
}
gpr_mu_lock(&server->mu_global);
server->starting = false;
gpr_cv_signal(&server->starting_cv);
gpr_mu_unlock(&server->mu_global);
server_unref(server);
}
void grpc_server_start(grpc_server* server) {
size_t i;
grpc_core::ExecCtx exec_ctx;
@ -1133,13 +1120,18 @@ void grpc_server_start(grpc_server* server) {
request_matcher_init(&rm->matcher, server);
}
server_ref(server);
gpr_mu_lock(&server->mu_global);
server->starting = true;
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_CREATE(
start_listeners, server,
grpc_core::Executor::Scheduler(grpc_core::ExecutorJobType::SHORT)),
GRPC_ERROR_NONE);
gpr_mu_unlock(&server->mu_global);
for (listener* l = server->listeners; l; l = l->next) {
l->start(server, l->arg, server->pollsets, server->pollset_count);
}
gpr_mu_lock(&server->mu_global);
server->starting = false;
gpr_cv_signal(&server->starting_cv);
gpr_mu_unlock(&server->mu_global);
}
void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets,

@ -41,6 +41,10 @@
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/transport/static_metadata.h"
using grpc_core::AllocatedMetadata;
using grpc_core::InternedMetadata;
using grpc_core::UserData;
/* There are two kinds of mdelem and mdstr instances.
* Static instances are declared in static_metadata.{h,c} and
* are initialized by grpc_mdctx_global_init().
@ -54,13 +58,40 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_metadata(false, "metadata");
#ifndef NDEBUG
#define DEBUG_ARGS , const char *file, int line
#define FWD_DEBUG_ARGS , file, line
#define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s), __FILE__, __LINE__)
#else
#define FWD_DEBUG_ARGS file, line
void grpc_mdelem_trace_ref(void* md, const grpc_slice& key,
const grpc_slice& value, intptr_t refcnt,
const char* file, int line) {
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(key);
char* value_str = grpc_slice_to_c_string(value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md, refcnt,
refcnt + 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
}
void grpc_mdelem_trace_unref(void* md, const grpc_slice& key,
const grpc_slice& value, intptr_t refcnt,
const char* file, int line) {
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(key);
char* value_str = grpc_slice_to_c_string(value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md,
refcnt, refcnt - 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
}
#else // ifndef NDEBUG
#define DEBUG_ARGS
#define FWD_DEBUG_ARGS
#define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s))
#endif
#endif // ifndef NDEBUG
#define INITIAL_SHARD_CAPACITY 8
#define LOG2_SHARD_COUNT 4
@ -69,43 +100,87 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_metadata(false, "metadata");
#define TABLE_IDX(hash, capacity) (((hash) >> (LOG2_SHARD_COUNT)) % (capacity))
#define SHARD_IDX(hash) ((hash) & ((1 << (LOG2_SHARD_COUNT)) - 1))
typedef void (*destroy_user_data_func)(void* user_data);
struct UserData {
gpr_mu mu_user_data;
gpr_atm destroy_user_data;
gpr_atm user_data;
};
/* Shadow structure for grpc_mdelem_data for interned elements */
typedef struct interned_metadata {
/* must be byte compatible with grpc_mdelem_data */
grpc_slice key;
grpc_slice value;
/* private only data */
gpr_atm refcnt;
UserData user_data;
AllocatedMetadata::AllocatedMetadata(const grpc_slice& key,
const grpc_slice& value)
: key_(grpc_slice_ref_internal(key)),
value_(grpc_slice_ref_internal(value)),
refcnt_(1) {
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(key_);
char* value_str = grpc_slice_to_c_string(value_);
gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%" PRIdPTR ": '%s' = '%s'", this,
RefValue(), key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
#endif
}
struct interned_metadata* bucket_next;
} interned_metadata;
AllocatedMetadata::~AllocatedMetadata() {
grpc_slice_unref_internal(key_);
grpc_slice_unref_internal(value_);
void* user_data = user_data_.data.Load(grpc_core::MemoryOrder::RELAXED);
if (user_data) {
destroy_user_data_func destroy_user_data =
user_data_.destroy_user_data.Load(grpc_core::MemoryOrder::RELAXED);
destroy_user_data(user_data);
}
}
/* Shadow structure for grpc_mdelem_data for allocated elements */
typedef struct allocated_metadata {
/* must be byte compatible with grpc_mdelem_data */
grpc_slice key;
grpc_slice value;
InternedMetadata::InternedMetadata(const grpc_slice& key,
const grpc_slice& value, uint32_t hash,
InternedMetadata* next)
: key_(grpc_slice_ref_internal(key)),
value_(grpc_slice_ref_internal(value)),
refcnt_(1),
hash_(hash),
link_(next) {
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(key_);
char* value_str = grpc_slice_to_c_string(value_);
gpr_log(GPR_DEBUG, "ELM NEW:%p:%" PRIdPTR ": '%s' = '%s'", this,
RefValue(), key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
#endif
}
/* private only data */
gpr_atm refcnt;
InternedMetadata::~InternedMetadata() {
grpc_slice_unref_internal(key_);
grpc_slice_unref_internal(value_);
void* user_data = user_data_.data.Load(grpc_core::MemoryOrder::RELAXED);
if (user_data) {
destroy_user_data_func destroy_user_data =
user_data_.destroy_user_data.Load(grpc_core::MemoryOrder::RELAXED);
destroy_user_data(user_data);
}
}
UserData user_data;
} allocated_metadata;
size_t InternedMetadata::CleanupLinkedMetadata(
InternedMetadata::BucketLink* head) {
size_t num_freed = 0;
InternedMetadata::BucketLink* prev_next = head;
InternedMetadata *md, *next;
for (md = head->next; md; md = next) {
next = md->link_.next;
if (md->AllRefsDropped()) {
prev_next->next = next;
grpc_core::Delete(md);
num_freed++;
} else {
prev_next = &md->link_;
}
}
return num_freed;
}
typedef struct mdtab_shard {
gpr_mu mu;
interned_metadata** elems;
InternedMetadata::BucketLink* elems;
size_t count;
size_t capacity;
/** Estimate of the number of unreferenced mdelems in the hash table.
@ -126,7 +201,7 @@ void grpc_mdctx_global_init(void) {
shard->count = 0;
gpr_atm_no_barrier_store(&shard->free_estimate, 0);
shard->capacity = INITIAL_SHARD_CAPACITY;
shard->elems = static_cast<interned_metadata**>(
shard->elems = static_cast<InternedMetadata::BucketLink*>(
gpr_zalloc(sizeof(*shard->elems) * shard->capacity));
}
}
@ -154,57 +229,32 @@ static int is_mdelem_static(grpc_mdelem e) {
&grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
}
static void ref_md_locked(mdtab_shard* shard,
interned_metadata* md DEBUG_ARGS) {
void InternedMetadata::RefWithShardLocked(mdtab_shard* shard) {
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(md->key);
char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", (void*)md,
gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
char* key_str = grpc_slice_to_c_string(key_);
char* value_str = grpc_slice_to_c_string(value_);
intptr_t value = RefValue();
gpr_log(__FILE__, __LINE__, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", this, value,
value + 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
#endif
if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
if (FirstRef()) {
gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -1);
}
}
static void gc_mdtab(mdtab_shard* shard) {
GPR_TIMER_SCOPE("gc_mdtab", 0);
size_t i;
interned_metadata** prev_next;
interned_metadata *md, *next;
gpr_atm num_freed = 0;
for (i = 0; i < shard->capacity; i++) {
prev_next = &shard->elems[i];
for (md = shard->elems[i]; md; md = next) {
void* user_data =
(void*)gpr_atm_no_barrier_load(&md->user_data.user_data);
next = md->bucket_next;
if (gpr_atm_acq_load(&md->refcnt) == 0) {
grpc_slice_unref_internal(md->key);
grpc_slice_unref_internal(md->value);
if (md->user_data.user_data) {
((destroy_user_data_func)gpr_atm_no_barrier_load(
&md->user_data.destroy_user_data))(user_data);
}
gpr_mu_destroy(&md->user_data.mu_user_data);
gpr_free(md);
*prev_next = next;
num_freed++;
shard->count--;
} else {
prev_next = &md->bucket_next;
}
}
size_t num_freed = 0;
for (size_t i = 0; i < shard->capacity; ++i) {
num_freed += InternedMetadata::CleanupLinkedMetadata(&shard->elems[i]);
}
gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -num_freed);
gpr_atm_no_barrier_fetch_add(&shard->free_estimate,
-static_cast<intptr_t>(num_freed));
}
static void grow_mdtab(mdtab_shard* shard) {
@ -212,22 +262,21 @@ static void grow_mdtab(mdtab_shard* shard) {
size_t capacity = shard->capacity * 2;
size_t i;
interned_metadata** mdtab;
interned_metadata *md, *next;
InternedMetadata::BucketLink* mdtab;
InternedMetadata *md, *next;
uint32_t hash;
mdtab = static_cast<interned_metadata**>(
gpr_zalloc(sizeof(interned_metadata*) * capacity));
mdtab = static_cast<InternedMetadata::BucketLink*>(
gpr_zalloc(sizeof(InternedMetadata::BucketLink) * capacity));
for (i = 0; i < shard->capacity; i++) {
for (md = shard->elems[i]; md; md = next) {
for (md = shard->elems[i].next; md; md = next) {
size_t idx;
hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash(md->key),
grpc_slice_hash(md->value));
next = md->bucket_next;
hash = md->hash();
next = md->bucket_next();
idx = TABLE_IDX(hash, capacity);
md->bucket_next = mdtab[idx];
mdtab[idx] = md;
md->set_bucket_next(mdtab[idx].next);
mdtab[idx].next = md;
}
}
gpr_free(shard->elems);
@ -247,34 +296,22 @@ static void rehash_mdtab(mdtab_shard* shard) {
grpc_mdelem grpc_mdelem_create(
const grpc_slice& key, const grpc_slice& value,
grpc_mdelem_data* compatible_external_backing_store) {
// External storage if either slice is not interned and the caller already
// created a backing store. If no backing store, we allocate one.
if (!grpc_slice_is_interned(key) || !grpc_slice_is_interned(value)) {
if (compatible_external_backing_store != nullptr) {
// Caller provided backing store.
return GRPC_MAKE_MDELEM(compatible_external_backing_store,
GRPC_MDELEM_STORAGE_EXTERNAL);
} else {
// We allocate backing store.
return GRPC_MAKE_MDELEM(grpc_core::New<AllocatedMetadata>(key, value),
GRPC_MDELEM_STORAGE_ALLOCATED);
}
allocated_metadata* allocated =
static_cast<allocated_metadata*>(gpr_malloc(sizeof(*allocated)));
allocated->key = grpc_slice_ref_internal(key);
allocated->value = grpc_slice_ref_internal(value);
gpr_atm_rel_store(&allocated->refcnt, 1);
allocated->user_data.user_data = 0;
allocated->user_data.destroy_user_data = 0;
gpr_mu_init(&allocated->user_data.mu_user_data);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(allocated->key);
char* value_str = grpc_slice_to_c_string(allocated->value);
gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%" PRIdPTR ": '%s' = '%s'",
(void*)allocated, gpr_atm_no_barrier_load(&allocated->refcnt),
key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
#endif
return GRPC_MAKE_MDELEM(allocated, GRPC_MDELEM_STORAGE_ALLOCATED);
}
// Not all static slice input yields a statically stored metadata element.
// It may be worth documenting why.
if (GRPC_IS_STATIC_METADATA_STRING(key) &&
GRPC_IS_STATIC_METADATA_STRING(value)) {
grpc_mdelem static_elem = grpc_static_mdelem_for_static_strings(
@ -286,7 +323,7 @@ grpc_mdelem grpc_mdelem_create(
uint32_t hash =
GRPC_MDSTR_KV_HASH(grpc_slice_hash(key), grpc_slice_hash(value));
interned_metadata* md;
InternedMetadata* md;
mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
size_t idx;
@ -296,34 +333,18 @@ grpc_mdelem grpc_mdelem_create(
idx = TABLE_IDX(hash, shard->capacity);
/* search for an existing pair */
for (md = shard->elems[idx]; md; md = md->bucket_next) {
if (grpc_slice_eq(key, md->key) && grpc_slice_eq(value, md->value)) {
REF_MD_LOCKED(shard, md);
for (md = shard->elems[idx].next; md; md = md->bucket_next()) {
if (grpc_slice_eq(key, md->key()) && grpc_slice_eq(value, md->value())) {
md->RefWithShardLocked(shard);
gpr_mu_unlock(&shard->mu);
return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
}
}
/* not found: create a new pair */
md = static_cast<interned_metadata*>(gpr_malloc(sizeof(interned_metadata)));
gpr_atm_rel_store(&md->refcnt, 1);
md->key = grpc_slice_ref_internal(key);
md->value = grpc_slice_ref_internal(value);
md->user_data.user_data = 0;
md->user_data.destroy_user_data = 0;
md->bucket_next = shard->elems[idx];
shard->elems[idx] = md;
gpr_mu_init(&md->user_data.mu_user_data);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(md->key);
char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(GPR_DEBUG, "ELM NEW:%p:%" PRIdPTR ": '%s' = '%s'", (void*)md,
gpr_atm_no_barrier_load(&md->refcnt), key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
#endif
md = grpc_core::New<InternedMetadata>(key, value, hash,
shard->elems[idx].next);
shard->elems[idx].next = md;
shard->count++;
if (shard->count > shard->capacity * 2) {
@ -354,130 +375,10 @@ grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata) {
changed ? nullptr : reinterpret_cast<grpc_mdelem_data*>(metadata));
}
grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) {
switch (GRPC_MDELEM_STORAGE(gmd)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_STATIC:
break;
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata* md =
reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(md->key);
char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
(void*)md, gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
#endif
/* we can assume the ref count is >= 1 as the application is calling
this function - meaning that no adjustment to mdtab_free is necessary,
simplifying the logic here to be just an atomic increment */
/* use C assert to have this removed in opt builds */
GPR_ASSERT(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
break;
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata* md =
reinterpret_cast<allocated_metadata*> GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(md->key);
char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
(void*)md, gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
#endif
/* we can assume the ref count is >= 1 as the application is calling
this function - meaning that no adjustment to mdtab_free is necessary,
simplifying the logic here to be just an atomic increment */
/* use C assert to have this removed in opt builds */
gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
break;
}
}
return gmd;
}
void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) {
switch (GRPC_MDELEM_STORAGE(gmd)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_STATIC:
break;
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata* md =
reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(md->key);
char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
(void*)md, gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
#endif
uint32_t hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash(md->key),
grpc_slice_hash(md->value));
const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1);
GPR_ASSERT(prev_refcount >= 1);
if (1 == prev_refcount) {
/* once the refcount hits zero, some other thread can come along and
free md at any time: it's unsafe from this point on to access it */
mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1);
}
break;
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata* md =
reinterpret_cast<allocated_metadata*> GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(md->key);
char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
(void*)md, gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
#endif
const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1);
GPR_ASSERT(prev_refcount >= 1);
if (1 == prev_refcount) {
grpc_slice_unref_internal(md->key);
grpc_slice_unref_internal(md->value);
if (md->user_data.user_data) {
destroy_user_data_func destroy_user_data =
(destroy_user_data_func)gpr_atm_no_barrier_load(
&md->user_data.destroy_user_data);
destroy_user_data((void*)md->user_data.user_data);
}
gpr_mu_destroy(&md->user_data.mu_user_data);
gpr_free(md);
}
break;
}
}
}
static void* get_user_data(UserData* user_data, void (*destroy_func)(void*)) {
if (gpr_atm_acq_load(&user_data->destroy_user_data) ==
(gpr_atm)destroy_func) {
return (void*)gpr_atm_no_barrier_load(&user_data->user_data);
if (user_data->destroy_user_data.Load(grpc_core::MemoryOrder::ACQUIRE) ==
destroy_func) {
return user_data->data.Load(grpc_core::MemoryOrder::RELAXED);
} else {
return nullptr;
}
@ -491,57 +392,52 @@ void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void*)) {
return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
grpc_static_mdelem_table];
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata* am =
reinterpret_cast<allocated_metadata*>(GRPC_MDELEM_DATA(md));
return get_user_data(&am->user_data, destroy_func);
auto* am = reinterpret_cast<AllocatedMetadata*>(GRPC_MDELEM_DATA(md));
return get_user_data(am->user_data(), destroy_func);
}
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata* im =
reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(md);
return get_user_data(&im->user_data, destroy_func);
auto* im = reinterpret_cast<InternedMetadata*> GRPC_MDELEM_DATA(md);
return get_user_data(im->user_data(), destroy_func);
}
}
GPR_UNREACHABLE_CODE(return nullptr);
}
// Attaches user data to a metadata element's UserData slot.
// User data may only be set once: if a destroy function is already recorded,
// the incoming data is destroyed (if a destructor was supplied) and the
// previously stored pointer is returned instead. Returns the pointer that is
// now associated with the element.
//
// NOTE(review): this span in the diff dump interleaved the removed gpr_atm/
// gpr_mu implementation with the added Atomic/Mutex one; this is the added
// (post-change) implementation only.
static void* set_user_data(UserData* ud, void (*destroy_func)(void*),
                           void* data) {
  // data and destroy_func must be set (or unset) together.
  GPR_ASSERT((data == nullptr) == (destroy_func == nullptr));
  grpc_core::ReleasableMutexLock lock(&ud->mu_user_data);
  if (ud->destroy_user_data.Load(grpc_core::MemoryOrder::RELAXED)) {
    /* user data can only be set once */
    lock.Unlock();
    if (destroy_func != nullptr) {
      destroy_func(data);
    }
    return ud->data.Load(grpc_core::MemoryOrder::RELAXED);
  }
  ud->data.Store(data, grpc_core::MemoryOrder::RELAXED);
  // Release ordering pairs with the acquire load in get_user_data: once a
  // reader observes destroy_user_data, the data store is visible too.
  ud->destroy_user_data.Store(destroy_func, grpc_core::MemoryOrder::RELEASE);
  return data;
}
// Public entry point for attaching user data to an mdelem, dispatching on the
// element's storage class. External elements cannot carry user data (the
// incoming data is destroyed and nullptr returned); static elements return
// the precomputed static user data table entry; interned and allocated
// elements store into their UserData slot via set_user_data().
//
// NOTE(review): reconstructed from a diff dump that interleaved the old and
// new parameter lists and casts; this is the added (post-change) form.
void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
                                void* data) {
  switch (GRPC_MDELEM_STORAGE(md)) {
    case GRPC_MDELEM_STORAGE_EXTERNAL:
      destroy_func(data);
      return nullptr;
    case GRPC_MDELEM_STORAGE_STATIC:
      destroy_func(data);
      return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
                                                 grpc_static_mdelem_table];
    case GRPC_MDELEM_STORAGE_ALLOCATED: {
      auto* am = reinterpret_cast<AllocatedMetadata*>(GRPC_MDELEM_DATA(md));
      return set_user_data(am->user_data(), destroy_func, data);
    }
    case GRPC_MDELEM_STORAGE_INTERNED: {
      auto* im = reinterpret_cast<InternedMetadata*> GRPC_MDELEM_DATA(md);
      GPR_ASSERT(!is_mdelem_static(md));
      return set_user_data(im->user_data(), destroy_func, data);
    }
  }
  GPR_UNREACHABLE_CODE(return nullptr);
}
@ -554,3 +450,33 @@ bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b) {
return grpc_slice_eq(GRPC_MDKEY(a), GRPC_MDKEY(b)) &&
grpc_slice_eq(GRPC_MDVALUE(a), GRPC_MDVALUE(b));
}
// Records that an interned element with this hash dropped its last ref:
// bumps the owning shard's free_estimate counter.
// NOTE(review): free_estimate's consumer is not visible here -- presumably
// the table's garbage-collection pass; confirm in metadata.cc.
static void note_disposed_interned_metadata(uint32_t hash) {
mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1);
}
// Slow-path unref, dispatched to by the inline grpc_mdelem_unref wrapper.
// External/static elements are not refcounted and are ignored. Interned
// elements note the disposal with their shard so the table can reclaim them;
// allocated elements are deleted immediately on the last unref.
// DEBUG_ARGS/FWD_DEBUG_ARGS thread file/line through on debug builds only.
void grpc_mdelem_do_unref(grpc_mdelem gmd DEBUG_ARGS) {
switch (GRPC_MDELEM_STORAGE(gmd)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_STATIC:
return;
case GRPC_MDELEM_STORAGE_INTERNED: {
auto* md = reinterpret_cast<InternedMetadata*> GRPC_MDELEM_DATA(gmd);
// Capture the hash before Unref: md may be freed once the count hits 0.
uint32_t hash = md->hash();
if (md->Unref(FWD_DEBUG_ARGS)) {
/* once the refcount hits zero, some other thread can come along and
free md at any time: it's unsafe from this point on to access it */
note_disposed_interned_metadata(hash);
}
break;
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
auto* md = reinterpret_cast<AllocatedMetadata*> GRPC_MDELEM_DATA(gmd);
if (md->Unref(FWD_DEBUG_ARGS)) {
grpc_core::Delete(md);
}
break;
}
}
}

@ -21,11 +21,15 @@
#include <grpc/support/port_platform.h>
#include "include/grpc/impl/codegen/log.h"
#include <grpc/grpc.h>
#include <grpc/slice.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/atomic.h"
#include "src/core/lib/gprpp/sync.h"
extern grpc_core::DebugOnlyTraceFlag grpc_trace_metadata;
@ -63,7 +67,7 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_metadata;
typedef struct grpc_mdelem grpc_mdelem;
/* if changing this, make identical changes in:
- interned_metadata, allocated_metadata in metadata.c
- grpc_core::{InternedMetadata, AllocatedMetadata}
- grpc_metadata in grpc_types.h */
typedef struct grpc_mdelem_data {
const grpc_slice key;
@ -141,19 +145,202 @@ inline bool grpc_mdelem_static_value_eq(grpc_mdelem a, grpc_mdelem b_static) {
is used as a type tag and is checked during user_data fetch. */
void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*if_destroy_func)(void*));
void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
void* user_data);
void* data);
// Defined in metadata.cc.
struct mdtab_shard;
#ifndef NDEBUG
void grpc_mdelem_trace_ref(void* md, const grpc_slice& key,
const grpc_slice& value, intptr_t refcnt,
const char* file, int line);
void grpc_mdelem_trace_unref(void* md, const grpc_slice& key,
const grpc_slice& value, intptr_t refcnt,
const char* file, int line);
#endif
namespace grpc_core {
// Signature of the destructor registered alongside an mdelem's user data.
typedef void (*destroy_user_data_func)(void* data);
// Lazily-set, set-once user data attached to a metadata element.
// destroy_user_data doubles as a type tag: it is compared against the
// caller-supplied destroy function during fetch (see the user_data API
// comment above). mu_user_data serializes the one-time set.
struct UserData {
Mutex mu_user_data;
grpc_core::Atomic<destroy_user_data_func> destroy_user_data;
grpc_core::Atomic<void*> data;
};
// An interned (table-shared) metadata element, refcounted and chained into a
// hash bucket of the interning table. Layout note: key_/value_ must stay
// first so the object can alias grpc_mdelem_data (see comment below).
class InternedMetadata {
public:
// Intrusive singly-linked hash-bucket pointer for the interning table.
struct BucketLink {
explicit BucketLink(InternedMetadata* md) : next(md) {}
InternedMetadata* next = nullptr;
};
InternedMetadata(const grpc_slice& key, const grpc_slice& value,
uint32_t hash, InternedMetadata* next);
~InternedMetadata();
#ifndef NDEBUG
// Debug build: log the ref via grpc_mdelem_trace_ref, then take the ref,
// asserting the count was already >= 1.
void Ref(const char* file, int line) {
grpc_mdelem_trace_ref(this, key_, value_, RefValue(), file, line);
const intptr_t prior = refcnt_.FetchAdd(1, MemoryOrder::RELAXED);
GPR_ASSERT(prior > 0);
}
// Debug build: log the unref, then delegate to the untracked Unref() below.
bool Unref(const char* file, int line) {
grpc_mdelem_trace_unref(this, key_, value_, RefValue(), file, line);
return Unref();
}
#else
// We define a naked Ref() in the else-clause to make sure we don't
// inadvertently skip the assert on debug builds.
void Ref() {
/* we can assume the ref count is >= 1 as the application is calling
this function - meaning that no adjustment to mdtab_free is necessary,
simplifying the logic here to be just an atomic increment */
refcnt_.FetchAdd(1, MemoryOrder::RELAXED);
}
#endif // ifndef NDEBUG
// Drops one reference; returns true iff this was the last one
// (prior count exactly 1).
bool Unref() {
const intptr_t prior = refcnt_.FetchSub(1, MemoryOrder::ACQ_REL);
GPR_DEBUG_ASSERT(prior > 0);
return prior == 1;
}
void RefWithShardLocked(mdtab_shard* shard);
const grpc_slice& key() const { return key_; }
const grpc_slice& value() const { return value_; }
UserData* user_data() { return &user_data_; }
uint32_t hash() { return hash_; }
// Hash-bucket chain accessors used by the interning table.
InternedMetadata* bucket_next() { return link_.next; }
void set_bucket_next(InternedMetadata* md) { link_.next = md; }
static size_t CleanupLinkedMetadata(BucketLink* head);
private:
bool AllRefsDropped() { return refcnt_.Load(MemoryOrder::ACQUIRE) == 0; }
bool FirstRef() { return refcnt_.FetchAdd(1, MemoryOrder::RELAXED) == 0; }
intptr_t RefValue() { return refcnt_.Load(MemoryOrder::RELAXED); }
/* must be byte compatible with grpc_mdelem_data */
grpc_slice key_;
grpc_slice value_;
/* private only data */
grpc_core::Atomic<intptr_t> refcnt_;
uint32_t hash_;
UserData user_data_;
BucketLink link_;
};
/* Shadow structure for grpc_mdelem_data for allocated elements */
// A heap-allocated (non-interned) metadata element. Refcounted like
// InternedMetadata but not tracked by the interning table; the last Unref
// leads to Delete by the caller (see grpc_mdelem_do_unref).
class AllocatedMetadata {
public:
AllocatedMetadata(const grpc_slice& key, const grpc_slice& value);
~AllocatedMetadata();
const grpc_slice& key() const { return key_; }
const grpc_slice& value() const { return value_; }
UserData* user_data() { return &user_data_; }
#ifndef NDEBUG
// Debug build: log the ref via grpc_mdelem_trace_ref, then take the ref.
void Ref(const char* file, int line) {
grpc_mdelem_trace_ref(this, key_, value_, RefValue(), file, line);
Ref();
}
// Debug build: log the unref, then delegate to the untracked Unref() below.
bool Unref(const char* file, int line) {
grpc_mdelem_trace_unref(this, key_, value_, RefValue(), file, line);
return Unref();
}
#endif // ifndef NDEBUG
void Ref() {
/* we can assume the ref count is >= 1 as the application is calling
this function - meaning that no adjustment to mdtab_free is necessary,
simplifying the logic here to be just an atomic increment */
refcnt_.FetchAdd(1, MemoryOrder::RELAXED);
}
// Drops one reference; returns true iff this was the last one.
bool Unref() {
const intptr_t prior = refcnt_.FetchSub(1, MemoryOrder::ACQ_REL);
GPR_DEBUG_ASSERT(prior > 0);
return prior == 1;
}
private:
intptr_t RefValue() { return refcnt_.Load(MemoryOrder::RELAXED); }
/* must be byte compatible with grpc_mdelem_data */
grpc_slice key_;
grpc_slice value_;
/* private only data */
grpc_core::Atomic<intptr_t> refcnt_;
UserData user_data_;
};
} // namespace grpc_core
// Inline fast-path ref. On debug builds the macro threads __FILE__/__LINE__
// through for ref tracing; on release builds the plain overload is used.
// External/static elements are not refcounted and pass through untouched.
#ifndef NDEBUG
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__)
inline grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd, const char* file,
int line) {
#else // ifndef NDEBUG
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s))
inline grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd) {
#endif // ifndef NDEBUG
switch (GRPC_MDELEM_STORAGE(gmd)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_STATIC:
break;
case GRPC_MDELEM_STORAGE_INTERNED: {
auto* md =
reinterpret_cast<grpc_core::InternedMetadata*> GRPC_MDELEM_DATA(gmd);
/* use C assert to have this removed in opt builds */
#ifndef NDEBUG
md->Ref(file, line);
#else
md->Ref();
#endif
break;
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
auto* md =
reinterpret_cast<grpc_core::AllocatedMetadata*> GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
md->Ref(file, line);
#else
md->Ref();
#endif
break;
}
}
// Returns its argument so GRPC_MDELEM_REF can be used in expressions.
return gmd;
}
// Inline fast-path unref. External/static elements are not refcounted;
// interned/allocated elements fall through to the out-of-line
// grpc_mdelem_do_unref (defined in metadata.cc). Debug builds thread
// __FILE__/__LINE__ through for unref tracing.
//
// Fix: this span carried leftover removed lines from the diff -- stale
// grpc_mdelem_ref/grpc_mdelem_unref prototypes and a second
// `#define GRPC_MDELEM_REF` inside the #else branch, which redefines the
// macro already defined next to grpc_mdelem_ref above. Only the post-change
// lines are kept.
#ifndef NDEBUG
#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s), __FILE__, __LINE__)
void grpc_mdelem_do_unref(grpc_mdelem gmd, const char* file, int line);
inline void grpc_mdelem_unref(grpc_mdelem gmd, const char* file, int line) {
#else
#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s))
void grpc_mdelem_do_unref(grpc_mdelem gmd);
inline void grpc_mdelem_unref(grpc_mdelem gmd) {
#endif
  switch (GRPC_MDELEM_STORAGE(gmd)) {
    case GRPC_MDELEM_STORAGE_EXTERNAL:
    case GRPC_MDELEM_STORAGE_STATIC:
      return;
    case GRPC_MDELEM_STORAGE_INTERNED:
    case GRPC_MDELEM_STORAGE_ALLOCATED:
#ifndef NDEBUG
      grpc_mdelem_do_unref(gmd, file, line);
#else
      grpc_mdelem_do_unref(gmd);
#endif
      return;
  }
}
#define GRPC_MDNULL GRPC_MAKE_MDELEM(NULL, GRPC_MDELEM_STORAGE_EXTERNAL)
#define GRPC_MDISNULL(md) (GRPC_MDELEM_DATA(md) == NULL)

@ -124,7 +124,8 @@ void grpc_transport_destroy(grpc_transport* transport) {
int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream,
grpc_stream_refcount* refcount,
const void* server_data, gpr_arena* arena) {
const void* server_data,
grpc_core::Arena* arena) {
return transport->vtable->init_stream(transport, stream, refcount,
server_data, arena);
}
@ -174,7 +175,7 @@ grpc_endpoint* grpc_transport_get_endpoint(grpc_transport* transport) {
// it's grpc_transport_stream_op_batch_finish_with_failure
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_transport_stream_op_batch* batch, grpc_error* error,
grpc_call_combiner* call_combiner) {
grpc_core::CallCombiner* call_combiner) {
if (batch->send_message) {
batch->payload->send_message.send_message.reset();
}

@ -24,7 +24,7 @@
#include <stddef.h>
#include "src/core/lib/channel/context.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/polling_entity.h"
@ -358,7 +358,8 @@ size_t grpc_transport_stream_size(grpc_transport* transport);
supplied from the accept_stream callback function */
int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream,
grpc_stream_refcount* refcount,
const void* server_data, gpr_arena* arena);
const void* server_data,
grpc_core::Arena* arena);
void grpc_transport_set_pops(grpc_transport* transport, grpc_stream* stream,
grpc_polling_entity* pollent);
@ -379,7 +380,7 @@ void grpc_transport_destroy_stream(grpc_transport* transport,
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_transport_stream_op_batch* op, grpc_error* error,
grpc_call_combiner* call_combiner);
grpc_core::CallCombiner* call_combiner);
char* grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch* op);
char* grpc_transport_op_string(grpc_transport_op* op);

@ -34,7 +34,7 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_init_stream */
int (*init_stream)(grpc_transport* self, grpc_stream* stream,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena);
grpc_core::Arena* arena);
/* implementation of grpc_transport_set_pollset */
void (*set_pollset)(grpc_transport* self, grpc_stream* stream,

@ -47,7 +47,6 @@ namespace Grpc.Core.Internal.Tests
GrpcEnvironment.ReleaseAsync().Wait();
Assert.AreEqual(CompletionQueueEvent.CompletionType.Shutdown, ev.type);
Assert.AreNotEqual(IntPtr.Zero, ev.success);
Assert.AreEqual(IntPtr.Zero, ev.tag);
}
}
}

@ -40,6 +40,17 @@ See [Experimentally supported platforms](experimental) for instructions.
See [Experimentally supported platforms](experimental) for instructions.
NUGET DEVELOPMENT FEED (NIGHTLY BUILDS)
--------------
In production, you should use officially released stable packages available on http://nuget.org, but if you want to test the newest upstream bug fixes and features early, you can use the development nuget feed where new nuget builds are uploaded nightly.
Feed URL (NuGet v2): https://grpc.jfrog.io/grpc/api/nuget/grpc-nuget-dev
Feed URL (NuGet v3): https://grpc.jfrog.io/grpc/api/nuget/v3/grpc-nuget-dev
The same development nuget packages and packages for other languages can also be found at https://packages.grpc.io/
BUILD FROM SOURCE
-----------------

@ -101,7 +101,7 @@ Pod::Spec.new do |s|
s.preserve_paths = plugin
# Restrict the protoc version to the one supported by this plugin.
s.dependency '!ProtoCompiler', '3.6.1'
s.dependency '!ProtoCompiler', '3.7.0'
# For the Protobuf dependency not to complain:
s.ios.deployment_target = '7.0'
s.osx.deployment_target = '10.9'

@ -36,7 +36,7 @@ Pod::Spec.new do |s|
# exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
# before them.
s.name = '!ProtoCompiler'
v = '3.6.1'
v = '3.7.0'
s.version = v
s.summary = 'The Protobuf Compiler (protoc) generates Objective-C files from .proto files'
s.description = <<-DESC

@ -183,6 +183,12 @@ extern NSString *const kGRPCTrailersKey;
- (void)didCloseWithTrailingMetadata:(nullable NSDictionary *)trailingMetadata
error:(nullable NSError *)error;
/**
* Issued when flow control is enabled for the call and a message written with writeData: method of
* GRPCCall2 is passed to gRPC core with SEND_MESSAGE operation.
*/
- (void)didWriteData;
@end
/**
@ -263,6 +269,14 @@ extern NSString *const kGRPCTrailersKey;
*/
- (void)finish;
/**
* Tell gRPC to receive the next N gRPC messages from gRPC core.
*
* This method should only be used when flow control is enabled. When flow control is not enabled,
* this method is a no-op.
*/
- (void)receiveNextMessages:(NSUInteger)numberOfMessages;
/**
* Get a copy of the original call options.
*/

@ -63,6 +63,15 @@ const char *kCFStreamVarName = "grpc_cfstream";
requestsWriter:(GRXWriter *)requestsWriter
callOptions:(GRPCCallOptions *)callOptions;
- (instancetype)initWithHost:(NSString *)host
path:(NSString *)path
callSafety:(GRPCCallSafety)safety
requestsWriter:(GRXWriter *)requestsWriter
callOptions:(GRPCCallOptions *)callOptions
writeDone:(void (^)(void))writeDone;
- (void)receiveNextMessages:(NSUInteger)numberOfMessages;
@end
@implementation GRPCRequestOptions
@ -113,6 +122,8 @@ const char *kCFStreamVarName = "grpc_cfstream";
BOOL _canceled;
/** Flags whether call has been finished. */
BOOL _finished;
/** The number of pending messages receiving requests. */
NSUInteger _pendingReceiveNextMessages;
}
- (instancetype)initWithRequestOptions:(GRPCRequestOptions *)requestOptions
@ -190,11 +201,22 @@ const char *kCFStreamVarName = "grpc_cfstream";
path:_requestOptions.path
callSafety:_requestOptions.safety
requestsWriter:_pipe
callOptions:_callOptions];
callOptions:_callOptions
writeDone:^{
@synchronized(self) {
if (self->_handler) {
[self issueDidWriteData];
}
}
}];
[_call setResponseDispatchQueue:_dispatchQueue];
if (_callOptions.initialMetadata) {
[_call.requestHeaders addEntriesFromDictionary:_callOptions.initialMetadata];
}
if (_pendingReceiveNextMessages > 0) {
[_call receiveNextMessages:_pendingReceiveNextMessages];
_pendingReceiveNextMessages = 0;
}
copiedCall = _call;
}
@ -364,6 +386,33 @@ const char *kCFStreamVarName = "grpc_cfstream";
}
}
/**
 * Dispatches the didWriteData callback to the user's response handler.
 * Only issued when flow control is enabled and the handler implements the
 * optional didWriteData selector. The handler reference is re-read inside the
 * dispatched block under @synchronized so a handler released in the meantime
 * is messaged as nil (a no-op).
 */
- (void)issueDidWriteData {
@synchronized(self) {
if (_callOptions.flowControlEnabled && [_handler respondsToSelector:@selector(didWriteData)]) {
dispatch_async(_dispatchQueue, ^{
id<GRPCResponseHandler> copiedHandler = nil;
@synchronized(self) {
copiedHandler = self->_handler;
};
[copiedHandler didWriteData];
});
}
}
}
/**
 * Requests that the underlying call deliver up to numberOfMessages more
 * messages. If the wrapped GRPCCall has not been created yet, the request is
 * accumulated in _pendingReceiveNextMessages and replayed when the call
 * starts. The call is messaged outside @synchronized to avoid holding the
 * lock across the (non-reentrant) call object.
 */
- (void)receiveNextMessages:(NSUInteger)numberOfMessages {
// branching based on _callOptions.flowControlEnabled is handled inside _call
GRPCCall *copiedCall = nil;
@synchronized(self) {
copiedCall = _call;
if (copiedCall == nil) {
_pendingReceiveNextMessages += numberOfMessages;
return;
}
}
[copiedCall receiveNextMessages:numberOfMessages];
}
@end
// The following methods of a C gRPC call object aren't reentrant, and thus
@ -427,6 +476,15 @@ const char *kCFStreamVarName = "grpc_cfstream";
// The OAuth2 token fetched from a token provider.
NSString *_fetchedOauth2AccessToken;
// The callback to be called when a write message op is done.
void (^_writeDone)(void);
// Indicate a read request to core is pending.
BOOL _pendingCoreRead;
// Indicate pending read message request from user.
NSUInteger _pendingReceiveNextMessages;
}
@synthesize state = _state;
@ -486,12 +544,26 @@ const char *kCFStreamVarName = "grpc_cfstream";
/**
 * Convenience initializer: forwards to the designated initializer with a nil
 * writeDone callback (no write-completion notifications).
 *
 * Fix: the diff dump interleaved the renamed parameter line (`requestWriter`
 * vs `requestsWriter`), leaving two conflicting parameter declarations; only
 * the post-rename signature is kept.
 */
- (instancetype)initWithHost:(NSString *)host
                        path:(NSString *)path
                  callSafety:(GRPCCallSafety)safety
              requestsWriter:(GRXWriter *)requestsWriter
                 callOptions:(GRPCCallOptions *)callOptions {
  return [self initWithHost:host
                       path:path
                 callSafety:safety
             requestsWriter:requestsWriter
                callOptions:callOptions
                  writeDone:nil];
}
- (instancetype)initWithHost:(NSString *)host
path:(NSString *)path
callSafety:(GRPCCallSafety)safety
requestsWriter:(GRXWriter *)requestsWriter
callOptions:(GRPCCallOptions *)callOptions
writeDone:(void (^)(void))writeDone {
// Purposely using pointer rather than length (host.length == 0) for backwards compatibility.
NSAssert(host != nil && path != nil, @"Neither host nor path can be nil.");
NSAssert(safety <= GRPCCallSafetyCacheableRequest, @"Invalid call safety value.");
NSAssert(requestWriter.state == GRXWriterStateNotStarted,
NSAssert(requestsWriter.state == GRXWriterStateNotStarted,
@"The requests writer can't be already started.");
if (!host || !path) {
return nil;
@ -499,7 +571,7 @@ const char *kCFStreamVarName = "grpc_cfstream";
if (safety > GRPCCallSafetyCacheableRequest) {
return nil;
}
if (requestWriter.state != GRXWriterStateNotStarted) {
if (requestsWriter.state != GRXWriterStateNotStarted) {
return nil;
}
@ -512,16 +584,20 @@ const char *kCFStreamVarName = "grpc_cfstream";
// Serial queue to invoke the non-reentrant methods of the grpc_call object.
_callQueue = dispatch_queue_create("io.grpc.call", DISPATCH_QUEUE_SERIAL);
_requestWriter = requestWriter;
_requestWriter = requestsWriter;
_requestHeaders = [[GRPCRequestHeaders alloc] initWithCall:self];
_writeDone = writeDone;
if ([requestWriter isKindOfClass:[GRXImmediateSingleWriter class]]) {
if ([requestsWriter isKindOfClass:[GRXImmediateSingleWriter class]]) {
_unaryCall = YES;
_unaryOpBatch = [NSMutableArray arrayWithCapacity:kMaxClientBatch];
}
_responseQueue = dispatch_get_main_queue();
// do not start a read until initial metadata is received
_pendingReceiveNextMessages = 0;
_pendingCoreRead = YES;
}
return self;
}
@ -593,11 +669,16 @@ const char *kCFStreamVarName = "grpc_cfstream";
// If the call is currently paused, this is a noop. Restarting the call will invoke this
// method.
// TODO(jcanizales): Rename to readResponseIfNotPaused.
- (void)startNextRead {
- (void)maybeStartNextRead {
@synchronized(self) {
if (_state != GRXWriterStateStarted) {
return;
}
if (_callOptions.flowControlEnabled && (_pendingCoreRead || _pendingReceiveNextMessages == 0)) {
return;
}
_pendingCoreRead = YES;
_pendingReceiveNextMessages--;
}
dispatch_async(_callQueue, ^{
@ -620,6 +701,7 @@ const char *kCFStreamVarName = "grpc_cfstream";
// that's on the hands of any server to have. Instead we finish and ask
// the server to cancel.
@synchronized(strongSelf) {
strongSelf->_pendingCoreRead = NO;
[strongSelf
finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
code:GRPCErrorCodeResourceExhausted
@ -635,7 +717,13 @@ const char *kCFStreamVarName = "grpc_cfstream";
@synchronized(strongSelf) {
[strongSelf->_responseWriteable enqueueValue:data
completionHandler:^{
[strongSelf startNextRead];
__strong GRPCCall *strongSelf = weakSelf;
if (strongSelf) {
@synchronized(strongSelf) {
strongSelf->_pendingCoreRead = NO;
[strongSelf maybeStartNextRead];
}
}
}];
}
}
@ -686,6 +774,20 @@ const char *kCFStreamVarName = "grpc_cfstream";
});
}
/**
 * Flow-control entry point: credits numberOfMessages additional reads.
 * A zero request is a no-op. The credit is always accumulated; an actual read
 * is only kicked off (via maybeStartNextRead) once the writer has started and
 * flow control is enabled -- otherwise reads proceed on the automatic path.
 */
- (void)receiveNextMessages:(NSUInteger)numberOfMessages {
if (numberOfMessages == 0) {
return;
}
@synchronized(self) {
_pendingReceiveNextMessages += numberOfMessages;
if (_state != GRXWriterStateStarted || !_callOptions.flowControlEnabled) {
return;
}
[self maybeStartNextRead];
}
}
#pragma mark GRXWriteable implementation
// Only called from the call queue. The error handler will be called from the
@ -699,9 +801,11 @@ const char *kCFStreamVarName = "grpc_cfstream";
GRPCCall *strongSelf = weakSelf;
if (strongSelf) {
strongSelf->_requestWriter.state = GRXWriterStateStarted;
if (strongSelf->_writeDone) {
strongSelf->_writeDone();
}
}
};
GRPCOpSendMessage *op =
[[GRPCOpSendMessage alloc] initWithMessage:message handler:resumingHandler];
if (!_unaryCall) {
@ -778,8 +882,11 @@ const char *kCFStreamVarName = "grpc_cfstream";
// Response headers received.
__strong GRPCCall *strongSelf = weakSelf;
if (strongSelf) {
strongSelf.responseHeaders = headers;
[strongSelf startNextRead];
@synchronized(strongSelf) {
strongSelf.responseHeaders = headers;
strongSelf->_pendingCoreRead = NO;
[strongSelf maybeStartNextRead];
}
}
}
completionHandler:^(NSError *error, NSDictionary *trailers) {
@ -933,7 +1040,7 @@ const char *kCFStreamVarName = "grpc_cfstream";
case GRXWriterStateStarted:
if (_state == GRXWriterStatePaused) {
_state = newState;
[self startNextRead];
[self maybeStartNextRead];
}
return;
case GRXWriterStateNotStarted:

@ -90,6 +90,14 @@ typedef NS_ENUM(NSUInteger, GRPCTransportType) {
*/
@property(readonly) NSTimeInterval timeout;
/**
* Enable flow control of a gRPC call. The option defaults to NO. If set to YES, the writeData: method
* should only be called at most once before a didWriteData callback is issued, and
* receiveNextMessage: must be called each time before gRPC call issues a didReceiveMessage
* callback.
*/
@property(readonly) BOOL flowControlEnabled;
// OAuth2 parameters. Users of gRPC may specify one of the following two parameters.
/**
@ -232,6 +240,19 @@ typedef NS_ENUM(NSUInteger, GRPCTransportType) {
*/
@property(readwrite) NSTimeInterval timeout;
/**
* Enable flow control of a gRPC call. The option defaults to NO. If set to YES, the writeData: method
* should only be called at most once before a didWriteData callback is issued, and
* receiveNextMessage: must be called each time before gRPC call can issue a didReceiveMessage
* callback.
*
* If writeData: method is called more than once before issuance of a didWriteData callback, gRPC
* will continue to queue the message and write them to gRPC core in order. However, the user
* assumes their own responsibility of flow control by keeping tracking of the pending writes in
* the call.
*/
@property(readwrite) BOOL flowControlEnabled;
// OAuth2 parameters. Users of gRPC may specify one of the following two parameters.
/**

@ -22,6 +22,7 @@
// The default values for the call options.
static NSString *const kDefaultServerAuthority = nil;
static const NSTimeInterval kDefaultTimeout = 0;
static const BOOL kDefaultFlowControlEnabled = NO;
static NSDictionary *const kDefaultInitialMetadata = nil;
static NSString *const kDefaultUserAgentPrefix = nil;
static const NSUInteger kDefaultResponseSizeLimit = 0;
@ -59,6 +60,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
@protected
NSString *_serverAuthority;
NSTimeInterval _timeout;
BOOL _flowControlEnabled;
NSString *_oauth2AccessToken;
id<GRPCAuthorizationProtocol> _authTokenProvider;
NSDictionary *_initialMetadata;
@ -84,6 +86,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
@synthesize serverAuthority = _serverAuthority;
@synthesize timeout = _timeout;
@synthesize flowControlEnabled = _flowControlEnabled;
@synthesize oauth2AccessToken = _oauth2AccessToken;
@synthesize authTokenProvider = _authTokenProvider;
@synthesize initialMetadata = _initialMetadata;
@ -109,6 +112,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
- (instancetype)init {
return [self initWithServerAuthority:kDefaultServerAuthority
timeout:kDefaultTimeout
flowControlEnabled:kDefaultFlowControlEnabled
oauth2AccessToken:kDefaultOauth2AccessToken
authTokenProvider:kDefaultAuthTokenProvider
initialMetadata:kDefaultInitialMetadata
@ -134,6 +138,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
- (instancetype)initWithServerAuthority:(NSString *)serverAuthority
timeout:(NSTimeInterval)timeout
flowControlEnabled:(BOOL)flowControlEnabled
oauth2AccessToken:(NSString *)oauth2AccessToken
authTokenProvider:(id<GRPCAuthorizationProtocol>)authTokenProvider
initialMetadata:(NSDictionary *)initialMetadata
@ -158,6 +163,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
if ((self = [super init])) {
_serverAuthority = [serverAuthority copy];
_timeout = timeout < 0 ? 0 : timeout;
_flowControlEnabled = flowControlEnabled;
_oauth2AccessToken = [oauth2AccessToken copy];
_authTokenProvider = authTokenProvider;
_initialMetadata =
@ -193,6 +199,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
GRPCCallOptions *newOptions =
[[GRPCCallOptions allocWithZone:zone] initWithServerAuthority:_serverAuthority
timeout:_timeout
flowControlEnabled:_flowControlEnabled
oauth2AccessToken:_oauth2AccessToken
authTokenProvider:_authTokenProvider
initialMetadata:_initialMetadata
@ -221,6 +228,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
GRPCMutableCallOptions *newOptions = [[GRPCMutableCallOptions allocWithZone:zone]
initWithServerAuthority:[_serverAuthority copy]
timeout:_timeout
flowControlEnabled:_flowControlEnabled
oauth2AccessToken:[_oauth2AccessToken copy]
authTokenProvider:_authTokenProvider
initialMetadata:[[NSDictionary alloc] initWithDictionary:_initialMetadata
@ -301,6 +309,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
@dynamic serverAuthority;
@dynamic timeout;
@dynamic flowControlEnabled;
@dynamic oauth2AccessToken;
@dynamic authTokenProvider;
@dynamic initialMetadata;
@ -326,6 +335,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
- (instancetype)init {
return [self initWithServerAuthority:kDefaultServerAuthority
timeout:kDefaultTimeout
flowControlEnabled:kDefaultFlowControlEnabled
oauth2AccessToken:kDefaultOauth2AccessToken
authTokenProvider:kDefaultAuthTokenProvider
initialMetadata:kDefaultInitialMetadata
@ -353,6 +363,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
GRPCCallOptions *newOptions =
[[GRPCCallOptions allocWithZone:zone] initWithServerAuthority:_serverAuthority
timeout:_timeout
flowControlEnabled:_flowControlEnabled
oauth2AccessToken:_oauth2AccessToken
authTokenProvider:_authTokenProvider
initialMetadata:_initialMetadata
@ -381,6 +392,7 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
GRPCMutableCallOptions *newOptions = [[GRPCMutableCallOptions allocWithZone:zone]
initWithServerAuthority:_serverAuthority
timeout:_timeout
flowControlEnabled:_flowControlEnabled
oauth2AccessToken:_oauth2AccessToken
authTokenProvider:_authTokenProvider
initialMetadata:_initialMetadata
@ -417,6 +429,10 @@ static BOOL areObjectsEqual(id obj1, id obj2) {
}
}
/** Setter backing the @dynamic flowControlEnabled property (scalar assign). */
- (void)setFlowControlEnabled:(BOOL)flowControlEnabled {
_flowControlEnabled = flowControlEnabled;
}
- (void)setOauth2AccessToken:(NSString *)oauth2AccessToken {
_oauth2AccessToken = [oauth2AccessToken copy];
}

@ -57,6 +57,13 @@ NS_ASSUME_NONNULL_BEGIN
- (void)didCloseWithTrailingMetadata:(nullable NSDictionary *)trailingMetadata
error:(nullable NSError *)error;
/**
* Issued when flow control is enabled for the call and a message (written with writeMessage: method
* of GRPCStreamingProtoCall or the initializer of GRPCUnaryProtoCall) is passed to gRPC core with
* SEND_MESSAGE operation.
*/
- (void)didWriteMessage;
@end
/** A unary-request RPC call with Protobuf. */
@ -130,6 +137,26 @@ NS_ASSUME_NONNULL_BEGIN
*/
- (void)finish;
/**
* Tell gRPC to receive another message.
*
* This method should only be used when flow control is enabled. If flow control is enabled, gRPC
* will only receive additional messages after the user indicates so by using either
* receiveNextMessage: or receiveNextMessages: methods. If flow control is not enabled, messages
* will be automatically received after the previous one is delivered.
*/
- (void)receiveNextMessage;
/**
* Tell gRPC to receive another N messages.
*
* This method should only be used when flow control is enabled. If flow control is enabled, the
* messages received from the server are buffered in gRPC until the user want to receive the next
* message. If flow control is not enabled, messages will be automatically received after the
* previous one is delivered.
*/
- (void)receiveNextMessages:(NSUInteger)numberOfMessages;
@end
NS_ASSUME_NONNULL_END

@ -72,6 +72,7 @@ static NSError *ErrorForBadProto(id proto, Class expectedClass, NSError *parsing
- (void)start {
[_call start];
[_call receiveNextMessage];
[_call writeMessage:_message];
[_call finish];
}
@ -197,6 +198,17 @@ static NSError *ErrorForBadProto(id proto, Class expectedClass, NSError *parsing
[copiedCall finish];
}
/** Convenience wrapper: requests exactly one more message. */
- (void)receiveNextMessage {
[self receiveNextMessages:1];
}
/**
 * Forwards the flow-control read request to the wrapped GRPCCall2. The call
 * reference is copied under @synchronized and messaged outside the lock;
 * messaging a nil call is a no-op.
 */
- (void)receiveNextMessages:(NSUInteger)numberOfMessages {
GRPCCall2 *copiedCall;
@synchronized(self) {
copiedCall = _call;
}
[copiedCall receiveNextMessages:numberOfMessages];
}
- (void)didReceiveInitialMetadata:(NSDictionary *)initialMetadata {
@synchronized(self) {
if (initialMetadata != nil &&
@ -260,6 +272,20 @@ static NSError *ErrorForBadProto(id proto, Class expectedClass, NSError *parsing
}
}
/**
 * GRPCResponseHandler callback: translates the raw didWriteData event into
 * the proto-level didWriteMessage callback, if the proto handler implements
 * it. The handler is re-read inside the dispatched block under @synchronized
 * so a handler released in the meantime is messaged as nil (a no-op).
 */
- (void)didWriteData {
@synchronized(self) {
if ([_handler respondsToSelector:@selector(didWriteMessage)]) {
dispatch_async(_dispatchQueue, ^{
id<GRPCProtoResponseHandler> copiedHandler = nil;
@synchronized(self) {
copiedHandler = self->_handler;
}
[copiedHandler didWriteMessage];
});
}
}
}
/** The serial queue on which handler callbacks are dispatched. */
- (dispatch_queue_t)dispatchQueue {
return _dispatchQueue;
}

@ -40,11 +40,13 @@ static NSString *const kService = @"TestService";
static GRPCProtoMethod *kInexistentMethod;
static GRPCProtoMethod *kEmptyCallMethod;
static GRPCProtoMethod *kUnaryCallMethod;
static GRPCProtoMethod *kOutputStreamingCallMethod;
static GRPCProtoMethod *kFullDuplexCallMethod;
static const int kSimpleDataLength = 100;
static const NSTimeInterval kTestTimeout = 16;
static const NSTimeInterval kTestTimeout = 8;
static const NSTimeInterval kInvertedTimeout = 2;
// Reveal the _class ivar for testing access
@interface GRPCCall2 () {
@ -57,6 +59,11 @@ static const NSTimeInterval kTestTimeout = 16;
// Convenience class to use blocks as callbacks
@interface ClientTestsBlockCallbacks : NSObject<GRPCResponseHandler>
- (instancetype)initWithInitialMetadataCallback:(void (^)(NSDictionary *))initialMetadataCallback
messageCallback:(void (^)(id))messageCallback
closeCallback:(void (^)(NSDictionary *, NSError *))closeCallback
writeDataCallback:(void (^)(void))writeDataCallback;
- (instancetype)initWithInitialMetadataCallback:(void (^)(NSDictionary *))initialMetadataCallback
messageCallback:(void (^)(id))messageCallback
closeCallback:(void (^)(NSDictionary *, NSError *))closeCallback;
@ -67,21 +74,33 @@ static const NSTimeInterval kTestTimeout = 16;
void (^_initialMetadataCallback)(NSDictionary *);
void (^_messageCallback)(id);
void (^_closeCallback)(NSDictionary *, NSError *);
void (^_writeDataCallback)(void);
dispatch_queue_t _dispatchQueue;
}
// Designated initializer. Any callback may be nil, in which case the
// corresponding handler event is silently ignored.
// (The diff residue contained both the old 3-argument and the new
// 4-argument version of the closeCallback: line, producing a malformed
// selector; only the 4-argument form is kept.)
- (instancetype)initWithInitialMetadataCallback:(void (^)(NSDictionary *))initialMetadataCallback
                                messageCallback:(void (^)(id))messageCallback
                                  closeCallback:(void (^)(NSDictionary *, NSError *))closeCallback
                              writeDataCallback:(void (^)(void))writeDataCallback {
  if ((self = [super init])) {
    _initialMetadataCallback = initialMetadataCallback;
    _messageCallback = messageCallback;
    _closeCallback = closeCallback;
    _writeDataCallback = writeDataCallback;
    // Serial queue on which all of this handler's callbacks are delivered.
    _dispatchQueue = dispatch_queue_create(nil, DISPATCH_QUEUE_SERIAL);
  }
  return self;
}
// Convenience initializer for callers that do not need write-completion
// notifications; forwards to the designated initializer with a nil
// writeDataCallback.
- (instancetype)initWithInitialMetadataCallback:(void (^)(NSDictionary *))initialMetadataCallback
messageCallback:(void (^)(id))messageCallback
closeCallback:(void (^)(NSDictionary *, NSError *))closeCallback {
return [self initWithInitialMetadataCallback:initialMetadataCallback
messageCallback:messageCallback
closeCallback:closeCallback
writeDataCallback:nil];
}
- (void)didReceiveInitialMetadata:(NSDictionary *)initialMetadata {
if (self->_initialMetadataCallback) {
self->_initialMetadataCallback(initialMetadata);
@ -100,6 +119,12 @@ static const NSTimeInterval kTestTimeout = 16;
}
}
// GRPCResponseHandler: a queued write was accepted by the transport.
// Invokes the optional writeDataCallback supplied at init time.
- (void)didWriteData {
  void (^callback)(void) = self->_writeDataCallback;
  if (callback != nil) {
    callback();
  }
}
// Queue on which this test-callback object receives handler events.
- (dispatch_queue_t)dispatchQueue {
  return self->_dispatchQueue;
}
@ -120,6 +145,9 @@ static const NSTimeInterval kTestTimeout = 16;
[[GRPCProtoMethod alloc] initWithPackage:kPackage service:kService method:@"EmptyCall"];
kUnaryCallMethod =
[[GRPCProtoMethod alloc] initWithPackage:kPackage service:kService method:@"UnaryCall"];
kOutputStreamingCallMethod = [[GRPCProtoMethod alloc] initWithPackage:kPackage
service:kService
method:@"StreamingOutputCall"];
kFullDuplexCallMethod =
[[GRPCProtoMethod alloc] initWithPackage:kPackage service:kService method:@"FullDuplexCall"];
}
@ -478,4 +506,268 @@ static const NSTimeInterval kTestTimeout = 16;
[self waitForExpectationsWithTimeout:kTestTimeout handler:nil];
}
// Verifies that with flow control enabled, a write is reported back to the
// handler through writeDataCallback once the transport accepts the message.
- (void)testFlowControlWrite {
__weak XCTestExpectation *expectWriteData =
[self expectationWithDescription:@"Reported write data"];
RMTStreamingOutputCallRequest *request = [RMTStreamingOutputCallRequest message];
RMTResponseParameters *parameters = [RMTResponseParameters message];
parameters.size = kSimpleDataLength;
[request.responseParametersArray addObject:parameters];
request.payload.body = [NSMutableData dataWithLength:kSimpleDataLength];
// NOTE(review): a StreamingOutputCall request is sent on the UnaryCall
// path — confirm this is intended rather than kOutputStreamingCallMethod.
GRPCRequestOptions *callRequest =
[[GRPCRequestOptions alloc] initWithHost:(NSString *)kHostAddress
path:kUnaryCallMethod.HTTPPath
safety:GRPCCallSafetyDefault];
GRPCMutableCallOptions *options = [[GRPCMutableCallOptions alloc] init];
options.transportType = GRPCTransportTypeInsecure;
options.flowControlEnabled = YES;
GRPCCall2 *call =
[[GRPCCall2 alloc] initWithRequestOptions:callRequest
responseHandler:[[ClientTestsBlockCallbacks alloc]
initWithInitialMetadataCallback:nil
messageCallback:nil
closeCallback:nil
writeDataCallback:^{
[expectWriteData fulfill];
}]
callOptions:options];
[call start];
[call receiveNextMessages:1];
[call writeData:[request data]];
// Wait until the write-data callback fires (up to kTestTimeout).
[self waitForExpectationsWithTimeout:kTestTimeout handler:nil];
[call finish];
}
// Verifies read-side flow control: with flowControlEnabled, a received
// message and the subsequent close must be withheld until the client
// explicitly requests delivery with -receiveNextMessages:.
- (void)testFlowControlRead {
  __weak __block XCTestExpectation *expectBlockedMessage =
      [self expectationWithDescription:@"Message not delivered without recvNextMessage"];
  __weak __block XCTestExpectation *expectPassedMessage = nil;
  __weak __block XCTestExpectation *expectBlockedClose =
      [self expectationWithDescription:@"Call not closed with pending message"];
  __weak __block XCTestExpectation *expectPassedClose = nil;
  // These events must NOT fire while delivery is blocked.
  expectBlockedMessage.inverted = YES;
  expectBlockedClose.inverted = YES;

  RMTSimpleRequest *request = [RMTSimpleRequest message];
  request.responseSize = kSimpleDataLength;
  request.payload.body = [NSMutableData dataWithLength:kSimpleDataLength];

  GRPCRequestOptions *callRequest =
      [[GRPCRequestOptions alloc] initWithHost:(NSString *)kHostAddress
                                          path:kUnaryCallMethod.HTTPPath
                                        safety:GRPCCallSafetyDefault];
  GRPCMutableCallOptions *options = [[GRPCMutableCallOptions alloc] init];
  options.transportType = GRPCTransportTypeInsecure;
  options.flowControlEnabled = YES;

  // BOOL rather than int: the flag is only ever used as a boolean, matching
  // the sibling flow-control tests.
  __block BOOL unblocked = NO;
  GRPCCall2 *call = [[GRPCCall2 alloc]
      initWithRequestOptions:callRequest
             responseHandler:[[ClientTestsBlockCallbacks alloc] initWithInitialMetadataCallback:nil
                 messageCallback:^(NSData *message) {
                   if (!unblocked) {
                     [expectBlockedMessage fulfill];
                   } else {
                     [expectPassedMessage fulfill];
                   }
                 }
                 closeCallback:^(NSDictionary *trailers, NSError *error) {
                   if (!unblocked) {
                     [expectBlockedClose fulfill];
                   } else {
                     [expectPassedClose fulfill];
                   }
                 }]
                 callOptions:options];
  [call start];
  [call writeData:[request data]];
  [call finish];

  // Wait to make sure we do not receive the response while blocked.
  [self waitForExpectationsWithTimeout:kInvertedTimeout handler:nil];

  // Now unblock delivery and expect both the message and the close.
  expectPassedMessage =
      [self expectationWithDescription:@"Message delivered with receiveNextMessage"];
  expectPassedClose = [self expectationWithDescription:@"Close delivered after receiveNextMessage"];
  unblocked = YES;
  [call receiveNextMessages:1];
  [self waitForExpectationsWithTimeout:kTestTimeout handler:nil];
}
// Verifies that -receiveNextMessages:2 releases exactly two messages: both
// writes complete, two responses are delivered, and a third message (if any)
// is never surfaced within the wait window.
- (void)testFlowControlMultipleMessages {
__weak XCTestExpectation *expectPassedMessage =
[self expectationWithDescription:@"two messages delivered with receiveNextMessage"];
expectPassedMessage.expectedFulfillmentCount = 2;
// Inverted: delivery of a third message would fail the test.
__weak XCTestExpectation *expectBlockedMessage =
[self expectationWithDescription:@"Message 3 not delivered"];
expectBlockedMessage.inverted = YES;
__weak XCTestExpectation *expectWriteTwice =
[self expectationWithDescription:@"Write 2 messages done"];
expectWriteTwice.expectedFulfillmentCount = 2;
RMTStreamingOutputCallRequest *request = [RMTStreamingOutputCallRequest message];
RMTResponseParameters *parameters = [RMTResponseParameters message];
parameters.size = kSimpleDataLength;
[request.responseParametersArray addObject:parameters];
request.payload.body = [NSMutableData dataWithLength:kSimpleDataLength];
GRPCRequestOptions *callRequest =
[[GRPCRequestOptions alloc] initWithHost:(NSString *)kHostAddress
path:kFullDuplexCallMethod.HTTPPath
safety:GRPCCallSafetyDefault];
GRPCMutableCallOptions *options = [[GRPCMutableCallOptions alloc] init];
options.transportType = GRPCTransportTypeInsecure;
options.flowControlEnabled = YES;
// Counts responses: indices 0 and 1 are expected, anything later is blocked.
__block NSUInteger messageId = 0;
__block GRPCCall2 *call = [[GRPCCall2 alloc]
initWithRequestOptions:callRequest
responseHandler:[[ClientTestsBlockCallbacks alloc] initWithInitialMetadataCallback:nil
messageCallback:^(NSData *message) {
if (messageId <= 1) {
[expectPassedMessage fulfill];
} else {
[expectBlockedMessage fulfill];
}
messageId++;
}
closeCallback:nil
writeDataCallback:^{
[expectWriteTwice fulfill];
}]
callOptions:options];
[call receiveNextMessages:2];
[call start];
[call writeData:[request data]];
[call writeData:[request data]];
// kInvertedTimeout bounds the wait so the inverted expectation is checked.
[self waitForExpectationsWithTimeout:kInvertedTimeout handler:nil];
}
// Verifies that a -receiveNextMessages: request issued BEFORE -start is
// honored: the response message and then the close are both delivered, and
// the message arrives before the close.
- (void)testFlowControlReadReadyBeforeStart {
  __weak XCTestExpectation *expectPassedMessage =
      [self expectationWithDescription:@"Message delivered with receiveNextMessage"];
  __weak XCTestExpectation *expectPassedClose =
      [self expectationWithDescription:@"Close delivered with receiveNextMessage"];

  RMTSimpleRequest *request = [RMTSimpleRequest message];
  request.responseSize = kSimpleDataLength;
  request.payload.body = [NSMutableData dataWithLength:kSimpleDataLength];

  GRPCRequestOptions *callRequest =
      [[GRPCRequestOptions alloc] initWithHost:(NSString *)kHostAddress
                                          path:kUnaryCallMethod.HTTPPath
                                        safety:GRPCCallSafetyDefault];
  GRPCMutableCallOptions *options = [[GRPCMutableCallOptions alloc] init];
  options.transportType = GRPCTransportTypeInsecure;
  options.flowControlEnabled = YES;

  // Flipped by the close callback; the message callback asserts it is still
  // NO, i.e. the message precedes the close.
  __block BOOL closed = NO;
  GRPCCall2 *call = [[GRPCCall2 alloc]
      initWithRequestOptions:callRequest
             responseHandler:[[ClientTestsBlockCallbacks alloc] initWithInitialMetadataCallback:nil
                 messageCallback:^(NSData *message) {
                   [expectPassedMessage fulfill];
                   XCTAssertFalse(closed);
                 }
                 // Fixed typo: parameter was misspelled "ttrailers".
                 closeCallback:^(NSDictionary *trailers, NSError *error) {
                   closed = YES;
                   [expectPassedClose fulfill];
                 }]
                 callOptions:options];
  // The message request precedes -start here.
  [call receiveNextMessages:1];
  [call start];
  [call writeData:[request data]];
  [call finish];
  [self waitForExpectationsWithTimeout:kInvertedTimeout handler:nil];
}
// Verifies that a -receiveNextMessages: request issued AFTER -start still
// releases the pending message and the close event under flow control.
- (void)testFlowControlReadReadyAfterStart {
__weak XCTestExpectation *expectPassedMessage =
[self expectationWithDescription:@"Message delivered with receiveNextMessage"];
__weak XCTestExpectation *expectPassedClose =
[self expectationWithDescription:@"Close delivered with receiveNextMessage"];
RMTStreamingOutputCallRequest *request = [RMTStreamingOutputCallRequest message];
RMTResponseParameters *parameters = [RMTResponseParameters message];
parameters.size = kSimpleDataLength;
[request.responseParametersArray addObject:parameters];
request.payload.body = [NSMutableData dataWithLength:kSimpleDataLength];
// NOTE(review): a StreamingOutputCall request is sent on kUnaryCallMethod's
// path; kOutputStreamingCallMethod looks like the intended method — confirm.
GRPCRequestOptions *callRequest =
[[GRPCRequestOptions alloc] initWithHost:(NSString *)kHostAddress
path:kUnaryCallMethod.HTTPPath
safety:GRPCCallSafetyDefault];
GRPCMutableCallOptions *options = [[GRPCMutableCallOptions alloc] init];
options.transportType = GRPCTransportTypeInsecure;
options.flowControlEnabled = YES;
// Flipped on close; the message callback asserts the message precedes it.
__block BOOL closed = NO;
GRPCCall2 *call = [[GRPCCall2 alloc]
initWithRequestOptions:callRequest
responseHandler:[[ClientTestsBlockCallbacks alloc] initWithInitialMetadataCallback:nil
messageCallback:^(NSData *message) {
[expectPassedMessage fulfill];
XCTAssertFalse(closed);
}
closeCallback:^(NSDictionary *trailers, NSError *error) {
closed = YES;
[expectPassedClose fulfill];
}]
callOptions:options];
[call start];
// The message request follows -start here.
[call receiveNextMessages:1];
[call writeData:[request data]];
[call finish];
[self waitForExpectationsWithTimeout:kTestTimeout handler:nil];
}
// Checks that a non-OK status is surfaced through the close callback even
// though the client never requests a message — failure delivery must not be
// blocked by read flow control.
- (void)testFlowControlReadNonBlockingFailure {
  __weak XCTestExpectation *completion = [self expectationWithDescription:@"RPC completed."];

  GRPCMutableCallOptions *options = [[GRPCMutableCallOptions alloc] init];
  options.flowControlEnabled = YES;
  options.transportType = GRPCTransportTypeInsecure;

  GRPCRequestOptions *requestOptions =
      [[GRPCRequestOptions alloc] initWithHost:kHostAddress
                                          path:kUnaryCallMethod.HTTPPath
                                        safety:GRPCCallSafetyDefault];

  // Ask the server to fail the call with code 2 and message "test".
  RMTEchoStatus *status = [RMTEchoStatus message];
  status.code = 2;
  status.message = @"test";
  RMTSimpleRequest *request = [RMTSimpleRequest message];
  request.payload.body = [NSMutableData dataWithLength:options.responseSizeLimit];
  request.responseStatus = status;

  GRPCCall2 *rpc = [[GRPCCall2 alloc]
      initWithRequestOptions:requestOptions
             responseHandler:[[ClientTestsBlockCallbacks alloc] initWithInitialMetadataCallback:nil
                 messageCallback:^(NSData *data) {
                   XCTFail(@"Received unexpected message");
                 }
                 closeCallback:^(NSDictionary *trailingMetadata, NSError *error) {
                   XCTAssertNotNil(error, @"Expecting non-nil error");
                   XCTAssertEqual(error.code, 2);
                   [completion fulfill];
                 }]
                 callOptions:options];
  // Data is queued before the call starts; no receiveNextMessages is issued.
  [rpc writeData:[request data]];
  [rpc start];
  [rpc finish];
  [self waitForExpectationsWithTimeout:kTestTimeout handler:nil];
}
@end

@ -79,6 +79,11 @@ BOOL isRemoteInteropTest(NSString *host) {
// Convenience class to use blocks as callbacks
@interface InteropTestsBlockCallbacks : NSObject<GRPCProtoResponseHandler>
- (instancetype)initWithInitialMetadataCallback:(void (^)(NSDictionary *))initialMetadataCallback
messageCallback:(void (^)(id))messageCallback
closeCallback:(void (^)(NSDictionary *, NSError *))closeCallback
writeMessageCallback:(void (^)(void))writeMessageCallback;
- (instancetype)initWithInitialMetadataCallback:(void (^)(NSDictionary *))initialMetadataCallback
messageCallback:(void (^)(id))messageCallback
closeCallback:(void (^)(NSDictionary *, NSError *))closeCallback;
@ -89,21 +94,33 @@ BOOL isRemoteInteropTest(NSString *host) {
void (^_initialMetadataCallback)(NSDictionary *);
void (^_messageCallback)(id);
void (^_closeCallback)(NSDictionary *, NSError *);
void (^_writeMessageCallback)(void);
dispatch_queue_t _dispatchQueue;
}
// Designated initializer. Any callback may be nil; nil callbacks make the
// corresponding handler event a no-op.
// (The diff residue contained both the old 3-argument and the new
// 4-argument version of the closeCallback: line, producing a malformed
// selector; only the 4-argument form is kept.)
- (instancetype)initWithInitialMetadataCallback:(void (^)(NSDictionary *))initialMetadataCallback
                                messageCallback:(void (^)(id))messageCallback
                                  closeCallback:(void (^)(NSDictionary *, NSError *))closeCallback
                           writeMessageCallback:(void (^)(void))writeMessageCallback {
  if ((self = [super init])) {
    _initialMetadataCallback = initialMetadataCallback;
    _messageCallback = messageCallback;
    _closeCallback = closeCallback;
    _writeMessageCallback = writeMessageCallback;
    // Serial queue on which all of this handler's callbacks are delivered.
    _dispatchQueue = dispatch_queue_create(nil, DISPATCH_QUEUE_SERIAL);
  }
  return self;
}
// Convenience initializer for callers that do not need write-completion
// notifications; forwards to the designated initializer with a nil
// writeMessageCallback.
- (instancetype)initWithInitialMetadataCallback:(void (^)(NSDictionary *))initialMetadataCallback
messageCallback:(void (^)(id))messageCallback
closeCallback:(void (^)(NSDictionary *, NSError *))closeCallback {
return [self initWithInitialMetadataCallback:initialMetadataCallback
messageCallback:messageCallback
closeCallback:closeCallback
writeMessageCallback:nil];
}
- (void)didReceiveInitialMetadata:(NSDictionary *)initialMetadata {
if (_initialMetadataCallback) {
_initialMetadataCallback(initialMetadata);
@ -122,6 +139,12 @@ BOOL isRemoteInteropTest(NSString *host) {
}
}
// GRPCProtoResponseHandler: a queued message was accepted by the transport.
// Invokes the optional writeMessageCallback supplied at init time.
- (void)didWriteMessage {
  void (^callback)(void) = self->_writeMessageCallback;
  if (callback != nil) {
    callback();
  }
}
// Queue on which this test-callback object receives handler events.
- (dispatch_queue_t)dispatchQueue {
  return self->_dispatchQueue;
}
@ -702,6 +725,67 @@ BOOL isRemoteInteropTest(NSString *host) {
[self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
}
// Four-round ping-pong over a full-duplex RPC exercising both directions of
// flow control: each next request is written only after the previous write
// was confirmed via writeMessageCallback, and each response must be
// explicitly requested with -receiveNextMessage.
- (void)testPingPongRPCWithFlowControl {
XCTAssertNotNil([[self class] host]);
__weak XCTestExpectation *expectation = [self expectationWithDescription:@"PingPongWithV2API"];
// Request payload sizes and the matching expected response sizes, per round.
NSArray *requests = @[ @27182, @8, @1828, @45904 ];
NSArray *responses = @[ @31415, @9, @2653, @58979 ];
// Index of the current round; incremented as responses arrive.
__block int index = 0;
id request = [RMTStreamingOutputCallRequest messageWithPayloadSize:requests[index]
requestedResponseSize:responses[index]];
GRPCMutableCallOptions *options = [[GRPCMutableCallOptions alloc] init];
options.transportType = [[self class] transportType];
options.PEMRootCertificates = [[self class] PEMRootCertificates];
options.hostNameOverride = [[self class] hostNameOverride];
options.flowControlEnabled = YES;
// Set by writeMessageCallback; each round asserts the previous write
// completed before the next request is sent.
__block BOOL canWriteData = NO;
__block GRPCStreamingProtoCall *call = [_service
fullDuplexCallWithResponseHandler:[[InteropTestsBlockCallbacks alloc]
initWithInitialMetadataCallback:nil
messageCallback:^(id message) {
XCTAssertLessThan(index, 4,
@"More than 4 responses received.");
id expected = [RMTStreamingOutputCallResponse
messageWithPayloadSize:responses[index]];
XCTAssertEqualObjects(message, expected);
index += 1;
if (index < 4) {
id request = [RMTStreamingOutputCallRequest
messageWithPayloadSize:requests[index]
requestedResponseSize:responses[index]];
XCTAssertTrue(canWriteData);
canWriteData = NO;
[call writeMessage:request];
[call receiveNextMessage];
} else {
// All four rounds done; half-close the call.
[call finish];
}
}
closeCallback:^(NSDictionary *trailingMetadata,
NSError *error) {
XCTAssertNil(error,
@"Finished with unexpected error: %@",
error);
XCTAssertEqual(index, 4,
@"Received %i responses instead of 4.",
index);
[expectation fulfill];
}
writeMessageCallback:^{
canWriteData = YES;
}]
callOptions:options];
[call start];
[call receiveNextMessage];
[call writeMessage:request];
[self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
}
- (void)testEmptyStreamRPC {
XCTAssertNotNil([[self class] host]);
__weak XCTestExpectation *expectation = [self expectationWithDescription:@"EmptyStream"];

@ -22,16 +22,17 @@ cd src/php/bin
source ./determine_extension_dir.sh
# in some jenkins macos machine, somehow the PHP build script can't find libgrpc.dylib
export DYLD_LIBRARY_PATH=$root/libs/$CONFIG
# NOTE: the diff residue contained both the old bare "php"/"valgrind"
# invocations and the new "$(which ...)" ones, which would run each suite
# twice; only the updated $(which ...) form is kept.
$(which php) $extension_dir -d max_execution_time=300 $(which phpunit) -v --debug \
  --exclude-group persistent_list_bound_tests ../tests/unit_tests
$(which php) $extension_dir -d max_execution_time=300 $(which phpunit) -v --debug \
  ../tests/unit_tests/PersistentChannelTests
export ZEND_DONT_UNLOAD_MODULES=1
export USE_ZEND_ALLOC=0
# Detect whether valgrind is executable
if [ -x "$(command -v valgrind)" ]; then
  $(which valgrind) --error-exitcode=10 --leak-check=yes \
    $(which php) $extension_dir -d max_execution_time=300 \
    ../tests/MemoryLeakTest/MemoryLeakTest.php
fi

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save