Merge branch 'flow_control_v2' into bwest

pull/9511/head
Craig Tiller 8 years ago
commit 3f14e6df65
Changed files (number of changed lines in parentheses):

  1. BUILD (16)
  2. CMakeLists.txt (7)
  3. Makefile (46)
  4. binding.gyp (1)
  5. build.yaml (16)
  6. config.m4 (1)
  7. gRPC-Core.podspec (5)
  8. grpc.def (5)
  9. grpc.gemspec (3)
  10. include/grpc++/ext/reflection.grpc.pb.h (2)
  11. include/grpc++/impl/codegen/method_handler_impl.h (13)
  12. include/grpc++/impl/codegen/server_context.h (1)
  13. include/grpc++/impl/codegen/service_type.h (11)
  14. include/grpc++/impl/codegen/sync_stream.h (39)
  15. include/grpc++/resource_quota.h (70)
  16. include/grpc++/server_builder.h (8)
  17. include/grpc++/support/channel_arguments.h (8)
  18. include/grpc/grpc.h (17)
  19. include/grpc/impl/codegen/grpc_types.h (5)
  20. package.xml (3)
  21. src/compiler/cpp_generator.cc (96)
  22. src/core/ext/transport/chttp2/client/insecure/channel_create.c (4)
  23. src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c (6)
  24. src/core/ext/transport/chttp2/client/secure/secure_channel_create.c (7)
  25. src/core/ext/transport/chttp2/server/insecure/server_chttp2.c (4)
  26. src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c (8)
  27. src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c (3)
  28. src/core/ext/transport/chttp2/transport/chttp2_transport.c (315)
  29. src/core/ext/transport/chttp2/transport/frame_window_update.c (6)
  30. src/core/ext/transport/chttp2/transport/internal.h (75)
  31. src/core/ext/transport/chttp2/transport/parsing.c (20)
  32. src/core/ext/transport/chttp2/transport/stream_lists.c (43)
  33. src/core/ext/transport/chttp2/transport/stream_map.c (11)
  34. src/core/ext/transport/chttp2/transport/stream_map.h (3)
  35. src/core/ext/transport/chttp2/transport/writing.c (18)
  36. src/core/lib/http/httpcli.c (21)
  37. src/core/lib/http/httpcli.h (2)
  38. src/core/lib/iomgr/endpoint.c (4)
  39. src/core/lib/iomgr/endpoint.h (4)
  40. src/core/lib/iomgr/endpoint_pair.h (5)
  41. src/core/lib/iomgr/endpoint_pair_posix.c (13)
  42. src/core/lib/iomgr/resource_quota.c (711)
  43. src/core/lib/iomgr/resource_quota.h (131)
  44. src/core/lib/iomgr/tcp_client.h (3)
  45. src/core/lib/iomgr/tcp_client_posix.c (51)
  46. src/core/lib/iomgr/tcp_client_posix.h (45)
  47. src/core/lib/iomgr/tcp_posix.c (80)
  48. src/core/lib/iomgr/tcp_posix.h (4)
  49. src/core/lib/iomgr/tcp_server.h (3)
  50. src/core/lib/iomgr/tcp_server_posix.c (23)
  51. src/core/lib/security/credentials/google_default/google_default_credentials.c (5)
  52. src/core/lib/security/credentials/jwt/jwt_verifier.c (16)
  53. src/core/lib/security/credentials/oauth2/oauth2_credentials.c (20)
  54. src/core/lib/security/transport/secure_endpoint.c (7)
  55. src/core/lib/support/log_posix.c (12)
  56. src/core/lib/surface/call.c (6)
  57. src/core/lib/surface/init.c (2)
  58. src/core/lib/surface/server.c (1)
  59. src/cpp/common/channel_arguments.cc (15)
  60. src/cpp/common/resource_quota_cc.cc (51)
  61. src/cpp/server/server_builder.cc (22)
  62. src/proto/grpc/testing/control.proto (11)
  63. src/proto/grpc/testing/duplicate/echo_duplicate.proto (1)
  64. src/proto/grpc/testing/stats.proto (8)
  65. src/python/grpcio/grpc_core_dependencies.py (1)
  66. src/ruby/ext/grpc/rb_grpc_imports.generated.c (10)
  67. src/ruby/ext/grpc/rb_grpc_imports.generated.h (15)
  68. test/core/bad_client/bad_client.c (5)
  69. test/core/end2end/end2end_nosec_tests.c (8)
  70. test/core/end2end/end2end_tests.c (8)
  71. test/core/end2end/fixtures/h2_sockpair+trace.c (4)
  72. test/core/end2end/fixtures/h2_sockpair.c (4)
  73. test/core/end2end/fixtures/h2_sockpair_1byte.c (4)
  74. test/core/end2end/fixtures/http_proxy.c (10)
  75. test/core/end2end/fuzzers/api_fuzzer.c (16)
  76. test/core/end2end/fuzzers/client_fuzzer.c (6)
  77. test/core/end2end/fuzzers/server_fuzzer.c (6)
  78. test/core/end2end/gen_build_yaml.py (17)
  79. test/core/end2end/tests/buffer_pool_server.c (353)
  80. test/core/end2end/tests/max_message_length.c (1)
  81. test/core/end2end/tests/network_status_change.c (1)
  82. test/core/end2end/tests/resource_quota_server.c (353)
  83. test/core/http/httpcli_test.c (11)
  84. test/core/http/httpscli_test.c (11)
  85. test/core/internal_api_canaries/iomgr.c (1)
  86. test/core/iomgr/endpoint_pair_test.c (6)
  87. test/core/iomgr/fd_conservation_posix_test.c (6)
  88. test/core/iomgr/resource_quota_test.c (743)
  89. test/core/iomgr/tcp_client_posix_test.c (4)
  90. test/core/iomgr/tcp_posix_test.c (36)
  91. test/core/iomgr/tcp_server_posix_test.c (15)
  92. test/core/security/secure_endpoint_test.c (5)
  93. test/core/surface/concurrent_connectivity_test.c (2)
  94. test/core/util/mock_endpoint.c (41)
  95. test/core/util/mock_endpoint.h (3)
  96. test/core/util/passthru_endpoint.c (34)
  97. test/core/util/passthru_endpoint.h (3)
  98. test/core/util/port_server_client.c (20)
  99. test/core/util/test_tcp_server.c (4)
  100. test/cpp/codegen/compiler_test_golden (8)

Some files were not shown because too many files have changed in this diff.

BUILD (16 changed lines)

@@ -202,6 +202,7 @@ cc_library(
 "src/core/lib/iomgr/pollset_set_windows.h",
 "src/core/lib/iomgr/pollset_windows.h",
 "src/core/lib/iomgr/resolve_address.h",
+"src/core/lib/iomgr/resource_quota.h",
 "src/core/lib/iomgr/sockaddr.h",
 "src/core/lib/iomgr/sockaddr_posix.h",
 "src/core/lib/iomgr/sockaddr_utils.h",
@@ -209,6 +210,7 @@ cc_library(
 "src/core/lib/iomgr/socket_utils_posix.h",
 "src/core/lib/iomgr/socket_windows.h",
 "src/core/lib/iomgr/tcp_client.h",
+"src/core/lib/iomgr/tcp_client_posix.h",
 "src/core/lib/iomgr/tcp_posix.h",
 "src/core/lib/iomgr/tcp_server.h",
 "src/core/lib/iomgr/tcp_windows.h",
@@ -366,6 +368,7 @@ cc_library(
 "src/core/lib/iomgr/pollset_windows.c",
 "src/core/lib/iomgr/resolve_address_posix.c",
 "src/core/lib/iomgr/resolve_address_windows.c",
+"src/core/lib/iomgr/resource_quota.c",
 "src/core/lib/iomgr/sockaddr_utils.c",
 "src/core/lib/iomgr/socket_utils_common_posix.c",
 "src/core/lib/iomgr/socket_utils_linux.c",
@@ -611,6 +614,7 @@ cc_library(
 "src/core/lib/iomgr/pollset_set_windows.h",
 "src/core/lib/iomgr/pollset_windows.h",
 "src/core/lib/iomgr/resolve_address.h",
+"src/core/lib/iomgr/resource_quota.h",
 "src/core/lib/iomgr/sockaddr.h",
 "src/core/lib/iomgr/sockaddr_posix.h",
 "src/core/lib/iomgr/sockaddr_utils.h",
@@ -618,6 +622,7 @@ cc_library(
 "src/core/lib/iomgr/socket_utils_posix.h",
 "src/core/lib/iomgr/socket_windows.h",
 "src/core/lib/iomgr/tcp_client.h",
+"src/core/lib/iomgr/tcp_client_posix.h",
 "src/core/lib/iomgr/tcp_posix.h",
 "src/core/lib/iomgr/tcp_server.h",
 "src/core/lib/iomgr/tcp_windows.h",
@@ -760,6 +765,7 @@ cc_library(
 "src/core/lib/iomgr/pollset_windows.c",
 "src/core/lib/iomgr/resolve_address_posix.c",
 "src/core/lib/iomgr/resolve_address_windows.c",
+"src/core/lib/iomgr/resource_quota.c",
 "src/core/lib/iomgr/sockaddr_utils.c",
 "src/core/lib/iomgr/socket_utils_common_posix.c",
 "src/core/lib/iomgr/socket_utils_linux.c",
@@ -975,6 +981,7 @@ cc_library(
 "src/core/lib/iomgr/pollset_set_windows.h",
 "src/core/lib/iomgr/pollset_windows.h",
 "src/core/lib/iomgr/resolve_address.h",
+"src/core/lib/iomgr/resource_quota.h",
 "src/core/lib/iomgr/sockaddr.h",
 "src/core/lib/iomgr/sockaddr_posix.h",
 "src/core/lib/iomgr/sockaddr_utils.h",
@@ -982,6 +989,7 @@ cc_library(
 "src/core/lib/iomgr/socket_utils_posix.h",
 "src/core/lib/iomgr/socket_windows.h",
 "src/core/lib/iomgr/tcp_client.h",
+"src/core/lib/iomgr/tcp_client_posix.h",
 "src/core/lib/iomgr/tcp_posix.h",
 "src/core/lib/iomgr/tcp_server.h",
 "src/core/lib/iomgr/tcp_windows.h",
@@ -1116,6 +1124,7 @@ cc_library(
 "src/core/lib/iomgr/pollset_windows.c",
 "src/core/lib/iomgr/resolve_address_posix.c",
 "src/core/lib/iomgr/resolve_address_windows.c",
+"src/core/lib/iomgr/resource_quota.c",
 "src/core/lib/iomgr/sockaddr_utils.c",
 "src/core/lib/iomgr/socket_utils_common_posix.c",
 "src/core/lib/iomgr/socket_utils_linux.c",
@@ -1315,6 +1324,7 @@ cc_library(
 "src/cpp/common/channel_filter.cc",
 "src/cpp/common/completion_queue_cc.cc",
 "src/cpp/common/core_codegen.cc",
+"src/cpp/common/resource_quota_cc.cc",
 "src/cpp/common/rpc_method.cc",
 "src/cpp/server/async_generic_service.cc",
 "src/cpp/server/create_default_thread_pool.cc",
@@ -1359,6 +1369,7 @@ cc_library(
 "include/grpc++/impl/thd.h",
 "include/grpc++/impl/thd_cxx11.h",
 "include/grpc++/impl/thd_no_cxx11.h",
+"include/grpc++/resource_quota.h",
 "include/grpc++/security/auth_context.h",
 "include/grpc++/security/auth_metadata_processor.h",
 "include/grpc++/security/credentials.h",
@@ -1537,6 +1548,7 @@ cc_library(
 "src/cpp/common/channel_filter.cc",
 "src/cpp/common/completion_queue_cc.cc",
 "src/cpp/common/core_codegen.cc",
+"src/cpp/common/resource_quota_cc.cc",
 "src/cpp/common/rpc_method.cc",
 "src/cpp/server/async_generic_service.cc",
 "src/cpp/server/create_default_thread_pool.cc",
@@ -1581,6 +1593,7 @@ cc_library(
 "include/grpc++/impl/thd.h",
 "include/grpc++/impl/thd_cxx11.h",
 "include/grpc++/impl/thd_no_cxx11.h",
+"include/grpc++/resource_quota.h",
 "include/grpc++/security/auth_context.h",
 "include/grpc++/security/auth_metadata_processor.h",
 "include/grpc++/security/credentials.h",
@@ -1883,6 +1896,7 @@ objc_library(
 "src/core/lib/iomgr/pollset_windows.c",
 "src/core/lib/iomgr/resolve_address_posix.c",
 "src/core/lib/iomgr/resolve_address_windows.c",
+"src/core/lib/iomgr/resource_quota.c",
 "src/core/lib/iomgr/sockaddr_utils.c",
 "src/core/lib/iomgr/socket_utils_common_posix.c",
 "src/core/lib/iomgr/socket_utils_linux.c",
@@ -2107,6 +2121,7 @@ objc_library(
 "src/core/lib/iomgr/pollset_set_windows.h",
 "src/core/lib/iomgr/pollset_windows.h",
 "src/core/lib/iomgr/resolve_address.h",
+"src/core/lib/iomgr/resource_quota.h",
 "src/core/lib/iomgr/sockaddr.h",
 "src/core/lib/iomgr/sockaddr_posix.h",
 "src/core/lib/iomgr/sockaddr_utils.h",
@@ -2114,6 +2129,7 @@ objc_library(
 "src/core/lib/iomgr/socket_utils_posix.h",
 "src/core/lib/iomgr/socket_windows.h",
 "src/core/lib/iomgr/tcp_client.h",
+"src/core/lib/iomgr/tcp_client_posix.h",
 "src/core/lib/iomgr/tcp_posix.h",
 "src/core/lib/iomgr/tcp_server.h",
 "src/core/lib/iomgr/tcp_windows.h",

CMakeLists.txt (7 changed lines)

@@ -329,6 +329,7 @@ add_library(grpc
 src/core/lib/iomgr/pollset_windows.c
 src/core/lib/iomgr/resolve_address_posix.c
 src/core/lib/iomgr/resolve_address_windows.c
+src/core/lib/iomgr/resource_quota.c
 src/core/lib/iomgr/sockaddr_utils.c
 src/core/lib/iomgr/socket_utils_common_posix.c
 src/core/lib/iomgr/socket_utils_linux.c
@@ -592,6 +593,7 @@ add_library(grpc_cronet
 src/core/lib/iomgr/pollset_windows.c
 src/core/lib/iomgr/resolve_address_posix.c
 src/core/lib/iomgr/resolve_address_windows.c
+src/core/lib/iomgr/resource_quota.c
 src/core/lib/iomgr/sockaddr_utils.c
 src/core/lib/iomgr/socket_utils_common_posix.c
 src/core/lib/iomgr/socket_utils_linux.c
@@ -827,6 +829,7 @@ add_library(grpc_unsecure
 src/core/lib/iomgr/pollset_windows.c
 src/core/lib/iomgr/resolve_address_posix.c
 src/core/lib/iomgr/resolve_address_windows.c
+src/core/lib/iomgr/resource_quota.c
 src/core/lib/iomgr/sockaddr_utils.c
 src/core/lib/iomgr/socket_utils_common_posix.c
 src/core/lib/iomgr/socket_utils_linux.c
@@ -1036,6 +1039,7 @@ add_library(grpc++
 src/cpp/common/channel_filter.cc
 src/cpp/common/completion_queue_cc.cc
 src/cpp/common/core_codegen.cc
+src/cpp/common/resource_quota_cc.cc
 src/cpp/common/rpc_method.cc
 src/cpp/server/async_generic_service.cc
 src/cpp/server/create_default_thread_pool.cc
@@ -1097,6 +1101,7 @@ foreach(_hdr
 include/grpc++/impl/thd.h
 include/grpc++/impl/thd_cxx11.h
 include/grpc++/impl/thd_no_cxx11.h
+include/grpc++/resource_quota.h
 include/grpc++/security/auth_context.h
 include/grpc++/security/auth_metadata_processor.h
 include/grpc++/security/credentials.h
@@ -1290,6 +1295,7 @@ add_library(grpc++_unsecure
 src/cpp/common/channel_filter.cc
 src/cpp/common/completion_queue_cc.cc
 src/cpp/common/core_codegen.cc
+src/cpp/common/resource_quota_cc.cc
 src/cpp/common/rpc_method.cc
 src/cpp/server/async_generic_service.cc
 src/cpp/server/create_default_thread_pool.cc
@@ -1351,6 +1357,7 @@ foreach(_hdr
 include/grpc++/impl/thd.h
 include/grpc++/impl/thd_cxx11.h
 include/grpc++/impl/thd_no_cxx11.h
+include/grpc++/resource_quota.h
 include/grpc++/security/auth_context.h
 include/grpc++/security/auth_metadata_processor.h
 include/grpc++/security/credentials.h

Makefile (46 changed lines)

@@ -1003,6 +1003,7 @@ no_server_test: $(BINDIR)/$(CONFIG)/no_server_test
 percent_decode_fuzzer: $(BINDIR)/$(CONFIG)/percent_decode_fuzzer
 percent_encode_fuzzer: $(BINDIR)/$(CONFIG)/percent_encode_fuzzer
 resolve_address_test: $(BINDIR)/$(CONFIG)/resolve_address_test
+resource_quota_test: $(BINDIR)/$(CONFIG)/resource_quota_test
 secure_channel_create_test: $(BINDIR)/$(CONFIG)/secure_channel_create_test
 secure_endpoint_test: $(BINDIR)/$(CONFIG)/secure_endpoint_test
 sequential_connectivity_test: $(BINDIR)/$(CONFIG)/sequential_connectivity_test
@@ -1326,6 +1327,7 @@ buildtests_c: privatelibs_c \
 $(BINDIR)/$(CONFIG)/murmur_hash_test \
 $(BINDIR)/$(CONFIG)/no_server_test \
 $(BINDIR)/$(CONFIG)/resolve_address_test \
+$(BINDIR)/$(CONFIG)/resource_quota_test \
 $(BINDIR)/$(CONFIG)/secure_channel_create_test \
 $(BINDIR)/$(CONFIG)/secure_endpoint_test \
 $(BINDIR)/$(CONFIG)/sequential_connectivity_test \
@@ -1711,6 +1713,8 @@ test_c: buildtests_c
 $(Q) $(BINDIR)/$(CONFIG)/no_server_test || ( echo test no_server_test failed ; exit 1 )
 $(E) "[RUN] Testing resolve_address_test"
 $(Q) $(BINDIR)/$(CONFIG)/resolve_address_test || ( echo test resolve_address_test failed ; exit 1 )
+$(E) "[RUN] Testing resource_quota_test"
+$(Q) $(BINDIR)/$(CONFIG)/resource_quota_test || ( echo test resource_quota_test failed ; exit 1 )
 $(E) "[RUN] Testing secure_channel_create_test"
 $(Q) $(BINDIR)/$(CONFIG)/secure_channel_create_test || ( echo test secure_channel_create_test failed ; exit 1 )
 $(E) "[RUN] Testing secure_endpoint_test"
@@ -2594,6 +2598,7 @@ LIBGRPC_SRC = \
 src/core/lib/iomgr/pollset_windows.c \
 src/core/lib/iomgr/resolve_address_posix.c \
 src/core/lib/iomgr/resolve_address_windows.c \
+src/core/lib/iomgr/resource_quota.c \
 src/core/lib/iomgr/sockaddr_utils.c \
 src/core/lib/iomgr/socket_utils_common_posix.c \
 src/core/lib/iomgr/socket_utils_linux.c \
@@ -2875,6 +2880,7 @@ LIBGRPC_CRONET_SRC = \
 src/core/lib/iomgr/pollset_windows.c \
 src/core/lib/iomgr/resolve_address_posix.c \
 src/core/lib/iomgr/resolve_address_windows.c \
+src/core/lib/iomgr/resource_quota.c \
 src/core/lib/iomgr/sockaddr_utils.c \
 src/core/lib/iomgr/socket_utils_common_posix.c \
 src/core/lib/iomgr/socket_utils_linux.c \
@@ -3146,6 +3152,7 @@ LIBGRPC_TEST_UTIL_SRC = \
 src/core/lib/iomgr/pollset_windows.c \
 src/core/lib/iomgr/resolve_address_posix.c \
 src/core/lib/iomgr/resolve_address_windows.c \
+src/core/lib/iomgr/resource_quota.c \
 src/core/lib/iomgr/sockaddr_utils.c \
 src/core/lib/iomgr/socket_utils_common_posix.c \
 src/core/lib/iomgr/socket_utils_linux.c \
@@ -3343,6 +3350,7 @@ LIBGRPC_UNSECURE_SRC = \
 src/core/lib/iomgr/pollset_windows.c \
 src/core/lib/iomgr/resolve_address_posix.c \
 src/core/lib/iomgr/resolve_address_windows.c \
+src/core/lib/iomgr/resource_quota.c \
 src/core/lib/iomgr/sockaddr_utils.c \
 src/core/lib/iomgr/socket_utils_common_posix.c \
 src/core/lib/iomgr/socket_utils_linux.c \
@@ -3635,6 +3643,7 @@ LIBGRPC++_SRC = \
 src/cpp/common/channel_filter.cc \
 src/cpp/common/completion_queue_cc.cc \
 src/cpp/common/core_codegen.cc \
+src/cpp/common/resource_quota_cc.cc \
 src/cpp/common/rpc_method.cc \
 src/cpp/server/async_generic_service.cc \
 src/cpp/server/create_default_thread_pool.cc \
@@ -3679,6 +3688,7 @@ PUBLIC_HEADERS_CXX += \
 include/grpc++/impl/thd.h \
 include/grpc++/impl/thd_cxx11.h \
 include/grpc++/impl/thd_no_cxx11.h \
+include/grpc++/resource_quota.h \
 include/grpc++/security/auth_context.h \
 include/grpc++/security/auth_metadata_processor.h \
 include/grpc++/security/credentials.h \
@@ -4214,6 +4224,7 @@ LIBGRPC++_UNSECURE_SRC = \
 src/cpp/common/channel_filter.cc \
 src/cpp/common/completion_queue_cc.cc \
 src/cpp/common/core_codegen.cc \
+src/cpp/common/resource_quota_cc.cc \
 src/cpp/common/rpc_method.cc \
 src/cpp/server/async_generic_service.cc \
 src/cpp/server/create_default_thread_pool.cc \
@@ -4258,6 +4269,7 @@ PUBLIC_HEADERS_CXX += \
 include/grpc++/impl/thd.h \
 include/grpc++/impl/thd_cxx11.h \
 include/grpc++/impl/thd_no_cxx11.h \
+include/grpc++/resource_quota.h \
 include/grpc++/security/auth_context.h \
 include/grpc++/security/auth_metadata_processor.h \
 include/grpc++/security/credentials.h \
@@ -6692,6 +6704,7 @@ LIBEND2END_TESTS_SRC = \
 test/core/end2end/tests/registered_call.c \
 test/core/end2end/tests/request_with_flags.c \
 test/core/end2end/tests/request_with_payload.c \
+test/core/end2end/tests/resource_quota_server.c \
 test/core/end2end/tests/server_finishes_request.c \
 test/core/end2end/tests/shutdown_finishes_calls.c \
 test/core/end2end/tests/shutdown_finishes_tags.c \
@@ -6774,6 +6787,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
 test/core/end2end/tests/registered_call.c \
 test/core/end2end/tests/request_with_flags.c \
 test/core/end2end/tests/request_with_payload.c \
+test/core/end2end/tests/resource_quota_server.c \
 test/core/end2end/tests/server_finishes_request.c \
 test/core/end2end/tests/shutdown_finishes_calls.c \
 test/core/end2end/tests/shutdown_finishes_tags.c \
@@ -10138,6 +10152,38 @@ endif
 endif
+
+RESOURCE_QUOTA_TEST_SRC = \
+    test/core/iomgr/resource_quota_test.c \
+
+RESOURCE_QUOTA_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(RESOURCE_QUOTA_TEST_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/resource_quota_test: openssl_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/resource_quota_test: $(RESOURCE_QUOTA_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD] Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(RESOURCE_QUOTA_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/resource_quota_test
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/iomgr/resource_quota_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_resource_quota_test: $(RESOURCE_QUOTA_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(RESOURCE_QUOTA_TEST_OBJS:.o=.dep)
+endif
+endif
+
 SECURE_CHANNEL_CREATE_TEST_SRC = \
 test/core/surface/secure_channel_create_test.c \

binding.gyp (1 changed line)

@@ -604,6 +604,7 @@
 'src/core/lib/iomgr/pollset_windows.c',
 'src/core/lib/iomgr/resolve_address_posix.c',
 'src/core/lib/iomgr/resolve_address_windows.c',
+'src/core/lib/iomgr/resource_quota.c',
 'src/core/lib/iomgr/sockaddr_utils.c',
 'src/core/lib/iomgr/socket_utils_common_posix.c',
 'src/core/lib/iomgr/socket_utils_linux.c',

build.yaml (16 changed lines)

@@ -206,6 +206,7 @@ filegroups:
 - src/core/lib/iomgr/pollset_set_windows.h
 - src/core/lib/iomgr/pollset_windows.h
 - src/core/lib/iomgr/resolve_address.h
+- src/core/lib/iomgr/resource_quota.h
 - src/core/lib/iomgr/sockaddr.h
 - src/core/lib/iomgr/sockaddr_posix.h
 - src/core/lib/iomgr/sockaddr_utils.h
@@ -213,6 +214,7 @@ filegroups:
 - src/core/lib/iomgr/socket_utils_posix.h
 - src/core/lib/iomgr/socket_windows.h
 - src/core/lib/iomgr/tcp_client.h
+- src/core/lib/iomgr/tcp_client_posix.h
 - src/core/lib/iomgr/tcp_posix.h
 - src/core/lib/iomgr/tcp_server.h
 - src/core/lib/iomgr/tcp_windows.h
@@ -292,6 +294,7 @@ filegroups:
 - src/core/lib/iomgr/pollset_windows.c
 - src/core/lib/iomgr/resolve_address_posix.c
 - src/core/lib/iomgr/resolve_address_windows.c
+- src/core/lib/iomgr/resource_quota.c
 - src/core/lib/iomgr/sockaddr_utils.c
 - src/core/lib/iomgr/socket_utils_common_posix.c
 - src/core/lib/iomgr/socket_utils_linux.c
@@ -698,6 +701,7 @@ filegroups:
 - include/grpc++/impl/thd.h
 - include/grpc++/impl/thd_cxx11.h
 - include/grpc++/impl/thd_no_cxx11.h
+- include/grpc++/resource_quota.h
 - include/grpc++/security/auth_context.h
 - include/grpc++/security/auth_metadata_processor.h
 - include/grpc++/security/credentials.h
@@ -735,6 +739,7 @@ filegroups:
 - src/cpp/common/channel_filter.cc
 - src/cpp/common/completion_queue_cc.cc
 - src/cpp/common/core_codegen.cc
+- src/cpp/common/resource_quota_cc.cc
 - src/cpp/common/rpc_method.cc
 - src/cpp/server/async_generic_service.cc
 - src/cpp/server/create_default_thread_pool.cc
@@ -2354,6 +2359,17 @@ targets:
   - grpc
   - gpr_test_util
   - gpr
+- name: resource_quota_test
+  cpu_cost: 30
+  build: test
+  language: c
+  src:
+  - test/core/iomgr/resource_quota_test.c
+  deps:
+  - grpc_test_util
+  - grpc
+  - gpr_test_util
+  - gpr
 - name: secure_channel_create_test
   build: test
   language: c

config.m4 (1 changed line)

@@ -123,6 +123,7 @@ if test "$PHP_GRPC" != "no"; then
 src/core/lib/iomgr/pollset_windows.c \
 src/core/lib/iomgr/resolve_address_posix.c \
 src/core/lib/iomgr/resolve_address_windows.c \
+src/core/lib/iomgr/resource_quota.c \
 src/core/lib/iomgr/sockaddr_utils.c \
 src/core/lib/iomgr/socket_utils_common_posix.c \
 src/core/lib/iomgr/socket_utils_linux.c \

gRPC-Core.podspec (5 changed lines)

@@ -289,6 +289,7 @@ Pod::Spec.new do |s|
 'src/core/lib/iomgr/pollset_set_windows.h',
 'src/core/lib/iomgr/pollset_windows.h',
 'src/core/lib/iomgr/resolve_address.h',
+'src/core/lib/iomgr/resource_quota.h',
 'src/core/lib/iomgr/sockaddr.h',
 'src/core/lib/iomgr/sockaddr_posix.h',
 'src/core/lib/iomgr/sockaddr_utils.h',
@@ -296,6 +297,7 @@ Pod::Spec.new do |s|
 'src/core/lib/iomgr/socket_utils_posix.h',
 'src/core/lib/iomgr/socket_windows.h',
 'src/core/lib/iomgr/tcp_client.h',
+'src/core/lib/iomgr/tcp_client_posix.h',
 'src/core/lib/iomgr/tcp_posix.h',
 'src/core/lib/iomgr/tcp_server.h',
 'src/core/lib/iomgr/tcp_windows.h',
@@ -457,6 +459,7 @@ Pod::Spec.new do |s|
 'src/core/lib/iomgr/pollset_windows.c',
 'src/core/lib/iomgr/resolve_address_posix.c',
 'src/core/lib/iomgr/resolve_address_windows.c',
+'src/core/lib/iomgr/resource_quota.c',
 'src/core/lib/iomgr/sockaddr_utils.c',
 'src/core/lib/iomgr/socket_utils_common_posix.c',
 'src/core/lib/iomgr/socket_utils_linux.c',
@@ -670,6 +673,7 @@ Pod::Spec.new do |s|
 'src/core/lib/iomgr/pollset_set_windows.h',
 'src/core/lib/iomgr/pollset_windows.h',
 'src/core/lib/iomgr/resolve_address.h',
+'src/core/lib/iomgr/resource_quota.h',
 'src/core/lib/iomgr/sockaddr.h',
 'src/core/lib/iomgr/sockaddr_posix.h',
 'src/core/lib/iomgr/sockaddr_utils.h',
@@ -677,6 +681,7 @@ Pod::Spec.new do |s|
 'src/core/lib/iomgr/socket_utils_posix.h',
 'src/core/lib/iomgr/socket_windows.h',
 'src/core/lib/iomgr/tcp_client.h',
+'src/core/lib/iomgr/tcp_client_posix.h',
 'src/core/lib/iomgr/tcp_posix.h',
 'src/core/lib/iomgr/tcp_server.h',
 'src/core/lib/iomgr/tcp_windows.h',

grpc.def (5 changed lines)

@@ -94,6 +94,11 @@ EXPORTS
 grpc_header_nonbin_value_is_legal
 grpc_is_binary_header
 grpc_call_error_to_string
+grpc_resource_quota_create
+grpc_resource_quota_ref
+grpc_resource_quota_unref
+grpc_resource_quota_resize
+grpc_resource_quota_arg_vtable
 grpc_insecure_channel_create_from_fd
 grpc_server_add_insecure_channel_from_fd
 grpc_use_signal

grpc.gemspec (3 changed lines)

@@ -209,6 +209,7 @@ Gem::Specification.new do |s|
 s.files += %w( src/core/lib/iomgr/pollset_set_windows.h )
 s.files += %w( src/core/lib/iomgr/pollset_windows.h )
 s.files += %w( src/core/lib/iomgr/resolve_address.h )
+s.files += %w( src/core/lib/iomgr/resource_quota.h )
 s.files += %w( src/core/lib/iomgr/sockaddr.h )
 s.files += %w( src/core/lib/iomgr/sockaddr_posix.h )
 s.files += %w( src/core/lib/iomgr/sockaddr_utils.h )
@@ -216,6 +217,7 @@ Gem::Specification.new do |s|
 s.files += %w( src/core/lib/iomgr/socket_utils_posix.h )
 s.files += %w( src/core/lib/iomgr/socket_windows.h )
 s.files += %w( src/core/lib/iomgr/tcp_client.h )
+s.files += %w( src/core/lib/iomgr/tcp_client_posix.h )
 s.files += %w( src/core/lib/iomgr/tcp_posix.h )
 s.files += %w( src/core/lib/iomgr/tcp_server.h )
 s.files += %w( src/core/lib/iomgr/tcp_windows.h )
@@ -377,6 +379,7 @@ Gem::Specification.new do |s|
 s.files += %w( src/core/lib/iomgr/pollset_windows.c )
 s.files += %w( src/core/lib/iomgr/resolve_address_posix.c )
 s.files += %w( src/core/lib/iomgr/resolve_address_windows.c )
+s.files += %w( src/core/lib/iomgr/resource_quota.c )
 s.files += %w( src/core/lib/iomgr/sockaddr_utils.c )
 s.files += %w( src/core/lib/iomgr/socket_utils_common_posix.c )
 s.files += %w( src/core/lib/iomgr/socket_utils_linux.c )

include/grpc++/ext/reflection.grpc.pb.h (2 changed lines)

@@ -176,6 +176,8 @@ class ServerReflection GRPC_FINAL {
     }
   };
   typedef Service StreamedUnaryService;
+  typedef Service SplitStreamedService;
+  typedef Service StreamedService;
 };
 } // namespace v1alpha

include/grpc++/impl/codegen/method_handler_impl.h (13 changed lines)

@@ -236,6 +236,19 @@ class StreamedUnaryHandler
             ServerUnaryStreamer<RequestType, ResponseType>, true>(func) {}
 };
 
+template <class RequestType, class ResponseType>
+class SplitServerStreamingHandler
+    : public TemplatedBidiStreamingHandler<
+          ServerSplitStreamer<RequestType, ResponseType>, false> {
+ public:
+  explicit SplitServerStreamingHandler(
+      std::function<Status(ServerContext*,
+                           ServerSplitStreamer<RequestType, ResponseType>*)>
+          func)
+      : TemplatedBidiStreamingHandler<
+            ServerSplitStreamer<RequestType, ResponseType>, false>(func) {}
+};
+
 // Handle unknown method by returning UNIMPLEMENTED error.
 class UnknownMethodHandler : public MethodHandler {
  public:

@ -44,7 +44,6 @@
#include <grpc++/impl/codegen/time.h> #include <grpc++/impl/codegen/time.h>
#include <grpc/impl/codegen/compression_types.h> #include <grpc/impl/codegen/compression_types.h>
struct gpr_timespec;
struct grpc_metadata; struct grpc_metadata;
struct grpc_call; struct grpc_call;
struct census_context; struct census_context;

include/grpc++/impl/codegen/service_type.h (11 changed lines)

@@ -147,14 +147,15 @@ class Service {
     methods_[index].reset();
   }
-  void MarkMethodStreamedUnary(int index,
-                               MethodHandler* streamed_unary_method) {
+  void MarkMethodStreamed(int index, MethodHandler* streamed_method) {
     GPR_CODEGEN_ASSERT(methods_[index] && methods_[index]->handler() &&
-                       "Cannot mark an async or generic method Streamed Unary");
-    methods_[index]->SetHandler(streamed_unary_method);
+                       "Cannot mark an async or generic method Streamed");
+    methods_[index]->SetHandler(streamed_method);
     // From the server's point of view, streamed unary is a special
-    // case of BIDI_STREAMING that has 1 read and 1 write, in that order.
+    // case of BIDI_STREAMING that has 1 read and 1 write, in that order,
+    // and split server-side streaming is BIDI_STREAMING with 1 read and
+    // any number of writes, in that order
     methods_[index]->SetMethodType(::grpc::RpcMethod::BIDI_STREAMING);
   }

include/grpc++/impl/codegen/sync_stream.h (39 changed lines)

@@ -538,7 +538,7 @@ class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
 /// the \a NextMessageSize method to determine an upper-bound on the size of
 /// the message.
 /// A key difference relative to streaming: ServerUnaryStreamer
 /// must have exactly 1 Read and exactly 1 Write, in that order, to function
 /// correctly. Otherwise, the RPC is in error.
 template <class RequestType, class ResponseType>
 class ServerUnaryStreamer GRPC_FINAL
@@ -577,6 +577,43 @@ class ServerUnaryStreamer GRPC_FINAL
   bool write_done_;
 };
 
+/// A class to represent a flow-controlled server-side streaming call.
+/// This is something of a hybrid between server-side and bidi streaming.
+/// This is invoked through a server-side streaming call on the client side,
+/// but the server responds to it as though it were a bidi streaming call that
+/// must first have exactly 1 Read and then any number of Writes.
+template <class RequestType, class ResponseType>
+class ServerSplitStreamer GRPC_FINAL
+    : public ServerReaderWriterInterface<ResponseType, RequestType> {
+ public:
+  ServerSplitStreamer(Call* call, ServerContext* ctx)
+      : body_(call, ctx), read_done_(false) {}
+
+  void SendInitialMetadata() GRPC_OVERRIDE { body_.SendInitialMetadata(); }
+
+  bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
+    return body_.NextMessageSize(sz);
+  }
+
+  bool Read(RequestType* request) GRPC_OVERRIDE {
+    if (read_done_) {
+      return false;
+    }
+    read_done_ = true;
+    return body_.Read(request);
+  }
+
+  using WriterInterface<ResponseType>::Write;
+  bool Write(const ResponseType& response,
+             const WriteOptions& options) GRPC_OVERRIDE {
+    return read_done_ && body_.Write(response, options);
+  }
+
+ private:
+  internal::ServerReaderWriterBody<ResponseType, RequestType> body_;
+  bool read_done_;
+};
+
 } // namespace grpc
 
 #endif  // GRPCXX_IMPL_CODEGEN_SYNC_STREAM_H
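For context, a minimal sketch of a handler written against the new ServerSplitStreamer; the method and message types (StreamedList, EchoRequest, EchoResponse) are hypothetical, not part of this commit:

    // Hypothetical split-streamed override generated from a server-streaming
    // RPC: exactly one Read, then any number of flow-controlled Writes,
    // matching the contract documented above.
    ::grpc::Status StreamedList(
        ::grpc::ServerContext* context,
        ::grpc::ServerSplitStreamer<EchoRequest, EchoResponse>* streamer) {
      EchoRequest req;
      if (!streamer->Read(&req)) {  // the single permitted Read
        return ::grpc::Status(::grpc::StatusCode::INTERNAL, "missing request");
      }
      EchoResponse resp;
      for (int i = 0; i < 3; ++i) {  // any number of Writes after the Read
        resp.set_message(req.message());
        if (!streamer->Write(resp)) break;  // Write fails if attempted before the Read
      }
      return ::grpc::Status::OK;
    }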

include/grpc++/resource_quota.h (new file, 70 lines)

@@ -0,0 +1,70 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_RESOURCE_QUOTA_H
#define GRPCXX_RESOURCE_QUOTA_H
struct grpc_resource_quota;
#include <grpc++/impl/codegen/config.h>
namespace grpc {
/// ResourceQuota represents a bound on memory usage by the gRPC library.
/// A ResourceQuota can be attached to a server (via ServerBuilder), or a client
/// channel (via ChannelArguments). gRPC will attempt to keep memory used by
/// all attached entities below the ResourceQuota bound.
class ResourceQuota GRPC_FINAL {
public:
explicit ResourceQuota(const grpc::string& name);
ResourceQuota();
~ResourceQuota();
/// Resize this ResourceQuota to a new size. If new_size is smaller than the
/// current size of the pool, memory usage will be monotonically decreased
/// until it falls under new_size. No time bound is given for this to occur
/// however.
ResourceQuota& Resize(size_t new_size);
grpc_resource_quota* c_resource_quota() const { return impl_; }
private:
ResourceQuota(const ResourceQuota& rhs);
ResourceQuota& operator=(const ResourceQuota& rhs);
grpc_resource_quota* const impl_;
};
} // namespace grpc
#endif // GRPCXX_RESOURCE_QUOTA_H
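A minimal sketch of constructing and sizing a quota with this new public header; the quota name and the 64 MiB figure are illustrative:

    #include <grpc++/resource_quota.h>

    grpc::ResourceQuota quota("test_quota");  // name is used for tracing
    // Bound gRPC's memory usage at 64 MiB. Per the comment above, shrinking
    // below current usage only takes effect as memory is gradually released.
    quota.Resize(64 * 1024 * 1024);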

include/grpc++/server_builder.h (8 changed lines)

@@ -43,9 +43,12 @@
 #include <grpc++/support/config.h>
 #include <grpc/compression.h>
 
+struct grpc_resource_quota;
+
 namespace grpc {
 
 class AsyncGenericService;
+class ResourceQuota;
 class CompletionQueue;
 class RpcService;
 class Server;
@@ -61,6 +64,7 @@ class ServerBuilderPluginTest;
 class ServerBuilder {
  public:
   ServerBuilder();
+  ~ServerBuilder();
 
   /// Register a service. This call does not take ownership of the service.
   /// The service must exist for the lifetime of the \a Server instance returned
@@ -113,6 +117,9 @@ class ServerBuilder {
   ServerBuilder& SetDefaultCompressionAlgorithm(
       grpc_compression_algorithm algorithm);
 
+  /// Set the attached buffer pool for this server
+  ServerBuilder& SetResourceQuota(const ResourceQuota& resource_quota);
+
   ServerBuilder& SetOption(std::unique_ptr<ServerBuilderOption> option);
 
   /// Tries to bind \a server to the given \a addr.
@@ -187,6 +194,7 @@ class ServerBuilder {
   std::vector<ServerCompletionQueue*> cqs_;
   std::shared_ptr<ServerCredentials> creds_;
   std::vector<std::unique_ptr<ServerBuilderPlugin>> plugins_;
+  grpc_resource_quota* resource_quota_;
   AsyncGenericService* generic_service_;
   struct {
     bool is_set;
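Attaching a quota to a server through the new ServerBuilder hook might look like the sketch below; the address, size, and commented-out service are illustrative:

    grpc::ResourceQuota quota("server_quota");
    quota.Resize(256 * 1024 * 1024);

    grpc::ServerBuilder builder;
    builder.SetResourceQuota(quota);  // quota is shared by everything this server allocates
    builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
    // builder.RegisterService(&service);
    std::unique_ptr<grpc::Server> server = builder.BuildAndStart();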

include/grpc++/support/channel_arguments.h (8 changed lines)

@@ -46,6 +46,8 @@ namespace testing {
 class ChannelArgumentsTest;
 }  // namespace testing
 
+class ResourceQuota;
+
 /// Options for channel creation. The user can use generic setters to pass
 /// key value pairs down to c channel creation code. For grpc related options,
 /// concrete setters are provided.
@@ -80,6 +82,9 @@ class ChannelArguments {
   /// The given string will be sent at the front of the user agent string.
   void SetUserAgentPrefix(const grpc::string& user_agent_prefix);
 
+  /// The given buffer pool will be attached to the constructed channel
+  void SetResourceQuota(const ResourceQuota& resource_quota);
+
   // Generic channel argument setters. Only for advanced use cases.
   /// Set an integer argument \a value under \a key.
   void SetInt(const grpc::string& key, int value);
@@ -88,6 +93,9 @@ class ChannelArguments {
   /// Set a pointer argument \a value under \a key. Owership is not transferred.
   void SetPointer(const grpc::string& key, void* value);
 
+  void SetPointerWithVtable(const grpc::string& key, void* value,
+                            const grpc_arg_pointer_vtable* vtable);
+
   /// Set a textual argument \a value under \a key.
   void SetString(const grpc::string& key, const grpc::string& value);
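On the client side, the quota travels through ChannelArguments (presumably via the new SetPointerWithVtable and the vtable exported from grpc.h below). A sketch; the target and credentials are illustrative:

    grpc::ResourceQuota quota("client_quota");
    grpc::ChannelArguments args;
    args.SetResourceQuota(quota);
    std::shared_ptr<grpc::Channel> channel = grpc::CreateCustomChannel(
        "localhost:50051", grpc::InsecureChannelCredentials(), args);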

include/grpc/grpc.h (17 changed lines)

@@ -401,6 +401,23 @@ GRPCAPI int grpc_is_binary_header(const char *key, size_t length);
 /** Convert grpc_call_error values to a string */
 GRPCAPI const char *grpc_call_error_to_string(grpc_call_error error);
 
+/** Create a buffer pool */
+GRPCAPI grpc_resource_quota *grpc_resource_quota_create(const char *trace_name);
+
+/** Add a reference to a buffer pool */
+GRPCAPI void grpc_resource_quota_ref(grpc_resource_quota *resource_quota);
+
+/** Drop a reference to a buffer pool */
+GRPCAPI void grpc_resource_quota_unref(grpc_resource_quota *resource_quota);
+
+/** Update the size of a buffer pool */
+GRPCAPI void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
+                                        size_t new_size);
+
+/** Fetch a vtable for a grpc_channel_arg that points to a grpc_resource_quota
+ */
+GRPCAPI const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void);
+
 #ifdef __cplusplus
 }
 #endif
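At the C-core level the same wiring is explicit: the quota rides in a channel arg keyed by GRPC_ARG_BUFFER_POOL ("grpc.resource_quota", defined in grpc_types.h below). A sketch under the assumption that the arg vtable takes its own reference to the quota; the name and 128 MiB size are illustrative:

    grpc_resource_quota *rq = grpc_resource_quota_create("core_quota");
    grpc_resource_quota_resize(rq, 128 * 1024 * 1024);

    grpc_arg arg;
    arg.type = GRPC_ARG_POINTER;
    arg.key = (char *)GRPC_ARG_BUFFER_POOL; /* "grpc.resource_quota" */
    arg.value.pointer.p = rq;
    arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
    grpc_channel_args args = {1, &arg};

    grpc_server *server = grpc_server_create(&args, NULL);
    /* assuming the vtable copied a ref above, our ref can be dropped */
    grpc_resource_quota_unref(rq);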

include/grpc/impl/codegen/grpc_types.h (5 changed lines)

@@ -201,6 +201,9 @@ typedef struct {
 #define GRPC_ARG_MAX_METADATA_SIZE "grpc.max_metadata_size"
 /** If non-zero, allow the use of SO_REUSEPORT if it's available (default 1) */
 #define GRPC_ARG_ALLOW_REUSEPORT "grpc.so_reuseport"
+/** If non-zero, a pointer to a buffer pool (use grpc_resource_quota_arg_vtable
+    to fetch an appropriate pointer arg vtable */
+#define GRPC_ARG_BUFFER_POOL "grpc.resource_quota"
 /** Service config data, to be passed to subchannels.
     Not intended for external use. */
 #define GRPC_ARG_SERVICE_CONFIG "grpc.service_config"
@@ -460,6 +463,8 @@ typedef struct grpc_op {
   } data;
 } grpc_op;
 
+typedef struct grpc_resource_quota grpc_resource_quota;
+
 #ifdef __cplusplus
 }
 #endif

package.xml (3 changed lines)

@@ -216,6 +216,7 @@
 <file baseinstalldir="/" name="src/core/lib/iomgr/pollset_set_windows.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/pollset_windows.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/resolve_address.h" role="src" />
+<file baseinstalldir="/" name="src/core/lib/iomgr/resource_quota.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/sockaddr.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/sockaddr_posix.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/sockaddr_utils.h" role="src" />
@@ -223,6 +224,7 @@
 <file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils_posix.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/socket_windows.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/tcp_client.h" role="src" />
+<file baseinstalldir="/" name="src/core/lib/iomgr/tcp_client_posix.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/tcp_posix.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/tcp_server.h" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/tcp_windows.h" role="src" />
@@ -384,6 +386,7 @@
 <file baseinstalldir="/" name="src/core/lib/iomgr/pollset_windows.c" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/resolve_address_posix.c" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/resolve_address_windows.c" role="src" />
+<file baseinstalldir="/" name="src/core/lib/iomgr/resource_quota.c" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/sockaddr_utils.c" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils_common_posix.c" role="src" />
 <file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils_linux.c" role="src" />

@ -624,7 +624,7 @@ void PrintHeaderServerMethodStreamedUnary(
printer->Indent(); printer->Indent();
printer->Print(*vars, printer->Print(*vars,
"WithStreamedUnaryMethod_$Method$() {\n" "WithStreamedUnaryMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamedUnary($Idx$,\n" " ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::StreamedUnaryHandler< $Request$, " " new ::grpc::StreamedUnaryHandler< $Request$, "
"$Response$>(std::bind" "$Response$>(std::bind"
"(&WithStreamedUnaryMethod_$Method$<BaseClass>::" "(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
@@ -656,6 +656,58 @@
  }
}

+void PrintHeaderServerMethodSplitStreaming(
+    Printer *printer, const Method *method,
+    std::map<grpc::string, grpc::string> *vars) {
+  (*vars)["Method"] = method->name();
+  (*vars)["Request"] = method->input_type_name();
+  (*vars)["Response"] = method->output_type_name();
+  if (method->ServerOnlyStreaming()) {
+    printer->Print(*vars, "template <class BaseClass>\n");
+    printer->Print(*vars,
+                   "class WithSplitStreamingMethod_$Method$ : "
+                   "public BaseClass {\n");
+    printer->Print(
+        " private:\n"
+        "  void BaseClassMustBeDerivedFromService(const Service *service) "
+        "{}\n");
+    printer->Print(" public:\n");
+    printer->Indent();
+    printer->Print(*vars,
+                   "WithSplitStreamingMethod_$Method$() {\n"
+                   "  ::grpc::Service::MarkMethodStreamed($Idx$,\n"
+                   "    new ::grpc::SplitServerStreamingHandler< $Request$, "
+                   "$Response$>(std::bind"
+                   "(&WithSplitStreamingMethod_$Method$<BaseClass>::"
+                   "Streamed$Method$, this, std::placeholders::_1, "
+                   "std::placeholders::_2)));\n"
+                   "}\n");
+    printer->Print(*vars,
+                   "~WithSplitStreamingMethod_$Method$() GRPC_OVERRIDE {\n"
+                   "  BaseClassMustBeDerivedFromService(this);\n"
+                   "}\n");
+    printer->Print(
+        *vars,
+        "// disable regular version of this method\n"
+        "::grpc::Status $Method$("
+        "::grpc::ServerContext* context, const $Request$* request, "
+        "::grpc::ServerWriter< $Response$>* writer) GRPC_FINAL GRPC_OVERRIDE "
+        "{\n"
+        "  abort();\n"
+        "  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
+        "}\n");
+    printer->Print(*vars,
+                   "// replace default version of method with split streamed\n"
+                   "virtual ::grpc::Status Streamed$Method$("
+                   "::grpc::ServerContext* context, "
+                   "::grpc::ServerSplitStreamer< "
+                   "$Request$,$Response$>* server_split_streamer)"
+                   " = 0;\n");
+    printer->Outdent();
+    printer->Print(*vars, "};\n");
+  }
+}
+
void PrintHeaderServerMethodGeneric(
    Printer *printer, const Method *method,
    std::map<grpc::string, grpc::string> *vars) {
@@ -844,6 +896,48 @@ void PrintHeaderService(Printer *printer, const Service *service,
  }
  printer->Print(" StreamedUnaryService;\n");

+  // Server side - controlled server-side streaming
+  for (int i = 0; i < service->method_count(); ++i) {
+    (*vars)["Idx"] = as_string(i);
+    PrintHeaderServerMethodSplitStreaming(printer, service->method(i).get(),
+                                          vars);
+  }
+
+  printer->Print("typedef ");
+  for (int i = 0; i < service->method_count(); ++i) {
+    (*vars)["method_name"] = service->method(i).get()->name();
+    if (service->method(i)->ServerOnlyStreaming()) {
+      printer->Print(*vars, "WithSplitStreamingMethod_$method_name$<");
+    }
+  }
+  printer->Print("Service");
+  for (int i = 0; i < service->method_count(); ++i) {
+    if (service->method(i)->ServerOnlyStreaming()) {
+      printer->Print(" >");
+    }
+  }
+  printer->Print(" SplitStreamedService;\n");
+
+  // Server side - typedef for controlled both unary and server-side streaming
+  printer->Print("typedef ");
+  for (int i = 0; i < service->method_count(); ++i) {
+    (*vars)["method_name"] = service->method(i).get()->name();
+    if (service->method(i)->ServerOnlyStreaming()) {
+      printer->Print(*vars, "WithSplitStreamingMethod_$method_name$<");
+    }
+    if (service->method(i)->NoStreaming()) {
+      printer->Print(*vars, "WithStreamedUnaryMethod_$method_name$<");
+    }
+  }
+  printer->Print("Service");
+  for (int i = 0; i < service->method_count(); ++i) {
+    if (service->method(i)->NoStreaming() ||
+        service->method(i)->ServerOnlyStreaming()) {
+      printer->Print(" >");
+    }
+  }
+  printer->Print(" StreamedService;\n");
+
  printer->Outdent();
  printer->Print("};\n");
  printer->Print(service->GetTrailingComments().c_str());

@@ -150,8 +150,8 @@ static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
  c->tcp = NULL;
  grpc_closure_init(&c->connected, connected, c);
  grpc_tcp_client_connect(exec_ctx, &c->connected, &c->tcp,
-                          args->interested_parties, args->addr, args->addr_len,
-                          args->deadline);
+                          args->interested_parties, args->channel_args,
+                          args->addr, args->addr_len, args->deadline);
}

static const grpc_connector_vtable connector_vtable = {

@@ -44,6 +44,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/tcp_client_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
@@ -65,9 +66,8 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
  int flags = fcntl(fd, F_GETFL, 0);
  GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
-  grpc_endpoint *client =
-      grpc_tcp_create(grpc_fd_create(fd, "client"),
-                      GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "fd-client");
+  grpc_endpoint *client = grpc_tcp_client_create_from_fd(
+      &exec_ctx, grpc_fd_create(fd, "client"), args, "fd-client");
  grpc_transport *transport =
      grpc_create_chttp2_transport(&exec_ctx, final_args, client, 1);

@@ -208,9 +208,10 @@ static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
  GPR_ASSERT(c->connecting_endpoint == NULL);
  gpr_mu_unlock(&c->mu);
  grpc_closure_init(&c->connected_closure, connected, c);
-  grpc_tcp_client_connect(
-      exec_ctx, &c->connected_closure, &c->newly_connecting_endpoint,
-      args->interested_parties, args->addr, args->addr_len, args->deadline);
+  grpc_tcp_client_connect(exec_ctx, &c->connected_closure,
+                          &c->newly_connecting_endpoint,
+                          args->interested_parties, args->channel_args,
+                          args->addr, args->addr_len, args->deadline);
}

static const grpc_connector_vtable connector_vtable = {

@@ -139,8 +139,8 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
    goto error;
  }
-  err =
-      grpc_tcp_server_create(NULL, grpc_server_get_channel_args(server), &tcp);
+  err = grpc_tcp_server_create(&exec_ctx, NULL,
+                               grpc_server_get_channel_args(server), &tcp);
  if (err != GRPC_ERROR_NONE) {
    goto error;
  }

@@ -57,8 +57,12 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
  char *name;
  gpr_asprintf(&name, "fd:%d", fd);
-  grpc_endpoint *server_endpoint = grpc_tcp_create(
-      grpc_fd_create(fd, name), GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);
+  grpc_resource_quota *resource_quota = grpc_resource_quota_from_channel_args(
+      grpc_server_get_channel_args(server));
+  grpc_endpoint *server_endpoint =
+      grpc_tcp_create(grpc_fd_create(fd, name), resource_quota,
+                      GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
  gpr_free(name);

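The two server-side fd paths above establish the ownership pattern for quotas: look the quota up in the server's channel args, hold a reference while the endpoint is created, and drop it once the endpoint has taken its own. On the application side the quota travels as an ordinary channel arg. A minimal sketch, assuming the public surface this change adds (grpc_resource_quota_create/resize/unref and grpc_resource_quota_arg_vtable) together with the GRPC_ARG_BUFFER_POOL key that the httpcli change further down uses:

    #include <grpc/grpc.h>

    /* Illustrative sketch: create a server whose endpoints draw their read
       buffers from a 64MB resource quota. */
    static grpc_server *server_with_quota(void) {
      grpc_resource_quota *rq = grpc_resource_quota_create("server-quota");
      grpc_resource_quota_resize(rq, 64 * 1024 * 1024);
      grpc_arg arg;
      arg.key = GRPC_ARG_BUFFER_POOL;
      arg.type = GRPC_ARG_POINTER;
      arg.value.pointer.p = rq;
      arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
      grpc_channel_args args = {1, &arg};
      grpc_server *server = grpc_server_create(&args, NULL);
      grpc_resource_quota_unref(rq); /* the channel-args copy holds its own ref */
      return server;
    }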
@@ -271,7 +271,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
  memset(server_state, 0, sizeof(*server_state));
  grpc_closure_init(&server_state->tcp_server_shutdown_complete,
                    tcp_server_shutdown_complete, server_state);
-  err = grpc_tcp_server_create(&server_state->tcp_server_shutdown_complete,
+  err = grpc_tcp_server_create(&exec_ctx,
+                               &server_state->tcp_server_shutdown_complete,
                               grpc_server_get_channel_args(server), &tcp);
  if (err != GRPC_ERROR_NONE) {
    goto error;

@@ -114,6 +114,20 @@ static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
                                grpc_chttp2_transport *t, grpc_chttp2_stream *s,
                                grpc_error *error);

+static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
+                             grpc_error *error);
+static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
+                                    grpc_error *error);
+static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
+                                  grpc_error *error);
+static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
+                                         grpc_error *error);
+
+static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
+                                  grpc_chttp2_transport *t);
+static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
+                                       grpc_chttp2_transport *t);
+
static void close_transport_locked(grpc_exec_ctx *exec_ctx,
                                   grpc_chttp2_transport *t, grpc_error *error);
static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -219,8 +233,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
  t->is_client = is_client;
  t->outgoing_window = DEFAULT_WINDOW;
  t->incoming_window = DEFAULT_WINDOW;
-  t->stream_lookahead = DEFAULT_WINDOW;
-  t->connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
  t->ping_counter = 1;
  t->pings.next = t->pings.prev = &t->pings;
  t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
@@ -241,6 +253,11 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
  grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t);
  grpc_closure_init(&t->read_action_begin, read_action_begin, t);
  grpc_closure_init(&t->read_action_locked, read_action_locked, t);
+  grpc_closure_init(&t->benign_reclaimer, benign_reclaimer, t);
+  grpc_closure_init(&t->destructive_reclaimer, destructive_reclaimer, t);
+  grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t);
+  grpc_closure_init(&t->destructive_reclaimer_locked,
+                    destructive_reclaimer_locked, t);

  grpc_chttp2_goaway_parser_init(&t->goaway_parser);
  grpc_chttp2_hpack_parser_init(&t->hpack_parser);
@@ -298,14 +315,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
          t->next_stream_id = (uint32_t)value;
        }
      }
-    } else if (0 == strcmp(channel_args->args[i].key,
-                           GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES)) {
-      const grpc_integer_options options = {-1, 5, INT_MAX};
-      const int value =
-          grpc_channel_arg_get_integer(&channel_args->args[i], options);
-      if (value >= 0) {
-        t->stream_lookahead = (uint32_t)value;
-      }
    } else if (0 == strcmp(channel_args->args[i].key,
                           GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER)) {
      const grpc_integer_options options = {-1, 0, INT_MAX};
@@ -321,24 +330,26 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
        grpc_chttp2_setting_id setting_id;
        grpc_integer_options integer_options;
        bool availability[2] /* server, client */;
-      } settings_map[] = {
-          {GRPC_ARG_MAX_CONCURRENT_STREAMS,
-           GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
-           {-1, 0, INT_MAX},
-           {true, false}},
-          {GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER,
-           GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
-           {-1, 0, INT_MAX},
-           {true, true}},
-          {GRPC_ARG_MAX_METADATA_SIZE,
-           GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
-           {-1, 0, INT_MAX},
-           {true, true}},
-          {GRPC_ARG_HTTP2_MAX_FRAME_SIZE,
-           GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
-           {-1, 16384, 16777215},
-           {true, true}},
-      };
+      } settings_map[] = {{GRPC_ARG_MAX_CONCURRENT_STREAMS,
+                           GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
+                           {-1, 0, INT32_MAX},
+                           {true, false}},
+                          {GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER,
+                           GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
+                           {-1, 0, INT32_MAX},
+                           {true, true}},
+                          {GRPC_ARG_MAX_METADATA_SIZE,
+                           GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+                           {-1, 0, INT32_MAX},
+                           {true, true}},
+                          {GRPC_ARG_HTTP2_MAX_FRAME_SIZE,
+                           GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
+                           {-1, 16384, 16777215},
+                           {true, true}},
+                          {GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES,
+                           GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+                           {-1, 5, INT32_MAX},
+                           {true, true}}};
      for (j = 0; j < (int)GPR_ARRAY_SIZE(settings_map); j++) {
        if (0 == strcmp(channel_args->args[i].key,
                        settings_map[j].channel_arg_name)) {
@@ -362,6 +373,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
  }

  grpc_chttp2_initiate_write(exec_ctx, t, false, "init");
+  post_benign_reclaimer(exec_ctx, t);
}

static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
@@ -460,13 +472,9 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
  if (server_data) {
    s->id = (uint32_t)(uintptr_t)server_data;
-    s->outgoing_window = t->settings[GRPC_PEER_SETTINGS]
-                                    [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
-    s->incoming_window = s->max_recv_bytes =
-        t->settings[GRPC_SENT_SETTINGS]
-                   [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
    *t->accepting_stream = s;
    grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+    post_destructive_reclaimer(exec_ctx, t);
  }

  GPR_TIMER_END("init_stream", 0);
@@ -492,6 +500,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
  }

  grpc_chttp2_list_remove_stalled_by_transport(t, s);
+  grpc_chttp2_list_remove_stalled_by_stream(t, s);

  for (int i = 0; i < STREAM_LIST_COUNT; i++) {
    if (s->included[i]) {
@@ -501,6 +510,23 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
    }
  }

+  if (s->incoming_window_delta > 0) {
+    t->retract_incoming_window += s->incoming_window_delta;
+  } else if (s->incoming_window_delta < 0) {
+    int64_t give_back = -s->incoming_window_delta;
+    if (give_back > t->retract_incoming_window) {
+      give_back -= t->retract_incoming_window;
+      t->retract_incoming_window = 0;
+      GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("destroy", t, announce_incoming_window,
+                                        give_back);
+      GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("destroy", t, incoming_window,
+                                        give_back);
+      grpc_chttp2_initiate_write(exec_ctx, t, false, "destroy_stream");
+    } else {
+      t->retract_incoming_window -= give_back;
+    }
+  }
+
  GPR_ASSERT(s->send_initial_metadata_finished == NULL);
  GPR_ASSERT(s->fetching_send_message == NULL);
  GPR_ASSERT(s->send_trailing_metadata_finished == NULL);
@@ -675,6 +701,13 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
    close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
  }

+  if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED) {
+    t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SENT;
+    if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+      close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE("goaway sent"));
+    }
+  }
+
  grpc_chttp2_end_write(exec_ctx, t, GRPC_ERROR_REF(error));

  switch (t->write_state) {
@@ -750,7 +783,6 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
                                     grpc_chttp2_transport *t) {
  grpc_chttp2_stream *s;
-  uint32_t stream_incoming_window;
  /* start streams where we have free grpc_chttp2_stream ids and free
   * concurrency */
  while (t->next_stream_id <= MAX_CLIENT_STREAM_ID &&
@@ -773,13 +805,8 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
                             "no_more_stream_ids");
    }

-    s->outgoing_window = t->settings[GRPC_PEER_SETTINGS]
-                                    [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
-    s->incoming_window = stream_incoming_window =
-        t->settings[GRPC_SENT_SETTINGS]
-                   [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
-    s->max_recv_bytes = GPR_MAX(stream_incoming_window, s->max_recv_bytes);
    grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+    post_destructive_reclaimer(exec_ctx, t);
    grpc_chttp2_become_writable(exec_ctx, t, s, true, "new_stream");
  }
  /* cancel out streams that will never be started */
@@ -1104,8 +1131,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
    s->recv_message = op->recv_message;
    if (s->id != 0 &&
        (s->incoming_frames.head == NULL || s->incoming_frames.head->is_tail)) {
-      incoming_byte_stream_update_flow_control(exec_ctx, t, s,
-                                               t->stream_lookahead, 0);
+      incoming_byte_stream_update_flow_control(exec_ctx, t, s, 5, 0);
    }
    grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
  }
@@ -1186,6 +1212,14 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
  gpr_free(msg);
}

+static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+                        grpc_chttp2_error_code error, gpr_slice data) {
+  t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
+  grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)error, data,
+                            &t->qbuf);
+  grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
+}
+
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
                                        void *stream_op,
                                        grpc_error *error_ignored) {
@@ -1200,15 +1234,9 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
  }

  if (op->send_goaway) {
-    t->sent_goaway = 1;
-    grpc_chttp2_goaway_append(
-        t->last_new_stream_id,
-        (uint32_t)grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
-        gpr_slice_ref(*op->goaway_message), &t->qbuf);
-    close_transport = grpc_chttp2_stream_map_size(&t->stream_map) == 0
-                          ? GRPC_ERROR_CREATE("GOAWAY sent")
-                          : GRPC_ERROR_NONE;
-    grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
+    send_goaway(exec_ctx, t,
+                grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
+                gpr_slice_ref(*op->goaway_message));
  }

  if (op->set_accept_stream) {
@@ -1342,10 +1370,14 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
    s->data_parser.parsing_frame = NULL;
  }

-  if (grpc_chttp2_stream_map_size(&t->stream_map) == 0 && t->sent_goaway) {
-    close_transport_locked(
-        exec_ctx, t, GRPC_ERROR_CREATE_REFERENCING(
-                         "Last stream closed after sending GOAWAY", &error, 1));
+  if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+    post_benign_reclaimer(exec_ctx, t);
+    if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SENT) {
+      close_transport_locked(
+          exec_ctx, t,
+          GRPC_ERROR_CREATE_REFERENCING(
+              "Last stream closed after sending GOAWAY", &error, 1));
+    }
  }
  if (grpc_chttp2_list_remove_writable_stream(t, s)) {
    GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:remove_stream");
@@ -1679,36 +1711,6 @@ static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
  GRPC_ERROR_UNREF(error);
}

-/** update window from a settings change */
-typedef struct {
-  grpc_chttp2_transport *t;
-  grpc_exec_ctx *exec_ctx;
-} update_global_window_args;
-
-static void update_global_window(void *args, uint32_t id, void *stream) {
-  update_global_window_args *a = args;
-  grpc_chttp2_transport *t = a->t;
-  grpc_chttp2_stream *s = stream;
-  int was_zero;
-  int is_zero;
-  int64_t initial_window_update = t->initial_window_update;
-
-  if (initial_window_update > 0) {
-    was_zero = s->outgoing_window <= 0;
-    GRPC_CHTTP2_FLOW_CREDIT_STREAM("settings", t, s, outgoing_window,
-                                   initial_window_update);
-    is_zero = s->outgoing_window <= 0;
-
-    if (was_zero && !is_zero) {
-      grpc_chttp2_become_writable(a->exec_ctx, t, s, true,
-                                  "update_global_window");
-    }
-  } else {
-    GRPC_CHTTP2_FLOW_DEBIT_STREAM("settings", t, s, outgoing_window,
-                                  -initial_window_update);
-  }
-}
-
/*******************************************************************************
 * INPUT PROCESSING - PARSING
 */
@@ -1792,21 +1794,14 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
    GPR_TIMER_BEGIN("post_parse_locked", 0);
    if (t->initial_window_update != 0) {
-      update_global_window_args args = {t, exec_ctx};
-      grpc_chttp2_stream_map_for_each(&t->stream_map, update_global_window,
-                                      &args);
+      if (t->initial_window_update > 0) {
+        grpc_chttp2_stream *s;
+        while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
+          grpc_chttp2_list_add_writable_stream(t, s);
+        }
+      }
      t->initial_window_update = 0;
    }
-    /* handle higher level things */
-    if (t->incoming_window < t->connection_window_target * 3 / 4) {
-      int64_t announce_bytes = t->connection_window_target - t->incoming_window;
-      GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", t, announce_incoming_window,
-                                        announce_bytes);
-      GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", t, incoming_window,
-                                        announce_bytes);
-      grpc_chttp2_initiate_write(exec_ctx, t, false, "global incoming window");
-    }
    GPR_TIMER_END("post_parse_locked", 0);
  }
@@ -1888,10 +1883,12 @@ static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
                                                     size_t max_size_hint,
                                                     size_t have_already) {
  uint32_t max_recv_bytes;
+  uint32_t initial_window_size =
+      t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];

  /* clamp max recv hint to an allowable size */
-  if (max_size_hint >= UINT32_MAX - t->stream_lookahead) {
-    max_recv_bytes = UINT32_MAX - t->stream_lookahead;
+  if (max_size_hint >= UINT32_MAX - initial_window_size) {
+    max_recv_bytes = UINT32_MAX - initial_window_size;
  } else {
    max_recv_bytes = (uint32_t)max_size_hint;
  }
@@ -1904,21 +1901,32 @@ static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
  }

  /* add some small lookahead to keep pipelines flowing */
-  GPR_ASSERT(max_recv_bytes <= UINT32_MAX - t->stream_lookahead);
-  max_recv_bytes += t->stream_lookahead;
-  if (s->max_recv_bytes < max_recv_bytes) {
-    uint32_t add_max_recv_bytes = max_recv_bytes - s->max_recv_bytes;
-    bool new_window_write_is_covered_by_poller =
-        s->max_recv_bytes < have_already;
-    GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, max_recv_bytes,
-                                   add_max_recv_bytes);
-    GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, incoming_window,
-                                   add_max_recv_bytes);
-    GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, announce_window,
-                                   add_max_recv_bytes);
-    grpc_chttp2_become_writable(exec_ctx, t, s,
-                                new_window_write_is_covered_by_poller,
-                                "read_incoming_stream");
+  GPR_ASSERT(max_recv_bytes <= UINT32_MAX - initial_window_size);
+  if (s->incoming_window_delta < max_recv_bytes) {
+    uint32_t add_max_recv_bytes =
+        (uint32_t)(max_recv_bytes - s->incoming_window_delta);
+    bool new_window_write_is_covered_by_poller =
+        s->incoming_window_delta + initial_window_size < (int64_t)have_already;
+    GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, incoming_window_delta,
+                                   add_max_recv_bytes);
+    GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, announce_window,
+                                   add_max_recv_bytes);
+    grpc_chttp2_become_writable(exec_ctx, t, s,
+                                new_window_write_is_covered_by_poller,
+                                "read_incoming_stream");
+    if (t->retract_incoming_window >= add_max_recv_bytes) {
+      t->retract_incoming_window -= add_max_recv_bytes;
+    } else {
+      add_max_recv_bytes -= t->retract_incoming_window;
+      t->retract_incoming_window = 0;
+      GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("op", t, announce_incoming_window,
+                                        add_max_recv_bytes);
+      GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("op", t, incoming_window,
+                                        add_max_recv_bytes);
+      grpc_chttp2_initiate_write(exec_ctx, t,
+                                 new_window_write_is_covered_by_poller,
+                                 "read_incoming_stream");
+    }
  }
}
@@ -2072,6 +2080,97 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
  return incoming_byte_stream;
}

+/*******************************************************************************
+ * BUFFER POOLS
+ */
+
+static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
+                                  grpc_chttp2_transport *t) {
+  if (!t->benign_reclaimer_registered) {
+    t->benign_reclaimer_registered = true;
+    GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
+    grpc_resource_user_post_reclaimer(exec_ctx,
+                                      grpc_endpoint_get_resource_user(t->ep),
+                                      false, &t->benign_reclaimer);
+  }
+}
+
+static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
+                                       grpc_chttp2_transport *t) {
+  if (!t->destructive_reclaimer_registered) {
+    t->destructive_reclaimer_registered = true;
+    GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
+    grpc_resource_user_post_reclaimer(exec_ctx,
+                                      grpc_endpoint_get_resource_user(t->ep),
+                                      true, &t->destructive_reclaimer);
+  }
+}
+
+static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
+                             grpc_error *error) {
+  grpc_chttp2_transport *t = arg;
+  grpc_combiner_execute(exec_ctx, t->combiner, &t->benign_reclaimer_locked,
+                        GRPC_ERROR_REF(error), false);
+}
+
+static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
+                                  grpc_error *error) {
+  grpc_chttp2_transport *t = arg;
+  grpc_combiner_execute(exec_ctx, t->combiner,
+                        &t->destructive_reclaimer_locked,
+                        GRPC_ERROR_REF(error), false);
+}
+
+static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                    grpc_error *error) {
+  grpc_chttp2_transport *t = arg;
+  if (error == GRPC_ERROR_NONE &&
+      grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+    if (grpc_resource_quota_trace) {
+      gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
+              t->peer_string);
+    }
+    send_goaway(exec_ctx, t, GRPC_CHTTP2_ENHANCE_YOUR_CALM,
+                gpr_slice_from_static_string("Buffers full"));
+  } else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace) {
+    gpr_log(GPR_DEBUG,
+            "HTTP2: %s - skip benign reclaimation, there are still %" PRIdPTR
+            " streams",
+            t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map));
+  }
+  t->benign_reclaimer_registered = false;
+  if (error != GRPC_ERROR_CANCELLED) {
+    grpc_resource_user_finish_reclaimation(
+        exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+  }
+  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "benign_reclaimer");
+}
+
+static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                         grpc_error *error) {
+  grpc_chttp2_transport *t = arg;
+  size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
+  t->destructive_reclaimer_registered = false;
+  if (error == GRPC_ERROR_NONE && n > 0) {
+    grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
+    if (grpc_resource_quota_trace) {
+      gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
+              s->id);
+    }
+    grpc_chttp2_cancel_stream(
+        exec_ctx, t, s, grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
+                                           GRPC_ERROR_INT_HTTP2_ERROR,
+                                           GRPC_CHTTP2_ENHANCE_YOUR_CALM));
+    if (n > 1) {
+      post_destructive_reclaimer(exec_ctx, t);
+    }
+  }
+  if (error != GRPC_ERROR_CANCELLED) {
+    grpc_resource_user_finish_reclaimation(
+        exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+  }
+  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destructive_reclaimer");
+}
+
/*******************************************************************************
 * TRACING
 */

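Taken together, the chttp2 changes wire the transport into the quota's two-phase reclamation: a benign reclaimer that sends GOAWAY once no streams remain, and a destructive one that cancels one randomly chosen stream per pass, re-posting itself while streams are left. The GOAWAY bookkeeping is the new three-state enum; a small recap of the transitions as illustrative C (summary names, not core API):

    #include <stdbool.h>
    #include <stddef.h>

    typedef enum {
      NO_GOAWAY_SEND,        /* GRPC_CHTTP2_NO_GOAWAY_SEND */
      GOAWAY_SEND_SCHEDULED, /* set by send_goaway(): frame appended to qbuf */
      GOAWAY_SENT            /* set by write_action_end_locked() */
    } goaway_state;

    /* Mirrors write_action_end_locked()/remove_stream() above: the transport
       closes only after the GOAWAY actually hit the wire and no streams
       remain, whichever happens last. */
    static bool should_close_transport(goaway_state st, size_t open_streams) {
      return st == GOAWAY_SENT && open_streams == 0;
    }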
@@ -110,11 +110,9 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
    if (t->incoming_stream_id != 0) {
      if (s != NULL) {
-        bool was_zero = s->outgoing_window <= 0;
-        GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", t, s, outgoing_window,
-                                       received_update);
-        bool is_zero = s->outgoing_window <= 0;
-        if (was_zero && !is_zero) {
+        GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", t, s, outgoing_window_delta,
+                                       received_update);
+        if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) {
          grpc_chttp2_become_writable(exec_ctx, t, s, false,
                                      "stream.read_flow_control");
        }

@@ -59,6 +59,7 @@ typedef enum {
  GRPC_CHTTP2_LIST_WRITABLE,
  GRPC_CHTTP2_LIST_WRITING,
  GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
+  GRPC_CHTTP2_LIST_STALLED_BY_STREAM,
  /** streams that are waiting to start because there are too many concurrent
      streams on the connection */
  GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
@@ -138,6 +139,12 @@ typedef enum {
  GRPC_NUM_SETTING_SETS
} grpc_chttp2_setting_set;

+typedef enum {
+  GRPC_CHTTP2_NO_GOAWAY_SEND,
+  GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED,
+  GRPC_CHTTP2_GOAWAY_SENT,
+} grpc_chttp2_sent_goaway_state;
+
/* Outstanding ping request data */
typedef struct grpc_chttp2_outstanding_ping {
  uint8_t id[8];
@@ -243,13 +250,15 @@ struct grpc_chttp2_transport {
  /** window available to announce to peer */
  int64_t announce_incoming_window;
-  /** how much window would we like to have for incoming_window */
-  uint32_t connection_window_target;
+  /** how many bytes have been given out as transport window that we'd now like
+      to retract? (since we can't retract incoming window, instead we just dont
+      give out any more until this amount goes to zero) */
+  int64_t retract_incoming_window;

  /** have we seen a goaway */
  uint8_t seen_goaway;
  /** have we sent a goaway */
-  uint8_t sent_goaway;
+  grpc_chttp2_sent_goaway_state sent_goaway_state;

  /** are the local settings dirty and need to be sent? */
  uint8_t dirtied_local_settings;
@@ -264,9 +273,6 @@ struct grpc_chttp2_transport {
      copied to next_stream_id in parsing when parsing commences */
  uint32_t next_stream_id;

-  /** how far to lookahead in a stream? */
-  uint32_t stream_lookahead;
-
  /** last new stream id */
  uint32_t last_new_stream_id;
@@ -320,6 +326,18 @@ struct grpc_chttp2_transport {
  /* if non-NULL, close the transport with this error when writes are finished
   */
  grpc_error *close_transport_on_writes_finished;
+
+  /* buffer pool state */
+  /** have we scheduled a benign cleanup? */
+  bool benign_reclaimer_registered;
+  /** have we scheduled a destructive cleanup? */
+  bool destructive_reclaimer_registered;
+  /** benign cleanup closure */
+  grpc_closure benign_reclaimer;
+  grpc_closure benign_reclaimer_locked;
+  /** destructive cleanup closure */
+  grpc_closure destructive_reclaimer;
+  grpc_closure destructive_reclaimer_locked;
};

typedef enum {
@@ -342,12 +360,10 @@ struct grpc_chttp2_stream {
  /** HTTP2 stream id for this stream, or zero if one has not been assigned */
  uint32_t id;

-  /** window available for us to send to peer */
-  int64_t outgoing_window;
-  /** The number of bytes the upper layers have offered to receive.
-      As the upper layer offers more bytes, this value increases.
-      As bytes are read, this value decreases. */
-  uint32_t max_recv_bytes;
+  /** window available for us to send to peer, over or under the initial window
+   * size of the transport... ie:
+   * outgoing_window = outgoing_window_delta + transport.initial_window_size */
+  int64_t outgoing_window_delta;
  /** things the upper layers would like to send */
  grpc_metadata_batch *send_initial_metadata;
  grpc_closure *send_initial_metadata_finished;
@@ -405,8 +421,10 @@ struct grpc_chttp2_stream {
  grpc_error *forced_close_error;
  /** how many header frames have we received? */
  uint8_t header_frames_received;
-  /** window available for peer to send to us */
-  int64_t incoming_window;
+  /** window available for peer to send to us (as a delta on
+   * transport.initial_window_size)
+   * incoming_window = incoming_window_delta + transport.initial_window_size */
+  int64_t incoming_window_delta;
  /** parsing state for data frames */
  grpc_chttp2_data_parser data_parser;
  /** number of bytes received - reset at end of parse thread execution */
@@ -454,34 +472,41 @@ bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
                                          grpc_chttp2_stream *s);
/** Get a writable stream
    returns non-zero if there was a stream available */
-int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
-                                         grpc_chttp2_stream **s);
+bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
+                                          grpc_chttp2_stream **s);
bool grpc_chttp2_list_remove_writable_stream(
    grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;

bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
                                         grpc_chttp2_stream *s);
-int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t);
-int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
-                                        grpc_chttp2_stream **s);
+bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t);
+bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
+                                         grpc_chttp2_stream **s);

void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport *t,
                                         grpc_chttp2_stream *s);
-int grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t,
-                                        grpc_chttp2_stream **s);
+bool grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t,
+                                         grpc_chttp2_stream **s);

void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
                                                  grpc_chttp2_stream *s);
-int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
-                                                 grpc_chttp2_stream **s);
+bool grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
+                                                  grpc_chttp2_stream **s);

void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
                                               grpc_chttp2_stream *s);
-int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
-                                              grpc_chttp2_stream **s);
+bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
+                                               grpc_chttp2_stream **s);
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
                                                  grpc_chttp2_stream *s);
+void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t,
+                                            grpc_chttp2_stream *s);
+bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t,
+                                            grpc_chttp2_stream **s);
+bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
+                                               grpc_chttp2_stream *s);

grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
                                                      uint32_t id);
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,

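The header comments above define the key invariant of the new flow-control scheme: per-stream windows are stored as deltas against the transport's INITIAL_WINDOW_SIZE setting, so a SETTINGS change re-bases every stream implicitly instead of walking the stream map (which is why update_global_window could be deleted). A minimal sketch of the arithmetic, with an invented worked example:

    #include <stdint.h>

    /* outgoing_window = outgoing_window_delta + transport.initial_window_size;
       the same identity holds for incoming_window_delta. */
    static int64_t effective_window(int64_t window_delta,
                                    uint32_t initial_window_size) {
      return window_delta + (int64_t)initial_window_size;
    }

    /* Example: with the default 65535-byte initial window, a stream that has
       sent 70000 bytes and received a 10000-byte WINDOW_UPDATE carries
       delta = 10000 - 70000 = -60000, so 65535 + (-60000) = 5535 bytes of
       send budget remain; raising INITIAL_WINDOW_SIZE grows every stream's
       budget without touching any per-stream state. */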
@@ -379,21 +379,31 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
                                incoming_frame_size);
  if (s != NULL) {
-    if (incoming_frame_size > s->incoming_window) {
+    if (incoming_frame_size >
+        s->incoming_window_delta +
+            t->settings[GRPC_ACKED_SETTINGS]
+                       [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]) {
      char *msg;
      gpr_asprintf(&msg,
                   "frame of size %d overflows incoming window of %" PRId64,
-                   t->incoming_frame_size, s->incoming_window);
+                   t->incoming_frame_size,
+                   s->incoming_window_delta +
+                       t->settings[GRPC_ACKED_SETTINGS]
+                                  [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
      grpc_error *err = GRPC_ERROR_CREATE(msg);
      gpr_free(msg);
      return err;
    }
-    GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", t, s, incoming_window,
+    GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", t, s, incoming_window_delta,
                                  incoming_frame_size);
    s->received_bytes += incoming_frame_size;
-    s->max_recv_bytes -=
-        (uint32_t)GPR_MIN(s->max_recv_bytes, incoming_frame_size);
+  } else {
+    GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", t, announce_incoming_window,
+                                      incoming_frame_size);
+    GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", t, incoming_window,
+                                      incoming_frame_size);
+    grpc_chttp2_initiate_write(exec_ctx, t, false, "destroy_stream");
  }

  return GRPC_ERROR_NONE;

@@ -37,14 +37,14 @@

/* core list management */

-static int stream_list_empty(grpc_chttp2_transport *t,
-                             grpc_chttp2_stream_list_id id) {
+static bool stream_list_empty(grpc_chttp2_transport *t,
+                              grpc_chttp2_stream_list_id id) {
  return t->lists[id].head == NULL;
}

-static int stream_list_pop(grpc_chttp2_transport *t,
-                           grpc_chttp2_stream **stream,
-                           grpc_chttp2_stream_list_id id) {
+static bool stream_list_pop(grpc_chttp2_transport *t,
+                            grpc_chttp2_stream **stream,
+                            grpc_chttp2_stream_list_id id) {
  grpc_chttp2_stream *s = t->lists[id].head;
  if (s) {
    grpc_chttp2_stream *new_head = s->links[id].next;
@@ -124,8 +124,8 @@ bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
  return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}

-int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
-                                         grpc_chttp2_stream **s) {
+bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
+                                          grpc_chttp2_stream **s) {
  return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
@@ -139,12 +139,12 @@ bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
  return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITING);
}

-int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) {
+bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) {
  return !stream_list_empty(t, GRPC_CHTTP2_LIST_WRITING);
}

-int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
-                                        grpc_chttp2_stream **s) {
+bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
+                                         grpc_chttp2_stream **s) {
  return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITING);
}
@@ -153,8 +153,8 @@ void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
  stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}

-int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
-                                                 grpc_chttp2_stream **s) {
+bool grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
+                                                  grpc_chttp2_stream **s) {
  return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
@@ -163,8 +163,8 @@ void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
  stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}

-int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
-                                              grpc_chttp2_stream **s) {
+bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
+                                               grpc_chttp2_stream **s) {
  return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
@@ -172,3 +172,18 @@ void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
                                                  grpc_chttp2_stream *s) {
  stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
+
+void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t,
+                                            grpc_chttp2_stream *s) {
+  stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
+}
+
+bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t,
+                                            grpc_chttp2_stream **s) {
+  return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
+}
+
+bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
+                                               grpc_chttp2_stream *s) {
+  return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
+}

@@ -151,6 +151,17 @@ size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map) {
  return map->count - map->free;
}

+void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map) {
+  if (map->count == map->free) {
+    return NULL;
+  }
+  if (map->free != 0) {
+    map->count = compact(map->keys, map->values, map->count);
+    map->free = 0;
+  }
+  return map->values[((size_t)rand()) % map->count];
+}
+
void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
                                     void (*f)(void *user_data, uint32_t key,
                                               void *value),

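grpc_chttp2_stream_map_rand first compacts away deleted entries so the live values are contiguous, then indexes with rand() modulo the live count; the small modulo bias is harmless when all that's needed is an arbitrary victim stream under memory pressure. The idea in isolation, as a hypothetical helper:

    #include <stdlib.h>

    /* Pick a roughly uniform random element from a compacted array of live
       entries; returns NULL when the array is empty. */
    static void *pick_random(void **values, size_t live_count) {
      if (live_count == 0) return NULL;
      return values[(size_t)rand() % live_count];
    }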
@@ -68,6 +68,9 @@ void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key);
/* Return an existing key, or NULL if it does not exist */
void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key);

+/* Return a random entry */
+void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map);
+
/* How many (populated) entries are in the stream map? */
size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map);

@@ -138,10 +138,15 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
    if (sent_initial_metadata) {
      /* send any body bytes, if allowed by flow control */
      if (s->flow_controlled_buffer.length > 0) {
-        uint32_t max_outgoing =
-            (uint32_t)GPR_MIN(t->settings[GRPC_ACKED_SETTINGS]
-                                         [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
-                              GPR_MIN(s->outgoing_window, t->outgoing_window));
+        uint32_t stream_outgoing_window = (uint32_t)GPR_MAX(
+            0,
+            s->outgoing_window_delta +
+                (int64_t)t->settings[GRPC_PEER_SETTINGS]
+                                    [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
+        uint32_t max_outgoing = (uint32_t)GPR_MIN(
+            t->settings[GRPC_ACKED_SETTINGS]
+                       [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+            GPR_MIN(stream_outgoing_window, t->outgoing_window));
        if (max_outgoing > 0) {
          uint32_t send_bytes =
              (uint32_t)GPR_MIN(max_outgoing, s->flow_controlled_buffer.length);
@@ -154,7 +159,7 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
          grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, send_bytes,
                                  is_last_frame, &s->stats.outgoing,
                                  &t->outbuf);
-          GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, outgoing_window,
+          GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, outgoing_window_delta,
                                        send_bytes);
          GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, outgoing_window,
                                           send_bytes);
@@ -176,6 +181,9 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
        } else if (t->outgoing_window == 0) {
          grpc_chttp2_list_add_stalled_by_transport(t, s);
          now_writing = true;
+        } else if (stream_outgoing_window == 0) {
+          grpc_chttp2_list_add_stalled_by_stream(t, s);
+          now_writing = true;
        }
      }
      if (s->send_trailing_metadata != NULL &&

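The writing path now derives the per-stream send budget from the delta plus the peer's initial window (floored at zero, since the delta can drive the window negative), and records which limit ran out: a zero transport window parks the stream on stalled_by_transport, while a zero stream window parks it on the new stalled_by_stream list, so exactly the right WINDOW_UPDATE or SETTINGS event can requeue it. The clamp in isolation, with an invented worked example:

    #include <stdint.h>

    /* The next DATA frame may carry at most
       min(max_frame_size, stream_window, transport_window) bytes. */
    static uint32_t max_outgoing_bytes(uint32_t max_frame_size,
                                       int64_t stream_window,
                                       int64_t transport_window) {
      int64_t w =
          stream_window < transport_window ? stream_window : transport_window;
      if (w < 0) w = 0; /* a negative delta can push the window below zero */
      return w < (int64_t)max_frame_size ? (uint32_t)w : max_frame_size;
    }

    /* e.g. max_outgoing_bytes(16384, 5535, 40000) == 5535 */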
@@ -71,6 +71,7 @@ typedef struct {
  grpc_closure done_write;
  grpc_closure connected;
  grpc_error *overall_error;
+  grpc_resource_quota *resource_quota;
} internal_request;

static grpc_httpcli_get_override g_get_override = NULL;
@@ -118,6 +119,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
  gpr_slice_buffer_destroy(&req->incoming);
  gpr_slice_buffer_destroy(&req->outgoing);
  GRPC_ERROR_UNREF(req->overall_error);
+  grpc_resource_quota_internal_unref(exec_ctx, req->resource_quota);
  gpr_free(req);
}
@@ -224,8 +226,14 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
  }
  addr = &req->addresses->addrs[req->next_address++];
  grpc_closure_init(&req->connected, on_connected, req);
+  grpc_arg arg;
+  arg.key = GRPC_ARG_BUFFER_POOL;
+  arg.type = GRPC_ARG_POINTER;
+  arg.value.pointer.p = req->resource_quota;
+  arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
+  grpc_channel_args args = {1, &arg};
  grpc_tcp_client_connect(
-      exec_ctx, &req->connected, &req->ep, req->context->pollset_set,
+      exec_ctx, &req->connected, &req->ep, req->context->pollset_set, &args,
      (struct sockaddr *)&addr->addr, addr->len, req->deadline);
}
@@ -242,6 +250,7 @@ static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void internal_request_begin(grpc_exec_ctx *exec_ctx,
                                   grpc_httpcli_context *context,
                                   grpc_polling_entity *pollent,
+                                   grpc_resource_quota *resource_quota,
                                   const grpc_httpcli_request *request,
                                   gpr_timespec deadline, grpc_closure *on_done,
                                   grpc_httpcli_response *response,
@@ -257,6 +266,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
  req->context = context;
  req->pollent = pollent;
  req->overall_error = GRPC_ERROR_NONE;
+  req->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
  grpc_closure_init(&req->on_read, on_read, req);
  grpc_closure_init(&req->done_write, done_write, req);
  gpr_slice_buffer_init(&req->incoming);
@@ -274,6 +284,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,

void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
                      grpc_polling_entity *pollent,
+                      grpc_resource_quota *resource_quota,
                      const grpc_httpcli_request *request,
                      gpr_timespec deadline, grpc_closure *on_done,
                      grpc_httpcli_response *response) {
@@ -283,14 +294,15 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
    return;
  }
  gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->http.path);
-  internal_request_begin(exec_ctx, context, pollent, request, deadline, on_done,
-                         response, name,
+  internal_request_begin(exec_ctx, context, pollent, resource_quota, request,
+                         deadline, on_done, response, name,
                         grpc_httpcli_format_get_request(request));
  gpr_free(name);
}

void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
                       grpc_polling_entity *pollent,
+                       grpc_resource_quota *resource_quota,
                       const grpc_httpcli_request *request,
                       const char *body_bytes, size_t body_size,
                       gpr_timespec deadline, grpc_closure *on_done,
@@ -303,7 +315,8 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
  }
  gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->http.path);
  internal_request_begin(
-      exec_ctx, context, pollent, request, deadline, on_done, response, name,
+      exec_ctx, context, pollent, resource_quota, request, deadline, on_done,
+      response, name,
      grpc_httpcli_format_post_request(request, body_bytes, body_size));
  gpr_free(name);
}

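Since httpcli opens its own TCP connections, every caller now has to say which quota those connections bill against; the GET/POST wrappers simply thread the new parameter through to internal_request_begin (the credential fetch paths touched by this commit do the same). A hedged sketch of a call site after this change (the wrapper name is hypothetical; the grpc_httpcli_get signature matches the header diff below):

    #include "src/core/lib/http/httpcli.h"

    /* Illustrative only: identical to the pre-change call, plus the
       resource_quota argument naming the pool the connection draws from. */
    static void fetch(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *ctx,
                      grpc_polling_entity *pollent, grpc_resource_quota *rq,
                      const grpc_httpcli_request *req, gpr_timespec deadline,
                      grpc_closure *on_done, grpc_httpcli_response *rsp) {
      grpc_httpcli_get(exec_ctx, ctx, pollent, rq, req, deadline, on_done, rsp);
    }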
@@ -96,6 +96,7 @@ void grpc_httpcli_context_destroy(grpc_httpcli_context *context);
   'on_response' is a callback to report results to */
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
                      grpc_polling_entity *pollent,
+                      grpc_resource_quota *resource_quota,
                      const grpc_httpcli_request *request,
                      gpr_timespec deadline, grpc_closure *on_complete,
                      grpc_httpcli_response *response);
@@ -116,6 +117,7 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
   Does not support ?var1=val1&var2=val2 in the path. */
void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
                       grpc_polling_entity *pollent,
+                       grpc_resource_quota *resource_quota,
                       const grpc_httpcli_request *request,
                       const char *body_bytes, size_t body_size,
                       gpr_timespec deadline, grpc_closure *on_complete,

@@ -69,3 +69,7 @@ char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) {
  return ep->vtable->get_workqueue(ep);
}
+
+grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
+  return ep->vtable->get_resource_user(ep);
+}

@@ -39,6 +39,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/iomgr/resource_quota.h"

/* An endpoint caps a streaming channel between two communicating processes.
   Examples may be: a tcp socket, <stdin+stdout>, or some shared memory. */
@@ -58,6 +59,7 @@ struct grpc_endpoint_vtable {
                             grpc_pollset_set *pollset);
  void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
  void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+  grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
  char *(*get_peer)(grpc_endpoint *ep);
};

@@ -100,6 +102,8 @@ void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
                                      grpc_endpoint *ep,
                                      grpc_pollset_set *pollset_set);

+grpc_resource_user *grpc_endpoint_get_resource_user(grpc_endpoint *endpoint);
+
struct grpc_endpoint {
  const grpc_endpoint_vtable *vtable;
};

@@ -41,7 +41,8 @@ typedef struct {
  grpc_endpoint *server;
} grpc_endpoint_pair;

-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
-                                                   size_t read_slice_size);
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+    const char *name, grpc_resource_quota *resource_quota,
+    size_t read_slice_size);

#endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H */

@@ -62,20 +62,21 @@ static void create_sockets(int sv[2]) {
  GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]) == GRPC_ERROR_NONE);
}

-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
-                                                   size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+    const char *name, grpc_resource_quota *resource_quota,
+    size_t read_slice_size) {
  int sv[2];
  grpc_endpoint_pair p;
  char *final_name;
  create_sockets(sv);

  gpr_asprintf(&final_name, "%s:client", name);
-  p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size,
-                             "socketpair-server");
+  p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), resource_quota,
+                             read_slice_size, "socketpair-server");
  gpr_free(final_name);
  gpr_asprintf(&final_name, "%s:server", name);
-  p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size,
-                             "socketpair-client");
+  p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), resource_quota,
+                             read_slice_size, "socketpair-client");
  gpr_free(final_name);

  return p;
}

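Socketpair endpoints get the same treatment: grpc_tcp_create now needs a resource quota, so the pair constructor takes one up front. A hedged sketch of a (typically test-side) caller, assuming the endpoints retain their own references the way the fd-server path earlier suggests, and assuming the public unref added in this change:

    #include "src/core/lib/iomgr/endpoint_pair.h"

    /* Illustrative only: build a quota-backed in-process endpoint pair with
       8KB read slices. */
    static grpc_endpoint_pair make_pair(void) {
      grpc_resource_quota *rq = grpc_resource_quota_create("test-pair");
      grpc_endpoint_pair p = grpc_iomgr_create_endpoint_pair("test", rq, 8192);
      grpc_resource_quota_unref(rq); /* assumed: endpoints keep their own refs */
      return p;
    }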
@@ -0,0 +1,711 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/lib/iomgr/resource_quota.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/combiner.h"
int grpc_resource_quota_trace = 0;
typedef bool (*bpstate_func)(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota);
typedef struct {
grpc_resource_user *head;
grpc_resource_user *tail;
} grpc_resource_user_list;
struct grpc_resource_quota {
gpr_refcount refs;
grpc_combiner *combiner;
int64_t size;
int64_t free_pool;
bool step_scheduled;
bool reclaiming;
grpc_closure bpstep_closure;
grpc_closure bpreclaimation_done_closure;
grpc_resource_user *roots[GRPC_BULIST_COUNT];
char *name;
};
/*******************************************************************************
* list management
*/
static void bulist_add_tail(grpc_resource_user *resource_user,
grpc_bulist list) {
grpc_resource_quota *resource_quota = resource_user->resource_quota;
grpc_resource_user **root = &resource_quota->roots[list];
if (*root == NULL) {
*root = resource_user;
resource_user->links[list].next = resource_user->links[list].prev =
resource_user;
} else {
resource_user->links[list].next = *root;
resource_user->links[list].prev = (*root)->links[list].prev;
resource_user->links[list].next->links[list].prev =
resource_user->links[list].prev->links[list].next = resource_user;
}
}
static void bulist_add_head(grpc_resource_user *resource_user,
grpc_bulist list) {
grpc_resource_quota *resource_quota = resource_user->resource_quota;
grpc_resource_user **root = &resource_quota->roots[list];
if (*root == NULL) {
*root = resource_user;
resource_user->links[list].next = resource_user->links[list].prev =
resource_user;
} else {
resource_user->links[list].next = (*root)->links[list].next;
resource_user->links[list].prev = *root;
resource_user->links[list].next->links[list].prev =
resource_user->links[list].prev->links[list].next = resource_user;
*root = resource_user;
}
}
static bool bulist_empty(grpc_resource_quota *resource_quota,
grpc_bulist list) {
return resource_quota->roots[list] == NULL;
}
static grpc_resource_user *bulist_pop(grpc_resource_quota *resource_quota,
grpc_bulist list) {
grpc_resource_user **root = &resource_quota->roots[list];
grpc_resource_user *resource_user = *root;
if (resource_user == NULL) {
return NULL;
}
if (resource_user->links[list].next == resource_user) {
*root = NULL;
} else {
resource_user->links[list].next->links[list].prev =
resource_user->links[list].prev;
resource_user->links[list].prev->links[list].next =
resource_user->links[list].next;
*root = resource_user->links[list].next;
}
resource_user->links[list].next = resource_user->links[list].prev = NULL;
return resource_user;
}
static void bulist_remove(grpc_resource_user *resource_user, grpc_bulist list) {
if (resource_user->links[list].next == NULL) return;
grpc_resource_quota *resource_quota = resource_user->resource_quota;
if (resource_quota->roots[list] == resource_user) {
resource_quota->roots[list] = resource_user->links[list].next;
if (resource_quota->roots[list] == resource_user) {
resource_quota->roots[list] = NULL;
}
}
resource_user->links[list].next->links[list].prev =
resource_user->links[list].prev;
resource_user->links[list].prev->links[list].next =
resource_user->links[list].next;
}
/*******************************************************************************
* buffer pool state machine
*/
static bool bpalloc(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota);
static bool bpscavenge(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota);
static bool bpreclaim(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota, bool destructive);
static void bpstep(grpc_exec_ctx *exec_ctx, void *bp, grpc_error *error) {
grpc_resource_quota *resource_quota = bp;
resource_quota->step_scheduled = false;
do {
if (bpalloc(exec_ctx, resource_quota)) goto done;
} while (bpscavenge(exec_ctx, resource_quota));
bpreclaim(exec_ctx, resource_quota, false) ||
bpreclaim(exec_ctx, resource_quota, true);
done:
grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
}
static void bpstep_sched(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota) {
if (resource_quota->step_scheduled) return;
resource_quota->step_scheduled = true;
grpc_resource_quota_internal_ref(resource_quota);
grpc_combiner_execute_finally(exec_ctx, resource_quota->combiner,
&resource_quota->bpstep_closure,
GRPC_ERROR_NONE, false);
}
/* returns true if all allocations are completed */
static bool bpalloc(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota) {
grpc_resource_user *resource_user;
while ((resource_user =
bulist_pop(resource_quota, GRPC_BULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
if (resource_user->free_pool < 0 &&
-resource_user->free_pool <= resource_quota->free_pool) {
int64_t amt = -resource_user->free_pool;
resource_user->free_pool = 0;
resource_quota->free_pool -= amt;
if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "BP %s %s: grant alloc %" PRId64
" bytes; bp_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);
}
} else if (grpc_resource_quota_trace && resource_user->free_pool >= 0) {
gpr_log(GPR_DEBUG, "BP %s %s: discard already satisfied alloc request",
resource_quota->name, resource_user->name);
}
if (resource_user->free_pool >= 0) {
resource_user->allocating = false;
grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL);
gpr_mu_unlock(&resource_user->mu);
} else {
bulist_add_head(resource_user, GRPC_BULIST_AWAITING_ALLOCATION);
gpr_mu_unlock(&resource_user->mu);
return false;
}
}
return true;
}
/* returns true if any memory could be reclaimed from buffers */
static bool bpscavenge(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota) {
grpc_resource_user *resource_user;
while ((resource_user =
bulist_pop(resource_quota, GRPC_BULIST_NON_EMPTY_FREE_POOL))) {
gpr_mu_lock(&resource_user->mu);
if (resource_user->free_pool > 0) {
int64_t amt = resource_user->free_pool;
resource_user->free_pool = 0;
resource_quota->free_pool += amt;
if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "BP %s %s: scavenge %" PRId64
" bytes; bp_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);
}
gpr_mu_unlock(&resource_user->mu);
return true;
} else {
gpr_mu_unlock(&resource_user->mu);
}
}
return false;
}
/* returns true if reclamation is proceeding */
static bool bpreclaim(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota, bool destructive) {
if (resource_quota->reclaiming) return true;
grpc_bulist list = destructive ? GRPC_BULIST_RECLAIMER_DESTRUCTIVE
: GRPC_BULIST_RECLAIMER_BENIGN;
grpc_resource_user *resource_user = bulist_pop(resource_quota, list);
if (resource_user == NULL) return false;
if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "BP %s %s: initiate %s reclaimation",
resource_quota->name, resource_user->name,
destructive ? "destructive" : "benign");
}
resource_quota->reclaiming = true;
grpc_resource_quota_internal_ref(resource_quota);
grpc_closure *c = resource_user->reclaimers[destructive];
resource_user->reclaimers[destructive] = NULL;
grpc_closure_run(exec_ctx, c, GRPC_ERROR_NONE);
return true;
}
/*******************************************************************************
* bu_slice: a slice implementation that is backed by a grpc_resource_user
*/
typedef struct {
gpr_slice_refcount base;
gpr_refcount refs;
grpc_resource_user *resource_user;
size_t size;
} bu_slice_refcount;
static void bu_slice_ref(void *p) {
bu_slice_refcount *rc = p;
gpr_ref(&rc->refs);
}
static void bu_slice_unref(void *p) {
bu_slice_refcount *rc = p;
if (gpr_unref(&rc->refs)) {
/* TODO(ctiller): this is dangerous, but I think safe for now:
we have no guarantee here that we're at a safe point for creating an
execution context, but we have no way of writing this code otherwise.
In the future: consider lifting gpr_slice to grpc, and offering an
internal_{ref,unref} pair that is execution context aware. Alternatively,
make exec_ctx be thread local and 'do the right thing' (whatever that is)
if NULL */
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, rc->resource_user, rc->size);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(rc);
}
}
static gpr_slice bu_slice_create(grpc_resource_user *resource_user,
size_t size) {
bu_slice_refcount *rc = gpr_malloc(sizeof(bu_slice_refcount) + size);
rc->base.ref = bu_slice_ref;
rc->base.unref = bu_slice_unref;
gpr_ref_init(&rc->refs, 1);
rc->resource_user = resource_user;
rc->size = size;
gpr_slice slice;
slice.refcount = &rc->base;
slice.data.refcounted.bytes = (uint8_t *)(rc + 1);
slice.data.refcounted.length = size;
return slice;
}
/*******************************************************************************
* grpc_resource_quota internal implementation
*/
static void bu_allocate(grpc_exec_ctx *exec_ctx, void *bu, grpc_error *error) {
grpc_resource_user *resource_user = bu;
if (bulist_empty(resource_user->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION)) {
bpstep_sched(exec_ctx, resource_user->resource_quota);
}
bulist_add_tail(resource_user, GRPC_BULIST_AWAITING_ALLOCATION);
}
static void bu_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *bu,
grpc_error *error) {
grpc_resource_user *resource_user = bu;
if (!bulist_empty(resource_user->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_NON_EMPTY_FREE_POOL)) {
bpstep_sched(exec_ctx, resource_user->resource_quota);
}
bulist_add_tail(resource_user, GRPC_BULIST_NON_EMPTY_FREE_POOL);
}
static void bu_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *bu,
grpc_error *error) {
grpc_resource_user *resource_user = bu;
if (!bulist_empty(resource_user->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_NON_EMPTY_FREE_POOL) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_RECLAIMER_BENIGN)) {
bpstep_sched(exec_ctx, resource_user->resource_quota);
}
bulist_add_tail(resource_user, GRPC_BULIST_RECLAIMER_BENIGN);
}
static void bu_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *bu,
grpc_error *error) {
grpc_resource_user *resource_user = bu;
if (!bulist_empty(resource_user->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_NON_EMPTY_FREE_POOL) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_RECLAIMER_BENIGN) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_RECLAIMER_DESTRUCTIVE)) {
bpstep_sched(exec_ctx, resource_user->resource_quota);
}
bulist_add_tail(resource_user, GRPC_BULIST_RECLAIMER_DESTRUCTIVE);
}
static void bu_destroy(grpc_exec_ctx *exec_ctx, void *bu, grpc_error *error) {
grpc_resource_user *resource_user = bu;
GPR_ASSERT(resource_user->allocated == 0);
for (int i = 0; i < GRPC_BULIST_COUNT; i++) {
bulist_remove(resource_user, (grpc_bulist)i);
}
grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
GRPC_ERROR_CANCELLED, NULL);
grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
GRPC_ERROR_CANCELLED, NULL);
grpc_exec_ctx_sched(exec_ctx, (grpc_closure *)gpr_atm_no_barrier_load(
&resource_user->on_done_destroy_closure),
GRPC_ERROR_NONE, NULL);
if (resource_user->free_pool != 0) {
resource_user->resource_quota->free_pool += resource_user->free_pool;
bpstep_sched(exec_ctx, resource_user->resource_quota);
}
}
static void bu_allocated_slices(grpc_exec_ctx *exec_ctx, void *ts,
grpc_error *error) {
grpc_resource_user_slice_allocator *slice_allocator = ts;
if (error == GRPC_ERROR_NONE) {
for (size_t i = 0; i < slice_allocator->count; i++) {
gpr_slice_buffer_add_indexed(
slice_allocator->dest, bu_slice_create(slice_allocator->resource_user,
slice_allocator->length));
}
}
grpc_closure_run(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
}
typedef struct {
int64_t size;
grpc_resource_quota *resource_quota;
grpc_closure closure;
} bp_resize_args;
static void bp_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
bp_resize_args *a = args;
int64_t delta = a->size - a->resource_quota->size;
a->resource_quota->size += delta;
a->resource_quota->free_pool += delta;
if (delta < 0 && a->resource_quota->free_pool < 0) {
bpstep_sched(exec_ctx, a->resource_quota);
} else if (delta > 0 &&
!bulist_empty(a->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION)) {
bpstep_sched(exec_ctx, a->resource_quota);
}
grpc_resource_quota_internal_unref(exec_ctx, a->resource_quota);
gpr_free(a);
}
static void bp_reclaimation_done(grpc_exec_ctx *exec_ctx, void *bp,
grpc_error *error) {
grpc_resource_quota *resource_quota = bp;
resource_quota->reclaiming = false;
bpstep_sched(exec_ctx, resource_quota);
grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
}
/*******************************************************************************
* grpc_resource_quota api
*/
grpc_resource_quota *grpc_resource_quota_create(const char *name) {
grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
gpr_ref_init(&resource_quota->refs, 1);
resource_quota->combiner = grpc_combiner_create(NULL);
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
resource_quota->step_scheduled = false;
resource_quota->reclaiming = false;
if (name != NULL) {
resource_quota->name = gpr_strdup(name);
} else {
gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
(intptr_t)resource_quota);
}
grpc_closure_init(&resource_quota->bpstep_closure, bpstep, resource_quota);
grpc_closure_init(&resource_quota->bpreclaimation_done_closure,
bp_reclaimation_done, resource_quota);
for (int i = 0; i < GRPC_BULIST_COUNT; i++) {
resource_quota->roots[i] = NULL;
}
return resource_quota;
}
void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota) {
if (gpr_unref(&resource_quota->refs)) {
grpc_combiner_destroy(exec_ctx, resource_quota->combiner);
gpr_free(resource_quota->name);
gpr_free(resource_quota);
}
}
void grpc_resource_quota_unref(grpc_resource_quota *resource_quota) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota *grpc_resource_quota_internal_ref(
grpc_resource_quota *resource_quota) {
gpr_ref(&resource_quota->refs);
return resource_quota;
}
void grpc_resource_quota_ref(grpc_resource_quota *resource_quota) {
grpc_resource_quota_internal_ref(resource_quota);
}
void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
size_t size) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
bp_resize_args *a = gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
a->size = (int64_t)size;
grpc_closure_init(&a->closure, bp_resize, a);
grpc_combiner_execute(&exec_ctx, resource_quota->combiner, &a->closure,
GRPC_ERROR_NONE, false);
grpc_exec_ctx_finish(&exec_ctx);
}
/*******************************************************************************
* grpc_resource_user channel args api
*/
grpc_resource_quota *grpc_resource_quota_from_channel_args(
const grpc_channel_args *channel_args) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_BUFFER_POOL)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
return grpc_resource_quota_internal_ref(
channel_args->args[i].value.pointer.p);
} else {
gpr_log(GPR_DEBUG, GRPC_ARG_BUFFER_POOL " should be a pointer");
}
}
}
return grpc_resource_quota_create(NULL);
}
static void *bp_copy(void *bp) {
grpc_resource_quota_ref(bp);
return bp;
}
static void bp_destroy(void *bp) { grpc_resource_quota_unref(bp); }
static int bp_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
static const grpc_arg_pointer_vtable vtable = {bp_copy, bp_destroy, bp_cmp};
return &vtable;
}
/*******************************************************************************
* grpc_resource_user api
*/
void grpc_resource_user_init(grpc_resource_user *resource_user,
grpc_resource_quota *resource_quota,
const char *name) {
resource_user->resource_quota =
grpc_resource_quota_internal_ref(resource_quota);
grpc_closure_init(&resource_user->allocate_closure, &bu_allocate,
resource_user);
grpc_closure_init(&resource_user->add_to_free_pool_closure,
&bu_add_to_free_pool, resource_user);
grpc_closure_init(&resource_user->post_reclaimer_closure[0],
&bu_post_benign_reclaimer, resource_user);
grpc_closure_init(&resource_user->post_reclaimer_closure[1],
&bu_post_destructive_reclaimer, resource_user);
grpc_closure_init(&resource_user->destroy_closure, &bu_destroy,
resource_user);
gpr_mu_init(&resource_user->mu);
resource_user->allocated = 0;
resource_user->free_pool = 0;
grpc_closure_list_init(&resource_user->on_allocated);
resource_user->allocating = false;
resource_user->added_to_free_pool = false;
gpr_atm_no_barrier_store(&resource_user->on_done_destroy_closure, 0);
resource_user->reclaimers[0] = NULL;
resource_user->reclaimers[1] = NULL;
for (int i = 0; i < GRPC_BULIST_COUNT; i++) {
resource_user->links[i].next = resource_user->links[i].prev = NULL;
}
#ifndef NDEBUG
resource_user->asan_canary = gpr_malloc(1);
#endif
if (name != NULL) {
resource_user->name = gpr_strdup(name);
} else {
gpr_asprintf(&resource_user->name, "anonymous_resource_user_%" PRIxPTR,
(intptr_t)resource_user);
}
}
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
grpc_closure *on_done) {
gpr_mu_lock(&resource_user->mu);
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->on_done_destroy_closure) ==
0);
gpr_atm_no_barrier_store(&resource_user->on_done_destroy_closure,
(gpr_atm)on_done);
if (resource_user->allocated == 0) {
grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
&resource_user->destroy_closure, GRPC_ERROR_NONE,
false);
}
gpr_mu_unlock(&resource_user->mu);
}
void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
#ifndef NDEBUG
gpr_free(resource_user->asan_canary);
#endif
grpc_resource_quota_internal_unref(exec_ctx, resource_user->resource_quota);
gpr_mu_destroy(&resource_user->mu);
gpr_free(resource_user->name);
}
void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user, size_t size,
grpc_closure *optional_on_done) {
gpr_mu_lock(&resource_user->mu);
grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load(
&resource_user->on_done_destroy_closure);
if (on_done_destroy != NULL) {
/* already shutdown */
if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "BP %s %s: alloc %" PRIdPTR " after shutdown",
resource_user->resource_quota->name, resource_user->name, size);
}
grpc_exec_ctx_sched(
exec_ctx, optional_on_done,
GRPC_ERROR_CREATE("Buffer pool user is already shutdown"), NULL);
gpr_mu_unlock(&resource_user->mu);
return;
}
resource_user->allocated += (int64_t)size;
resource_user->free_pool -= (int64_t)size;
if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "BP %s %s: alloc %" PRIdPTR "; allocated -> %" PRId64
", free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->allocated, resource_user->free_pool);
}
if (resource_user->free_pool < 0) {
grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
GRPC_ERROR_NONE);
if (!resource_user->allocating) {
resource_user->allocating = true;
grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
&resource_user->allocate_closure, GRPC_ERROR_NONE,
false);
}
} else {
grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
}
gpr_mu_unlock(&resource_user->mu);
}
void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user, size_t size) {
gpr_mu_lock(&resource_user->mu);
GPR_ASSERT(resource_user->allocated >= (int64_t)size);
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += (int64_t)size;
resource_user->allocated -= (int64_t)size;
if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "BP %s %s: free %" PRIdPTR "; allocated -> %" PRId64
", free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->allocated, resource_user->free_pool);
}
bool is_bigger_than_zero = resource_user->free_pool > 0;
if (is_bigger_than_zero && was_zero_or_negative &&
!resource_user->added_to_free_pool) {
resource_user->added_to_free_pool = true;
grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
&resource_user->add_to_free_pool_closure,
GRPC_ERROR_NONE, false);
}
grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load(
&resource_user->on_done_destroy_closure);
if (on_done_destroy != NULL && resource_user->allocated == 0) {
grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
&resource_user->destroy_closure, GRPC_ERROR_NONE,
false);
}
gpr_mu_unlock(&resource_user->mu);
}
void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
bool destructive,
grpc_closure *closure) {
if (gpr_atm_acq_load(&resource_user->on_done_destroy_closure) == 0) {
GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
resource_user->reclaimers[destructive] = closure;
grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
&resource_user->post_reclaimer_closure[destructive],
GRPC_ERROR_NONE, false);
} else {
grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL);
}
}
void grpc_resource_user_finish_reclaimation(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "BP %s %s: reclaimation complete",
resource_user->resource_quota->name, resource_user->name);
}
grpc_combiner_execute(
exec_ctx, resource_user->resource_quota->combiner,
&resource_user->resource_quota->bpreclaimation_done_closure,
GRPC_ERROR_NONE, false);
}
void grpc_resource_user_slice_allocator_init(
grpc_resource_user_slice_allocator *slice_allocator,
grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
grpc_closure_init(&slice_allocator->on_allocated, bu_allocated_slices,
slice_allocator);
grpc_closure_init(&slice_allocator->on_done, cb, p);
slice_allocator->resource_user = resource_user;
}
void grpc_resource_user_alloc_slices(
grpc_exec_ctx *exec_ctx,
grpc_resource_user_slice_allocator *slice_allocator, size_t length,
size_t count, gpr_slice_buffer *dest) {
slice_allocator->length = length;
slice_allocator->count = count;
slice_allocator->dest = dest;
grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user,
count * length, &slice_allocator->on_allocated);
}
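Taken together, the machinery above gives each resource user a simple outward lifecycle: init against a quota, alloc (parked on GRPC_BULIST_AWAITING_ALLOCATION until bpstep can grant the bytes), free, then an asynchronous shutdown/destroy pair. A minimal sketch using only functions from this file; the 1MiB cap, 64KiB request, empty callback, and stack-allocated user are illustrative:

static void on_alloc_done(grpc_exec_ctx *exec_ctx, void *arg,
                          grpc_error *error) {
  /* runs once the quota grants the bytes (immediately when the user's own
     free pool already covers the request) */
}

static void quota_demo(grpc_exec_ctx *exec_ctx) {
  grpc_resource_quota *rq = grpc_resource_quota_create("demo");
  grpc_resource_quota_resize(rq, 1024 * 1024); /* cap the pool at 1 MiB */

  grpc_resource_user ru;
  grpc_resource_user_init(&ru, rq, "demo_user");

  grpc_closure alloc_done;
  grpc_closure_init(&alloc_done, on_alloc_done, NULL);
  grpc_resource_user_alloc(exec_ctx, &ru, 64 * 1024, &alloc_done);
  /* ... use the memory ... */
  grpc_resource_user_free(exec_ctx, &ru, 64 * 1024);

  /* a real caller must run grpc_resource_user_shutdown and wait for its
     on_done closure before grpc_resource_user_destroy; elided here */
  grpc_resource_quota_unref(rq);
}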

@@ -0,0 +1,131 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H
#define GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H
#include <grpc/grpc.h>
#include "src/core/lib/iomgr/exec_ctx.h"
extern int grpc_resource_quota_trace;
grpc_resource_quota *grpc_resource_quota_internal_ref(
grpc_resource_quota *resource_quota);
void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota);
grpc_resource_quota *grpc_resource_quota_from_channel_args(
const grpc_channel_args *channel_args);
typedef enum {
GRPC_BULIST_AWAITING_ALLOCATION,
GRPC_BULIST_NON_EMPTY_FREE_POOL,
GRPC_BULIST_RECLAIMER_BENIGN,
GRPC_BULIST_RECLAIMER_DESTRUCTIVE,
GRPC_BULIST_COUNT
} grpc_bulist;
typedef struct grpc_resource_user grpc_resource_user;
typedef struct {
grpc_resource_user *next;
grpc_resource_user *prev;
} grpc_resource_user_link;
struct grpc_resource_user {
grpc_resource_quota *resource_quota;
grpc_closure allocate_closure;
grpc_closure add_to_free_pool_closure;
#ifndef NDEBUG
void *asan_canary;
#endif
gpr_mu mu;
int64_t allocated;
int64_t free_pool;
grpc_closure_list on_allocated;
bool allocating;
bool added_to_free_pool;
grpc_closure *reclaimers[2];
grpc_closure post_reclaimer_closure[2];
grpc_closure destroy_closure;
gpr_atm on_done_destroy_closure;
grpc_resource_user_link links[GRPC_BULIST_COUNT];
char *name;
};
void grpc_resource_user_init(grpc_resource_user *resource_user,
grpc_resource_quota *resource_quota,
const char *name);
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
grpc_closure *on_done);
void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user);
void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user, size_t size,
grpc_closure *optional_on_done);
void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user, size_t size);
void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
bool destructive, grpc_closure *closure);
void grpc_resource_user_finish_reclaimation(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user);
typedef struct grpc_resource_user_slice_allocator {
grpc_closure on_allocated;
grpc_closure on_done;
size_t length;
size_t count;
gpr_slice_buffer *dest;
grpc_resource_user *resource_user;
} grpc_resource_user_slice_allocator;
void grpc_resource_user_slice_allocator_init(
grpc_resource_user_slice_allocator *slice_allocator,
grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p);
void grpc_resource_user_alloc_slices(
grpc_exec_ctx *exec_ctx,
grpc_resource_user_slice_allocator *slice_allocator, size_t length,
size_t count, gpr_slice_buffer *dest);
#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
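The arg vtable above is what lets a quota travel inside channel args. A sketch of attaching one under GRPC_ARG_BUFFER_POOL and recovering it; note that grpc_resource_quota_from_channel_args always returns an owned ref, falling back to a fresh anonymous quota when the key is missing or mistyped:

static grpc_arg make_quota_arg(grpc_resource_quota *rq) {
  grpc_arg arg;
  arg.type = GRPC_ARG_POINTER;
  arg.key = GRPC_ARG_BUFFER_POOL;
  arg.value.pointer.p = rq;
  arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
  return arg;
}

static void quota_arg_roundtrip(grpc_resource_quota *rq) {
  grpc_arg arg = make_quota_arg(rq);
  grpc_channel_args args = {1, &arg};
  grpc_resource_quota *found = grpc_resource_quota_from_channel_args(&args);
  /* found == rq here, with one extra ref that we now drop */
  grpc_resource_quota_unref(found);
}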

@@ -39,6 +39,8 @@
 #include "src/core/lib/iomgr/pollset_set.h"
 #include "src/core/lib/iomgr/sockaddr.h"

+#define GRPC_ARG_TCP_READ_CHUNK_SIZE "grpc.experimental.tcp_read_chunk_size"
+
 /* Asynchronously connect to an address (specified as (addr, len)), and call
    cb with arg and the completed connection when done (or call cb with arg and
    NULL on failure).
@@ -47,6 +49,7 @@
 void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect,
                              grpc_endpoint **endpoint,
                              grpc_pollset_set *interested_parties,
+                             const grpc_channel_args *channel_args,
                              const struct sockaddr *addr, size_t addr_len,
                              gpr_timespec deadline);
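Callers now thread channel args through connect; the posix implementation (next file) pulls GRPC_ARG_TCP_READ_CHUNK_SIZE, clamped to [1, 8MiB], and GRPC_ARG_BUFFER_POOL out of them. A sketch of a caller requesting 16KiB read chunks, assuming the closure, pollset set, and address are set up elsewhere:

static void connect_with_chunk_size(grpc_exec_ctx *exec_ctx,
                                    grpc_closure *on_connect,
                                    grpc_endpoint **ep,
                                    grpc_pollset_set *pollsets,
                                    const struct sockaddr *addr,
                                    size_t addr_len, gpr_timespec deadline) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = GRPC_ARG_TCP_READ_CHUNK_SIZE;
  arg.value.integer = 16 * 1024;
  grpc_channel_args args = {1, &arg};
  grpc_tcp_client_connect(exec_ctx, on_connect, ep, pollsets, &args, addr,
                          addr_len, deadline);
}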

@@ -35,7 +35,7 @@

 #ifdef GPR_POSIX_SOCKET

-#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_client_posix.h"

 #include <errno.h>
 #include <netinet/in.h>
@@ -47,6 +47,7 @@
 #include <grpc/support/string_util.h>
 #include <grpc/support/time.h>

+#include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/iomgr/ev_posix.h"
 #include "src/core/lib/iomgr/iomgr_posix.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -69,6 +70,7 @@ typedef struct {
   char *addr_str;
   grpc_endpoint **ep;
   grpc_closure *closure;
+  grpc_channel_args *channel_args;
 } async_connect;

 static grpc_error *prepare_socket(const struct sockaddr *addr, int fd) {
@@ -114,10 +116,38 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
   if (done) {
     gpr_mu_destroy(&ac->mu);
     gpr_free(ac->addr_str);
+    grpc_channel_args_destroy(ac->channel_args);
     gpr_free(ac);
   }
 }

+grpc_endpoint *grpc_tcp_client_create_from_fd(
+    grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
+    const char *addr_str) {
+  size_t tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
+  grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+  if (channel_args != NULL) {
+    for (size_t i = 0; i < channel_args->num_args; i++) {
+      if (0 ==
+          strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
+        grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+                                        8 * 1024 * 1024};
+        tcp_read_chunk_size = (size_t)grpc_channel_arg_get_integer(
+            &channel_args->args[i], options);
+      } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_BUFFER_POOL)) {
+        grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+        resource_quota = grpc_resource_quota_internal_ref(
+            channel_args->args[i].value.pointer.p);
+      }
+    }
+  }
+
+  grpc_endpoint *ep =
+      grpc_tcp_create(fd, resource_quota, tcp_read_chunk_size, addr_str);
+  grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+  return ep;
+}
+
 static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
   async_connect *ac = acp;
   int so_error = 0;
@@ -165,7 +195,8 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
     switch (so_error) {
       case 0:
         grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
-        *ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str);
+        *ep = grpc_tcp_client_create_from_fd(exec_ctx, fd, ac->channel_args,
+                                             ac->addr_str);
         fd = NULL;
         break;
       case ENOBUFS:
@@ -215,6 +246,7 @@ finish:
   if (done) {
     gpr_mu_destroy(&ac->mu);
     gpr_free(ac->addr_str);
+    grpc_channel_args_destroy(ac->channel_args);
     gpr_free(ac);
   }
   grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
@@ -223,6 +255,7 @@ finish:
 static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
                                     grpc_closure *closure, grpc_endpoint **ep,
                                     grpc_pollset_set *interested_parties,
+                                    const grpc_channel_args *channel_args,
                                     const struct sockaddr *addr,
                                     size_t addr_len, gpr_timespec deadline) {
   int fd;
@@ -271,7 +304,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
   fdobj = grpc_fd_create(fd, name);
   if (err >= 0) {
-    *ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
+    *ep =
+        grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
     grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
     goto done;
   }
@@ -296,6 +330,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
   ac->refs = 2;
   ac->write_closure.cb = on_writable;
   ac->write_closure.cb_arg = ac;
+  ac->channel_args = grpc_channel_args_copy(channel_args);

   if (grpc_tcp_trace) {
     gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
@@ -317,16 +352,18 @@ done:
 // overridden by api_fuzzer.c
 void (*grpc_tcp_client_connect_impl)(
     grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
-    grpc_pollset_set *interested_parties, const struct sockaddr *addr,
-    size_t addr_len, gpr_timespec deadline) = tcp_client_connect_impl;
+    grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
+    const struct sockaddr *addr, size_t addr_len,
+    gpr_timespec deadline) = tcp_client_connect_impl;

 void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                              grpc_endpoint **ep,
                              grpc_pollset_set *interested_parties,
+                             const grpc_channel_args *channel_args,
                              const struct sockaddr *addr, size_t addr_len,
                              gpr_timespec deadline) {
-  grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, addr,
-                               addr_len, deadline);
+  grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+                               channel_args, addr, addr_len, deadline);
 }

 #endif
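Because grpc_tcp_client_connect dispatches through a mutable function pointer, a test can swap in its own transport, which is what the api_fuzzer.c override mentioned above does. A sketch of such an override; my_fake_connect is hypothetical:

extern void (*grpc_tcp_client_connect_impl)(
    grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
    grpc_pollset_set *interested_parties,
    const grpc_channel_args *channel_args, const struct sockaddr *addr,
    size_t addr_len, gpr_timespec deadline);

static void my_fake_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                            grpc_endpoint **ep,
                            grpc_pollset_set *interested_parties,
                            const grpc_channel_args *channel_args,
                            const struct sockaddr *addr, size_t addr_len,
                            gpr_timespec deadline) {
  /* hand back an in-process endpoint instead of dialing out, then run
     closure via grpc_exec_ctx_sched */
}

static void install_fake_connect(void) {
  grpc_tcp_client_connect_impl = my_fake_connect;
}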

@@ -0,0 +1,45 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H
#define GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/tcp_client.h"
grpc_endpoint *grpc_tcp_client_create_from_fd(
grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
const char *addr_str);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */
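The new header exports endpoint creation for a socket that is already connected; the connect path above calls it once poll reports the fd writable. A sketch of wrapping such an fd (sock and the labels are illustrative; grpc_fd_create takes ownership of sock):

static grpc_endpoint *wrap_connected_fd(grpc_exec_ctx *exec_ctx, int sock,
                                        const grpc_channel_args *args) {
  grpc_fd *fd = grpc_fd_create(sock, "wrapped_fd");
  return grpc_tcp_client_create_from_fd(exec_ctx, fd, args, "wrapped:peer");
}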

@@ -80,6 +80,7 @@ typedef struct {
   msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */
   size_t slice_size;
   gpr_refcount refcount;
+  gpr_atm shutdown_count;

   /* garbage after the last read */
   gpr_slice_buffer last_read_buffer;
@@ -100,15 +101,29 @@ typedef struct {
   grpc_closure write_closure;

   char *peer_string;
+
+  grpc_resource_user resource_user;
+  grpc_resource_user_slice_allocator slice_allocator;
 } grpc_tcp;

 static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
                             grpc_error *error);
 static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
                              grpc_error *error);
+static void tcp_unref_closure(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+                              grpc_error *error);
+
+static void tcp_maybe_shutdown_resource_user(grpc_exec_ctx *exec_ctx,
+                                             grpc_tcp *tcp) {
+  if (gpr_atm_full_fetch_add(&tcp->shutdown_count, 1) == 0) {
+    grpc_resource_user_shutdown(exec_ctx, &tcp->resource_user,
+                                grpc_closure_create(tcp_unref_closure, tcp));
+  }
+}
+
 static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
+  tcp_maybe_shutdown_resource_user(exec_ctx, tcp);
   grpc_fd_shutdown(exec_ctx, tcp->em_fd);
 }
@@ -116,6 +131,7 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
   grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
                  "tcp_unref_orphan");
   gpr_slice_buffer_destroy(&tcp->last_read_buffer);
+  grpc_resource_user_destroy(exec_ctx, &tcp->resource_user);
   gpr_free(tcp->peer_string);
   gpr_free(tcp);
 }
@@ -152,9 +168,16 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
 static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
 #endif

+static void tcp_unref_closure(grpc_exec_ctx *exec_ctx, void *arg,
+                              grpc_error *error) {
+  TCP_UNREF(exec_ctx, arg, "resource_user");
+}
+
 static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
   grpc_network_status_unregister_endpoint(ep);
   grpc_tcp *tcp = (grpc_tcp *)ep;
+  tcp_maybe_shutdown_resource_user(exec_ctx, tcp);
+  gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
   TCP_UNREF(exec_ctx, tcp, "destroy");
 }
@@ -181,7 +204,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
 }

 #define MAX_READ_IOVEC 4
-static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
   struct msghdr msg;
   struct iovec iov[MAX_READ_IOVEC];
   ssize_t read_bytes;
@@ -192,10 +215,6 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
   GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
   GPR_TIMER_BEGIN("tcp_continue_read", 0);

-  while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
-    gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
-                                 gpr_slice_malloc(tcp->slice_size));
-  }
   for (i = 0; i < tcp->incoming_buffer->count; i++) {
     iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
     iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
@@ -232,7 +251,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
   } else if (read_bytes == 0) {
     /* 0 read size ==> end of stream */
     gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
-    call_read_cb(exec_ctx, tcp, GRPC_ERROR_CREATE("EOF"));
+    call_read_cb(exec_ctx, tcp, GRPC_ERROR_CREATE("Socket closed"));
     TCP_UNREF(exec_ctx, tcp, "read");
   } else {
     GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
@@ -252,6 +271,30 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
   GPR_TIMER_END("tcp_continue_read", 0);
 }

+static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
+                                     grpc_error *error) {
+  grpc_tcp *tcp = tcpp;
+  if (error != GRPC_ERROR_NONE) {
+    gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+    gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
+    call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
+    TCP_UNREF(exec_ctx, tcp, "read");
+  } else {
+    tcp_do_read(exec_ctx, tcp);
+  }
+}
+
+static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+  if (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
+    grpc_resource_user_alloc_slices(
+        exec_ctx, &tcp->slice_allocator, tcp->slice_size,
+        (size_t)tcp->iov_size - tcp->incoming_buffer->count,
+        tcp->incoming_buffer);
+  } else {
+    tcp_do_read(exec_ctx, tcp);
+  }
+}
+
 static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
                             grpc_error *error) {
   grpc_tcp *tcp = (grpc_tcp *)arg;
@@ -259,6 +302,7 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
   if (error != GRPC_ERROR_NONE) {
     gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+    gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
     call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
     TCP_UNREF(exec_ctx, tcp, "read");
   } else {
@@ -469,6 +513,11 @@ static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
   return grpc_fd_get_workqueue(tcp->em_fd);
 }

+static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) {
+  grpc_tcp *tcp = (grpc_tcp *)ep;
+  return &tcp->resource_user;
+}
+
 static const grpc_endpoint_vtable vtable = {tcp_read,
                                             tcp_write,
                                             tcp_get_workqueue,
@@ -476,10 +525,12 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
                                             tcp_add_to_pollset_set,
                                             tcp_shutdown,
                                             tcp_destroy,
+                                            tcp_get_resource_user,
                                             tcp_get_peer};

-grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
-                               const char *peer_string) {
+grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
+                               grpc_resource_quota *resource_quota,
+                               size_t slice_size, const char *peer_string) {
   grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
   tcp->base.vtable = &vtable;
   tcp->peer_string = gpr_strdup(peer_string);
@@ -492,14 +543,20 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
   tcp->slice_size = slice_size;
   tcp->iov_size = 1;
   tcp->finished_edge = true;
-  /* paired with unref in grpc_tcp_destroy */
-  gpr_ref_init(&tcp->refcount, 1);
+  /* paired with unref in grpc_tcp_destroy, and with the shutdown for our
+   * resource_user */
+  gpr_ref_init(&tcp->refcount, 2);
+  gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
   tcp->em_fd = em_fd;
   tcp->read_closure.cb = tcp_handle_read;
   tcp->read_closure.cb_arg = tcp;
   tcp->write_closure.cb = tcp_handle_write;
   tcp->write_closure.cb_arg = tcp;
   gpr_slice_buffer_init(&tcp->last_read_buffer);
+  grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
+  grpc_resource_user_slice_allocator_init(&tcp->slice_allocator,
+                                          &tcp->resource_user,
+                                          tcp_read_allocation_done, tcp);

   /* Tell network status tracker about new endpoint */
   grpc_network_status_register_endpoint(&tcp->base);
@@ -514,10 +571,13 @@ int grpc_tcp_fd(grpc_endpoint *ep) {

 void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                                      int *fd, grpc_closure *done) {
+  grpc_network_status_unregister_endpoint(ep);
   grpc_tcp *tcp = (grpc_tcp *)ep;
   GPR_ASSERT(ep->vtable == &vtable);
   tcp->release_fd = fd;
   tcp->release_fd_cb = done;
+  tcp_maybe_shutdown_resource_user(exec_ctx, tcp);
+  gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
   TCP_UNREF(exec_ctx, tcp, "destroy");
 }
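The reshaped read path: tcp_continue_read asks the slice allocator for (iov_size - incoming_buffer->count) slices of slice_size bytes, and tcp_do_read runs only after tcp_read_allocation_done reports that the quota granted them, so a refused grant fails the read instead of allocating unaccounted memory. Endpoint creation under the new signature, sketched (socket and peer string illustrative):

static grpc_endpoint *make_tcp_endpoint(grpc_exec_ctx *exec_ctx, int sock) {
  grpc_resource_quota *rq = grpc_resource_quota_create("tcp_demo");
  grpc_endpoint *ep = grpc_tcp_create(grpc_fd_create(sock, "demo"), rq,
                                      GRPC_TCP_DEFAULT_READ_SLICE_SIZE,
                                      "ipv4:127.0.0.1:1234");
  /* the endpoint's embedded resource user now holds its own quota ref */
  grpc_resource_quota_internal_unref(exec_ctx, rq);
  return ep;
}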

@@ -53,8 +53,8 @@ extern int grpc_tcp_trace;

 /* Create a tcp endpoint given a file descriptor and a read slice size.
    Takes ownership of fd. */
-grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
-                               const char *peer_string);
+grpc_endpoint *grpc_tcp_create(grpc_fd *fd, grpc_resource_quota *resource_quota,
+                               size_t read_slice_size, const char *peer_string);

 /* Return the tcp endpoint's fd, or -1 if this is not available. Does not
    release the fd.

@@ -60,7 +60,8 @@ typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
 /* Create a server, initially not bound to any ports. The caller owns one ref.
    If shutdown_complete is not NULL, it will be used by
    grpc_tcp_server_unref() when the ref count reaches zero. */
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+                                   grpc_closure *shutdown_complete,
                                    const grpc_channel_args *args,
                                    grpc_tcp_server **server);

@@ -137,6 +137,8 @@ struct grpc_tcp_server {
   /* next pollset to assign a channel to */
   gpr_atm next_pollset_to_assign;
+
+  grpc_resource_quota *resource_quota;
 };

 static gpr_once check_init = GPR_ONCE_INIT;
@@ -153,23 +155,37 @@ static void init(void) {
 #endif
 }

-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+                                   grpc_closure *shutdown_complete,
                                    const grpc_channel_args *args,
                                    grpc_tcp_server **server) {
   gpr_once_init(&check_init, init);

   grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
   s->so_reuseport = has_so_reuseport;
+  s->resource_quota = grpc_resource_quota_create(NULL);
   for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
     if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) {
       if (args->args[i].type == GRPC_ARG_INTEGER) {
         s->so_reuseport =
             has_so_reuseport && (args->args[i].value.integer != 0);
       } else {
+        grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
         gpr_free(s);
         return GRPC_ERROR_CREATE(GRPC_ARG_ALLOW_REUSEPORT
                                  " must be an integer");
       }
+    } else if (0 == strcmp(GRPC_ARG_BUFFER_POOL, args->args[i].key)) {
+      if (args->args[i].type == GRPC_ARG_POINTER) {
+        grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+        s->resource_quota =
+            grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+      } else {
+        grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+        gpr_free(s);
+        return GRPC_ERROR_CREATE(GRPC_ARG_BUFFER_POOL
+                                 " must be a pointer to a buffer pool");
+      }
     }
   }
   gpr_ref_init(&s->refs, 1);
@@ -206,6 +222,8 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
     gpr_free(sp);
   }

+  grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+
   gpr_free(s);
 }
@@ -422,7 +440,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {

     sp->server->on_accept_cb(
         exec_ctx, sp->server->on_accept_cb_arg,
-        grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
+        grpc_tcp_create(fdobj, sp->server->resource_quota,
+                        GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
         read_notifier_pollset, &acceptor);

     gpr_free(name);
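A server picks its quota out of the channel args once at create time, and every endpoint built in on_read then shares it. A sketch, passing NULL for shutdown_complete as the header permits:

static grpc_error *create_server_with_quota(grpc_exec_ctx *exec_ctx,
                                            grpc_resource_quota *rq,
                                            grpc_tcp_server **server) {
  grpc_arg arg;
  arg.type = GRPC_ARG_POINTER;
  arg.key = GRPC_ARG_BUFFER_POOL;
  arg.value.pointer.p = rq;
  arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
  grpc_channel_args args = {1, &arg};
  return grpc_tcp_server_create(exec_ctx, NULL /* shutdown_complete */,
                                &args, server);
}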

@@ -124,11 +124,14 @@ static int is_stack_running_on_compute_engine(void) {

   grpc_httpcli_context_init(&context);

+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("google_default_credentials");
   grpc_httpcli_get(
-      &exec_ctx, &context, &detector.pollent, &request,
+      &exec_ctx, &context, &detector.pollent, resource_quota, &request,
       gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
       grpc_closure_create(on_compute_engine_detection_http_response, &detector),
       &detector.response);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);

   grpc_exec_ctx_flush(&exec_ctx);

@@ -657,11 +657,17 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
     *(req.host + (req.http.path - jwks_uri)) = '\0';
   }

+  /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+     channel. This would allow us to cancel an authentication query when under
+     extreme memory pressure. */
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("jwt_verifier");
   grpc_httpcli_get(
-      exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
+      exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
       gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
       grpc_closure_create(on_keys_retrieved, ctx),
       &ctx->responses[HTTP_RESPONSE_KEYS]);
+  grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
   grpc_json_destroy(json);
   gpr_free(req.host);
   return;
@@ -764,10 +770,16 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
     rsp_idx = HTTP_RESPONSE_OPENID;
   }

+  /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+     channel. This would allow us to cancel an authentication query when under
+     extreme memory pressure. */
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("jwt_verifier");
   grpc_httpcli_get(
-      exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
+      exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
       gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
       http_cb, &ctx->responses[rsp_idx]);
+  grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
   gpr_free(req.host);
   gpr_free(req.http.path);
   return;

@@ -307,9 +307,15 @@ static void compute_engine_fetch_oauth2(
   request.http.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
   request.http.hdr_count = 1;
   request.http.hdrs = &header;
-  grpc_httpcli_get(exec_ctx, httpcli_context, pollent, &request, deadline,
-                   grpc_closure_create(response_cb, metadata_req),
+  /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+     channel. This would allow us to cancel an authentication query when under
+     extreme memory pressure. */
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("oauth2_credentials");
+  grpc_httpcli_get(exec_ctx, httpcli_context, pollent, resource_quota, &request,
+                   deadline, grpc_closure_create(response_cb, metadata_req),
                    &metadata_req->response);
+  grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
 }

 grpc_call_credentials *grpc_google_compute_engine_credentials_create(
@@ -357,10 +363,16 @@ static void refresh_token_fetch_oauth2(
   request.http.hdr_count = 1;
   request.http.hdrs = &header;
   request.handshaker = &grpc_httpcli_ssl;
-  grpc_httpcli_post(exec_ctx, httpcli_context, pollent, &request, body,
-                    strlen(body), deadline,
+  /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+     channel. This would allow us to cancel an authentication query when under
+     extreme memory pressure. */
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("oauth2_credentials_refresh");
+  grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota,
+                    &request, body, strlen(body), deadline,
                     grpc_closure_create(response_cb, metadata_req),
                     &metadata_req->response);
+  grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
   gpr_free(body);
 }

@@ -370,6 +370,12 @@ static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
   return grpc_endpoint_get_workqueue(ep->wrapped_ep);
 }

+static grpc_resource_user *endpoint_get_resource_user(
+    grpc_endpoint *secure_ep) {
+  secure_endpoint *ep = (secure_endpoint *)secure_ep;
+  return grpc_endpoint_get_resource_user(ep->wrapped_ep);
+}
+
 static const grpc_endpoint_vtable vtable = {endpoint_read,
                                             endpoint_write,
                                             endpoint_get_workqueue,
@@ -377,6 +383,7 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
                                             endpoint_add_to_pollset_set,
                                             endpoint_shutdown,
                                             endpoint_destroy,
+                                            endpoint_get_resource_user,
                                             endpoint_get_peer};

 grpc_endpoint *grpc_secure_endpoint_create(

@@ -37,6 +37,7 @@

 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
 #include <grpc/support/time.h>
 #include <pthread.h>
 #include <stdarg.h>
@@ -93,10 +94,13 @@ void gpr_default_log(gpr_log_func_args *args) {
     strcpy(time_buffer, "error:strftime");
   }

-  fprintf(stderr, "%s%s.%09d %7tu %s:%d] %s\n",
-          gpr_log_severity_string(args->severity), time_buffer,
-          (int)(now.tv_nsec), gettid(), display_file, args->line,
-          args->message);
+  char *prefix;
+  gpr_asprintf(&prefix, "%s%s.%09d %7tu %s:%d]",
+               gpr_log_severity_string(args->severity), time_buffer,
+               (int)(now.tv_nsec), gettid(), display_file, args->line);
+  fprintf(stderr, "%-70s %s\n", prefix, args->message);
+  gpr_free(prefix);
 }

 #endif /* defined(GPR_POSIX_LOG) */

@@ -1515,8 +1515,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
             call, STATUS_FROM_API_OVERRIDE,
             GRPC_MDSTR_REF(call->send_extra_metadata[1].md->value));
       }
-      set_status_code(call, STATUS_FROM_API_OVERRIDE,
-                      (uint32_t)op->data.send_status_from_server.status);
+      if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
+        set_status_code(call, STATUS_FROM_API_OVERRIDE,
+                        (uint32_t)op->data.send_status_from_server.status);
+      }
       if (!prepare_application_metadata(
               call,
               (int)op->data.send_status_from_server.trailing_metadata_count,

@@ -52,6 +52,7 @@
 #include "src/core/lib/iomgr/combiner.h"
 #include "src/core/lib/iomgr/executor.h"
 #include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/resource_quota.h"
 #include "src/core/lib/profiling/timers.h"
 #include "src/core/lib/surface/api_trace.h"
 #include "src/core/lib/surface/call.h"
@@ -191,6 +192,7 @@ void grpc_init(void) {
   // Default timeout trace to 1
   grpc_cq_event_timeout_trace = 1;
   grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
+  grpc_register_tracer("resource_quota", &grpc_resource_quota_trace);
 #ifndef NDEBUG
   grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
 #endif

@@ -842,7 +842,6 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
       grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
   if (error != GRPC_ERROR_NONE) {
     got_initial_metadata(exec_ctx, elem, error);
-    GRPC_ERROR_UNREF(error);
     return;
   }
   call_data *calld = elem->call_data;

@@ -34,6 +34,7 @@
 #include <sstream>

+#include <grpc++/resource_quota.h>
 #include <grpc/impl/codegen/grpc_types.h>
 #include <grpc/support/log.h>
 #include "src/core/lib/channel/channel_args.h"
@@ -113,6 +114,12 @@ void ChannelArguments::SetUserAgentPrefix(
   }
 }

+void ChannelArguments::SetResourceQuota(
+    const grpc::ResourceQuota& resource_quota) {
+  SetPointerWithVtable(GRPC_ARG_BUFFER_POOL, resource_quota.c_resource_quota(),
+                       grpc_resource_quota_arg_vtable());
+}
+
 void ChannelArguments::SetInt(const grpc::string& key, int value) {
   grpc_arg arg;
   arg.type = GRPC_ARG_INTEGER;
@@ -127,12 +134,18 @@ void ChannelArguments::SetPointer(const grpc::string& key, void* value) {
   static const grpc_arg_pointer_vtable vtable = {
       &PointerVtableMembers::Copy, &PointerVtableMembers::Destroy,
       &PointerVtableMembers::Compare};
+  SetPointerWithVtable(key, value, &vtable);
+}
+
+void ChannelArguments::SetPointerWithVtable(
+    const grpc::string& key, void* value,
+    const grpc_arg_pointer_vtable* vtable) {
   grpc_arg arg;
   arg.type = GRPC_ARG_POINTER;
   strings_.push_back(key);
   arg.key = const_cast<char*>(strings_.back().c_str());
   arg.value.pointer.p = value;
-  arg.value.pointer.vtable = &vtable;
+  arg.value.pointer.vtable = vtable;
   args_.push_back(arg);
 }
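
For orientation, a minimal client-side sketch of how the new ChannelArguments::SetResourceQuota hook can be used. The quota name, budget, target address, and use of insecure credentials are illustrative assumptions, not values from this change:

    #include <grpc++/create_channel.h>
    #include <grpc++/resource_quota.h>
    #include <grpc++/security/credentials.h>
    #include <grpc++/support/channel_arguments.h>

    std::shared_ptr<grpc::Channel> MakeBoundedChannel() {
      grpc::ResourceQuota quota("client_quota");  // name is illustrative
      quota.Resize(64 * 1024 * 1024);             // assumed 64MB buffer budget

      grpc::ChannelArguments args;
      args.SetResourceQuota(quota);  // attaches the quota as GRPC_ARG_BUFFER_POOL

      // Target and credentials below are placeholders for the sketch.
      return grpc::CreateCustomChannel(
          "localhost:50051", grpc::InsecureChannelCredentials(), args);
    }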

@@ -0,0 +1,51 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc++/resource_quota.h>
#include <grpc/grpc.h>
namespace grpc {
ResourceQuota::ResourceQuota() : impl_(grpc_resource_quota_create(nullptr)) {}
ResourceQuota::ResourceQuota(const grpc::string& name)
: impl_(grpc_resource_quota_create(name.c_str())) {}
ResourceQuota::~ResourceQuota() { grpc_resource_quota_unref(impl_); }
ResourceQuota& ResourceQuota::Resize(size_t new_size) {
grpc_resource_quota_resize(impl_, new_size);
return *this;
}
} // namespace grpc
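
A quick sketch of this wrapper's semantics (names and sizes below are illustrative, not taken from the commit): the constructor takes a fresh ref on the underlying grpc_resource_quota, Resize() adjusts the memory budget and returns *this so calls can chain, and the destructor releases the ref:

    #include <grpc++/resource_quota.h>

    void ConfigureQuota() {
      grpc::ResourceQuota quota("example_quota");  // trace name, illustrative
      quota.Resize(128 * 1024 * 1024);             // assumed 128MB budget
      // quota's destructor runs grpc_resource_quota_unref(impl_) here.
    }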

@@ -34,6 +34,7 @@
 #include <grpc++/server_builder.h>

 #include <grpc++/impl/service_type.h>
+#include <grpc++/resource_quota.h>
 #include <grpc++/server.h>
 #include <grpc/support/log.h>
 #include <grpc/support/useful.h>
@@ -54,6 +55,7 @@ static void do_plugin_list_init(void) {
 ServerBuilder::ServerBuilder()
     : max_receive_message_size_(-1),
       max_send_message_size_(-1),
+      resource_quota_(nullptr),
       generic_service_(nullptr) {
   gpr_once_init(&once_init_plugin_list, do_plugin_list_init);
   for (auto it = g_plugin_factory_list->begin();
@@ -70,6 +72,12 @@ ServerBuilder::ServerBuilder()
          sizeof(maybe_default_compression_algorithm_));
 }

+ServerBuilder::~ServerBuilder() {
+  if (resource_quota_ != nullptr) {
+    grpc_resource_quota_unref(resource_quota_);
+  }
+}
+
 std::unique_ptr<ServerCompletionQueue> ServerBuilder::AddCompletionQueue(
     bool is_frequently_polled) {
   ServerCompletionQueue* cq = new ServerCompletionQueue(is_frequently_polled);
@@ -130,6 +138,16 @@ ServerBuilder& ServerBuilder::SetDefaultCompressionAlgorithm(
   return *this;
 }

+ServerBuilder& ServerBuilder::SetResourceQuota(
+    const grpc::ResourceQuota& resource_quota) {
+  if (resource_quota_ != nullptr) {
+    grpc_resource_quota_unref(resource_quota_);
+  }
+  resource_quota_ = resource_quota.c_resource_quota();
+  grpc_resource_quota_ref(resource_quota_);
+  return *this;
+}
+
 ServerBuilder& ServerBuilder::AddListeningPort(
     const grpc::string& addr, std::shared_ptr<ServerCredentials> creds,
     int* selected_port) {
@@ -178,6 +196,10 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
     args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
                 maybe_default_compression_algorithm_.algorithm);
   }
+  if (resource_quota_ != nullptr) {
+    args.SetPointerWithVtable(GRPC_ARG_BUFFER_POOL, resource_quota_,
+                              grpc_resource_quota_arg_vtable());
+  }
   std::unique_ptr<Server> server(new Server(thread_pool.release(), true,
                                             max_receive_message_size_, &args));
   ServerInitializer* initializer = server->initializer();
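
A matching server-side sketch under the same caveats (address, credentials, and budget are assumptions for illustration). Because SetResourceQuota takes its own ref, the grpc::ResourceQuota object does not need to outlive the builder:

    #include <grpc++/resource_quota.h>
    #include <grpc++/security/server_credentials.h>
    #include <grpc++/server.h>
    #include <grpc++/server_builder.h>

    std::unique_ptr<grpc::Server> StartBoundedServer() {
      grpc::ResourceQuota quota("server_quota");  // illustrative name
      quota.Resize(256 * 1024 * 1024);            // assumed 256MB budget

      grpc::ServerBuilder builder;
      builder.SetResourceQuota(quota);  // forwarded as GRPC_ARG_BUFFER_POOL
      builder.AddListeningPort("0.0.0.0:50051",
                               grpc::InsecureServerCredentials());
      return builder.BuildAndStart();
    }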

@@ -137,6 +137,11 @@ message ServerConfig {
   // If we use an OTHER_SERVER client_type, this string gives more detail
   string other_server_api = 11;
+
+  // c++-only options (for now) --------------------------------
+  // Buffer pool size (no buffer pool specified if unset)
+  int32 resource_quota_size = 1001;
 }

 message ServerArgs {
@@ -213,6 +218,10 @@ message ScenarioResultSummary
   double latency_95 = 9;
   double latency_99 = 10;
   double latency_999 = 11;
+
+  // Number of requests that succeeded/failed
+  double successful_requests_per_second = 12;
+  double failed_requests_per_second = 13;
 }

 // Results of a single benchmark scenario.
@@ -232,4 +241,6 @@ message ScenarioResult {
   // Information on success or failure of each worker
   repeated bool client_success = 7;
   repeated bool server_success = 8;
+  // Number of failed requests (one row per status code seen)
+  repeated RequestResultCount request_results = 9;
 }

@@ -38,4 +38,5 @@ package grpc.testing.duplicate;
 service EchoTestService {
   rpc Echo(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse);
+  rpc ResponseStream(EchoRequest) returns (stream EchoResponse);
 }

@@ -59,6 +59,11 @@ message HistogramData {
   double count = 6;
 }

+message RequestResultCount {
+  int32 status_code = 1;
+  int64 count = 2;
+}
+
 message ClientStats {
   // Latency histogram. Data points are in nanoseconds.
   HistogramData latencies = 1;
@@ -67,4 +72,7 @@ message ClientStats {
   double time_elapsed = 2;
   double time_user = 3;
   double time_system = 4;
+
+  // Number of failed requests (one row per status code seen)
+  repeated RequestResultCount request_results = 5;
 }

@@ -117,6 +117,7 @@ CORE_SOURCE_FILES = [
   'src/core/lib/iomgr/pollset_windows.c',
   'src/core/lib/iomgr/resolve_address_posix.c',
   'src/core/lib/iomgr/resolve_address_windows.c',
+  'src/core/lib/iomgr/resource_quota.c',
   'src/core/lib/iomgr/sockaddr_utils.c',
   'src/core/lib/iomgr/socket_utils_common_posix.c',
   'src/core/lib/iomgr/socket_utils_linux.c',

@@ -132,6 +132,11 @@ grpc_header_key_is_legal_type grpc_header_key_is_legal_import;
 grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import;
 grpc_is_binary_header_type grpc_is_binary_header_import;
 grpc_call_error_to_string_type grpc_call_error_to_string_import;
+grpc_resource_quota_create_type grpc_resource_quota_create_import;
+grpc_resource_quota_ref_type grpc_resource_quota_ref_import;
+grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
+grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
+grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
 grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
 grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
 grpc_use_signal_type grpc_use_signal_import;
@@ -401,6 +406,11 @@ void grpc_rb_load_imports(HMODULE library) {
   grpc_header_nonbin_value_is_legal_import = (grpc_header_nonbin_value_is_legal_type) GetProcAddress(library, "grpc_header_nonbin_value_is_legal");
   grpc_is_binary_header_import = (grpc_is_binary_header_type) GetProcAddress(library, "grpc_is_binary_header");
   grpc_call_error_to_string_import = (grpc_call_error_to_string_type) GetProcAddress(library, "grpc_call_error_to_string");
+  grpc_resource_quota_create_import = (grpc_resource_quota_create_type) GetProcAddress(library, "grpc_resource_quota_create");
+  grpc_resource_quota_ref_import = (grpc_resource_quota_ref_type) GetProcAddress(library, "grpc_resource_quota_ref");
+  grpc_resource_quota_unref_import = (grpc_resource_quota_unref_type) GetProcAddress(library, "grpc_resource_quota_unref");
+  grpc_resource_quota_resize_import = (grpc_resource_quota_resize_type) GetProcAddress(library, "grpc_resource_quota_resize");
+  grpc_resource_quota_arg_vtable_import = (grpc_resource_quota_arg_vtable_type) GetProcAddress(library, "grpc_resource_quota_arg_vtable");
   grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd");
   grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd");
   grpc_use_signal_import = (grpc_use_signal_type) GetProcAddress(library, "grpc_use_signal");

@@ -347,6 +347,21 @@ extern grpc_is_binary_header_type grpc_is_binary_header_import;
 typedef const char *(*grpc_call_error_to_string_type)(grpc_call_error error);
 extern grpc_call_error_to_string_type grpc_call_error_to_string_import;
 #define grpc_call_error_to_string grpc_call_error_to_string_import
+typedef grpc_resource_quota *(*grpc_resource_quota_create_type)(const char *trace_name);
+extern grpc_resource_quota_create_type grpc_resource_quota_create_import;
+#define grpc_resource_quota_create grpc_resource_quota_create_import
+typedef void(*grpc_resource_quota_ref_type)(grpc_resource_quota *resource_quota);
+extern grpc_resource_quota_ref_type grpc_resource_quota_ref_import;
+#define grpc_resource_quota_ref grpc_resource_quota_ref_import
+typedef void(*grpc_resource_quota_unref_type)(grpc_resource_quota *resource_quota);
+extern grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
+#define grpc_resource_quota_unref grpc_resource_quota_unref_import
+typedef void(*grpc_resource_quota_resize_type)(grpc_resource_quota *resource_quota, size_t new_size);
+extern grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
+#define grpc_resource_quota_resize grpc_resource_quota_resize_import
+typedef const grpc_arg_pointer_vtable *(*grpc_resource_quota_arg_vtable_type)(void);
+extern grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
+#define grpc_resource_quota_arg_vtable grpc_resource_quota_arg_vtable_import
 typedef grpc_channel *(*grpc_insecure_channel_create_from_fd_type)(const char *target, int fd, const grpc_channel_args *args);
 extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
 #define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import

@@ -114,7 +114,10 @@ void grpc_run_bad_client_test(
   grpc_init();

   /* Create endpoints */
-  sfd = grpc_iomgr_create_endpoint_pair("fixture", 65536);
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("bad_client_test");
+  sfd = grpc_iomgr_create_endpoint_pair("fixture", resource_quota, 65536);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);

   /* Create server, completion events */
   a.server = grpc_server_create(NULL, NULL);

@@ -111,6 +111,8 @@ extern void request_with_flags(grpc_end2end_test_config config);
 extern void request_with_flags_pre_init(void);
 extern void request_with_payload(grpc_end2end_test_config config);
 extern void request_with_payload_pre_init(void);
+extern void resource_quota_server(grpc_end2end_test_config config);
+extern void resource_quota_server_pre_init(void);
 extern void server_finishes_request(grpc_end2end_test_config config);
 extern void server_finishes_request_pre_init(void);
 extern void shutdown_finishes_calls(grpc_end2end_test_config config);
@@ -167,6 +169,7 @@ void grpc_end2end_tests_pre_init(void) {
   registered_call_pre_init();
   request_with_flags_pre_init();
   request_with_payload_pre_init();
+  resource_quota_server_pre_init();
   server_finishes_request_pre_init();
   shutdown_finishes_calls_pre_init();
   shutdown_finishes_tags_pre_init();
@@ -219,6 +222,7 @@ void grpc_end2end_tests(int argc, char **argv,
   registered_call(config);
   request_with_flags(config);
   request_with_payload(config);
+  resource_quota_server(config);
   server_finishes_request(config);
   shutdown_finishes_calls(config);
   shutdown_finishes_tags(config);
@@ -368,6 +372,10 @@ void grpc_end2end_tests(int argc, char **argv,
     request_with_payload(config);
     continue;
   }
+  if (0 == strcmp("resource_quota_server", argv[i])) {
+    resource_quota_server(config);
+    continue;
+  }
   if (0 == strcmp("server_finishes_request", argv[i])) {
     server_finishes_request(config);
     continue;

@@ -113,6 +113,8 @@ extern void request_with_flags(grpc_end2end_test_config config);
 extern void request_with_flags_pre_init(void);
 extern void request_with_payload(grpc_end2end_test_config config);
 extern void request_with_payload_pre_init(void);
+extern void resource_quota_server(grpc_end2end_test_config config);
+extern void resource_quota_server_pre_init(void);
 extern void server_finishes_request(grpc_end2end_test_config config);
 extern void server_finishes_request_pre_init(void);
 extern void shutdown_finishes_calls(grpc_end2end_test_config config);
@@ -170,6 +172,7 @@ void grpc_end2end_tests_pre_init(void) {
   registered_call_pre_init();
   request_with_flags_pre_init();
   request_with_payload_pre_init();
+  resource_quota_server_pre_init();
   server_finishes_request_pre_init();
   shutdown_finishes_calls_pre_init();
   shutdown_finishes_tags_pre_init();
@@ -223,6 +226,7 @@ void grpc_end2end_tests(int argc, char **argv,
   registered_call(config);
   request_with_flags(config);
   request_with_payload(config);
+  resource_quota_server(config);
   server_finishes_request(config);
   shutdown_finishes_calls(config);
   shutdown_finishes_tags(config);
@@ -376,6 +380,10 @@ void grpc_end2end_tests(int argc, char **argv,
     request_with_payload(config);
     continue;
   }
+  if (0 == strcmp("resource_quota_server", argv[i])) {
+    resource_quota_server(config);
+    continue;
+  }
   if (0 == strcmp("server_finishes_request", argv[i])) {
     server_finishes_request(config);
     continue;

@@ -91,7 +91,9 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
   f.fixture_data = sfd;
   f.cq = grpc_completion_queue_create(NULL);

-  *sfd = grpc_iomgr_create_endpoint_pair("fixture", 65536);
+  grpc_resource_quota *resource_quota = grpc_resource_quota_create("fixture");
+  *sfd = grpc_iomgr_create_endpoint_pair("fixture", resource_quota, 65536);
+  grpc_resource_quota_unref(resource_quota);

   return f;
 }

@@ -90,7 +90,9 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
   f.fixture_data = sfd;
   f.cq = grpc_completion_queue_create(NULL);

-  *sfd = grpc_iomgr_create_endpoint_pair("fixture", 65536);
+  grpc_resource_quota *resource_quota = grpc_resource_quota_create("fixture");
+  *sfd = grpc_iomgr_create_endpoint_pair("fixture", resource_quota, 65536);
+  grpc_resource_quota_unref(resource_quota);

   return f;
 }

@@ -90,7 +90,9 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
   f.fixture_data = sfd;
   f.cq = grpc_completion_queue_create(NULL);

-  *sfd = grpc_iomgr_create_endpoint_pair("fixture", 1);
+  grpc_resource_quota *resource_quota = grpc_resource_quota_create("fixture");
+  *sfd = grpc_iomgr_create_endpoint_pair("fixture", resource_quota, 1);
+  grpc_resource_quota_unref(resource_quota);

   return f;
 }

@@ -357,7 +357,7 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg,
   const gpr_timespec deadline = gpr_time_add(
       gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(10, GPR_TIMESPAN));
   grpc_tcp_client_connect(exec_ctx, &conn->on_server_connect_done,
-                          &conn->server_endpoint, conn->pollset_set,
+                          &conn->server_endpoint, conn->pollset_set, NULL,
                           (struct sockaddr*)&resolved_addresses->addrs[0].addr,
                           resolved_addresses->addrs[0].len, deadline);
   grpc_resolved_addresses_destroy(resolved_addresses);
@@ -417,7 +417,8 @@ static void thread_main(void* arg) {
   grpc_exec_ctx_finish(&exec_ctx);
 }

-grpc_end2end_http_proxy* grpc_end2end_http_proxy_create() {
+grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) {
+  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_end2end_http_proxy* proxy = gpr_malloc(sizeof(*proxy));
   memset(proxy, 0, sizeof(*proxy));
   // Construct proxy address.
@@ -426,8 +427,8 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create() {
   gpr_log(GPR_INFO, "Proxy address: %s", proxy->proxy_name);
   // Create TCP server.
   proxy->channel_args = grpc_channel_args_copy(NULL);
-  grpc_error* error =
-      grpc_tcp_server_create(NULL, proxy->channel_args, &proxy->server);
+  grpc_error* error = grpc_tcp_server_create(
+      &exec_ctx, NULL, proxy->channel_args, &proxy->server);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   // Bind to port.
   struct sockaddr_in addr;
@@ -442,7 +443,6 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create() {
   // Start server.
   proxy->pollset = gpr_malloc(grpc_pollset_size());
   grpc_pollset_init(proxy->pollset, &proxy->mu);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_tcp_server_start(&exec_ctx, proxy->server, &proxy->pollset, 1, on_accept,
                         proxy);
   grpc_exec_ctx_finish(&exec_ctx);

@@ -173,6 +173,7 @@ static bool is_eof(input_stream *inp) { return inp->cur == inp->end; }
 static gpr_timespec g_now;
 static grpc_server *g_server;
 static grpc_channel *g_channel;
+static grpc_resource_quota *g_resource_quota;

 extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
@@ -231,8 +232,8 @@ void my_resolve_address(grpc_exec_ctx *exec_ctx, const char *addr,
 // defined in tcp_client_posix.c
 extern void (*grpc_tcp_client_connect_impl)(
     grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
-    grpc_pollset_set *interested_parties, const struct sockaddr *addr,
-    size_t addr_len, gpr_timespec deadline);
+    grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
+    const struct sockaddr *addr, size_t addr_len, gpr_timespec deadline);

 static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                           grpc_endpoint **ep, gpr_timespec deadline);
@@ -252,7 +253,7 @@ static void do_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
   } else if (g_server != NULL) {
     grpc_endpoint *client;
     grpc_endpoint *server;
-    grpc_passthru_endpoint_create(&client, &server);
+    grpc_passthru_endpoint_create(&client, &server, g_resource_quota);
     *fc->ep = client;

     grpc_transport *transport =
@@ -289,6 +290,7 @@ static void sched_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
 static void my_tcp_client_connect(grpc_exec_ctx *exec_ctx,
                                   grpc_closure *closure, grpc_endpoint **ep,
                                   grpc_pollset_set *interested_parties,
+                                  const grpc_channel_args *channel_args,
                                   const struct sockaddr *addr, size_t addr_len,
                                   gpr_timespec deadline) {
   sched_connect(exec_ctx, closure, ep, deadline);
@@ -520,6 +522,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
   int pending_pings = 0;

   g_active_call = new_call(NULL, ROOT);
+  g_resource_quota = grpc_resource_quota_create("api_fuzzer");

   grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
@@ -939,6 +942,11 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
         }
         break;
       }
+      // resize the buffer pool
+      case 21: {
+        grpc_resource_quota_resize(g_resource_quota, read_uint22(&inp));
+        break;
+      }
     }
   }
@@ -954,6 +962,8 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
                  .type == GRPC_QUEUE_SHUTDOWN);
   grpc_completion_queue_destroy(cq);

+  grpc_resource_quota_unref(g_resource_quota);
+
   grpc_shutdown();
   return 0;
 }

@@ -58,7 +58,11 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
   grpc_init();
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

-  grpc_endpoint *mock_endpoint = grpc_mock_endpoint_create(discard_write);
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("client_fuzzer");
+  grpc_endpoint *mock_endpoint =
+      grpc_mock_endpoint_create(discard_write, resource_quota);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);

   grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
   grpc_transport *transport =

@@ -56,7 +56,11 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
   grpc_init();
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

-  grpc_endpoint *mock_endpoint = grpc_mock_endpoint_create(discard_write);
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("server_fuzzer");
+  grpc_endpoint *mock_endpoint =
+      grpc_mock_endpoint_create(discard_write, resource_quota);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);

   grpc_mock_endpoint_put_read(
       &exec_ctx, mock_endpoint,
       gpr_slice_from_copied_buffer((const char *)data, size));

@@ -39,9 +39,9 @@ import hashlib

 FixtureOptions = collections.namedtuple(
   'FixtureOptions',
-  'fullstack includes_proxy dns_resolver secure platforms ci_mac tracing exclude_configs')
+  'fullstack includes_proxy dns_resolver secure platforms ci_mac tracing exclude_configs large_writes')
 default_unsecure_fixture_options = FixtureOptions(
-    True, False, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [])
+    True, False, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], True)
 socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False)
 default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True)
 uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'])
@@ -66,10 +66,10 @@ END2END_FIXTURES = {
   'h2_proxy': default_unsecure_fixture_options._replace(includes_proxy=True,
                                                         ci_mac=False),
   'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(
-      ci_mac=False, exclude_configs=['msan']),
+      ci_mac=False, exclude_configs=['msan'], large_writes=False),
   'h2_sockpair': socketpair_unsecure_fixture_options._replace(ci_mac=False),
   'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace(
-      ci_mac=False, tracing=True),
+      ci_mac=False, tracing=True, large_writes=False),
   'h2_ssl': default_secure_fixture_options,
   'h2_ssl_cert': default_secure_fixture_options,
   'h2_ssl_proxy': default_secure_fixture_options._replace(includes_proxy=True,
@@ -79,8 +79,8 @@ END2END_FIXTURES = {

 TestOptions = collections.namedtuple(
     'TestOptions',
-    'needs_fullstack needs_dns proxyable secure traceable cpu_cost')
-default_test_options = TestOptions(False, False, True, False, True, 1.0)
+    'needs_fullstack needs_dns proxyable secure traceable cpu_cost large_writes')
+default_test_options = TestOptions(False, False, True, False, True, 1.0, False)
 connectivity_test_options = default_test_options._replace(needs_fullstack=True)

 LOWCPU = 0.1
@@ -89,6 +89,8 @@ LOWCPU = 0.1
 END2END_TESTS = {
   'bad_hostname': default_test_options,
   'binary_metadata': default_test_options,
+  'resource_quota_server': default_test_options._replace(large_writes=True,
+                                                         proxyable=False),
   'call_creds': default_test_options._replace(secure=True),
   'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU),
   'cancel_after_client_done': default_test_options,
@@ -151,6 +153,9 @@ def compatible(f, t):
   if not END2END_TESTS[t].traceable:
     if END2END_FIXTURES[f].tracing:
       return False
+  if END2END_TESTS[t].large_writes:
+    if not END2END_FIXTURES[f].large_writes:
+      return False
   return True

@@ -0,0 +1,353 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/end2end/cq_verifier.h"
static void *tag(intptr_t t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_server(&f, server_args);
config.init_client(&f, client_args);
return f;
}
static gpr_timespec n_seconds_time(int n) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
}
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(
f->cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5), NULL)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
}
/* Creates and returns a gpr_slice containing random alphanumeric characters. */
static gpr_slice generate_random_slice() {
size_t i;
static const char chars[] = "abcdefghijklmnopqrstuvwxyz1234567890";
char output[1024 * 1024];
for (i = 0; i < GPR_ARRAY_SIZE(output) - 1; ++i) {
output[i] = chars[rand() % (int)(sizeof(chars) - 1)];
}
output[GPR_ARRAY_SIZE(output) - 1] = '\0';
return gpr_slice_from_copied_string(output);
}
void resource_quota_server(grpc_end2end_test_config config) {
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("test_server");
grpc_resource_quota_resize(resource_quota, 5 * 1024 * 1024);
#define NUM_CALLS 100
#define CLIENT_BASE_TAG 1000
#define SERVER_START_BASE_TAG 2000
#define SERVER_RECV_BASE_TAG 3000
#define SERVER_END_BASE_TAG 4000
grpc_arg arg;
arg.key = GRPC_ARG_BUFFER_POOL;
arg.type = GRPC_ARG_POINTER;
arg.value.pointer.p = resource_quota;
arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
grpc_channel_args args = {1, &arg};
grpc_end2end_test_fixture f =
begin_test(config, "resource_quota_server", NULL, &args);
/* Create large request and response bodies. These are big enough to require
 * multiple round trips to deliver to the peer, and their exact contents
 * will be verified on completion. */
gpr_slice request_payload_slice = generate_random_slice();
grpc_call *client_calls[NUM_CALLS];
grpc_call *server_calls[NUM_CALLS];
grpc_metadata_array initial_metadata_recv[NUM_CALLS];
grpc_metadata_array trailing_metadata_recv[NUM_CALLS];
grpc_metadata_array request_metadata_recv[NUM_CALLS];
grpc_call_details call_details[NUM_CALLS];
grpc_status_code status[NUM_CALLS];
char *details[NUM_CALLS];
size_t details_capacity[NUM_CALLS];
grpc_byte_buffer *request_payload_recv[NUM_CALLS];
int was_cancelled[NUM_CALLS];
grpc_call_error error;
int pending_client_calls = 0;
int pending_server_start_calls = 0;
int pending_server_recv_calls = 0;
int pending_server_end_calls = 0;
int cancelled_calls_on_client = 0;
int cancelled_calls_on_server = 0;
grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_op ops[6];
grpc_op *op;
for (int i = 0; i < NUM_CALLS; i++) {
grpc_metadata_array_init(&initial_metadata_recv[i]);
grpc_metadata_array_init(&trailing_metadata_recv[i]);
grpc_metadata_array_init(&request_metadata_recv[i]);
grpc_call_details_init(&call_details[i]);
details[i] = NULL;
details_capacity[i] = 0;
request_payload_recv[i] = NULL;
was_cancelled[i] = 0;
}
for (int i = 0; i < NUM_CALLS; i++) {
error = grpc_server_request_call(
f.server, &server_calls[i], &call_details[i], &request_metadata_recv[i],
f.cq, f.cq, tag(SERVER_START_BASE_TAG + i));
GPR_ASSERT(GRPC_CALL_OK == error);
pending_server_start_calls++;
}
for (int i = 0; i < NUM_CALLS; i++) {
client_calls[i] = grpc_channel_create_call(
f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq, "/foo",
"foo.test.google.fr", n_seconds_time(60), NULL);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv[i];
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata =
&trailing_metadata_recv[i];
op->data.recv_status_on_client.status = &status[i];
op->data.recv_status_on_client.status_details = &details[i];
op->data.recv_status_on_client.status_details_capacity =
&details_capacity[i];
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(client_calls[i], ops, (size_t)(op - ops),
tag(CLIENT_BASE_TAG + i), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
pending_client_calls++;
}
while (pending_client_calls + pending_server_recv_calls +
pending_server_end_calls >
0) {
grpc_event ev = grpc_completion_queue_next(f.cq, n_seconds_time(10), NULL);
GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
int ev_tag = (int)(intptr_t)ev.tag;
if (ev_tag < CLIENT_BASE_TAG) {
abort(); /* illegal tag */
} else if (ev_tag < SERVER_START_BASE_TAG) {
/* client call finished */
int call_id = ev_tag - CLIENT_BASE_TAG;
GPR_ASSERT(call_id >= 0);
GPR_ASSERT(call_id < NUM_CALLS);
switch (status[call_id]) {
case GRPC_STATUS_RESOURCE_EXHAUSTED:
cancelled_calls_on_client++;
break;
case GRPC_STATUS_OK:
break;
default:
gpr_log(GPR_ERROR, "Unexpected status code: %d", status[call_id]);
abort();
}
GPR_ASSERT(pending_client_calls > 0);
grpc_metadata_array_destroy(&initial_metadata_recv[call_id]);
grpc_metadata_array_destroy(&trailing_metadata_recv[call_id]);
grpc_call_destroy(client_calls[call_id]);
gpr_free(details[call_id]);
pending_client_calls--;
} else if (ev_tag < SERVER_RECV_BASE_TAG) {
/* new incoming call to the server */
int call_id = ev_tag - SERVER_START_BASE_TAG;
GPR_ASSERT(call_id >= 0);
GPR_ASSERT(call_id < NUM_CALLS);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv[call_id];
op->flags = 0;
op->reserved = NULL;
op++;
error =
grpc_call_start_batch(server_calls[call_id], ops, (size_t)(op - ops),
tag(SERVER_RECV_BASE_TAG + call_id), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
GPR_ASSERT(pending_server_start_calls > 0);
pending_server_start_calls--;
pending_server_recv_calls++;
grpc_call_details_destroy(&call_details[call_id]);
grpc_metadata_array_destroy(&request_metadata_recv[call_id]);
} else if (ev_tag < SERVER_END_BASE_TAG) {
/* finished read on the server */
int call_id = ev_tag - SERVER_RECV_BASE_TAG;
GPR_ASSERT(call_id >= 0);
GPR_ASSERT(call_id < NUM_CALLS);
if (ev.success) {
if (request_payload_recv[call_id] != NULL) {
grpc_byte_buffer_destroy(request_payload_recv[call_id]);
request_payload_recv[call_id] = NULL;
}
} else {
GPR_ASSERT(request_payload_recv[call_id] == NULL);
}
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled[call_id];
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op->reserved = NULL;
op++;
error =
grpc_call_start_batch(server_calls[call_id], ops, (size_t)(op - ops),
tag(SERVER_END_BASE_TAG + call_id), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
GPR_ASSERT(pending_server_recv_calls > 0);
pending_server_recv_calls--;
pending_server_end_calls++;
} else {
int call_id = ev_tag - SERVER_END_BASE_TAG;
GPR_ASSERT(call_id >= 0);
GPR_ASSERT(call_id < NUM_CALLS);
if (was_cancelled[call_id]) {
cancelled_calls_on_server++;
}
GPR_ASSERT(pending_server_end_calls > 0);
pending_server_end_calls--;
grpc_call_destroy(server_calls[call_id]);
}
}
gpr_log(
GPR_INFO,
"Done. %d total calls: %d cancelled at server, %d cancelled at client.",
NUM_CALLS, cancelled_calls_on_server, cancelled_calls_on_client);
GPR_ASSERT(cancelled_calls_on_client >= cancelled_calls_on_server);
GPR_ASSERT(cancelled_calls_on_server >= 0.9 * cancelled_calls_on_client);
grpc_byte_buffer_destroy(request_payload);
gpr_slice_unref(request_payload_slice);
grpc_resource_quota_unref(resource_quota);
end_test(&f);
config.tear_down_data(&f);
}
void resource_quota_server_pre_init(void) {}

@@ -402,7 +402,6 @@ static void test_max_message_length_on_response(grpc_end2end_test_config config,
   GPR_ASSERT(0 == strcmp(call_details.method, "/service/method"));
   GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234"));
-  GPR_ASSERT(was_cancelled == 0);

   GPR_ASSERT(status == GRPC_STATUS_INVALID_ARGUMENT);
   GPR_ASSERT(strcmp(details,

@@ -213,7 +213,6 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) {
   GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE);
   GPR_ASSERT(0 == strcmp(call_details.method, "/foo"));
   GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr"));
-  GPR_ASSERT(was_cancelled == 0);

   gpr_free(details);
   grpc_metadata_array_destroy(&initial_metadata_recv);

@@ -0,0 +1,353 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/end2end/cq_verifier.h"
static void *tag(intptr_t t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_server(&f, server_args);
config.init_client(&f, client_args, NULL);
return f;
}
static gpr_timespec n_seconds_time(int n) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
}
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(
f->cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5), NULL)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
}
/* Creates and returns a gpr_slice containing random alphanumeric characters. */
static gpr_slice generate_random_slice() {
size_t i;
static const char chars[] = "abcdefghijklmnopqrstuvwxyz1234567890";
char output[1024 * 1024];
for (i = 0; i < GPR_ARRAY_SIZE(output) - 1; ++i) {
output[i] = chars[rand() % (int)(sizeof(chars) - 1)];
}
output[GPR_ARRAY_SIZE(output) - 1] = '\0';
return gpr_slice_from_copied_string(output);
}
void resource_quota_server(grpc_end2end_test_config config) {
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("test_server");
grpc_resource_quota_resize(resource_quota, 5 * 1024 * 1024);
#define NUM_CALLS 100
#define CLIENT_BASE_TAG 1000
#define SERVER_START_BASE_TAG 2000
#define SERVER_RECV_BASE_TAG 3000
#define SERVER_END_BASE_TAG 4000
grpc_arg arg;
arg.key = GRPC_ARG_BUFFER_POOL;
arg.type = GRPC_ARG_POINTER;
arg.value.pointer.p = resource_quota;
arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
grpc_channel_args args = {1, &arg};
grpc_end2end_test_fixture f =
begin_test(config, "resource_quota_server", NULL, &args);
/* Create large request and response bodies. These are big enough to require
 * multiple round trips to deliver to the peer, and their exact contents
 * will be verified on completion. */
gpr_slice request_payload_slice = generate_random_slice();
grpc_call *client_calls[NUM_CALLS];
grpc_call *server_calls[NUM_CALLS];
grpc_metadata_array initial_metadata_recv[NUM_CALLS];
grpc_metadata_array trailing_metadata_recv[NUM_CALLS];
grpc_metadata_array request_metadata_recv[NUM_CALLS];
grpc_call_details call_details[NUM_CALLS];
grpc_status_code status[NUM_CALLS];
char *details[NUM_CALLS];
size_t details_capacity[NUM_CALLS];
grpc_byte_buffer *request_payload_recv[NUM_CALLS];
int was_cancelled[NUM_CALLS];
grpc_call_error error;
int pending_client_calls = 0;
int pending_server_start_calls = 0;
int pending_server_recv_calls = 0;
int pending_server_end_calls = 0;
int cancelled_calls_on_client = 0;
int cancelled_calls_on_server = 0;
grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_op ops[6];
grpc_op *op;
for (int i = 0; i < NUM_CALLS; i++) {
grpc_metadata_array_init(&initial_metadata_recv[i]);
grpc_metadata_array_init(&trailing_metadata_recv[i]);
grpc_metadata_array_init(&request_metadata_recv[i]);
grpc_call_details_init(&call_details[i]);
details[i] = NULL;
details_capacity[i] = 0;
request_payload_recv[i] = NULL;
was_cancelled[i] = 0;
}
for (int i = 0; i < NUM_CALLS; i++) {
error = grpc_server_request_call(
f.server, &server_calls[i], &call_details[i], &request_metadata_recv[i],
f.cq, f.cq, tag(SERVER_START_BASE_TAG + i));
GPR_ASSERT(GRPC_CALL_OK == error);
pending_server_start_calls++;
}
for (int i = 0; i < NUM_CALLS; i++) {
client_calls[i] = grpc_channel_create_call(
f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq, "/foo",
"foo.test.google.fr", n_seconds_time(60), NULL);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv[i];
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata =
&trailing_metadata_recv[i];
op->data.recv_status_on_client.status = &status[i];
op->data.recv_status_on_client.status_details = &details[i];
op->data.recv_status_on_client.status_details_capacity =
&details_capacity[i];
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(client_calls[i], ops, (size_t)(op - ops),
tag(CLIENT_BASE_TAG + i), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
pending_client_calls++;
}
while (pending_client_calls + pending_server_recv_calls +
pending_server_end_calls >
0) {
grpc_event ev = grpc_completion_queue_next(f.cq, n_seconds_time(10), NULL);
GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
int ev_tag = (int)(intptr_t)ev.tag;
if (ev_tag < CLIENT_BASE_TAG) {
abort(); /* illegal tag */
} else if (ev_tag < SERVER_START_BASE_TAG) {
/* client call finished */
int call_id = ev_tag - CLIENT_BASE_TAG;
GPR_ASSERT(call_id >= 0);
GPR_ASSERT(call_id < NUM_CALLS);
switch (status[call_id]) {
case GRPC_STATUS_RESOURCE_EXHAUSTED:
cancelled_calls_on_client++;
break;
case GRPC_STATUS_OK:
break;
default:
gpr_log(GPR_ERROR, "Unexpected status code: %d", status[call_id]);
abort();
}
GPR_ASSERT(pending_client_calls > 0);
grpc_metadata_array_destroy(&initial_metadata_recv[call_id]);
grpc_metadata_array_destroy(&trailing_metadata_recv[call_id]);
grpc_call_destroy(client_calls[call_id]);
gpr_free(details[call_id]);
pending_client_calls--;
} else if (ev_tag < SERVER_RECV_BASE_TAG) {
/* new incoming call to the server */
int call_id = ev_tag - SERVER_START_BASE_TAG;
GPR_ASSERT(call_id >= 0);
GPR_ASSERT(call_id < NUM_CALLS);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &request_payload_recv[call_id];
op->flags = 0;
op->reserved = NULL;
op++;
error =
grpc_call_start_batch(server_calls[call_id], ops, (size_t)(op - ops),
tag(SERVER_RECV_BASE_TAG + call_id), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
GPR_ASSERT(pending_server_start_calls > 0);
pending_server_start_calls--;
pending_server_recv_calls++;
grpc_call_details_destroy(&call_details[call_id]);
grpc_metadata_array_destroy(&request_metadata_recv[call_id]);
} else if (ev_tag < SERVER_END_BASE_TAG) {
/* finished read on the server */
int call_id = ev_tag - SERVER_RECV_BASE_TAG;
GPR_ASSERT(call_id >= 0);
GPR_ASSERT(call_id < NUM_CALLS);
if (ev.success) {
if (request_payload_recv[call_id] != NULL) {
grpc_byte_buffer_destroy(request_payload_recv[call_id]);
request_payload_recv[call_id] = NULL;
}
} else {
GPR_ASSERT(request_payload_recv[call_id] == NULL);
}
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled[call_id];
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_OK;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op->reserved = NULL;
op++;
error =
grpc_call_start_batch(server_calls[call_id], ops, (size_t)(op - ops),
tag(SERVER_END_BASE_TAG + call_id), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
GPR_ASSERT(pending_server_recv_calls > 0);
pending_server_recv_calls--;
pending_server_end_calls++;
} else {
int call_id = ev_tag - SERVER_END_BASE_TAG;
GPR_ASSERT(call_id >= 0);
GPR_ASSERT(call_id < NUM_CALLS);
if (was_cancelled[call_id]) {
cancelled_calls_on_server++;
}
GPR_ASSERT(pending_server_end_calls > 0);
pending_server_end_calls--;
grpc_call_destroy(server_calls[call_id]);
}
}
gpr_log(
GPR_INFO,
"Done. %d total calls: %d cancelled at server, %d cancelled at client.",
NUM_CALLS, cancelled_calls_on_server, cancelled_calls_on_client);
GPR_ASSERT(cancelled_calls_on_client >= cancelled_calls_on_server);
GPR_ASSERT(cancelled_calls_on_server >= 0.9 * cancelled_calls_on_client);
grpc_byte_buffer_destroy(request_payload);
gpr_slice_unref(request_payload_slice);
grpc_resource_quota_unref(resource_quota);
end_test(&f);
config.tear_down_data(&f);
}
void resource_quota_server_pre_init(void) {}
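For context, here is a minimal sketch of how a server can be pointed at a quota like the one this test exhausts. The grpc_resource_quota_* calls appear in this change; the GRPC_ARG_RESOURCE_QUOTA key and grpc_resource_quota_arg_vtable() accessor are assumed to be the channel-args wiring exported alongside them (see the grpc.h changes in this diff), not confirmed here:

/* Assumed wiring: attach a 1MB quota to a server through channel args.
   GRPC_ARG_RESOURCE_QUOTA and grpc_resource_quota_arg_vtable() are used on
   the assumption that grpc.h exports them in this change. */
grpc_resource_quota *rq = grpc_resource_quota_create("server_quota");
grpc_resource_quota_resize(rq, 1024 * 1024); /* 1MB ceiling */
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
arg.key = GRPC_ARG_RESOURCE_QUOTA;
arg.value.pointer.p = rq;
arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
grpc_channel_args args = {1, &arg};
/* ... pass &args when creating the server, run traffic ... */
grpc_resource_quota_unref(rq); /* the copied channel args hold their own ref */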

@@ -89,8 +89,11 @@ static void test_get(int port) {
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
-  grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, &req, n_seconds_time(15),
+  grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
+  grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
+                   n_seconds_time(15),
                    grpc_closure_create(on_finish, &response), &response);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
     grpc_pollset_worker *worker = NULL;
@@ -126,9 +129,11 @@ static void test_post(int port) {
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
-  grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, &req, "hello", 5,
-                    n_seconds_time(15),
+  grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
+  grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
+                    "hello", 5, n_seconds_time(15),
                     grpc_closure_create(on_finish, &response), &response);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
     grpc_pollset_worker *worker = NULL;
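The same three-step pattern repeats in every HTTP-client call site that follows: create a per-request quota, hand it to the client, then drop the local reference (the _internal_ variant needs an exec_ctx). A freestanding sketch, assuming, as the immediate unref implies, that grpc_httpcli_get() retains its own reference:

/* Per-request quota pattern used throughout these call sites. */
grpc_resource_quota *rq = grpc_resource_quota_create("my_fetch");
grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, rq, &req, n_seconds_time(15),
                 grpc_closure_create(on_finish, &response), &response);
grpc_resource_quota_internal_unref(&exec_ctx, rq);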

@@ -90,8 +90,11 @@ static void test_get(int port) {
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
-  grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, &req, n_seconds_time(15),
+  grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
+  grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
+                   n_seconds_time(15),
                    grpc_closure_create(on_finish, &response), &response);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
     grpc_pollset_worker *worker = NULL;
@@ -128,9 +131,11 @@ static void test_post(int port) {
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
-  grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, &req, "hello", 5,
-                    n_seconds_time(15),
+  grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
+  grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
+                    "hello", 5, n_seconds_time(15),
                     grpc_closure_create(on_finish, &response), &response);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
     grpc_pollset_worker *worker = NULL;

@@ -84,6 +84,7 @@ static void test_code(void) {
       grpc_endpoint_add_to_pollset_set,
       grpc_endpoint_shutdown,
       grpc_endpoint_destroy,
+      grpc_endpoint_get_resource_user,
       grpc_endpoint_get_peer};
   endpoint.vtable = &vtable;

@@ -49,7 +49,11 @@ static grpc_endpoint_test_fixture create_fixture_endpoint_pair(
     size_t slice_size) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_endpoint_test_fixture f;
-  grpc_endpoint_pair p = grpc_iomgr_create_endpoint_pair("test", slice_size);
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("endpoint_pair_test");
+  grpc_endpoint_pair p =
+      grpc_iomgr_create_endpoint_pair("test", resource_quota, slice_size);
+  grpc_resource_quota_unref(resource_quota);

   f.client_ep = p.client;
   f.server_ep = p.server;

@@ -52,15 +52,19 @@ int main(int argc, char **argv) {
      of descriptors */
   rlim.rlim_cur = rlim.rlim_max = 10;
   GPR_ASSERT(0 == setrlimit(RLIMIT_NOFILE, &rlim));

+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("fd_conservation_posix_test");
+
   for (i = 0; i < 100; i++) {
     grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    p = grpc_iomgr_create_endpoint_pair("test", 1);
+    p = grpc_iomgr_create_endpoint_pair("test", resource_quota, 1);
     grpc_endpoint_destroy(&exec_ctx, p.client);
     grpc_endpoint_destroy(&exec_ctx, p.server);
     grpc_exec_ctx_finish(&exec_ctx);
   }

+  grpc_resource_quota_unref(resource_quota);
+
   grpc_iomgr_shutdown();
   return 0;
 }

@@ -0,0 +1,743 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/lib/iomgr/resource_quota.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"
static void inc_int_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
++*(int *)a;
}
static void set_bool_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
*(bool *)a = true;
}
grpc_closure *set_bool(bool *p) { return grpc_closure_create(set_bool_cb, p); }
typedef struct {
size_t size;
grpc_resource_user *resource_user;
grpc_closure *then;
} reclaimer_args;
static void reclaimer_cb(grpc_exec_ctx *exec_ctx, void *args,
grpc_error *error) {
GPR_ASSERT(error == GRPC_ERROR_NONE);
reclaimer_args *a = args;
grpc_resource_user_free(exec_ctx, a->resource_user, a->size);
grpc_resource_user_finish_reclaimation(exec_ctx, a->resource_user);
grpc_closure_run(exec_ctx, a->then, GRPC_ERROR_NONE);
gpr_free(a);
}
grpc_closure *make_reclaimer(grpc_resource_user *resource_user, size_t size,
grpc_closure *then) {
reclaimer_args *a = gpr_malloc(sizeof(*a));
a->size = size;
a->resource_user = resource_user;
a->then = then;
return grpc_closure_create(reclaimer_cb, a);
}
static void unused_reclaimer_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
GPR_ASSERT(error == GRPC_ERROR_CANCELLED);
grpc_closure_run(exec_ctx, arg, GRPC_ERROR_NONE);
}
grpc_closure *make_unused_reclaimer(grpc_closure *then) {
return grpc_closure_create(unused_reclaimer_cb, then);
}
static void destroy_user(grpc_resource_user *usr) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
bool done = false;
grpc_resource_user_shutdown(&exec_ctx, usr, set_bool(&done));
grpc_exec_ctx_flush(&exec_ctx);
GPR_ASSERT(done);
grpc_resource_user_destroy(&exec_ctx, usr);
grpc_exec_ctx_finish(&exec_ctx);
}
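/* All tests below tear down through destroy_user(): a resource user may only
   be destroyed after its shutdown closure has run, and shutdown completes
   only once every allocated byte has been returned to the quota (the
   invariant test_resource_user_stays_allocated_until_memory_released()
   checks explicitly). */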
static void test_no_op(void) {
gpr_log(GPR_INFO, "** test_no_op **");
grpc_resource_quota_unref(grpc_resource_quota_create("test_no_op"));
}
static void test_resize_then_destroy(void) {
gpr_log(GPR_INFO, "** test_resize_then_destroy **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_resize_then_destroy");
grpc_resource_quota_resize(p, 1024 * 1024);
grpc_resource_quota_unref(p);
}
static void test_resource_user_no_op(void) {
gpr_log(GPR_INFO, "** test_resource_user_no_op **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_resource_user_no_op");
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
grpc_resource_quota_unref(p);
destroy_user(&usr);
}
static void test_instant_alloc_then_free(void) {
gpr_log(GPR_INFO, "** test_instant_alloc_then_free **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_instant_alloc_then_free");
grpc_resource_quota_resize(p, 1024 * 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, NULL);
grpc_exec_ctx_finish(&exec_ctx);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr);
}
static void test_instant_alloc_free_pair(void) {
gpr_log(GPR_INFO, "** test_instant_alloc_free_pair **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_instant_alloc_free_pair");
grpc_resource_quota_resize(p, 1024 * 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, NULL);
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr);
}
static void test_simple_async_alloc(void) {
gpr_log(GPR_INFO, "** test_simple_async_alloc **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_simple_async_alloc");
grpc_resource_quota_resize(p, 1024 * 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr);
}
static void test_async_alloc_blocked_by_size(void) {
gpr_log(GPR_INFO, "** test_async_alloc_blocked_by_size **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_async_alloc_blocked_by_size");
grpc_resource_quota_resize(p, 1);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
bool done = false;
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!done);
}
grpc_resource_quota_resize(p, 1024);
GPR_ASSERT(done);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr);
}
static void test_scavenge(void) {
gpr_log(GPR_INFO, "** test_scavenge **");
grpc_resource_quota *p = grpc_resource_quota_create("test_scavenge");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr1;
grpc_resource_user usr2;
grpc_resource_user_init(&usr1, p, "usr1");
grpc_resource_user_init(&usr2, p, "usr2");
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr1, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr1, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr2, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr2, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr1);
destroy_user(&usr2);
}
static void test_scavenge_blocked(void) {
gpr_log(GPR_INFO, "** test_scavenge_blocked **");
grpc_resource_quota *p = grpc_resource_quota_create("test_scavenge_blocked");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr1;
grpc_resource_user usr2;
grpc_resource_user_init(&usr1, p, "usr1");
grpc_resource_user_init(&usr2, p, "usr2");
bool done;
{
done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr1, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
{
done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr2, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr1, 1024);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr2, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr1);
destroy_user(&usr2);
}
static void test_blocked_until_scheduled_reclaim(void) {
gpr_log(GPR_INFO, "** test_blocked_until_scheduled_reclaim **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_blocked_until_scheduled_reclaim");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
bool reclaim_done = false;
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, false,
make_reclaimer(&usr, 1024, set_bool(&reclaim_done)));
grpc_exec_ctx_finish(&exec_ctx);
}
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(reclaim_done);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr);
}
static void test_blocked_until_scheduled_reclaim_and_scavenge(void) {
gpr_log(GPR_INFO, "** test_blocked_until_scheduled_reclaim_and_scavenge **");
grpc_resource_quota *p = grpc_resource_quota_create(
"test_blocked_until_scheduled_reclaim_and_scavenge");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr1;
grpc_resource_user usr2;
grpc_resource_user_init(&usr1, p, "usr1");
grpc_resource_user_init(&usr2, p, "usr2");
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr1, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
bool reclaim_done = false;
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr1, false,
make_reclaimer(&usr1, 1024, set_bool(&reclaim_done)));
grpc_exec_ctx_finish(&exec_ctx);
}
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr2, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(reclaim_done);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr2, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr1);
destroy_user(&usr2);
}
static void test_blocked_until_scheduled_destructive_reclaim(void) {
gpr_log(GPR_INFO, "** test_blocked_until_scheduled_destructive_reclaim **");
grpc_resource_quota *p = grpc_resource_quota_create(
"test_blocked_until_scheduled_destructive_reclaim");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
bool reclaim_done = false;
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, true,
make_reclaimer(&usr, 1024, set_bool(&reclaim_done)));
grpc_exec_ctx_finish(&exec_ctx);
}
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(reclaim_done);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr);
}
static void test_unused_reclaim_is_cancelled(void) {
gpr_log(GPR_INFO, "** test_unused_reclaim_is_cancelled **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_unused_reclaim_is_cancelled");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
bool benign_done = false;
bool destructive_done = false;
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, false, make_unused_reclaimer(set_bool(&benign_done)));
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, true,
make_unused_reclaimer(set_bool(&destructive_done)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!benign_done);
GPR_ASSERT(!destructive_done);
}
grpc_resource_quota_unref(p);
destroy_user(&usr);
GPR_ASSERT(benign_done);
GPR_ASSERT(destructive_done);
}
static void test_benign_reclaim_is_preferred(void) {
gpr_log(GPR_INFO, "** test_benign_reclaim_is_preferred **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_benign_reclaim_is_preferred");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
bool benign_done = false;
bool destructive_done = false;
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, false,
make_reclaimer(&usr, 1024, set_bool(&benign_done)));
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, true,
make_unused_reclaimer(set_bool(&destructive_done)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!benign_done);
GPR_ASSERT(!destructive_done);
}
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(benign_done);
GPR_ASSERT(!destructive_done);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr);
GPR_ASSERT(benign_done);
GPR_ASSERT(destructive_done);
}
static void test_multiple_reclaims_can_be_triggered(void) {
gpr_log(GPR_INFO, "** test_multiple_reclaims_can_be_triggered **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_multiple_reclaims_can_be_triggered");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
bool benign_done = false;
bool destructive_done = false;
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, false,
make_reclaimer(&usr, 512, set_bool(&benign_done)));
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, true,
make_reclaimer(&usr, 512, set_bool(&destructive_done)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!benign_done);
GPR_ASSERT(!destructive_done);
}
{
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(benign_done);
GPR_ASSERT(destructive_done);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota_unref(p);
destroy_user(&usr);
GPR_ASSERT(benign_done);
GPR_ASSERT(destructive_done);
}
static void test_resource_user_stays_allocated_until_memory_released(void) {
gpr_log(GPR_INFO,
"** test_resource_user_stays_allocated_until_memory_released **");
grpc_resource_quota *p = grpc_resource_quota_create(
"test_resource_user_stays_allocated_until_memory_released");
grpc_resource_quota_resize(p, 1024 * 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
bool done = false;
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, NULL);
grpc_exec_ctx_finish(&exec_ctx);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_quota_unref(p);
grpc_resource_user_shutdown(&exec_ctx, &usr, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_destroy(&exec_ctx, &usr);
grpc_exec_ctx_finish(&exec_ctx);
}
}
static void test_pools_merged_on_resource_user_deletion(void) {
gpr_log(GPR_INFO, "** test_pools_merged_on_resource_user_deletion **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_pools_merged_on_resource_user_deletion");
grpc_resource_quota_resize(p, 1024);
for (int i = 0; i < 10; i++) {
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
bool done = false;
bool reclaimer_cancelled = false;
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, false,
make_unused_reclaimer(set_bool(&reclaimer_cancelled)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!reclaimer_cancelled);
}
{
bool allocated = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&allocated));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(allocated);
GPR_ASSERT(!reclaimer_cancelled);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_shutdown(&exec_ctx, &usr, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!done);
GPR_ASSERT(!reclaimer_cancelled);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
GPR_ASSERT(reclaimer_cancelled);
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_destroy(&exec_ctx, &usr);
grpc_exec_ctx_finish(&exec_ctx);
}
}
grpc_resource_quota_unref(p);
}
static void test_reclaimers_can_be_posted_repeatedly(void) {
gpr_log(GPR_INFO, "** test_reclaimers_can_be_posted_repeatedly **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_reclaimers_can_be_posted_repeatedly");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
{
bool allocated = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&allocated));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(allocated);
}
for (int i = 0; i < 10; i++) {
bool reclaimer_done = false;
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_post_reclaimer(
&exec_ctx, &usr, false,
make_reclaimer(&usr, 1024, set_bool(&reclaimer_done)));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!reclaimer_done);
}
{
bool allocated = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&allocated));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(allocated);
GPR_ASSERT(reclaimer_done);
}
}
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_free(&exec_ctx, &usr, 1024);
grpc_exec_ctx_finish(&exec_ctx);
}
destroy_user(&usr);
grpc_resource_quota_unref(p);
}
static void test_one_slice(void) {
gpr_log(GPR_INFO, "** test_one_slice **");
grpc_resource_quota *p = grpc_resource_quota_create("test_one_slice");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
grpc_resource_user_slice_allocator alloc;
int num_allocs = 0;
grpc_resource_user_slice_allocator_init(&alloc, &usr, inc_int_cb,
&num_allocs);
gpr_slice_buffer buffer;
gpr_slice_buffer_init(&buffer);
{
const int start_allocs = num_allocs;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(num_allocs == start_allocs + 1);
}
gpr_slice_buffer_destroy(&buffer);
destroy_user(&usr);
grpc_resource_quota_unref(p);
}
static void test_one_slice_deleted_late(void) {
gpr_log(GPR_INFO, "** test_one_slice_deleted_late **");
grpc_resource_quota *p =
grpc_resource_quota_create("test_one_slice_deleted_late");
grpc_resource_quota_resize(p, 1024);
grpc_resource_user usr;
grpc_resource_user_init(&usr, p, "usr");
grpc_resource_user_slice_allocator alloc;
int num_allocs = 0;
grpc_resource_user_slice_allocator_init(&alloc, &usr, inc_int_cb,
&num_allocs);
gpr_slice_buffer buffer;
gpr_slice_buffer_init(&buffer);
{
const int start_allocs = num_allocs;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(num_allocs == start_allocs + 1);
}
bool done = false;
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_shutdown(&exec_ctx, &usr, set_bool(&done));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(!done);
}
grpc_resource_quota_unref(p);
gpr_slice_buffer_destroy(&buffer);
GPR_ASSERT(done);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_destroy(&exec_ctx, &usr);
grpc_exec_ctx_finish(&exec_ctx);
}
}
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
test_no_op();
test_resize_then_destroy();
test_resource_user_no_op();
test_instant_alloc_then_free();
test_instant_alloc_free_pair();
test_simple_async_alloc();
test_async_alloc_blocked_by_size();
test_scavenge();
test_scavenge_blocked();
test_blocked_until_scheduled_reclaim();
test_blocked_until_scheduled_reclaim_and_scavenge();
test_blocked_until_scheduled_destructive_reclaim();
test_unused_reclaim_is_cancelled();
test_benign_reclaim_is_preferred();
test_multiple_reclaims_can_be_triggered();
test_resource_user_stays_allocated_until_memory_released();
test_pools_merged_on_resource_user_deletion();
test_reclaimers_can_be_posted_repeatedly();
test_one_slice();
test_one_slice_deleted_late();
grpc_shutdown();
return 0;
}
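Read together, the tests above exercise a single lifecycle. A condensed sketch of that lifecycle, assembled strictly from calls used in this file (sizes and names are illustrative):

/* Condensed resource-user lifecycle, mirroring destroy_user() above. */
static void example_lifecycle(void) {
  grpc_resource_quota *q = grpc_resource_quota_create("example");
  grpc_resource_quota_resize(q, 64 * 1024);
  grpc_resource_user usr;
  grpc_resource_user_init(&usr, q, "example_usr");
  grpc_resource_quota_unref(q); /* the user holds its own internal reference */
  bool have_memory = false;
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    /* completes immediately while the quota has headroom; otherwise waits
       for a reclaimer, a free, or a resize */
    grpc_resource_user_alloc(&exec_ctx, &usr, 4096, set_bool(&have_memory));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(have_memory);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, &usr, 4096);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  destroy_user(&usr); /* shutdown, then destroy, as defined above */
}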

@@ -111,7 +111,7 @@ void test_succeeds(void) {
   /* connect to it */
   GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);
   grpc_closure_init(&done, must_succeed, NULL);
-  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
+  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
                           (struct sockaddr *)&addr, addr_len,
                           gpr_inf_future(GPR_CLOCK_REALTIME));
@@ -160,7 +160,7 @@ void test_fails(void) {
   /* connect to a broken address */
   grpc_closure_init(&done, must_fail, NULL);
-  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
+  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
                           (struct sockaddr *)&addr, addr_len,
                           gpr_inf_future(GPR_CLOCK_REALTIME));

@@ -176,7 +176,10 @@ static void read_test(size_t num_bytes, size_t slice_size) {
   create_sockets(sv);

-  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), slice_size, "test");
+  grpc_resource_quota *resource_quota = grpc_resource_quota_create("read_test");
+  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), resource_quota,
+                       slice_size, "test");
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

   written_bytes = fill_socket_partial(sv[0], num_bytes);
@@ -223,8 +226,11 @@ static void large_read_test(size_t slice_size) {
   create_sockets(sv);

-  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size,
-                       "test");
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("large_read_test");
+  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), resource_quota,
+                       slice_size, "test");
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

   written_bytes = fill_socket(sv[0]);
@@ -359,8 +365,11 @@ static void write_test(size_t num_bytes, size_t slice_size) {
   create_sockets(sv);

-  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("write_test");
+  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), resource_quota,
                        GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

   state.ep = ep;
@@ -423,8 +432,12 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
   create_sockets(sv);

-  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), slice_size, "test");
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("release_fd_test");
+  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), resource_quota,
+                       slice_size, "test");
   GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

   written_bytes = fill_socket_partial(sv[0], num_bytes);
@@ -445,8 +458,10 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
         "pollset_work",
         grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                           gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
+    gpr_log(GPR_DEBUG, "wakeup: read=%" PRIdPTR " target=%" PRIdPTR,
+            state.read_bytes, state.target_read_bytes);
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_exec_ctx_flush(&exec_ctx);
     gpr_mu_lock(g_mu);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
@@ -454,6 +469,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
   gpr_slice_buffer_destroy(&state.incoming);
   grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb);
+  grpc_exec_ctx_flush(&exec_ctx);
   gpr_mu_lock(g_mu);
   while (!fd_released_done) {
     grpc_pollset_worker *worker = NULL;
@@ -461,6 +477,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
         "pollset_work",
         grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                           gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
+    gpr_log(GPR_DEBUG, "wakeup: fd_released_done=%d", fd_released_done);
   }
   gpr_mu_unlock(g_mu);
   GPR_ASSERT(fd_released_done == 1);
@@ -506,10 +523,13 @@ static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

   create_sockets(sv);
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("tcp_posix_test_socketpair");
   f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"),
-                                slice_size, "test");
+                                resource_quota, slice_size, "test");
   f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
-                                slice_size, "test");
+                                resource_quota, slice_size, "test");
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
   grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);
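The change these fixtures share is the new resource_quota parameter to grpc_tcp_create(). A freestanding sketch of the updated call shape, assuming, as the unref-after-create pattern above implies, that the endpoint retains its own quota reference; fd acquisition is elided and names are illustrative:

/* Sketch of the updated grpc_tcp_create() call shape. "fd" must be a
   connected socket; the caller drops its quota ref right after creation. */
static grpc_endpoint *make_endpoint(grpc_exec_ctx *exec_ctx, int fd) {
  grpc_resource_quota *rq = grpc_resource_quota_create("my_endpoint");
  grpc_endpoint *ep = grpc_tcp_create(grpc_fd_create(fd, "my_endpoint"), rq,
                                      GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_resource_quota_internal_unref(exec_ctx, rq);
  return ep;
}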

@@ -132,7 +132,8 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
 static void test_no_op(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE ==
+             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
   grpc_tcp_server_unref(&exec_ctx, s);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -140,7 +141,8 @@ static void test_no_op(void) {
 static void test_no_op_with_start(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE ==
+             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
   LOG_TEST("test_no_op_with_start");
   grpc_tcp_server_start(&exec_ctx, s, NULL, 0, on_connect, NULL);
   grpc_tcp_server_unref(&exec_ctx, s);
@@ -151,7 +153,8 @@ static void test_no_op_with_port(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   struct sockaddr_in addr;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE ==
+             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
   LOG_TEST("test_no_op_with_port");

   memset(&addr, 0, sizeof(addr));
@@ -169,7 +172,8 @@ static void test_no_op_with_port_and_start(void) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   struct sockaddr_in addr;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE ==
+             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
   LOG_TEST("test_no_op_with_port_and_start");

   int port;
@@ -229,7 +233,8 @@ static void test_connect(unsigned n) {
   unsigned svr1_fd_count;
   int svr1_port;
   grpc_tcp_server *s;
-  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE ==
+             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
   unsigned i;
   server_weak_ref weak_ref;
   server_weak_ref_init(&weak_ref);

@@ -56,7 +56,10 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair(
   grpc_endpoint_test_fixture f;
   grpc_endpoint_pair tcp;

-  tcp = grpc_iomgr_create_endpoint_pair("fixture", slice_size);
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("secure_endpoint_test");
+  tcp = grpc_iomgr_create_endpoint_pair("fixture", resource_quota, slice_size);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   grpc_endpoint_add_to_pollset(&exec_ctx, tcp.client, g_pollset);
   grpc_endpoint_add_to_pollset(&exec_ctx, tcp.server, g_pollset);

@@ -113,7 +113,7 @@ void bad_server_thread(void *vargs) {
   socklen_t addr_len = sizeof(addr);
   int port;
   grpc_tcp_server *s;
-  grpc_error *error = grpc_tcp_server_create(NULL, NULL, &s);
+  grpc_error *error = grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   memset(&addr, 0, sizeof(addr));
   addr.ss_family = AF_INET;

@@ -33,16 +33,20 @@

 #include "test/core/util/mock_endpoint.h"

+#include <inttypes.h>
+
 #include <grpc/support/alloc.h>
 #include <grpc/support/string_util.h>

 typedef struct grpc_mock_endpoint {
   grpc_endpoint base;
   gpr_mu mu;
+  int refs;
   void (*on_write)(gpr_slice slice);
   gpr_slice_buffer read_buffer;
   gpr_slice_buffer *on_read_out;
   grpc_closure *on_read;
+  grpc_resource_user resource_user;
 } grpc_mock_endpoint;

 static void me_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -74,6 +78,24 @@ static void me_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 static void me_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                                   grpc_pollset_set *pollset) {}

+static void unref(grpc_exec_ctx *exec_ctx, grpc_mock_endpoint *m) {
+  gpr_mu_lock(&m->mu);
+  if (0 == --m->refs) {
+    gpr_mu_unlock(&m->mu);
+    gpr_slice_buffer_destroy(&m->read_buffer);
+    grpc_resource_user_destroy(exec_ctx, &m->resource_user);
+    gpr_free(m);
+  } else {
+    gpr_mu_unlock(&m->mu);
+  }
+}
+
+static void me_finish_shutdown(grpc_exec_ctx *exec_ctx, void *me,
+                               grpc_error *error) {
+  grpc_mock_endpoint *m = me;
+  unref(exec_ctx, m);
+}
+
 static void me_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
   grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep;
   gpr_mu_lock(&m->mu);
@@ -82,19 +104,25 @@ static void me_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
                         GRPC_ERROR_CREATE("Endpoint Shutdown"), NULL);
     m->on_read = NULL;
   }
+  grpc_resource_user_shutdown(exec_ctx, &m->resource_user,
+                              grpc_closure_create(me_finish_shutdown, m));
   gpr_mu_unlock(&m->mu);
 }

 static void me_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
   grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep;
-  gpr_slice_buffer_destroy(&m->read_buffer);
-  gpr_free(m);
+  unref(exec_ctx, m);
 }

 static char *me_get_peer(grpc_endpoint *ep) {
   return gpr_strdup("fake:mock_endpoint");
 }

+static grpc_resource_user *me_get_resource_user(grpc_endpoint *ep) {
+  grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep;
+  return &m->resource_user;
+}
+
 static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; }

 static const grpc_endpoint_vtable vtable = {
@@ -105,12 +133,19 @@ static const grpc_endpoint_vtable vtable = {
     me_add_to_pollset_set,
     me_shutdown,
     me_destroy,
+    me_get_resource_user,
     me_get_peer,
 };

-grpc_endpoint *grpc_mock_endpoint_create(void (*on_write)(gpr_slice slice)) {
+grpc_endpoint *grpc_mock_endpoint_create(void (*on_write)(gpr_slice slice),
+                                         grpc_resource_quota *resource_quota) {
   grpc_mock_endpoint *m = gpr_malloc(sizeof(*m));
   m->base.vtable = &vtable;
+  m->refs = 2;
+  char *name;
+  gpr_asprintf(&name, "mock_endpoint_%" PRIxPTR, (intptr_t)m);
+  grpc_resource_user_init(&m->resource_user, resource_quota, name);
+  gpr_free(name);
   gpr_slice_buffer_init(&m->read_buffer);
   gpr_mu_init(&m->mu);
   m->on_write = on_write;

@@ -36,7 +36,8 @@

 #include "src/core/lib/iomgr/endpoint.h"

-grpc_endpoint *grpc_mock_endpoint_create(void (*on_write)(gpr_slice slice));
+grpc_endpoint *grpc_mock_endpoint_create(void (*on_write)(gpr_slice slice),
+                                         grpc_resource_quota *resource_quota);
 void grpc_mock_endpoint_put_read(grpc_exec_ctx *exec_ctx,
                                  grpc_endpoint *mock_endpoint, gpr_slice slice);
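Callers of the mock endpoint now supply the quota at creation. A hypothetical setup, assuming, as grpc_resource_user_init() elsewhere in this change implies, that the endpoint's resource user keeps its own quota reference; discard_write is a stand-in for whatever a test does with outgoing slices:

/* Hypothetical caller of the new two-argument factory. */
static void discard_write(gpr_slice slice) { gpr_slice_unref(slice); }

static grpc_endpoint *make_mock(void) {
  grpc_resource_quota *rq = grpc_resource_quota_create("mock");
  grpc_endpoint *ep = grpc_mock_endpoint_create(discard_write, rq);
  grpc_resource_quota_unref(rq); /* endpoint's resource user holds a ref */
  return ep;
}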

@@ -33,6 +33,8 @@

 #include "test/core/util/passthru_endpoint.h"

+#include <inttypes.h>
+
 #include <grpc/support/alloc.h>
 #include <grpc/support/string_util.h>
@@ -44,6 +46,7 @@ typedef struct {
   gpr_slice_buffer read_buffer;
   gpr_slice_buffer *on_read_out;
   grpc_closure *on_read;
+  grpc_resource_user resource_user;
 } half;

 struct passthru_endpoint {
@@ -122,7 +125,8 @@ static void me_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
   gpr_mu_unlock(&m->parent->mu);
 }

-static void me_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+static void me_really_destroy(grpc_exec_ctx *exec_ctx, void *ep,
+                              grpc_error *error) {
   passthru_endpoint *p = ((half *)ep)->parent;
   gpr_mu_lock(&p->mu);
   if (0 == --p->halves) {
@@ -136,12 +140,23 @@ static void me_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
   }
 }

+static void me_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+  half *m = (half *)ep;
+  grpc_resource_user_shutdown(exec_ctx, &m->resource_user,
+                              grpc_closure_create(me_really_destroy, m));
+}
+
 static char *me_get_peer(grpc_endpoint *ep) {
   return gpr_strdup("fake:mock_endpoint");
 }

 static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; }

+static grpc_resource_user *me_get_resource_user(grpc_endpoint *ep) {
+  half *m = (half *)ep;
+  return &m->resource_user;
+}
+
 static const grpc_endpoint_vtable vtable = {
     me_read,
     me_write,
@@ -150,23 +165,32 @@ static const grpc_endpoint_vtable vtable = {
     me_add_to_pollset_set,
     me_shutdown,
     me_destroy,
+    me_get_resource_user,
     me_get_peer,
 };

-static void half_init(half *m, passthru_endpoint *parent) {
+static void half_init(half *m, passthru_endpoint *parent,
+                      grpc_resource_quota *resource_quota,
+                      const char *half_name) {
   m->base.vtable = &vtable;
   m->parent = parent;
   gpr_slice_buffer_init(&m->read_buffer);
   m->on_read = NULL;
+  char *name;
+  gpr_asprintf(&name, "passthru_endpoint_%s_%" PRIxPTR, half_name,
+               (intptr_t)parent);
+  grpc_resource_user_init(&m->resource_user, resource_quota, name);
+  gpr_free(name);
 }

 void grpc_passthru_endpoint_create(grpc_endpoint **client,
-                                   grpc_endpoint **server) {
+                                   grpc_endpoint **server,
+                                   grpc_resource_quota *resource_quota) {
   passthru_endpoint *m = gpr_malloc(sizeof(*m));
   m->halves = 2;
   m->shutdown = 0;
-  half_init(&m->client, m);
-  half_init(&m->server, m);
+  half_init(&m->client, m, resource_quota, "client");
+  half_init(&m->server, m, resource_quota, "server");
   gpr_mu_init(&m->mu);
   *client = &m->client.base;
   *server = &m->server.base;

@@ -37,6 +37,7 @@

 #include "src/core/lib/iomgr/endpoint.h"

 void grpc_passthru_endpoint_create(grpc_endpoint **client,
-                                   grpc_endpoint **server);
+                                   grpc_endpoint **server,
+                                   grpc_resource_quota *resource_quota);

 #endif
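And the passthru counterpart, where a single quota backs both halves of the pair; a minimal sketch using only the factory declared above:

/* Sketch: one quota shared by both halves of the pair. */
static void make_pair(grpc_endpoint **client, grpc_endpoint **server) {
  grpc_resource_quota *rq = grpc_resource_quota_create("passthru_pair");
  grpc_passthru_endpoint_create(client, server, rq);
  grpc_resource_quota_unref(rq); /* each half's resource user holds a ref */
}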

@@ -99,9 +99,12 @@ void grpc_free_port_using_server(char *server, int port) {
   req.http.path = path;

   grpc_httpcli_context_init(&context);
-  grpc_httpcli_get(&exec_ctx, &context, &pr.pops, &req,
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("port_server_client/free");
+  grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
                    GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
                    grpc_closure_create(freed_port_from_server, &pr), &rsp);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(pr.mu);
   while (!pr.done) {
     grpc_pollset_worker *worker = NULL;
@@ -167,10 +170,13 @@ static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
     req.http.path = "/get";
     grpc_http_response_destroy(&pr->response);
     memset(&pr->response, 0, sizeof(pr->response));
-    grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, &req,
+    grpc_resource_quota *resource_quota =
+        grpc_resource_quota_create("port_server_client/pick_retry");
+    grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, resource_quota, &req,
                      GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
                      grpc_closure_create(got_port_from_server, pr),
                      &pr->response);
+    grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
     return;
   }
   GPR_ASSERT(response);
@@ -211,9 +217,13 @@ int grpc_pick_port_using_server(char *server) {
   req.http.path = "/get";

   grpc_httpcli_context_init(&context);
-  grpc_httpcli_get(
-      &exec_ctx, &context, &pr.pops, &req, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
-      grpc_closure_create(got_port_from_server, &pr), &pr.response);
+  grpc_resource_quota *resource_quota =
+      grpc_resource_quota_create("port_server_client/pick");
+  grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
+                   GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
+                   grpc_closure_create(got_port_from_server, &pr),
+                   &pr.response);
+  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   grpc_exec_ctx_finish(&exec_ctx);
   gpr_mu_lock(pr.mu);
   while (pr.port == -1) {

@@ -72,8 +72,8 @@ void test_tcp_server_start(test_tcp_server *server, int port) {
   addr.sin_port = htons((uint16_t)port);
   memset(&addr.sin_addr, 0, sizeof(addr.sin_addr));

-  grpc_error *error = grpc_tcp_server_create(&server->shutdown_complete, NULL,
-                                             &server->tcp_server);
+  grpc_error *error = grpc_tcp_server_create(
+      &exec_ctx, &server->shutdown_complete, NULL, &server->tcp_server);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   error = grpc_tcp_server_add_port(server->tcp_server, &addr, sizeof(addr),
                                    &port_added);

@@ -213,7 +213,7 @@ class ServiceA GRPC_FINAL {
     void BaseClassMustBeDerivedFromService(const Service *service) {}
    public:
     WithStreamedUnaryMethod_MethodA1() {
-      ::grpc::Service::MarkMethodStreamedUnary(0,
+      ::grpc::Service::MarkMethodStreamed(0,
         new ::grpc::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodA1<BaseClass>::StreamedMethodA1, this, std::placeholders::_1, std::placeholders::_2)));
     }
     ~WithStreamedUnaryMethod_MethodA1() GRPC_OVERRIDE {
@@ -228,6 +228,8 @@ class ServiceA GRPC_FINAL {
     virtual ::grpc::Status StreamedMethodA1(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::grpc::testing::Request,::grpc::testing::Response>* server_unary_streamer) = 0;
   };
   typedef WithStreamedUnaryMethod_MethodA1<Service > StreamedUnaryService;
+  typedef Service SplitStreamedService;
+  typedef WithStreamedUnaryMethod_MethodA1<Service > StreamedService;
 };

 // ServiceB leading comment 1
@@ -312,7 +314,7 @@ class ServiceB GRPC_FINAL {
     void BaseClassMustBeDerivedFromService(const Service *service) {}
    public:
     WithStreamedUnaryMethod_MethodB1() {
-      ::grpc::Service::MarkMethodStreamedUnary(0,
+      ::grpc::Service::MarkMethodStreamed(0,
         new ::grpc::StreamedUnaryHandler< ::grpc::testing::Request, ::grpc::testing::Response>(std::bind(&WithStreamedUnaryMethod_MethodB1<BaseClass>::StreamedMethodB1, this, std::placeholders::_1, std::placeholders::_2)));
     }
     ~WithStreamedUnaryMethod_MethodB1() GRPC_OVERRIDE {
@@ -327,6 +329,8 @@ class ServiceB GRPC_FINAL {
     virtual ::grpc::Status StreamedMethodB1(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::grpc::testing::Request,::grpc::testing::Response>* server_unary_streamer) = 0;
   };
   typedef WithStreamedUnaryMethod_MethodB1<Service > StreamedUnaryService;
+  typedef Service SplitStreamedService;
+  typedef WithStreamedUnaryMethod_MethodB1<Service > StreamedService;
 };

 // ServiceB trailing comment 1
