Merge remote-tracking branch 'upstream/master' into service_config_json

pull/8617/head
Mark D. Roth 8 years ago
commit 863113a0d9
  1. BUILD (16 lines changed)
  2. CMakeLists.txt (6 lines changed)
  3. Makefile (10 lines changed)
  4. binding.gyp (2 lines changed)
  5. build.yaml (4 lines changed)
  6. config.m4 (2 lines changed)
  7. doc/PROTOCOL-WEB.md (3 lines changed)
  8. gRPC-Core.podspec (6 lines changed)
  9. grpc.gemspec (4 lines changed)
  10. include/grpc++/impl/codegen/completion_queue.h (2 lines changed)
  11. include/grpc++/support/channel_arguments.h (3 lines changed)
  12. include/grpc/impl/codegen/connectivity_state.h (2 lines changed)
  13. include/grpc/impl/codegen/grpc_types.h (5 lines changed)
  14. include/grpc/impl/codegen/port_platform.h (8 lines changed)
  15. include/grpc/support/log.h (2 lines changed)
  16. include/grpc/support/string_util.h (2 lines changed)
  17. package.xml (4 lines changed)
  18. src/core/ext/census/census_log.h (2 lines changed)
  19. src/core/ext/census/mlog.h (2 lines changed)
  20. src/core/ext/client_channel/client_channel.c (4 lines changed)
  21. src/core/ext/client_channel/subchannel.c (6 lines changed)
  22. src/core/ext/client_channel/subchannel.h (4 lines changed)
  23. src/core/ext/lb_policy/grpclb/grpclb.c (26 lines changed)
  24. src/core/ext/lb_policy/pick_first/pick_first.c (2 lines changed)
  25. src/core/ext/lb_policy/round_robin/round_robin.c (313 lines changed)
  26. src/core/ext/transport/chttp2/transport/chttp2_transport.c (19 lines changed)
  27. src/core/ext/transport/chttp2/transport/parsing.c (3 lines changed)
  28. src/core/ext/transport/cronet/transport/cronet_transport.c (9 lines changed)
  29. src/core/lib/channel/channel_args.c (6 lines changed)
  30. src/core/lib/channel/channel_args.h (8 lines changed)
  31. src/core/lib/channel/channel_stack.c (5 lines changed)
  32. src/core/lib/channel/channel_stack.h (3 lines changed)
  33. src/core/lib/iomgr/endpoint.c (2 lines changed)
  34. src/core/lib/iomgr/endpoint.h (5 lines changed)
  35. src/core/lib/iomgr/ev_epoll_linux.c (2 lines changed)
  36. src/core/lib/iomgr/ev_poll_and_epoll_posix.c (2076 lines changed)
  37. src/core/lib/iomgr/ev_poll_and_epoll_posix.h (41 lines changed)
  38. src/core/lib/iomgr/ev_posix.c (2 lines changed)
  39. src/core/lib/iomgr/socket_mutator.c (98 lines changed)
  40. src/core/lib/iomgr/socket_mutator.h (80 lines changed)
  41. src/core/lib/iomgr/socket_utils_common_posix.c (9 lines changed)
  42. src/core/lib/iomgr/socket_utils_posix.h (5 lines changed)
  43. src/core/lib/iomgr/tcp_client.h (1 line changed)
  44. src/core/lib/iomgr/tcp_client_posix.c (16 lines changed)
  45. src/core/lib/iomgr/tcp_posix.c (8 lines changed)
  46. src/core/lib/iomgr/tcp_uv.c (5 lines changed)
  47. src/core/lib/iomgr/tcp_windows.c (5 lines changed)
  48. src/core/lib/security/credentials/jwt/jwt_credentials.c (45 lines changed)
  49. src/core/lib/security/credentials/oauth2/oauth2_credentials.c (33 lines changed)
  50. src/core/lib/security/credentials/plugin/plugin_credentials.c (2 lines changed)
  51. src/core/lib/security/transport/handshake.c (2 lines changed)
  52. src/core/lib/security/transport/secure_endpoint.c (13 lines changed)
  53. src/core/lib/surface/call.c (12 lines changed)
  54. src/core/lib/transport/connectivity_state.c (3 lines changed)
  55. src/core/lib/transport/metadata.c (7 lines changed)
  56. src/core/lib/transport/metadata.h (4 lines changed)
  57. src/core/lib/transport/transport.c (5 lines changed)
  58. src/core/lib/transport/transport.h (5 lines changed)
  59. src/core/lib/transport/transport_impl.h (3 lines changed)
  60. src/cpp/common/channel_arguments.cc (20 lines changed)
  61. src/csharp/Grpc.Core/Internal/AsyncCall.cs (97 lines changed)
  62. src/csharp/Grpc.Core/Internal/AsyncCallBase.cs (41 lines changed)
  63. src/csharp/Grpc.Core/Internal/CallSafeHandle.cs (7 lines changed)
  64. src/csharp/Grpc.Core/Internal/ChannelSafeHandle.cs (13 lines changed)
  65. src/csharp/Grpc.Core/Internal/CompletionQueueSafeHandle.cs (5 lines changed)
  66. src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs (22 lines changed)
  67. src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs (23 lines changed)
  68. src/csharp/Grpc.Core/Profiling/Profilers.cs (2 lines changed)
  69. src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs (28 lines changed)
  70. src/csharp/ext/grpc_csharp_ext.c (6 lines changed)
  71. src/node/src/common.js (16 lines changed)
  72. src/python/grpcio/grpc_core_dependencies.py (2 lines changed)
  73. src/ruby/ext/grpc/rb_grpc_imports.generated.h (4 lines changed)
  74. test/core/channel/channel_args_test.c (14 lines changed)
  75. test/core/channel/channel_stack_test.c (7 lines changed)
  76. test/core/client_channel/lb_policies_test.c (327 lines changed)
  77. test/core/end2end/end2end_nosec_tests.c (8 lines changed)
  78. test/core/end2end/end2end_tests.c (8 lines changed)
  79. test/core/end2end/gen_build_yaml.py (1 line changed)
  80. test/core/end2end/tests/filter_latency.c (359 lines changed)
  81. test/core/internal_api_canaries/iomgr.c (3 lines changed)
  82. test/core/iomgr/socket_utils_test.c (67 lines changed)
  83. test/core/network_benchmarks/low_level_ping_pong.c (31 lines changed)
  84. test/core/profiling/mark_timings.stp (2 lines changed)
  85. test/core/util/mock_endpoint.c (4 lines changed)
  86. test/core/util/passthru_endpoint.c (4 lines changed)
  87. test/cpp/common/channel_arguments_test.cc (65 lines changed)
  88. test/cpp/end2end/async_end2end_test.cc (18 lines changed)
  89. test/cpp/end2end/end2end_test.cc (21 lines changed)
  90. test/cpp/end2end/test_service_impl.cc (12 lines changed)
  91. test/cpp/end2end/thread_stress_test.cc (39 lines changed)
  92. test/cpp/qps/client.h (7 lines changed)
  93. test/cpp/qps/client_async.cc (1 line changed)
  94. test/cpp/qps/client_sync.cc (14 lines changed)
  95. test/cpp/qps/driver.cc (57 lines changed)
  96. test/cpp/util/config_grpc_cli.h (2 lines changed)
  97. test/cpp/util/grpc_tool_test.cc (4 lines changed)
  98. tools/distrib/python/grpcio_tools/grpc/tools/command.py (5 lines changed)
  99. tools/doxygen/Doxyfile.core.internal (4 lines changed)
  100. tools/jenkins/run_full_performance.sh (2 lines changed)
  Some files were not shown because too many files have changed in this diff.

BUILD (16 lines changed)

@ -179,7 +179,6 @@ cc_library(
"src/core/lib/iomgr/endpoint_pair.h",
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/ev_epoll_linux.h",
"src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
"src/core/lib/iomgr/ev_poll_posix.h",
"src/core/lib/iomgr/ev_posix.h",
"src/core/lib/iomgr/exec_ctx.h",
@ -203,6 +202,7 @@ cc_library(
"src/core/lib/iomgr/sockaddr_posix.h",
"src/core/lib/iomgr/sockaddr_utils.h",
"src/core/lib/iomgr/sockaddr_windows.h",
"src/core/lib/iomgr/socket_mutator.h",
"src/core/lib/iomgr/socket_utils.h",
"src/core/lib/iomgr/socket_utils_posix.h",
"src/core/lib/iomgr/socket_windows.h",
@ -354,7 +354,6 @@ cc_library(
"src/core/lib/iomgr/endpoint_pair_windows.c",
"src/core/lib/iomgr/error.c",
"src/core/lib/iomgr/ev_epoll_linux.c",
"src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
"src/core/lib/iomgr/ev_poll_posix.c",
"src/core/lib/iomgr/ev_posix.c",
"src/core/lib/iomgr/exec_ctx.c",
@ -376,6 +375,7 @@ cc_library(
"src/core/lib/iomgr/resolve_address_windows.c",
"src/core/lib/iomgr/resource_quota.c",
"src/core/lib/iomgr/sockaddr_utils.c",
"src/core/lib/iomgr/socket_mutator.c",
"src/core/lib/iomgr/socket_utils_common_posix.c",
"src/core/lib/iomgr/socket_utils_linux.c",
"src/core/lib/iomgr/socket_utils_posix.c",
@ -613,7 +613,6 @@ cc_library(
"src/core/lib/iomgr/endpoint_pair.h",
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/ev_epoll_linux.h",
"src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
"src/core/lib/iomgr/ev_poll_posix.h",
"src/core/lib/iomgr/ev_posix.h",
"src/core/lib/iomgr/exec_ctx.h",
@ -637,6 +636,7 @@ cc_library(
"src/core/lib/iomgr/sockaddr_posix.h",
"src/core/lib/iomgr/sockaddr_utils.h",
"src/core/lib/iomgr/sockaddr_windows.h",
"src/core/lib/iomgr/socket_mutator.h",
"src/core/lib/iomgr/socket_utils.h",
"src/core/lib/iomgr/socket_utils_posix.h",
"src/core/lib/iomgr/socket_windows.h",
@ -773,7 +773,6 @@ cc_library(
"src/core/lib/iomgr/endpoint_pair_windows.c",
"src/core/lib/iomgr/error.c",
"src/core/lib/iomgr/ev_epoll_linux.c",
"src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
"src/core/lib/iomgr/ev_poll_posix.c",
"src/core/lib/iomgr/ev_posix.c",
"src/core/lib/iomgr/exec_ctx.c",
@ -795,6 +794,7 @@ cc_library(
"src/core/lib/iomgr/resolve_address_windows.c",
"src/core/lib/iomgr/resource_quota.c",
"src/core/lib/iomgr/sockaddr_utils.c",
"src/core/lib/iomgr/socket_mutator.c",
"src/core/lib/iomgr/socket_utils_common_posix.c",
"src/core/lib/iomgr/socket_utils_linux.c",
"src/core/lib/iomgr/socket_utils_posix.c",
@ -1002,7 +1002,6 @@ cc_library(
"src/core/lib/iomgr/endpoint_pair.h",
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/ev_epoll_linux.h",
"src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
"src/core/lib/iomgr/ev_poll_posix.h",
"src/core/lib/iomgr/ev_posix.h",
"src/core/lib/iomgr/exec_ctx.h",
@ -1026,6 +1025,7 @@ cc_library(
"src/core/lib/iomgr/sockaddr_posix.h",
"src/core/lib/iomgr/sockaddr_utils.h",
"src/core/lib/iomgr/sockaddr_windows.h",
"src/core/lib/iomgr/socket_mutator.h",
"src/core/lib/iomgr/socket_utils.h",
"src/core/lib/iomgr/socket_utils_posix.h",
"src/core/lib/iomgr/socket_windows.h",
@ -1154,7 +1154,6 @@ cc_library(
"src/core/lib/iomgr/endpoint_pair_windows.c",
"src/core/lib/iomgr/error.c",
"src/core/lib/iomgr/ev_epoll_linux.c",
"src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
"src/core/lib/iomgr/ev_poll_posix.c",
"src/core/lib/iomgr/ev_posix.c",
"src/core/lib/iomgr/exec_ctx.c",
@ -1176,6 +1175,7 @@ cc_library(
"src/core/lib/iomgr/resolve_address_windows.c",
"src/core/lib/iomgr/resource_quota.c",
"src/core/lib/iomgr/sockaddr_utils.c",
"src/core/lib/iomgr/socket_mutator.c",
"src/core/lib/iomgr/socket_utils_common_posix.c",
"src/core/lib/iomgr/socket_utils_linux.c",
"src/core/lib/iomgr/socket_utils_posix.c",
@ -2015,7 +2015,6 @@ objc_library(
"src/core/lib/iomgr/endpoint_pair_windows.c",
"src/core/lib/iomgr/error.c",
"src/core/lib/iomgr/ev_epoll_linux.c",
"src/core/lib/iomgr/ev_poll_and_epoll_posix.c",
"src/core/lib/iomgr/ev_poll_posix.c",
"src/core/lib/iomgr/ev_posix.c",
"src/core/lib/iomgr/exec_ctx.c",
@ -2037,6 +2036,7 @@ objc_library(
"src/core/lib/iomgr/resolve_address_windows.c",
"src/core/lib/iomgr/resource_quota.c",
"src/core/lib/iomgr/sockaddr_utils.c",
"src/core/lib/iomgr/socket_mutator.c",
"src/core/lib/iomgr/socket_utils_common_posix.c",
"src/core/lib/iomgr/socket_utils_linux.c",
"src/core/lib/iomgr/socket_utils_posix.c",
@ -2253,7 +2253,6 @@ objc_library(
"src/core/lib/iomgr/endpoint_pair.h",
"src/core/lib/iomgr/error.h",
"src/core/lib/iomgr/ev_epoll_linux.h",
"src/core/lib/iomgr/ev_poll_and_epoll_posix.h",
"src/core/lib/iomgr/ev_poll_posix.h",
"src/core/lib/iomgr/ev_posix.h",
"src/core/lib/iomgr/exec_ctx.h",
@ -2277,6 +2276,7 @@ objc_library(
"src/core/lib/iomgr/sockaddr_posix.h",
"src/core/lib/iomgr/sockaddr_utils.h",
"src/core/lib/iomgr/sockaddr_windows.h",
"src/core/lib/iomgr/socket_mutator.h",
"src/core/lib/iomgr/socket_utils.h",
"src/core/lib/iomgr/socket_utils_posix.h",
"src/core/lib/iomgr/socket_windows.h",

@ -309,7 +309,6 @@ add_library(grpc
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_poll_and_epoll_posix.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -331,6 +330,7 @@ add_library(grpc
src/core/lib/iomgr/resolve_address_windows.c
src/core/lib/iomgr/resource_quota.c
src/core/lib/iomgr/sockaddr_utils.c
src/core/lib/iomgr/socket_mutator.c
src/core/lib/iomgr/socket_utils_common_posix.c
src/core/lib/iomgr/socket_utils_linux.c
src/core/lib/iomgr/socket_utils_posix.c
@ -588,7 +588,6 @@ add_library(grpc_cronet
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_poll_and_epoll_posix.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -610,6 +609,7 @@ add_library(grpc_cronet
src/core/lib/iomgr/resolve_address_windows.c
src/core/lib/iomgr/resource_quota.c
src/core/lib/iomgr/sockaddr_utils.c
src/core/lib/iomgr/socket_mutator.c
src/core/lib/iomgr/socket_utils_common_posix.c
src/core/lib/iomgr/socket_utils_linux.c
src/core/lib/iomgr/socket_utils_posix.c
@ -839,7 +839,6 @@ add_library(grpc_unsecure
src/core/lib/iomgr/endpoint_pair_windows.c
src/core/lib/iomgr/error.c
src/core/lib/iomgr/ev_epoll_linux.c
src/core/lib/iomgr/ev_poll_and_epoll_posix.c
src/core/lib/iomgr/ev_poll_posix.c
src/core/lib/iomgr/ev_posix.c
src/core/lib/iomgr/exec_ctx.c
@ -861,6 +860,7 @@ add_library(grpc_unsecure
src/core/lib/iomgr/resolve_address_windows.c
src/core/lib/iomgr/resource_quota.c
src/core/lib/iomgr/sockaddr_utils.c
src/core/lib/iomgr/socket_mutator.c
src/core/lib/iomgr/socket_utils_common_posix.c
src/core/lib/iomgr/socket_utils_linux.c
src/core/lib/iomgr/socket_utils_posix.c

@ -2643,7 +2643,6 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -2665,6 +2664,7 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/resolve_address_windows.c \
src/core/lib/iomgr/resource_quota.c \
src/core/lib/iomgr/sockaddr_utils.c \
src/core/lib/iomgr/socket_mutator.c \
src/core/lib/iomgr/socket_utils_common_posix.c \
src/core/lib/iomgr/socket_utils_linux.c \
src/core/lib/iomgr/socket_utils_posix.c \
@ -2940,7 +2940,6 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -2962,6 +2961,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/resolve_address_windows.c \
src/core/lib/iomgr/resource_quota.c \
src/core/lib/iomgr/sockaddr_utils.c \
src/core/lib/iomgr/socket_mutator.c \
src/core/lib/iomgr/socket_utils_common_posix.c \
src/core/lib/iomgr/socket_utils_linux.c \
src/core/lib/iomgr/socket_utils_posix.c \
@ -3228,7 +3228,6 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -3250,6 +3249,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/resolve_address_windows.c \
src/core/lib/iomgr/resource_quota.c \
src/core/lib/iomgr/sockaddr_utils.c \
src/core/lib/iomgr/socket_mutator.c \
src/core/lib/iomgr/socket_utils_common_posix.c \
src/core/lib/iomgr/socket_utils_linux.c \
src/core/lib/iomgr/socket_utils_posix.c \
@ -3445,7 +3445,6 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -3467,6 +3466,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/resolve_address_windows.c \
src/core/lib/iomgr/resource_quota.c \
src/core/lib/iomgr/sockaddr_utils.c \
src/core/lib/iomgr/socket_mutator.c \
src/core/lib/iomgr/socket_utils_common_posix.c \
src/core/lib/iomgr/socket_utils_linux.c \
src/core/lib/iomgr/socket_utils_posix.c \
@ -6990,6 +6990,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/empty_batch.c \
test/core/end2end/tests/filter_call_init_fails.c \
test/core/end2end/tests/filter_causes_close.c \
test/core/end2end/tests/filter_latency.c \
test/core/end2end/tests/graceful_server_shutdown.c \
test/core/end2end/tests/high_initial_seqno.c \
test/core/end2end/tests/hpack_size.c \
@ -7075,6 +7076,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/empty_batch.c \
test/core/end2end/tests/filter_call_init_fails.c \
test/core/end2end/tests/filter_causes_close.c \
test/core/end2end/tests/filter_latency.c \
test/core/end2end/tests/graceful_server_shutdown.c \
test/core/end2end/tests/high_initial_seqno.c \
test/core/end2end/tests/hpack_size.c \

@ -589,7 +589,6 @@
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll_linux.c',
'src/core/lib/iomgr/ev_poll_and_epoll_posix.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/exec_ctx.c',
@ -611,6 +610,7 @@
'src/core/lib/iomgr/resolve_address_windows.c',
'src/core/lib/iomgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
'src/core/lib/iomgr/socket_mutator.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
'src/core/lib/iomgr/socket_utils_posix.c',

@ -186,7 +186,6 @@ filegroups:
- src/core/lib/iomgr/endpoint_pair.h
- src/core/lib/iomgr/error.h
- src/core/lib/iomgr/ev_epoll_linux.h
- src/core/lib/iomgr/ev_poll_and_epoll_posix.h
- src/core/lib/iomgr/ev_poll_posix.h
- src/core/lib/iomgr/ev_posix.h
- src/core/lib/iomgr/exec_ctx.h
@ -210,6 +209,7 @@ filegroups:
- src/core/lib/iomgr/sockaddr_posix.h
- src/core/lib/iomgr/sockaddr_utils.h
- src/core/lib/iomgr/sockaddr_windows.h
- src/core/lib/iomgr/socket_mutator.h
- src/core/lib/iomgr/socket_utils.h
- src/core/lib/iomgr/socket_utils_posix.h
- src/core/lib/iomgr/socket_windows.h
@ -285,7 +285,6 @@ filegroups:
- src/core/lib/iomgr/endpoint_pair_windows.c
- src/core/lib/iomgr/error.c
- src/core/lib/iomgr/ev_epoll_linux.c
- src/core/lib/iomgr/ev_poll_and_epoll_posix.c
- src/core/lib/iomgr/ev_poll_posix.c
- src/core/lib/iomgr/ev_posix.c
- src/core/lib/iomgr/exec_ctx.c
@ -307,6 +306,7 @@ filegroups:
- src/core/lib/iomgr/resolve_address_windows.c
- src/core/lib/iomgr/resource_quota.c
- src/core/lib/iomgr/sockaddr_utils.c
- src/core/lib/iomgr/socket_mutator.c
- src/core/lib/iomgr/socket_utils_common_posix.c
- src/core/lib/iomgr/socket_utils_linux.c
- src/core/lib/iomgr/socket_utils_posix.c

@ -105,7 +105,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -127,6 +126,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/resolve_address_windows.c \
src/core/lib/iomgr/resource_quota.c \
src/core/lib/iomgr/sockaddr_utils.c \
src/core/lib/iomgr/socket_mutator.c \
src/core/lib/iomgr/socket_utils_common_posix.c \
src/core/lib/iomgr/socket_utils_linux.c \
src/core/lib/iomgr/socket_utils_posix.c \

@ -60,8 +60,7 @@ HTTP/2 related behavior (specified in [gRPC over HTTP2](http://www.grpc.io/docs/
Message framing (vs. [http2-transport-mapping](http://www.grpc.io/docs/guides/wire.html#http2-transport-mapping))
1. Response status encoded as part of the response body
* Key-value pairs formatted as HTTP/1.1 headers block (without the empty
newline \r\n to terminate the block)
* Key-value pairs encoded in the HTTP/2 [literal header format](https://tools.ietf.org/html/rfc7541#section-6.2) as a single header block.
2. 8th (MSB) bit of the 1st gRPC frame byte
* 0: data
* 1: trailers
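
For concreteness, here is a minimal C sketch of inspecting the 5-byte frame prefix described above: one flags byte whose MSB distinguishes data from trailers, followed by a 4-byte big-endian message length. The helper names are illustrative, not part of this change.

#include <stdbool.h>
#include <stdint.h>

/* 8th (MSB) bit of the 1st gRPC frame byte: 0 = data, 1 = trailers */
static bool frame_is_trailers(const uint8_t *frame) {
  return (frame[0] & 0x80) != 0;
}

/* Bytes 1-4 carry the message length, big-endian */
static uint32_t frame_length(const uint8_t *frame) {
  return ((uint32_t)frame[1] << 24) | ((uint32_t)frame[2] << 16) |
         ((uint32_t)frame[3] << 8) | (uint32_t)frame[4];
}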

@ -268,7 +268,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/endpoint_pair.h',
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/ev_epoll_linux.h',
'src/core/lib/iomgr/ev_poll_and_epoll_posix.h',
'src/core/lib/iomgr/ev_poll_posix.h',
'src/core/lib/iomgr/ev_posix.h',
'src/core/lib/iomgr/exec_ctx.h',
@ -292,6 +291,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/sockaddr_posix.h',
'src/core/lib/iomgr/sockaddr_utils.h',
'src/core/lib/iomgr/sockaddr_windows.h',
'src/core/lib/iomgr/socket_mutator.h',
'src/core/lib/iomgr/socket_utils.h',
'src/core/lib/iomgr/socket_utils_posix.h',
'src/core/lib/iomgr/socket_windows.h',
@ -447,7 +447,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll_linux.c',
'src/core/lib/iomgr/ev_poll_and_epoll_posix.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/exec_ctx.c',
@ -469,6 +468,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/resolve_address_windows.c',
'src/core/lib/iomgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
'src/core/lib/iomgr/socket_mutator.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
'src/core/lib/iomgr/socket_utils_posix.c',
@ -671,7 +671,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/endpoint_pair.h',
'src/core/lib/iomgr/error.h',
'src/core/lib/iomgr/ev_epoll_linux.h',
'src/core/lib/iomgr/ev_poll_and_epoll_posix.h',
'src/core/lib/iomgr/ev_poll_posix.h',
'src/core/lib/iomgr/ev_posix.h',
'src/core/lib/iomgr/exec_ctx.h',
@ -695,6 +694,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/sockaddr_posix.h',
'src/core/lib/iomgr/sockaddr_utils.h',
'src/core/lib/iomgr/sockaddr_windows.h',
'src/core/lib/iomgr/socket_mutator.h',
'src/core/lib/iomgr/socket_utils.h',
'src/core/lib/iomgr/socket_utils_posix.h',
'src/core/lib/iomgr/socket_windows.h',

@ -188,7 +188,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/endpoint_pair.h )
s.files += %w( src/core/lib/iomgr/error.h )
s.files += %w( src/core/lib/iomgr/ev_epoll_linux.h )
s.files += %w( src/core/lib/iomgr/ev_poll_and_epoll_posix.h )
s.files += %w( src/core/lib/iomgr/ev_poll_posix.h )
s.files += %w( src/core/lib/iomgr/ev_posix.h )
s.files += %w( src/core/lib/iomgr/exec_ctx.h )
@ -212,6 +211,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/sockaddr_posix.h )
s.files += %w( src/core/lib/iomgr/sockaddr_utils.h )
s.files += %w( src/core/lib/iomgr/sockaddr_windows.h )
s.files += %w( src/core/lib/iomgr/socket_mutator.h )
s.files += %w( src/core/lib/iomgr/socket_utils.h )
s.files += %w( src/core/lib/iomgr/socket_utils_posix.h )
s.files += %w( src/core/lib/iomgr/socket_windows.h )
@ -367,7 +367,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/endpoint_pair_windows.c )
s.files += %w( src/core/lib/iomgr/error.c )
s.files += %w( src/core/lib/iomgr/ev_epoll_linux.c )
s.files += %w( src/core/lib/iomgr/ev_poll_and_epoll_posix.c )
s.files += %w( src/core/lib/iomgr/ev_poll_posix.c )
s.files += %w( src/core/lib/iomgr/ev_posix.c )
s.files += %w( src/core/lib/iomgr/exec_ctx.c )
@ -389,6 +388,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/resolve_address_windows.c )
s.files += %w( src/core/lib/iomgr/resource_quota.c )
s.files += %w( src/core/lib/iomgr/sockaddr_utils.c )
s.files += %w( src/core/lib/iomgr/socket_mutator.c )
s.files += %w( src/core/lib/iomgr/socket_utils_common_posix.c )
s.files += %w( src/core/lib/iomgr/socket_utils_linux.c )
s.files += %w( src/core/lib/iomgr/socket_utils_posix.c )

@ -240,7 +240,7 @@ class ServerCompletionQueue : public CompletionQueue {
private:
bool is_frequently_polled_;
friend class ServerBuilder;
/// \param is_frequently_polled Informs the GPRC library about whether the
/// \param is_frequently_polled Informs the GRPC library about whether the
/// server completion queue would be actively polled (by calling Next() or
/// AsyncNext()). By default all server completion queues are assumed to be
/// frequently polled.

@ -79,6 +79,9 @@ class ChannelArguments {
/// Set the compression algorithm for the channel.
void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
/// Set the socket mutator for the channel.
void SetSocketMutator(grpc_socket_mutator* mutator);
/// The given string will be sent at the front of the user agent string.
void SetUserAgentPrefix(const grpc::string& user_agent_prefix);

@ -40,6 +40,8 @@ extern "C" {
/** Connectivity state of a channel. */
typedef enum {
/** channel has just been initialized */
GRPC_CHANNEL_INIT = -1,
/** channel is idle */
GRPC_CHANNEL_IDLE,
/** channel is connecting */

@ -84,6 +84,9 @@ typedef struct grpc_server grpc_server;
can have messages written to it and read from it. */
typedef struct grpc_call grpc_call;
/** The Socket Mutator interface allows changes on socket options */
typedef struct grpc_socket_mutator grpc_socket_mutator;
/** Type specifier for grpc_arg */
typedef enum {
GRPC_ARG_STRING,
@ -215,6 +218,8 @@ typedef struct {
/** Resolved addresses in a form used by the LB policy.
Not intended for external use. */
#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses"
/** The grpc_socket_mutator instance that set the socket options. A pointer. */
#define GRPC_ARG_SOCKET_MUTATOR "grpc.socket_mutator"
/** \} */
/** Result of a grpc call. If the caller satisfies the prerequisites of a

@ -368,14 +368,14 @@ typedef unsigned __int64 uint64_t;
#endif
#endif
#ifndef GPRC_PRINT_FORMAT_CHECK
#ifndef GPR_PRINT_FORMAT_CHECK
#ifdef __GNUC__
#define GPRC_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS) \
#define GPR_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS) \
__attribute__((format(printf, FORMAT_STR, ARGS)))
#else
#define GPRC_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS)
#define GPR_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS)
#endif
#endif /* GPRC_PRINT_FORMAT_CHECK */
#endif /* GPR_PRINT_FORMAT_CHECK */
#if GPR_FORBID_UNREACHABLE_CODE
#define GPR_UNREACHABLE_CODE(STATEMENT)
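
The corrected macro lets GCC and Clang type-check printf-style varargs at compile time. A minimal, self-contained sketch with a local copy of the macro (my_logf is illustrative; the (1, 2) arguments mirror gpr_log's (4, 5) pattern, naming the format-string position and the first vararg position):

#include <stdarg.h>
#include <stdio.h>

#ifdef __GNUC__
#define GPR_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS) \
  __attribute__((format(printf, FORMAT_STR, ARGS)))
#else
#define GPR_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS)
#endif

/* format string is argument 1, variadic args start at argument 2 */
void my_logf(const char *format, ...) GPR_PRINT_FORMAT_CHECK(1, 2);

void my_logf(const char *format, ...) {
  va_list args;
  va_start(args, format);
  vfprintf(stderr, format, args);
  va_end(args);
}

int main(void) {
  my_logf("%d items\n", 3); /* OK */
  /* my_logf("%s\n", 42);      would now trigger -Wformat on GCC/Clang */
  return 0;
}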

@ -75,7 +75,7 @@ const char *gpr_log_severity_string(gpr_log_severity severity);
/* Log a message. It's advised to use GPR_xxx above to generate the context
* for each message */
GPRAPI void gpr_log(const char *file, int line, gpr_log_severity severity,
const char *format, ...) GPRC_PRINT_FORMAT_CHECK(4, 5);
const char *format, ...) GPR_PRINT_FORMAT_CHECK(4, 5);
GPRAPI void gpr_log_message(const char *file, int line,
gpr_log_severity severity, const char *message);

@ -55,7 +55,7 @@ GPRAPI char *gpr_strdup(const char *src);
On error, returns -1 and sets *strp to NULL. If the format string is bad,
the result is undefined. */
GPRAPI int gpr_asprintf(char **strp, const char *format, ...)
GPRC_PRINT_FORMAT_CHECK(2, 3);
GPR_PRINT_FORMAT_CHECK(2, 3);
#ifdef __cplusplus
}

@ -195,7 +195,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_linux.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_and_epoll_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/exec_ctx.h" role="src" />
@ -219,6 +218,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/sockaddr_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/sockaddr_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/sockaddr_windows.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_mutator.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_windows.h" role="src" />
@ -374,7 +374,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/endpoint_pair_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/error.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_epoll_linux.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_and_epoll_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/exec_ctx.c" role="src" />
@ -396,6 +395,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/resolve_address_windows.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/resource_quota.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/sockaddr_utils.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_mutator.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils_common_posix.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils_linux.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/socket_utils_posix.c" role="src" />

@ -84,7 +84,7 @@ const void *census_log_read_next(size_t *bytes_available);
*/
size_t census_log_remaining_space(void);
/* Returns the number of times gprc_stats_log_start_write() failed due to
/* Returns the number of times grpc_stats_log_start_write() failed due to
out-of-space. */
int census_log_out_of_space_count(void);

@ -88,7 +88,7 @@ const void* census_log_read_next(size_t* bytes_available);
*/
size_t census_log_remaining_space(void);
/* Returns the number of times gprc_stats_log_start_write() failed due to
/* Returns the number of times grpc_stats_log_start_write() failed due to
out-of-space. */
int64_t census_log_out_of_space_count(void);

@ -691,7 +691,7 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
calld->deadline, &subchannel_call);
calld->call_start_time, calld->deadline, &subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
@ -944,7 +944,7 @@ retry:
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
calld->deadline, &subchannel_call);
calld->call_start_time, calld->deadline, &subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));

@ -702,15 +702,15 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **call) {
grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec start_time,
gpr_timespec deadline, grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
*call = gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = con; // Ref is added below.
grpc_error *error =
grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, *call,
NULL, NULL, path, deadline, callstk);
NULL, NULL, path, start_time, deadline, callstk);
if (error != GRPC_ERROR_NONE) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);

@ -111,8 +111,8 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
/** construct a subchannel call */
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **subchannel_call);
grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec start_time,
gpr_timespec deadline, grpc_subchannel_call **subchannel_call);
/** process a transport level op */
void grpc_connected_subchannel_process_transport_op(

@ -761,17 +761,24 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->rr_policy) {
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
}
if (glb_policy->started_picking) {
if (glb_policy->lb_call != NULL) {
grpc_call_cancel(glb_policy->lb_call, NULL);
/* lb_on_server_status_received will pick up the cancel and clean up */
}
}
grpc_connectivity_state_set(
exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE("Channel Shutdown"), "glb_shutdown");
/* We need a copy of the lb_call pointer because we can't cancel the call
* while holding glb_policy->mu: lb_on_server_status_received, invoked due to
* the cancel, needs to acquire that same lock */
grpc_call *lb_call = glb_policy->lb_call;
glb_policy->lb_call = NULL;
gpr_mu_unlock(&glb_policy->mu);
/* glb_policy->lb_call and this local lb_call must be consistent at this point
* because glb_policy->lb_call is only assigned in lb_call_init_locked as part
* of query_for_backends_locked, which can only be invoked while
* glb_policy->shutting_down is false. */
if (lb_call != NULL) {
grpc_call_cancel(lb_call, NULL);
/* lb_on_server_status_received will pick up the cancel and clean up */
}
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
@ -955,9 +962,10 @@ static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void lb_call_init(glb_lb_policy *glb_policy) {
static void lb_call_init_locked(glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
GPR_ASSERT(glb_policy->server_name[0] != '\0');
GPR_ASSERT(!glb_policy->shutting_down);
/* Note the following LB call progresses every time there's activity in \a
* glb_policy->base.interested_parties, which is comprised of the polling
@ -1010,7 +1018,9 @@ static void lb_call_destroy_locked(glb_lb_policy *glb_policy) {
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->lb_channel != NULL);
lb_call_init(glb_policy);
if (glb_policy->shutting_down) return;
lb_call_init_locked(glb_policy);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
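
The comment in glb_shutdown() above describes a classic deadlock-avoidance pattern: snapshot the pointer under the mutex, clear it so no new users appear, release the mutex, and only then issue the cancellation that may re-enter the same lock. A stripped-down sketch of that pattern (the policy struct and function names are illustrative):

#include <grpc/grpc.h>
#include <grpc/support/sync.h>

typedef struct {
  gpr_mu mu;
  grpc_call *lb_call; /* assigned elsewhere while holding mu */
} policy;

static void policy_shutdown(policy *p) {
  gpr_mu_lock(&p->mu);
  grpc_call *lb_call = p->lb_call; /* snapshot under the lock */
  p->lb_call = NULL;               /* no new users after this point */
  gpr_mu_unlock(&p->mu);
  if (lb_call != NULL) {
    /* safe: the status callback can now take p->mu without deadlocking */
    grpc_call_cancel(lb_call, NULL);
  }
}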

@ -292,6 +292,8 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
} else {
loop:
switch (p->checking_connectivity) {
case GRPC_CHANNEL_INIT:
GPR_UNREACHABLE_CODE(return );
case GRPC_CHANNEL_READY:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, GRPC_ERROR_NONE,

@ -116,8 +116,13 @@ typedef struct {
grpc_closure connectivity_changed_closure;
/** this subchannels current position in subchannel->ready_list */
ready_list *ready_list_node;
/** last observed connectivity */
grpc_connectivity_state connectivity_state;
/** last observed connectivity. Not updated by
* \a grpc_subchannel_notify_on_state_change. Used to determine the previous
* state while processing the new state in \a rr_connectivity_changed */
grpc_connectivity_state prev_connectivity_state;
/** current connectivity state. Updated by \a
* grpc_subchannel_notify_on_state_change */
grpc_connectivity_state curr_connectivity_state;
/** the subchannel's target user data */
void *user_data;
/** vtable to operate over \a user_data */
@ -127,6 +132,7 @@ typedef struct {
struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
gpr_mu mu;
/** total number of addresses received at creation time */
size_t num_addresses;
@ -135,8 +141,11 @@ struct round_robin_lb_policy {
size_t num_subchannels;
subchannel_data **subchannels;
/** mutex protecting remaining members */
gpr_mu mu;
/** how many subchannels are in TRANSIENT_FAILURE */
size_t num_transient_failures;
/** how many subchannels are IDLE */
size_t num_idle;
/** have we started picking? */
int started_picking;
/** are we shutting down? */
@ -258,6 +267,10 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
gpr_free(node);
}
static bool is_ready_list_empty(round_robin_lb_policy *p) {
return p->ready_list.prev == NULL;
}
static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *elem;
@ -268,7 +281,7 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
for (size_t i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin_destroy");
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_destroy");
if (sd->user_data != NULL) {
GPR_ASSERT(sd->user_data_vtable != NULL);
sd->user_data_vtable->destroy(sd->user_data);
@ -381,18 +394,18 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%" PRIuPTR, (void *)p,
p->num_subchannels);
}
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
sd->connectivity_state = GRPC_CHANNEL_IDLE;
/* use some sentinel value outside of the range of grpc_connectivity_state
* to signal an undefined previous state. We won't be referring to this
* value again and it'll be overwritten after the first call to
* rr_connectivity_changed */
sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity");
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
GRPC_LB_POLICY_WEAK_REF(&p->base, "round_robin_connectivity");
&sd->curr_connectivity_state, &sd->connectivity_changed_closure);
}
}
@ -422,7 +435,7 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
/* readily available, report right away */
*target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"picked");
"rr_picked");
if (user_data != NULL) {
*user_data = selected->user_data;
@ -453,125 +466,184 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
static void update_state_counters(subchannel_data *sd) {
round_robin_lb_policy *p = sd->policy;
/* update p->num_transient_failures (resp. p->num_idle): if the previous
* state was TRANSIENT_FAILURE (resp. IDLE), decrement
* p->num_transient_failures (resp. p->num_idle). */
if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
GPR_ASSERT(p->num_transient_failures > 0);
--p->num_transient_failures;
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) {
GPR_ASSERT(p->num_idle > 0);
--p->num_idle;
}
}
/* sd is the subchannel_data associated with the updated subchannel.
* shutdown_error will only be used upon policy transition to TRANSIENT_FAILURE
* or SHUTDOWN */
static grpc_connectivity_state update_lb_connectivity_status(
grpc_exec_ctx *exec_ctx, subchannel_data *sd, grpc_error *error) {
/* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled).
*
* 1) RULE: ANY subchannel is READY => policy is READY.
* CHECK: At least one subchannel is ready iff p->ready_list is NOT empty.
*
* 2) RULE: ANY subchannel is CONNECTING => policy is CONNECTING.
* CHECK: sd->curr_connectivity_state == CONNECTING.
*
* 3) RULE: ALL subchannels are SHUTDOWN => policy is SHUTDOWN.
* CHECK: p->num_subchannels = 0.
*
* 4) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is
* TRANSIENT_FAILURE.
* CHECK: p->num_transient_failures == p->num_subchannels.
*
* 5) RULE: ALL subchannels are IDLE => policy is IDLE.
* CHECK: p->num_idle == p->num_subchannels.
*/
round_robin_lb_policy *p = sd->policy;
if (!is_ready_list_empty(p)) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
return GRPC_CHANNEL_READY;
} else if (sd->curr_connectivity_state ==
GRPC_CHANNEL_CONNECTING) { /* 2) CONNECTING */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
"rr_connecting");
return GRPC_CHANNEL_CONNECTING;
} else if (p->num_subchannels == 0) { /* 3) SHUTDOWN */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"rr_shutdown");
return GRPC_CHANNEL_SHUTDOWN;
} else if (p->num_transient_failures ==
p->num_subchannels) { /* 4) TRANSIENT_FAILURE */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "rr_transient_failure");
return GRPC_CHANNEL_TRANSIENT_FAILURE;
} else if (p->num_idle == p->num_subchannels) { /* 5) IDLE */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_IDLE,
GRPC_ERROR_NONE, "rr_idle");
return GRPC_CHANNEL_IDLE;
}
/* no change */
return sd->curr_connectivity_state;
}
static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
int unref = 0;
GRPC_ERROR_REF(error);
gpr_mu_lock(&p->mu);
if (p->shutdown) {
unref = 1;
} else {
switch (sd->connectivity_state) {
case GRPC_CHANNEL_READY:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, GRPC_ERROR_REF(error),
"connecting_ready");
/* add the newly connected subchannel to the list of connected ones.
* Note that it goes to the "end of the line". */
sd->ready_list_node = add_connected_sc_locked(p, sd);
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
* p->pending_picks. This preemptively replicates rr_pick()'s actions. */
ready_list *selected = peek_next_connected_locked(p);
GPR_ASSERT(selected != NULL);
if (p->pending_picks != NULL) {
/* if the selected subchannel is going to be used for the pending
* picks, update the last picked pointer */
advance_last_picked_locked(p);
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
GRPC_ERROR_UNREF(error);
return;
}
switch (sd->curr_connectivity_state) {
case GRPC_CHANNEL_INIT:
GPR_UNREACHABLE_CODE(return );
case GRPC_CHANNEL_READY:
/* add the newly connected subchannel to the list of connected ones.
* Note that it goes to the "end of the line". */
sd->ready_list_node = add_connected_sc_locked(p, sd);
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
* p->pending_picks. This preemptively replicates rr_pick()'s actions. */
ready_list *selected = peek_next_connected_locked(p);
GPR_ASSERT(selected != NULL);
if (p->pending_picks != NULL) {
/* if the selected subchannel is going to be used for the pending
* picks, update the last picked pointer */
advance_last_picked_locked(p);
}
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"rr_picked");
if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
(void *)selected->subchannel, (void *)selected);
}
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
update_lb_connectivity_status(exec_ctx, sd, error);
sd->prev_connectivity_state = sd->curr_connectivity_state;
/* renew notification: reuses the "rr_connectivity" weak ref */
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->curr_connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_IDLE:
++p->num_idle;
/* fallthrough */
case GRPC_CHANNEL_CONNECTING:
update_state_counters(sd);
update_lb_connectivity_status(exec_ctx, sd, error);
sd->prev_connectivity_state = sd->curr_connectivity_state;
/* renew notification: reuses the "rr_connectivity" weak ref */
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->curr_connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
++p->num_transient_failures;
/* remove from ready list if still present */
if (sd->ready_list_node != NULL) {
remove_disconnected_sc_locked(p, sd->ready_list_node);
sd->ready_list_node = NULL;
}
update_lb_connectivity_status(exec_ctx, sd, error);
sd->prev_connectivity_state = sd->curr_connectivity_state;
/* renew notification: reuses the "rr_connectivity" weak ref */
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->curr_connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_SHUTDOWN:
update_state_counters(sd);
if (sd->ready_list_node != NULL) {
remove_disconnected_sc_locked(p, sd->ready_list_node);
sd->ready_list_node = NULL;
}
--p->num_subchannels;
GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
p->subchannels[p->num_subchannels]);
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown");
p->subchannels[sd->index]->index = sd->index;
if (update_lb_connectivity_status(exec_ctx, sd, error) ==
GRPC_CHANNEL_SHUTDOWN) {
/* the policy is shutting down. Flush all the pending picks... */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"picked");
if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
(void *)selected->subchannel, (void *)selected);
}
*pp->target = NULL;
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, sd->connectivity_state,
GRPC_ERROR_REF(error), "connecting_changed");
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
/* renew state notification */
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
/* remove from ready list if still present */
if (sd->ready_list_node != NULL) {
remove_disconnected_sc_locked(p, sd->ready_list_node);
sd->ready_list_node = NULL;
}
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connecting_transient_failure");
break;
case GRPC_CHANNEL_SHUTDOWN:
if (sd->ready_list_node != NULL) {
remove_disconnected_sc_locked(p, sd->ready_list_node);
sd->ready_list_node = NULL;
}
p->num_subchannels--;
GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
p->subchannels[p->num_subchannels]);
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
p->subchannels[sd->index]->index = sd->index;
gpr_free(sd);
unref = 1;
if (p->num_subchannels == 0) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING("Round Robin Channels Exhausted",
&error, 1),
"no_more_channels");
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
NULL);
gpr_free(pp);
}
} else {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "subchannel_failed");
}
} /* switch */
} /* !unref */
gpr_mu_unlock(&p->mu);
if (unref) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
}
gpr_free(sd);
/* unref the "rr_connectivity" weak ref from start_picking */
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
break;
}
gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
@ -607,9 +679,9 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_unlock(&p->mu);
target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"picked");
"rr_picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "picked");
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
} else {
gpr_mu_unlock(&p->mu);
grpc_exec_ctx_sched(exec_ctx, closure,
@ -705,6 +777,11 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "Created RR policy at %p with %lu subchannels",
(void *)p, (unsigned long)p->num_subchannels);
}
gpr_mu_init(&p->mu);
return &p->base;
}
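
The five priority-ordered aggregation rules documented in update_lb_connectivity_status() above can be read as a small decision function. A compact sketch, with counters mirroring the fields the policy keeps (struct and function names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <grpc/impl/codegen/connectivity_state.h>

typedef struct {
  bool ready_list_nonempty;      /* rule 1: any subchannel READY */
  size_t num_subchannels;        /* rule 3: zero means all shut down */
  size_t num_transient_failures; /* rule 4 */
  size_t num_idle;               /* rule 5 */
} rr_counters;

/* \a latest is the just-observed state of the updated subchannel */
static grpc_connectivity_state rr_aggregate(const rr_counters *c,
                                            grpc_connectivity_state latest) {
  if (c->ready_list_nonempty) return GRPC_CHANNEL_READY;        /* rule 1 */
  if (latest == GRPC_CHANNEL_CONNECTING)
    return GRPC_CHANNEL_CONNECTING;                             /* rule 2 */
  if (c->num_subchannels == 0) return GRPC_CHANNEL_SHUTDOWN;    /* rule 3 */
  if (c->num_transient_failures == c->num_subchannels)
    return GRPC_CHANNEL_TRANSIENT_FAILURE;                      /* rule 4 */
  if (c->num_idle == c->num_subchannels) return GRPC_CHANNEL_IDLE; /* 5 */
  return latest; /* no change */
}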

@ -1037,7 +1037,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
"op.send_initial_metadata");
}
} else {
s->send_trailing_metadata = NULL;
s->send_initial_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_CREATE(
@ -1523,13 +1523,17 @@ static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_error *error) {
error =
removal_error(error, s, "Pending writes failed due to stream closure");
s->fetching_send_message = NULL;
s->send_initial_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_REF(error),
"send_initial_metadata_finished");
s->send_trailing_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
s->fetching_send_message = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
"fetching_send_message_finished");
@ -2294,6 +2298,14 @@ static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
}
/*******************************************************************************
* MONITORING
*/
static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *t) {
return ((grpc_chttp2_transport *)t)->ep;
}
static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
"chttp2",
init_stream,
@ -2303,7 +2315,8 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
perform_transport_op,
destroy_stream,
destroy_transport,
chttp2_get_peer};
chttp2_get_peer,
chttp2_get_endpoint};
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,

@ -471,7 +471,8 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdstr_as_c_string(md->value));
*cached_timeout = gpr_inf_future(GPR_TIMESPAN);
}
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
cached_timeout =
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
grpc_chttp2_incoming_metadata_buffer_set_deadline(
&s->metadata_buffer[0],

@ -42,6 +42,7 @@
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/incoming_metadata.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
@ -1095,6 +1096,11 @@ static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
return NULL;
}
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *gt) {
return NULL;
}
static void perform_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_transport_op *op) {}
@ -1107,4 +1113,5 @@ const grpc_transport_vtable grpc_cronet_vtable = {sizeof(stream_obj),
perform_op,
destroy_stream,
destroy_transport,
get_peer};
get_peer,
get_endpoint};

@ -298,6 +298,12 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
}
}
grpc_channel_args *grpc_channel_args_set_socket_mutator(
grpc_channel_args *a, grpc_socket_mutator *mutator) {
grpc_arg tmp = grpc_socket_mutator_to_arg(mutator);
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
int grpc_channel_args_compare(const grpc_channel_args *a,
const grpc_channel_args *b) {
int c = GPR_ICMP(a->num_args, b->num_args);

@ -36,6 +36,7 @@
#include <grpc/compression.h>
#include <grpc/grpc.h>
#include "src/core/lib/iomgr/socket_mutator.h"
// Channel args are intentionally immutable, to avoid the need for locking.
@ -100,6 +101,13 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
int grpc_channel_args_compare(const grpc_channel_args *a,
const grpc_channel_args *b);
/** Returns a channel arg instance with socket mutator added. The socket mutator
* will perform its mutate_fd method on all file descriptors used by the
* channel.
* If \a a is non-NULL, its args are copied. */
grpc_channel_args *grpc_channel_args_set_socket_mutator(
grpc_channel_args *a, grpc_socket_mutator *mutator);
/** Returns the value of argument \a name from \a args, or NULL if not found. */
const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
const char *name);
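
A minimal usage sketch of the new helper. my_mutator_create is hypothetical (one possible shape appears after the socket_mutator.c listing at the end of this diff); the trailing unref reflects that the copied args take their own reference through socket_mutator_arg_copy, an assumption based on the pointer-arg vtable shown in socket_mutator.c below.

#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/socket_mutator.h"

extern grpc_socket_mutator *my_mutator_create(int tos); /* hypothetical */

grpc_channel_args *add_mutator(grpc_channel_args *args) {
  grpc_socket_mutator *m = my_mutator_create(0x10);
  /* copies \a args and appends a GRPC_ARG_SOCKET_MUTATOR pointer arg */
  grpc_channel_args *new_args =
      grpc_channel_args_set_socket_mutator(args, m);
  grpc_socket_mutator_unref(m); /* new_args holds its own ref via the vtable */
  return new_args;
}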

@ -162,7 +162,8 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack) {
grpc_mdstr *path, gpr_timespec start_time, gpr_timespec deadline,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
size_t count = channel_stack->count;
@ -179,7 +180,7 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
args.start_time = gpr_now(GPR_CLOCK_MONOTONIC);
args.start_time = start_time;
for (i = 0; i < count; i++) {
args.call_stack = call_stack;
args.server_transport_data = transport_server_data;

@ -231,7 +231,8 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack);
grpc_mdstr *path, gpr_timespec start_time, gpr_timespec deadline,
grpc_call_stack *call_stack);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
* op is started */
void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,

@ -66,6 +66,8 @@ char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
return ep->vtable->get_peer(ep);
}
int grpc_endpoint_get_fd(grpc_endpoint* ep) { return ep->vtable->get_fd(ep); }
grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) {
return ep->vtable->get_workqueue(ep);
}

@ -61,6 +61,7 @@ struct grpc_endpoint_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
char *(*get_peer)(grpc_endpoint *ep);
int (*get_fd)(grpc_endpoint *ep);
};
/* When data is available on the connection, calls the callback with slices.
@ -73,6 +74,10 @@ void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
char *grpc_endpoint_get_peer(grpc_endpoint *ep);
/* Get the file descriptor used by \a ep. Return -1 if \a ep is not using an fd.
*/
int grpc_endpoint_get_fd(grpc_endpoint *ep);
/* Retrieve a reference to the workqueue associated with this endpoint */
grpc_workqueue *grpc_endpoint_get_workqueue(grpc_endpoint *ep);
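
A tiny illustrative helper over the new accessor; per the contract documented above, -1 signals an endpoint that is not backed by a POSIX file descriptor, so callers must handle both cases.

#include <stdbool.h>
#include "src/core/lib/iomgr/endpoint.h"

/* true iff \a ep is fd-backed and fd-level tweaks are possible */
static bool endpoint_has_fd(grpc_endpoint *ep) {
  return grpc_endpoint_get_fd(ep) >= 0;
}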

@ -163,7 +163,7 @@ static void fd_global_shutdown(void);
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
#endif /* !defined(GPRC_PI_REF_COUNT_DEBUG) */
#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {

File diff suppressed because it is too large (src/core/lib/iomgr/ev_poll_and_epoll_posix.c, 2076 lines changed)

@ -1,41 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H
#define GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H
#include "src/core/lib/iomgr/ev_posix.h"
const grpc_event_engine_vtable *grpc_init_poll_and_epoll_posix(void);
#endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H */

@ -45,7 +45,6 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/ev_epoll_linux.h"
#include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h"
#include "src/core/lib/iomgr/ev_poll_posix.h"
#include "src/core/lib/support/env.h"
@ -67,7 +66,6 @@ static const event_engine_factory g_factories[] = {
{"epoll", grpc_init_epoll_linux},
{"poll", grpc_init_poll_posix},
{"poll-cv", grpc_init_poll_cv_posix},
{"legacy", grpc_init_poll_and_epoll_posix},
};
static void add(const char *beg, const char *end, char ***ss, size_t *ns) {

@ -0,0 +1,98 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/lib/iomgr/socket_mutator.h"
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
void grpc_socket_mutator_init(grpc_socket_mutator *mutator,
const grpc_socket_mutator_vtable *vtable) {
mutator->vtable = vtable;
gpr_ref_init(&mutator->refcount, 1);
}
grpc_socket_mutator *grpc_socket_mutator_ref(grpc_socket_mutator *mutator) {
gpr_ref(&mutator->refcount);
return mutator;
}
bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator *mutator, int fd) {
return mutator->vtable->mutate_fd(fd, mutator);
}
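/* The comparison below yields a total order: identical pointers compare
   equal; mutators with different vtables order by vtable address; only
   mutators of the same concrete type are delegated to their own comparator. */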
int grpc_socket_mutator_compare(grpc_socket_mutator *a,
grpc_socket_mutator *b) {
int c = GPR_ICMP(a, b);
if (c != 0) {
grpc_socket_mutator *sma = a;
grpc_socket_mutator *smb = b;
c = GPR_ICMP(sma->vtable, smb->vtable);
if (c == 0) {
c = sma->vtable->compare(sma, smb);
}
}
return c;
}
void grpc_socket_mutator_unref(grpc_socket_mutator *mutator) {
if (gpr_unref(&mutator->refcount)) {
mutator->vtable->destroy(mutator);
}
}
static void *socket_mutator_arg_copy(void *p) {
return grpc_socket_mutator_ref(p);
}
static void socket_mutator_arg_destroy(void *p) {
grpc_socket_mutator_unref(p);
}
static int socket_mutator_cmp(void *a, void *b) {
return grpc_socket_mutator_compare((grpc_socket_mutator *)a,
(grpc_socket_mutator *)b);
}
static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
arg.key = GRPC_ARG_SOCKET_MUTATOR;
arg.value.pointer.vtable = &socket_mutator_arg_vtable;
arg.value.pointer.p = mutator;
return arg;
}
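For context, a minimal usage sketch of the new channel arg: a caller wraps its
mutator with grpc_socket_mutator_to_arg and passes it through the channel args
at channel-creation time (my_mutator_create below is a hypothetical factory,
not part of this change):

/* Hypothetical sketch: attach a socket mutator to a channel via its args. */
grpc_socket_mutator *mutator = my_mutator_create();
grpc_arg arg = grpc_socket_mutator_to_arg(mutator);
grpc_channel_args args = {1, &arg};
grpc_channel *channel =
    grpc_insecure_channel_create("localhost:50051", &args, NULL);
/* The channel copies the arg, taking its own reference through the vtable,
   so the caller can drop its reference afterwards. */
grpc_socket_mutator_unref(mutator);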

@ -0,0 +1,80 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H
#define GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/sync.h>
#ifdef __cplusplus
extern "C" {
#endif
/** The virtual table of grpc_socket_mutator */
typedef struct {
/** Mutates the socket options of \a fd */
bool (*mutate_fd)(int fd, grpc_socket_mutator *mutator);
/** Compare socket mutator \a a and \a b */
int (*compare)(grpc_socket_mutator *a, grpc_socket_mutator *b);
/** Destroys the socket mutator instance */
void (*destroy)(grpc_socket_mutator *mutator);
} grpc_socket_mutator_vtable;
/** The Socket Mutator interface allows changing socket options */
struct grpc_socket_mutator {
const grpc_socket_mutator_vtable *vtable;
gpr_refcount refcount;
};
/** Called by concrete implementations to initialize the base struct */
void grpc_socket_mutator_init(grpc_socket_mutator *mutator,
const grpc_socket_mutator_vtable *vtable);
/** Wrap \a mutator as a grpc_arg */
grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator);
/** Perform the file descriptor mutation operation of \a mutator on \a fd */
bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator *mutator, int fd);
/** Compares \a a and \a b; returns 0 iff they are the same mutator or have the same settings */
int grpc_socket_mutator_compare(grpc_socket_mutator *a, grpc_socket_mutator *b);
grpc_socket_mutator *grpc_socket_mutator_ref(grpc_socket_mutator *mutator);
void grpc_socket_mutator_unref(grpc_socket_mutator *mutator);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H */
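To make the vtable contract concrete, here is a hedged sketch of one possible
implementation; the tos_mutator type, its helpers, and the choice of IP_TOS
are illustrative assumptions, not part of this change:

#include "src/core/lib/iomgr/socket_mutator.h"
#include <grpc/support/alloc.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <sys/socket.h>

/* Hypothetical concrete mutator that sets the IP TOS byte on each new fd. */
typedef struct {
  grpc_socket_mutator base; /* must be the first member so casts work */
  int tos;
} tos_mutator;

static bool tos_mutate_fd(int fd, grpc_socket_mutator *m) {
  tos_mutator *self = (tos_mutator *)m;
  return setsockopt(fd, IPPROTO_IP, IP_TOS, &self->tos, sizeof(self->tos)) == 0;
}

static int tos_compare(grpc_socket_mutator *a, grpc_socket_mutator *b) {
  return ((tos_mutator *)a)->tos - ((tos_mutator *)b)->tos;
}

static void tos_destroy(grpc_socket_mutator *m) { gpr_free(m); }

static const grpc_socket_mutator_vtable tos_vtable = {tos_mutate_fd,
                                                      tos_compare, tos_destroy};

grpc_socket_mutator *tos_mutator_create(int tos) {
  tos_mutator *m = gpr_malloc(sizeof(*m));
  grpc_socket_mutator_init(&m->base, &tos_vtable);
  m->tos = tos;
  return &m->base;
}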

@ -209,6 +209,15 @@ grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) {
return GRPC_ERROR_NONE;
}
/* Apply a grpc_socket_mutator to a socket */
grpc_error *grpc_set_socket_with_mutator(int fd, grpc_socket_mutator *mutator) {
GPR_ASSERT(mutator);
if (!grpc_socket_mutator_mutate_fd(mutator, fd)) {
return GRPC_ERROR_CREATE("grpc_socket_mutator failed.");
}
return GRPC_ERROR_NONE;
}
static gpr_once g_probe_ipv6_once = GPR_ONCE_INIT;
static int g_ipv6_loopback_available;

@ -39,7 +39,9 @@
#include <sys/socket.h>
#include <unistd.h>
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/socket_mutator.h"
/* a wrapper for accept or accept4 */
int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
@ -88,6 +90,9 @@ grpc_error *grpc_set_socket_sndbuf(int fd, int buffer_size_bytes);
/* Tries to set the socket's receive buffer to given size. */
grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes);
/* Tries to configure the socket using the given grpc_socket_mutator */
grpc_error *grpc_set_socket_with_mutator(int fd, grpc_socket_mutator *mutator);
/* An enum to keep track of IPv4/IPv6 socket modes.
Currently, this information is only used when a socket is first created, but

@ -34,6 +34,7 @@
#ifndef GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H
#define GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/pollset_set.h"

@ -51,6 +51,7 @@
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_posix.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_mutator.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
#include "src/core/lib/iomgr/timer.h"
@ -73,7 +74,8 @@ typedef struct {
grpc_channel_args *channel_args;
} async_connect;
static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd) {
static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd,
const grpc_channel_args *channel_args) {
grpc_error *err = GRPC_ERROR_NONE;
GPR_ASSERT(fd >= 0);
@ -88,6 +90,16 @@ static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd) {
}
err = grpc_set_socket_no_sigpipe_if_possible(fd);
if (err != GRPC_ERROR_NONE) goto error;
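/* If the channel args carry a GRPC_ARG_SOCKET_MUTATOR, apply it to the
   freshly created fd before the connect is attempted. */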
if (channel_args) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_SOCKET_MUTATOR)) {
GPR_ASSERT(channel_args->args[i].type == GRPC_ARG_POINTER);
grpc_socket_mutator *mutator = channel_args->args[i].value.pointer.p;
err = grpc_set_socket_with_mutator(fd, mutator);
if (err != GRPC_ERROR_NONE) goto error;
}
}
}
goto done;
error:
@ -287,7 +299,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
addr = &addr4_copy;
}
if ((error = prepare_socket(addr, fd)) != GRPC_ERROR_NONE) {
if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
return;
}

@ -493,6 +493,11 @@ static char *tcp_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string);
}
static int tcp_get_fd(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return tcp->fd;
}
static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return grpc_fd_get_workqueue(tcp->em_fd);
@ -511,7 +516,8 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_shutdown,
tcp_destroy,
tcp_get_resource_user,
tcp_get_peer};
tcp_get_peer,
tcp_get_fd};
grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
grpc_resource_quota *resource_quota,

@ -325,10 +325,13 @@ static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; }
static int uv_get_fd(grpc_endpoint *ep) { return -1; }
static grpc_endpoint_vtable vtable = {
uv_endpoint_read, uv_endpoint_write, uv_get_workqueue,
uv_add_to_pollset, uv_add_to_pollset_set, uv_endpoint_shutdown,
uv_destroy, uv_get_resource_user, uv_get_peer};
uv_destroy, uv_get_resource_user, uv_get_peer,
uv_get_fd};
grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_resource_quota *resource_quota,

@ -402,6 +402,8 @@ static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) {
return tcp->resource_user;
}
static int win_get_fd(grpc_endpoint *ep) { return -1; }
static grpc_endpoint_vtable vtable = {win_read,
win_write,
win_get_workqueue,
@ -410,7 +412,8 @@ static grpc_endpoint_vtable vtable = {win_read,
win_shutdown,
win_destroy,
win_get_resource_user,
win_get_peer};
win_get_peer,
win_get_fd};
grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
grpc_resource_quota *resource_quota,

@ -144,17 +144,44 @@ grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
return &c->base;
}
static char *redact_private_key(const char *json_key) {
char *json_copy = gpr_strdup(json_key);
grpc_json *json = grpc_json_parse_string(json_copy);
if (!json) {
gpr_free(json_copy);
return gpr_strdup("<Json failed to parse.>");
}
const char *redacted = "<redacted>";
grpc_json *current = json->child;
while (current) {
if (current->type == GRPC_JSON_STRING &&
strcmp(current->key, "private_key") == 0) {
current->value = (char *)redacted;
break;
}
current = current->next;
}
char *clean_json = grpc_json_dump_to_string(json, 2);
gpr_free(json_copy);
grpc_json_destroy(json);
return clean_json;
}
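As a rough illustration (with a fake, abbreviated key), the traced JSON goes
from

  {"type": "service_account", "private_key": "-----BEGIN PRIVATE KEY-----..."}

to the pretty-printed form

  {
    "type": "service_account",
    "private_key": "<redacted>"
  }

so the API-trace log line never contains key material.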
grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
const char *json_key, gpr_timespec token_lifetime, void *reserved) {
GRPC_API_TRACE(
"grpc_service_account_jwt_access_credentials_create("
"json_key=%s, "
"token_lifetime="
"gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
5, (json_key, token_lifetime.tv_sec, token_lifetime.tv_nsec,
(int)token_lifetime.clock_type, reserved));
if (grpc_api_trace) {
char *clean_json = redact_private_key(json_key);
gpr_log(GPR_INFO,
"grpc_service_account_jwt_access_credentials_create("
"json_key=%s, "
"token_lifetime="
"gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
clean_json, token_lifetime.tv_sec, token_lifetime.tv_nsec,
(int)token_lifetime.clock_type, reserved);
gpr_free(clean_json);
}
GPR_ASSERT(reserved == NULL);
return grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
grpc_auth_json_key_create_from_string(json_key), token_lifetime);

@ -392,15 +392,32 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token(
return &c->base.base;
}
static char *create_loggable_refresh_token(grpc_auth_refresh_token *token) {
if (strcmp(token->type, GRPC_AUTH_JSON_TYPE_INVALID) == 0) {
return gpr_strdup("<Invalid json token>");
}
char *loggable_token = NULL;
gpr_asprintf(&loggable_token,
"{\n type: %s\n client_id: %s\n client_secret: "
"<redacted>\n refresh_token: <redacted>\n}",
token->type, token->client_id);
return loggable_token;
}
grpc_call_credentials *grpc_google_refresh_token_credentials_create(
const char *json_refresh_token, void *reserved) {
GRPC_API_TRACE(
"grpc_refresh_token_credentials_create(json_refresh_token=%s, "
"reserved=%p)",
2, (json_refresh_token, reserved));
grpc_auth_refresh_token token =
grpc_auth_refresh_token_create_from_string(json_refresh_token);
if (grpc_api_trace) {
char *loggable_token = create_loggable_refresh_token(&token);
gpr_log(GPR_INFO,
"grpc_refresh_token_credentials_create(json_refresh_token=%s, "
"reserved=%p)",
loggable_token, reserved);
gpr_free(loggable_token);
}
GPR_ASSERT(reserved == NULL);
return grpc_refresh_token_credentials_create_from_auth_refresh_token(
grpc_auth_refresh_token_create_from_string(json_refresh_token));
return grpc_refresh_token_credentials_create_from_auth_refresh_token(token);
}
//
@ -430,9 +447,9 @@ grpc_call_credentials *grpc_access_token_credentials_create(
gpr_malloc(sizeof(grpc_access_token_credentials));
char *token_md_value;
GRPC_API_TRACE(
"grpc_access_token_credentials_create(access_token=%s, "
"grpc_access_token_credentials_create(access_token=<redacted>, "
"reserved=%p)",
2, (access_token, reserved));
1, (reserved));
GPR_ASSERT(reserved == NULL);
memset(c, 0, sizeof(grpc_access_token_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;

@ -104,6 +104,8 @@ static void plugin_md_request_metadata_ready(void *request,
grpc_slice_unref(md_array[i].value);
}
gpr_free(md_array);
} else if (num_md == 0) {
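/* The plugin legitimately produced zero metadata entries; still invoke the
   callback with an empty set so the call proceeds instead of hanging. */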
r->cb(&exec_ctx, r->user_data, NULL, 0, GRPC_CREDENTIALS_OK, NULL);
}
}
gpr_free(r);

@ -125,7 +125,7 @@ static void security_handshake_done(grpc_exec_ctx *exec_ctx,
h->auth_context);
} else {
const char *msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Security handshake failed: %s", msg);
gpr_log(GPR_DEBUG, "Security handshake failed: %s", msg);
grpc_error_free_string(msg);
if (h->secure_endpoint != NULL) {

@ -31,7 +31,12 @@
*
*/
#include "src/core/lib/security/transport/secure_endpoint.h"
/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
using that endpoint. Because of various transitive includes in uv.h,
including windows.h on Windows, uv.h must be included before other system
headers. Therefore, sockaddr.h must always be included first */
#include "src/core/lib/iomgr/sockaddr.h"
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
@ -39,6 +44,7 @@
#include <grpc/support/sync.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/tsi_error.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
@ -366,6 +372,8 @@ static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
return grpc_endpoint_get_peer(ep->wrapped_ep);
}
static int endpoint_get_fd(grpc_endpoint *secure_ep) { return -1; }
static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
return grpc_endpoint_get_workqueue(ep->wrapped_ep);
@ -385,7 +393,8 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_shutdown,
endpoint_destroy,
endpoint_get_resource_user,
endpoint_get_peer};
endpoint_get_peer,
endpoint_get_fd};
grpc_endpoint *grpc_secure_endpoint_create(
struct tsi_frame_protector *protector, grpc_endpoint *transport,

@ -123,6 +123,7 @@ struct grpc_call {
grpc_channel *channel;
grpc_call *parent;
grpc_call *first_child;
gpr_timespec start_time;
/* TODO(ctiller): share with cq if possible? */
gpr_mu mu;
@ -240,6 +241,7 @@ grpc_error *grpc_call_create(const grpc_call_create_args *args,
call->channel = args->channel;
call->cq = args->cq;
call->parent = args->parent_call;
call->start_time = gpr_now(GPR_CLOCK_MONOTONIC);
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
call->is_client = args->server_transport_data == NULL;
@ -312,10 +314,10 @@ grpc_error *grpc_call_create(const grpc_call_create_args *args,
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
grpc_error *error =
grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
call->context, args->server_transport_data, path,
send_deadline, CALL_STACK_FROM_CALL(call));
grpc_error *error = grpc_call_stack_init(
&exec_ctx, channel_stack, 1, destroy_call, call, call->context,
args->server_transport_data, path, call->start_time, send_deadline,
CALL_STACK_FROM_CALL(call));
if (error != GRPC_ERROR_NONE) {
grpc_status_code status;
const char *error_str;
@ -428,6 +430,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
get_final_status(call, set_status_value_directly,
&c->final_info.final_status);
c->final_info.stats.latency =
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c);
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");

@ -43,6 +43,8 @@ int grpc_connectivity_state_trace = 0;
const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
switch (state) {
case GRPC_CHANNEL_INIT:
return "INIT";
case GRPC_CHANNEL_IDLE:
return "IDLE";
case GRPC_CHANNEL_CONNECTING:
@ -159,6 +161,7 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
grpc_error_free_string(error_string);
}
switch (state) {
case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_READY:

@ -728,8 +728,8 @@ void *grpc_mdelem_get_user_data(grpc_mdelem *md, void (*destroy_func)(void *)) {
return result;
}
void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
void *user_data) {
void *grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
void *user_data) {
internal_metadata *im = (internal_metadata *)md;
GPR_ASSERT(!is_mdelem_static(md));
GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
@ -740,11 +740,12 @@ void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
if (destroy_func != NULL) {
destroy_func(user_data);
}
return;
return (void *)gpr_atm_no_barrier_load(&im->user_data);
}
gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
gpr_mu_unlock(&im->mu_user_data);
return user_data;
}
grpc_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {

@ -120,8 +120,8 @@ size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem *elem);
is used as a type tag and is checked during user_data fetch. */
void *grpc_mdelem_get_user_data(grpc_mdelem *md,
void (*if_destroy_func)(void *));
void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
void *user_data);
void *grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
void *user_data);
/* Reference counting */
//#define GRPC_METADATA_REFCOUNT_DEBUG
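The signature change above makes set_user_data first-writer-wins: if user data
was already attached, the value just passed in is destroyed and the previously
stored value is returned, so callers should always continue with the returned
pointer. A hedged caller sketch (my_state, my_state_create and
my_state_destroy are hypothetical):

/* Hypothetical caller racing to attach lazily computed state to an mdelem. */
my_state *state = my_state_create(md);
my_state *winner =
    (my_state *)grpc_mdelem_set_user_data(md, my_state_destroy, state);
if (winner != state) {
  /* Another thread attached its state first; ours was already destroyed. */
}
use_state(winner);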

@ -160,6 +160,11 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
return transport->vtable->get_peer(exec_ctx, transport);
}
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport) {
return transport->vtable->get_endpoint(exec_ctx, transport);
}
void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op,
grpc_error *error) {

@ -37,6 +37,7 @@
#include <stddef.h>
#include "src/core/lib/channel/context.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
@ -295,6 +296,10 @@ void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
grpc_transport *transport);
/* Get the endpoint used by \a transport */
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport);
/* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to
\a on_consumed and then delete the returned transport op */
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed);

@ -74,6 +74,9 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_get_peer */
char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
/* implementation of grpc_transport_get_endpoint */
grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
} grpc_transport_vtable;
/* an instance of a grpc transport */

@ -39,7 +39,7 @@
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/log.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/socket_mutator.h"
namespace grpc {
ChannelArguments::ChannelArguments() {
@ -88,6 +88,24 @@ void ChannelArguments::SetCompressionAlgorithm(
SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, algorithm);
}
void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
if (!mutator) {
return;
}
grpc_arg mutator_arg = grpc_socket_mutator_to_arg(mutator);
bool replaced = false;
for (auto it = args_.begin(); it != args_.end(); ++it) {
if (it->type == mutator_arg.type &&
grpc::string(it->key) == grpc::string(mutator_arg.key)) {
it->value.pointer.vtable->destroy(it->value.pointer.p);
it->value.pointer = mutator_arg.value.pointer;
replaced = true;
}
}
if (!replaced) {
args_.push_back(mutator_arg);
}
}
// Note: a second call to this will add its prefix in front of the result of the first call.
// An example is calling this on a copy of ChannelArguments which already has a
// prefix. The user can build up a prefix string by calling this multiple times,

@ -388,35 +388,29 @@ namespace Grpc.Core.Internal
private void Initialize(CompletionQueueSafeHandle cq)
{
using (Profilers.ForCurrentThread().NewScope("AsyncCall.Initialize"))
{
var call = CreateNativeCall(cq);
var call = CreateNativeCall(cq);
details.Channel.AddCallReference(this);
InitializeInternal(call);
RegisterCancellationCallback();
}
details.Channel.AddCallReference(this);
InitializeInternal(call);
RegisterCancellationCallback();
}
private INativeCall CreateNativeCall(CompletionQueueSafeHandle cq)
{
using (Profilers.ForCurrentThread().NewScope("AsyncCall.CreateNativeCall"))
{
if (injectedNativeCall != null)
{
return injectedNativeCall; // allows injecting a mock INativeCall in tests.
}
if (injectedNativeCall != null)
{
return injectedNativeCall; // allows injecting a mock INativeCall in tests.
}
var parentCall = details.Options.PropagationToken != null ? details.Options.PropagationToken.ParentCall : CallSafeHandle.NullInstance;
var parentCall = details.Options.PropagationToken != null ? details.Options.PropagationToken.ParentCall : CallSafeHandle.NullInstance;
var credentials = details.Options.Credentials;
using (var nativeCredentials = credentials != null ? credentials.ToNativeCredentials() : null)
{
var result = details.Channel.Handle.CreateCall(
parentCall, ContextPropagationToken.DefaultMask, cq,
details.Method, details.Host, Timespec.FromDateTime(details.Options.Deadline.Value), nativeCredentials);
return result;
}
var credentials = details.Options.Credentials;
using (var nativeCredentials = credentials != null ? credentials.ToNativeCredentials() : null)
{
var result = details.Channel.Handle.CreateCall(
parentCall, ContextPropagationToken.DefaultMask, cq,
details.Method, details.Host, Timespec.FromDateTime(details.Options.Deadline.Value), nativeCredentials);
return result;
}
}
@ -456,47 +450,44 @@ namespace Grpc.Core.Internal
// NOTE: because this event is a result of batch containing GRPC_OP_RECV_STATUS_ON_CLIENT,
// success will be always set to true.
using (Profilers.ForCurrentThread().NewScope("AsyncCall.HandleUnaryResponse"))
TaskCompletionSource<object> delayedStreamingWriteTcs = null;
TResponse msg = default(TResponse);
var deserializeException = TryDeserialize(receivedMessage, out msg);
lock (myLock)
{
TaskCompletionSource<object> delayedStreamingWriteTcs = null;
TResponse msg = default(TResponse);
var deserializeException = TryDeserialize(receivedMessage, out msg);
finished = true;
lock (myLock)
if (deserializeException != null && receivedStatus.Status.StatusCode == StatusCode.OK)
{
finished = true;
if (deserializeException != null && receivedStatus.Status.StatusCode == StatusCode.OK)
{
receivedStatus = new ClientSideStatus(DeserializeResponseFailureStatus, receivedStatus.Trailers);
}
finishedStatus = receivedStatus;
if (isStreamingWriteCompletionDelayed)
{
delayedStreamingWriteTcs = streamingWriteTcs;
streamingWriteTcs = null;
}
ReleaseResourcesIfPossible();
receivedStatus = new ClientSideStatus(DeserializeResponseFailureStatus, receivedStatus.Trailers);
}
finishedStatus = receivedStatus;
responseHeadersTcs.SetResult(responseHeaders);
if (delayedStreamingWriteTcs != null)
if (isStreamingWriteCompletionDelayed)
{
delayedStreamingWriteTcs.SetException(GetRpcExceptionClientOnly());
delayedStreamingWriteTcs = streamingWriteTcs;
streamingWriteTcs = null;
}
var status = receivedStatus.Status;
if (status.StatusCode != StatusCode.OK)
{
unaryResponseTcs.SetException(new RpcException(status));
return;
}
ReleaseResourcesIfPossible();
}
responseHeadersTcs.SetResult(responseHeaders);
unaryResponseTcs.SetResult(msg);
if (delayedStreamingWriteTcs != null)
{
delayedStreamingWriteTcs.SetException(GetRpcExceptionClientOnly());
}
var status = receivedStatus.Status;
if (status.StatusCode != StatusCode.OK)
{
unaryResponseTcs.SetException(new RpcException(status));
return;
}
unaryResponseTcs.SetResult(msg);
}
/// <summary>

@ -181,19 +181,16 @@ namespace Grpc.Core.Internal
/// </summary>
protected bool ReleaseResourcesIfPossible()
{
using (Profilers.ForCurrentThread().NewScope("AsyncCallBase.ReleaseResourcesIfPossible"))
if (!disposed && call != null)
{
if (!disposed && call != null)
bool noMoreSendCompletions = streamingWriteTcs == null && (halfcloseRequested || cancelRequested || finished);
if (noMoreSendCompletions && readingDone && finished)
{
bool noMoreSendCompletions = streamingWriteTcs == null && (halfcloseRequested || cancelRequested || finished);
if (noMoreSendCompletions && readingDone && finished)
{
ReleaseResources();
return true;
}
ReleaseResources();
return true;
}
return false;
}
return false;
}
protected abstract bool IsClient
@ -229,28 +226,20 @@ namespace Grpc.Core.Internal
protected byte[] UnsafeSerialize(TWrite msg)
{
using (Profilers.ForCurrentThread().NewScope("AsyncCallBase.UnsafeSerialize"))
{
return serializer(msg);
}
return serializer(msg);
}
protected Exception TryDeserialize(byte[] payload, out TRead msg)
{
using (Profilers.ForCurrentThread().NewScope("AsyncCallBase.TryDeserialize"))
try
{
try
{
msg = deserializer(payload);
return null;
}
catch (Exception e)
{
msg = default(TRead);
return e;
}
msg = deserializer(payload);
return null;
}
catch (Exception e)
{
msg = default(TRead);
return e;
}
}

@ -76,11 +76,8 @@ namespace Grpc.Core.Internal
public void StartUnary(BatchContextSafeHandle ctx, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
{
using (Profilers.ForCurrentThread().NewScope("CallSafeHandle.StartUnary"))
{
Native.grpcsharp_call_start_unary(this, ctx, payload, new UIntPtr((ulong)payload.Length), metadataArray, writeFlags)
.CheckOk();
}
Native.grpcsharp_call_start_unary(this, ctx, payload, new UIntPtr((ulong)payload.Length), metadataArray, writeFlags)
.CheckOk();
}
public void StartClientStreaming(UnaryResponseClientHandler callback, MetadataArraySafeHandle metadataArray)

@ -65,16 +65,13 @@ namespace Grpc.Core.Internal
public CallSafeHandle CreateCall(CallSafeHandle parentCall, ContextPropagationFlags propagationMask, CompletionQueueSafeHandle cq, string method, string host, Timespec deadline, CallCredentialsSafeHandle credentials)
{
using (Profilers.ForCurrentThread().NewScope("ChannelSafeHandle.CreateCall"))
var result = Native.grpcsharp_channel_create_call(this, parentCall, propagationMask, cq, method, host, deadline);
if (credentials != null)
{
var result = Native.grpcsharp_channel_create_call(this, parentCall, propagationMask, cq, method, host, deadline);
if (credentials != null)
{
result.SetCredentials(credentials);
}
result.Initialize(cq);
return result;
result.SetCredentials(credentials);
}
result.Initialize(cq);
return result;
}
public ChannelState CheckConnectivityState(bool tryToConnect)

@ -70,10 +70,7 @@ namespace Grpc.Core.Internal
public CompletionQueueEvent Pluck(IntPtr tag)
{
using (Profilers.ForCurrentThread().NewScope("CompletionQueueSafeHandle.Pluck"))
{
return Native.grpcsharp_completion_queue_pluck(this, tag);
}
return Native.grpcsharp_completion_queue_pluck(this, tag);
}
/// <summary>

@ -37,6 +37,7 @@ using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core.Logging;
using Grpc.Core.Profiling;
using Grpc.Core.Utils;
namespace Grpc.Core.Internal
@ -54,6 +55,8 @@ namespace Grpc.Core.Internal
readonly int poolSize;
readonly int completionQueueCount;
readonly List<BasicProfiler> threadProfilers = new List<BasicProfiler>(); // profilers assigned to threadpool threads
bool stopRequested;
IReadOnlyCollection<CompletionQueueSafeHandle> completionQueues;
@ -82,7 +85,8 @@ namespace Grpc.Core.Internal
for (int i = 0; i < poolSize; i++)
{
threads.Add(CreateAndStartThread(i));
var optionalProfiler = i < threadProfilers.Count ? threadProfilers[i] : null;
threads.Add(CreateAndStartThread(i, optionalProfiler));
}
}
}
@ -111,6 +115,11 @@ namespace Grpc.Core.Internal
{
cq.Dispose();
}
for (int i = 0; i < threadProfilers.Count; i++)
{
threadProfilers[i].Dump(string.Format("grpc_trace_thread_{0}.txt", i));
}
});
}
@ -137,12 +146,12 @@ namespace Grpc.Core.Internal
}
}
private Thread CreateAndStartThread(int threadIndex)
private Thread CreateAndStartThread(int threadIndex, IProfiler optionalProfiler)
{
var cqIndex = threadIndex % completionQueues.Count;
var cq = completionQueues.ElementAt(cqIndex);
var thread = new Thread(new ThreadStart(() => RunHandlerLoop(cq)));
var thread = new Thread(new ThreadStart(() => RunHandlerLoop(cq, optionalProfiler)));
thread.IsBackground = true;
thread.Name = string.Format("grpc {0} (cq {1})", threadIndex, cqIndex);
thread.Start();
@ -153,8 +162,13 @@ namespace Grpc.Core.Internal
/// <summary>
/// Body of the polling thread.
/// </summary>
private void RunHandlerLoop(CompletionQueueSafeHandle cq)
private void RunHandlerLoop(CompletionQueueSafeHandle cq, IProfiler optionalProfiler)
{
if (optionalProfiler != null)
{
Profilers.SetForCurrentThread(optionalProfiler);
}
CompletionQueueEvent ev;
do
{

@ -48,22 +48,19 @@ namespace Grpc.Core.Internal
public static MetadataArraySafeHandle Create(Metadata metadata)
{
using (Profilers.ForCurrentThread().NewScope("MetadataArraySafeHandle.Create"))
if (metadata.Count == 0)
{
if (metadata.Count == 0)
{
return new MetadataArraySafeHandle();
}
return new MetadataArraySafeHandle();
}
// TODO(jtattermusch): we might wanna check that the metadata is readonly
var metadataArray = Native.grpcsharp_metadata_array_create(new UIntPtr((ulong)metadata.Count));
for (int i = 0; i < metadata.Count; i++)
{
var valueBytes = metadata[i].GetSerializedValueUnsafe();
Native.grpcsharp_metadata_array_add(metadataArray, metadata[i].Key, valueBytes, new UIntPtr((ulong)valueBytes.Length));
}
return metadataArray;
// TODO(jtattermusch): we might wanna check that the metadata is readonly
var metadataArray = Native.grpcsharp_metadata_array_create(new UIntPtr((ulong)metadata.Count));
for (int i = 0; i < metadata.Count; i++)
{
var valueBytes = metadata[i].GetSerializedValueUnsafe();
Native.grpcsharp_metadata_array_add(metadataArray, metadata[i].Key, valueBytes, new UIntPtr((ulong)valueBytes.Length));
}
return metadataArray;
}
/// <summary>

@ -80,7 +80,7 @@ namespace Grpc.Core.Profiling
ProfilerEntry[] entries;
int count;
public BasicProfiler() : this(1024*1024)
public BasicProfiler() : this(20*1024*1024)
{
}

@ -103,6 +103,34 @@ namespace Grpc.IntegrationTesting
client.UnaryCall(new SimpleRequest { }, new CallOptions(credentials: callCredentials));
}
[Test]
public void MetadataCredentials_InterceptorLeavesMetadataEmpty()
{
var channelCredentials = ChannelCredentials.Create(TestCredentials.CreateSslCredentials(),
CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => TaskUtils.CompletedTask)));
channel = new Channel(Host, server.Ports.Single().BoundPort, channelCredentials, options);
client = new TestService.TestServiceClient(channel);
var ex = Assert.Throws<RpcException>(() => client.UnaryCall(new SimpleRequest { }));
// StatusCode.Unknown because the server-side handler throws an exception when it does not receive the authorization header.
Assert.AreEqual(StatusCode.Unknown, ex.Status.StatusCode);
}
[Test]
public void MetadataCredentials_InterceptorThrows()
{
var callCredentials = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) =>
{
throw new Exception("Auth interceptor throws");
}));
var channelCredentials = ChannelCredentials.Create(TestCredentials.CreateSslCredentials(), callCredentials);
channel = new Channel(Host, server.Ports.Single().BoundPort, channelCredentials, options);
client = new TestService.TestServiceClient(channel);
var ex = Assert.Throws<RpcException>(() => client.UnaryCall(new SimpleRequest { }));
Assert.AreEqual(StatusCode.Unauthenticated, ex.Status.StatusCode);
}
private class FakeTestService : TestService.TestServiceBase
{
public override Task<SimpleResponse> UnaryCall(SimpleRequest request, ServerCallContext context)

@ -991,7 +991,11 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_metadata_credentials_notify_from_plugin(
grpc_credentials_plugin_metadata_cb cb,
void *user_data, grpc_metadata_array *metadata,
grpc_status_code status, const char *error_details) {
cb(user_data, metadata->metadata, metadata->count, status, error_details);
if (metadata) {
cb(user_data, metadata->metadata, metadata->count, status, error_details);
} else {
cb(user_data, NULL, 0, status, error_details);
}
}
typedef void(GPR_CALLTYPE *grpcsharp_metadata_interceptor_func)(

@ -141,8 +141,14 @@ exports.getProtobufServiceAttrs = function getProtobufServiceAttrs(service,
binaryAsBase64 = options.binaryAsBase64;
longsAsStrings = options.longsAsStrings;
}
return _.fromPairs(_.map(service.children, function(method) {
return [_.camelCase(method.name), {
/* This slightly awkward construction is used to make sure we only use
lodash@3.10.1-compatible functions. A previous version used
_.fromPairs, which would be cleaner, but was introduced in lodash
version 4 */
return _.zipObject(_.map(service.children, function(method) {
return _.camelCase(method.name);
}), _.map(service.children, function(method) {
return {
path: prefix + method.name,
requestStream: method.requestStream,
responseStream: method.responseStream,
@ -150,11 +156,11 @@ exports.getProtobufServiceAttrs = function getProtobufServiceAttrs(service,
responseType: method.resolvedResponseType,
requestSerialize: serializeCls(method.resolvedRequestType.build()),
requestDeserialize: deserializeCls(method.resolvedRequestType.build(),
binaryAsBase64, longsAsStrings),
binaryAsBase64, longsAsStrings),
responseSerialize: serializeCls(method.resolvedResponseType.build()),
responseDeserialize: deserializeCls(method.resolvedResponseType.build(),
binaryAsBase64, longsAsStrings)
}];
binaryAsBase64, longsAsStrings)
};
}));
};

@ -99,7 +99,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll_linux.c',
'src/core/lib/iomgr/ev_poll_and_epoll_posix.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/exec_ctx.c',
@ -121,6 +120,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/resolve_address_windows.c',
'src/core/lib/iomgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
'src/core/lib/iomgr/socket_mutator.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
'src/core/lib/iomgr/socket_utils_posix.c',

@ -686,7 +686,7 @@ extern gpr_join_host_port_type gpr_join_host_port_import;
typedef int(*gpr_split_host_port_type)(const char *name, char **host, char **port);
extern gpr_split_host_port_type gpr_split_host_port_import;
#define gpr_split_host_port gpr_split_host_port_import
typedef void(*gpr_log_type)(const char *file, int line, gpr_log_severity severity, const char *format, ...) GPRC_PRINT_FORMAT_CHECK(4, 5);
typedef void(*gpr_log_type)(const char *file, int line, gpr_log_severity severity, const char *format, ...) GPR_PRINT_FORMAT_CHECK(4, 5);
extern gpr_log_type gpr_log_import;
#define gpr_log gpr_log_import
typedef void(*gpr_log_message_type)(const char *file, int line, gpr_log_severity severity, const char *message);
@ -707,7 +707,7 @@ extern gpr_format_message_type gpr_format_message_import;
typedef char *(*gpr_strdup_type)(const char *src);
extern gpr_strdup_type gpr_strdup_import;
#define gpr_strdup gpr_strdup_import
typedef int(*gpr_asprintf_type)(char **strp, const char *format, ...) GPRC_PRINT_FORMAT_CHECK(2, 3);
typedef int(*gpr_asprintf_type)(char **strp, const char *format, ...) GPR_PRINT_FORMAT_CHECK(2, 3);
extern gpr_asprintf_type gpr_asprintf_import;
#define gpr_asprintf gpr_asprintf_import
typedef const char *(*gpr_subprocess_binary_extension_type)();

@ -134,12 +134,26 @@ static void test_compression_algorithm_states(void) {
grpc_channel_args_destroy(ch_args);
}
static void test_set_socket_mutator(void) {
grpc_channel_args *ch_args;
grpc_socket_mutator mutator;
grpc_socket_mutator_init(&mutator, NULL);
ch_args = grpc_channel_args_set_socket_mutator(NULL, &mutator);
GPR_ASSERT(ch_args->num_args == 1);
GPR_ASSERT(strcmp(ch_args->args[0].key, GRPC_ARG_SOCKET_MUTATOR) == 0);
GPR_ASSERT(ch_args->args[0].type == GRPC_ARG_POINTER);
grpc_channel_args_destroy(ch_args);
}
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
test_create();
test_set_compression_algorithm();
test_compression_algorithm_states();
test_set_socket_mutator();
grpc_shutdown();
return 0;
}

@ -137,9 +137,10 @@ static void test_create_channel_stack(void) {
GPR_ASSERT(*channel_data == 0);
call_stack = gpr_malloc(channel_stack->call_stack_size);
grpc_error *error = grpc_call_stack_init(
&exec_ctx, channel_stack, 1, free_call, call_stack, NULL, NULL, path,
gpr_inf_future(GPR_CLOCK_MONOTONIC), call_stack);
grpc_error *error =
grpc_call_stack_init(&exec_ctx, channel_stack, 1, free_call, call_stack,
NULL, NULL, path, gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC), call_stack);
GPR_ASSERT(error == GRPC_ERROR_NONE);
GPR_ASSERT(call_stack->count == 1);
call_elem = grpc_call_stack_element(call_stack, 0);

@ -63,8 +63,14 @@ typedef struct servers_fixture {
grpc_metadata_array *request_metadata_recv;
} servers_fixture;
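/* Records, for each request iteration, which server answered (-1 on failure)
   and the client channel connectivity state observed at that point. */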
typedef struct request_sequences {
size_t n;
int *connections;
int *connectivity_states;
} request_sequences;
typedef void (*verifier_fn)(const servers_fixture *, grpc_channel *,
const int *, const size_t);
const request_sequences *, const size_t);
typedef struct test_spec {
size_t num_iters;
@ -228,9 +234,24 @@ static void teardown_servers(servers_fixture *f) {
gpr_free(f);
}
static request_sequences request_sequences_create(size_t n) {
request_sequences res;
res.n = n;
res.connections = gpr_malloc(sizeof(*res.connections) * n);
res.connectivity_states = gpr_malloc(sizeof(*res.connectivity_states) * n);
return res;
}
static void request_sequences_destroy(const request_sequences *rseqs) {
gpr_free(rseqs->connections);
gpr_free(rseqs->connectivity_states);
}
/** Returns the observed request sequences, which must be released with request_sequences_destroy */
static int *perform_request(servers_fixture *f, grpc_channel *client,
request_data *rdata, const test_spec *spec) {
static request_sequences perform_request(servers_fixture *f,
grpc_channel *client,
request_data *rdata,
const test_spec *spec) {
grpc_call *c;
int s_idx;
int *s_valid;
@ -240,11 +261,10 @@ static int *perform_request(servers_fixture *f, grpc_channel *client,
size_t i, iter_num;
grpc_event ev;
int read_tag;
int *connection_sequence;
int completed_client;
const request_sequences sequences = request_sequences_create(spec->num_iters);
s_valid = gpr_malloc(sizeof(int) * f->num_servers);
connection_sequence = gpr_malloc(sizeof(int) * spec->num_iters);
for (iter_num = 0; iter_num < spec->num_iters; iter_num++) {
cq_verifier *cqv = cq_verifier_create(f->cq);
@ -261,7 +281,7 @@ static int *perform_request(servers_fixture *f, grpc_channel *client,
}
}
connection_sequence[iter_num] = -1;
sequences.connections[iter_num] = -1;
grpc_metadata_array_init(&rdata->initial_metadata_recv);
grpc_metadata_array_init(&rdata->trailing_metadata_recv);
@ -306,12 +326,14 @@ static int *perform_request(servers_fixture *f, grpc_channel *client,
grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL));
s_idx = -1;
while (
(ev = grpc_completion_queue_next(
f->cq, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10 * RETRY_TIMEOUT), NULL))
.type != GRPC_QUEUE_TIMEOUT) {
while ((ev = grpc_completion_queue_next(
f->cq, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(RETRY_TIMEOUT), NULL))
.type != GRPC_QUEUE_TIMEOUT) {
GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
read_tag = ((int)(intptr_t)ev.tag);
const grpc_connectivity_state conn_state =
grpc_channel_check_connectivity_state(client, 0);
sequences.connectivity_states[iter_num] = conn_state;
gpr_log(GPR_DEBUG, "EVENT: success:%d, type:%d, tag:%d iter:%" PRIuPTR,
ev.success, ev.type, read_tag, iter_num);
if (ev.success && read_tag >= 1000) {
@ -319,7 +341,7 @@ static int *perform_request(servers_fixture *f, grpc_channel *client,
/* only server notifications for non-shutdown events */
s_idx = read_tag - 1000;
s_valid[s_idx] = 1;
connection_sequence[iter_num] = s_idx;
sequences.connections[iter_num] = s_idx;
break;
} else if (read_tag == 1) {
gpr_log(GPR_DEBUG, "client timed out");
@ -382,10 +404,9 @@ static int *perform_request(servers_fixture *f, grpc_channel *client,
}
}
GPR_ASSERT(
grpc_completion_queue_next(
f->cq, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(2 * RETRY_TIMEOUT), NULL)
.type == GRPC_QUEUE_TIMEOUT);
GPR_ASSERT(grpc_completion_queue_next(
f->cq, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(RETRY_TIMEOUT), NULL)
.type == GRPC_QUEUE_TIMEOUT);
grpc_metadata_array_destroy(&rdata->initial_metadata_recv);
grpc_metadata_array_destroy(&rdata->trailing_metadata_recv);
@ -402,7 +423,7 @@ static int *perform_request(servers_fixture *f, grpc_channel *client,
gpr_free(s_valid);
return connection_sequence;
return sequences;
}
static grpc_call **perform_multirequest(servers_fixture *f,
@ -442,62 +463,10 @@ static grpc_call **perform_multirequest(servers_fixture *f,
return calls;
}
static void assert_channel_connectivity(grpc_channel *ch,
size_t num_accepted_conn_states,
int accepted_conn_state, ...) {
size_t i;
grpc_channel_stack *client_stack;
grpc_channel_element *client_channel_filter;
grpc_connectivity_state actual_conn_state;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
va_list ap;
client_stack = grpc_channel_get_channel_stack(ch);
client_channel_filter = grpc_channel_stack_last_element(client_stack);
actual_conn_state = grpc_client_channel_check_connectivity_state(
&exec_ctx, client_channel_filter, 0 /* don't try to connect */);
grpc_exec_ctx_finish(&exec_ctx);
va_start(ap, accepted_conn_state);
for (i = 0; i < num_accepted_conn_states; i++) {
if ((int)actual_conn_state == accepted_conn_state) {
break;
}
accepted_conn_state = va_arg(ap, grpc_connectivity_state);
}
va_end(ap);
if (i == num_accepted_conn_states) {
char **accepted_strs =
gpr_malloc(sizeof(char *) * num_accepted_conn_states);
char *accepted_str_joined;
va_start(ap, accepted_conn_state);
for (i = 0; i < num_accepted_conn_states; i++) {
GPR_ASSERT(gpr_asprintf(&accepted_strs[i], "%d", accepted_conn_state) >
0);
accepted_conn_state = va_arg(ap, grpc_connectivity_state);
}
va_end(ap);
accepted_str_joined = gpr_strjoin_sep((const char **)accepted_strs,
num_accepted_conn_states, ", ", NULL);
gpr_log(
GPR_ERROR,
"Channel connectivity assertion failed: expected <one of [%s]>, got %d",
accepted_str_joined, actual_conn_state);
for (i = 0; i < num_accepted_conn_states; i++) {
gpr_free(accepted_strs[i]);
}
gpr_free(accepted_strs);
gpr_free(accepted_str_joined);
abort();
}
}
void run_spec(const test_spec *spec) {
grpc_channel *client;
char *client_hostport;
char *servers_hostports_str;
int *actual_connection_sequence;
request_data rdata;
servers_fixture *f;
grpc_channel_args args;
@ -525,14 +494,14 @@ void run_spec(const test_spec *spec) {
gpr_log(GPR_INFO, "Testing '%s' with servers=%s client=%s", spec->description,
servers_hostports_str, client_hostport);
actual_connection_sequence = perform_request(f, client, &rdata, spec);
const request_sequences sequences = perform_request(f, client, &rdata, spec);
spec->verifier(f, client, actual_connection_sequence, spec->num_iters);
spec->verifier(f, client, &sequences, spec->num_iters);
gpr_free(client_hostport);
gpr_free(servers_hostports_str);
gpr_free(actual_connection_sequence);
gpr_free(rdata.call_details);
request_sequences_destroy(&sequences);
grpc_channel_destroy(client); /* calls the LB's shutdown func */
teardown_servers(f);
@ -644,7 +613,7 @@ static void test_pending_calls(size_t concurrent_calls) {
static void test_get_channel_info() {
grpc_channel *channel =
grpc_insecure_channel_create("ipv4:127.0.0.1:1234", NULL, NULL);
grpc_insecure_channel_create("ipv4:127.0.0.1:1234", &args, NULL);
// Ensures that resolver returns.
grpc_channel_check_connectivity_state(channel, true /* try_to_connect */);
// First, request no fields. This is a no-op.
@ -699,29 +668,43 @@ static void print_failed_expectations(const int *expected_connection_sequence,
static void verify_vanilla_round_robin(const servers_fixture *f,
grpc_channel *client,
const int *actual_connection_sequence,
const request_sequences *sequences,
const size_t num_iters) {
int *expected_connection_sequence;
size_t i;
const size_t expected_seq_length = f->num_servers;
/* verify conn. seq. expectation */
/* get the first sequence of "num_servers" elements */
expected_connection_sequence = gpr_malloc(sizeof(int) * expected_seq_length);
memcpy(expected_connection_sequence, actual_connection_sequence,
int *expected_connection_sequence =
gpr_malloc(sizeof(int) * expected_seq_length);
memcpy(expected_connection_sequence, sequences->connections,
sizeof(int) * expected_seq_length);
for (i = 0; i < num_iters; i++) {
const int actual = actual_connection_sequence[i];
for (size_t i = 0; i < num_iters; i++) {
const int actual = sequences->connections[i];
const int expected = expected_connection_sequence[i % expected_seq_length];
if (actual != expected) {
print_failed_expectations(expected_connection_sequence,
actual_connection_sequence, expected_seq_length,
num_iters);
gpr_log(
GPR_ERROR,
"CONNECTION SEQUENCE FAILURE: expected %d, got %d at iteration #%d",
expected, actual, (int)i);
abort();
}
}
/* All servers are available, therefore all client subchannels are READY, even
* when we only need one for the client channel state to be READY */
for (size_t i = 0; i < sequences->n; i++) {
const grpc_connectivity_state actual = sequences->connectivity_states[i];
const grpc_connectivity_state expected = GRPC_CHANNEL_READY;
if (actual != expected) {
gpr_log(GPR_ERROR,
"CONNECTIVITY STATUS SEQUENCE FAILURE: expected '%s', got '%s' "
"at iteration #%d",
grpc_connectivity_state_name(expected),
grpc_connectivity_state_name(actual), (int)i);
abort();
}
}
assert_channel_connectivity(client, 1, GRPC_CHANNEL_READY);
gpr_free(expected_connection_sequence);
}
@ -730,7 +713,7 @@ static void verify_vanilla_round_robin(const servers_fixture *f,
* given in "f") are killed */
static void verify_vanishing_floor_round_robin(
const servers_fixture *f, grpc_channel *client,
const int *actual_connection_sequence, const size_t num_iters) {
const request_sequences *sequences, const size_t num_iters) {
int *expected_connection_sequence;
const size_t expected_seq_length = 2;
size_t i;
@ -738,57 +721,83 @@ static void verify_vanishing_floor_round_robin(
/* verify conn. seq. expectation */
/* copy the first full sequence (without -1s) */
expected_connection_sequence = gpr_malloc(sizeof(int) * expected_seq_length);
memcpy(expected_connection_sequence, actual_connection_sequence + 2,
memcpy(expected_connection_sequence, sequences->connections + 2,
expected_seq_length * sizeof(int));
/* first two elements of the sequence should be [0 (1st server), -1 (failure)]
*/
GPR_ASSERT(actual_connection_sequence[0] == 0);
GPR_ASSERT(actual_connection_sequence[1] == -1);
GPR_ASSERT(sequences->connections[0] == 0);
GPR_ASSERT(sequences->connections[1] == -1);
/* the next two elements must be [3, 0], repeating from that point: the 3 is
* brought forth by servers 1 and 2 disappearing after the initial pick of 0 */
GPR_ASSERT(actual_connection_sequence[2] == 3);
GPR_ASSERT(actual_connection_sequence[3] == 0);
GPR_ASSERT(sequences->connections[2] == 3);
GPR_ASSERT(sequences->connections[3] == 0);
/* make sure that the expectation obliges */
for (i = 2; i < num_iters; i++) {
const int actual = actual_connection_sequence[i];
const int actual = sequences->connections[i];
const int expected = expected_connection_sequence[i % expected_seq_length];
if (actual != expected) {
print_failed_expectations(expected_connection_sequence,
actual_connection_sequence, expected_seq_length,
sequences->connections, expected_seq_length,
num_iters);
abort();
}
}
/* There's always at least one subchannel READY (connected), therefore the
* overall state of the client channel is READY at all times. */
for (i = 0; i < sequences->n; i++) {
const grpc_connectivity_state actual = sequences->connectivity_states[i];
const grpc_connectivity_state expected = GRPC_CHANNEL_READY;
if (actual != expected) {
gpr_log(GPR_ERROR,
"CONNECTIVITY STATUS SEQUENCE FAILURE: expected '%s', got '%s' "
"at iteration #%d",
grpc_connectivity_state_name(expected),
grpc_connectivity_state_name(actual), (int)i);
abort();
}
}
gpr_free(expected_connection_sequence);
}
static void verify_total_carnage_round_robin(
const servers_fixture *f, grpc_channel *client,
const int *actual_connection_sequence, const size_t num_iters) {
size_t i;
for (i = 0; i < num_iters; i++) {
const int actual = actual_connection_sequence[i];
static void verify_total_carnage_round_robin(const servers_fixture *f,
grpc_channel *client,
const request_sequences *sequences,
const size_t num_iters) {
for (size_t i = 0; i < num_iters; i++) {
const int actual = sequences->connections[i];
const int expected = -1;
if (actual != expected) {
gpr_log(GPR_ERROR, "FAILURE: expected %d, actual %d at iter %" PRIuPTR,
expected, actual, i);
gpr_log(
GPR_ERROR,
"CONNECTION SEQUENCE FAILURE: expected %d, got %d at iteration #%d",
expected, actual, (int)i);
abort();
}
}
/* even though we know all the servers are dead, the client is still
* retrying, believing it's in a transient failure situation */
assert_channel_connectivity(client, 2, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_CHANNEL_CONNECTING);
/* no server is ever available. The persistent state is TRANSIENT_FAILURE */
for (size_t i = 0; i < sequences->n; i++) {
const grpc_connectivity_state actual = sequences->connectivity_states[i];
const grpc_connectivity_state expected = GRPC_CHANNEL_TRANSIENT_FAILURE;
if (actual != expected) {
gpr_log(GPR_ERROR,
"CONNECTIVITY STATUS SEQUENCE FAILURE: expected '%s', got '%s' "
"at iteration #%d",
grpc_connectivity_state_name(expected),
grpc_connectivity_state_name(actual), (int)i);
abort();
}
}
}
static void verify_partial_carnage_round_robin(
const servers_fixture *f, grpc_channel *client,
const int *actual_connection_sequence, const size_t num_iters) {
const request_sequences *sequences, const size_t num_iters) {
int *expected_connection_sequence;
size_t i;
const size_t expected_seq_length = f->num_servers;
@ -796,15 +805,15 @@ static void verify_partial_carnage_round_robin(
/* verify conn. seq. expectation */
/* get the first sequence of "num_servers" elements */
expected_connection_sequence = gpr_malloc(sizeof(int) * expected_seq_length);
memcpy(expected_connection_sequence, actual_connection_sequence,
memcpy(expected_connection_sequence, sequences->connections,
sizeof(int) * expected_seq_length);
for (i = 0; i < num_iters / 2; i++) {
const int actual = actual_connection_sequence[i];
const int actual = sequences->connections[i];
const int expected = expected_connection_sequence[i % expected_seq_length];
if (actual != expected) {
print_failed_expectations(expected_connection_sequence,
actual_connection_sequence, expected_seq_length,
sequences->connections, expected_seq_length,
num_iters);
abort();
}
@ -812,13 +821,34 @@ static void verify_partial_carnage_round_robin(
/* second half of the iterations go without response */
for (; i < num_iters; i++) {
GPR_ASSERT(actual_connection_sequence[i] == -1);
GPR_ASSERT(sequences->connections[i] == -1);
}
/* We can assert that the first client channel state should be READY, when all
* servers were available; and that the last one should be TRANSIENT_FAILURE,
* after all servers are gone. */
grpc_connectivity_state actual = sequences->connectivity_states[0];
grpc_connectivity_state expected = GRPC_CHANNEL_READY;
if (actual != expected) {
gpr_log(GPR_ERROR,
"CONNECTIVITY STATUS SEQUENCE FAILURE: expected '%s', got '%s' "
"at iteration #%d",
grpc_connectivity_state_name(expected),
grpc_connectivity_state_name(actual), 0);
abort();
}
actual = sequences->connectivity_states[num_iters - 1];
expected = GRPC_CHANNEL_TRANSIENT_FAILURE;
if (actual != expected) {
gpr_log(GPR_ERROR,
"CONNECTIVITY STATUS SEQUENCE FAILURE: expected '%s', got '%s' "
"at iteration #%d",
grpc_connectivity_state_name(expected),
grpc_connectivity_state_name(actual), (int)num_iters - 1);
abort();
}
/* even though we know all the servers are dead, the client is still
* retrying, believing it's in a transient failure situation */
assert_channel_connectivity(client, 2, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_CHANNEL_CONNECTING);
gpr_free(expected_connection_sequence);
}
@ -841,15 +871,14 @@ static void dump_array(const char *desc, const int *data, const size_t count) {
static void verify_rebirth_round_robin(const servers_fixture *f,
grpc_channel *client,
const int *actual_connection_sequence,
const request_sequences *sequences,
const size_t num_iters) {
int *expected_connection_sequence;
size_t i, j, unique_seq_last_idx, unique_seq_first_idx;
const size_t expected_seq_length = f->num_servers;
int *seen_elements;
dump_array("actual_connection_sequence", actual_connection_sequence,
num_iters);
dump_array("actual_connection_sequence", sequences->connections, num_iters);
/* verify conn. seq. expectation */
/* get the first unique run of length "num_servers". */
@ -860,13 +889,13 @@ static void verify_rebirth_round_robin(const servers_fixture *f,
memset(seen_elements, 0, sizeof(int) * expected_seq_length);
for (i = 0; i < num_iters; i++) {
if (actual_connection_sequence[i] < 0 ||
seen_elements[actual_connection_sequence[i]] != 0) {
if (sequences->connections[i] < 0 ||
seen_elements[sequences->connections[i]] != 0) {
/* if anything breaks the uniqueness of the run, back to square zero */
memset(seen_elements, 0, sizeof(int) * expected_seq_length);
continue;
}
seen_elements[actual_connection_sequence[i]] = 1;
seen_elements[sequences->connections[i]] = 1;
for (j = 0; j < expected_seq_length; j++) {
if (seen_elements[j] == 0) break;
}
@ -885,30 +914,72 @@ static void verify_rebirth_round_robin(const servers_fixture *f,
unique_seq_first_idx = (unique_seq_last_idx - expected_seq_length + 1);
memcpy(expected_connection_sequence,
actual_connection_sequence + unique_seq_first_idx,
sequences->connections + unique_seq_first_idx,
sizeof(int) * expected_seq_length);
/* first iteration succeeds */
GPR_ASSERT(actual_connection_sequence[0] != -1);
GPR_ASSERT(sequences->connections[0] != -1);
/* then we fail for a while... */
GPR_ASSERT(actual_connection_sequence[1] == -1);
GPR_ASSERT(sequences->connections[1] == -1);
/* ... but should be up at "unique_seq_first_idx" */
GPR_ASSERT(actual_connection_sequence[unique_seq_first_idx] != -1);
GPR_ASSERT(sequences->connections[unique_seq_first_idx] != -1);
for (j = 0, i = unique_seq_first_idx; i < num_iters; i++) {
const int actual = actual_connection_sequence[i];
const int actual = sequences->connections[i];
const int expected =
expected_connection_sequence[j++ % expected_seq_length];
if (actual != expected) {
print_failed_expectations(expected_connection_sequence,
actual_connection_sequence, expected_seq_length,
sequences->connections, expected_seq_length,
num_iters);
abort();
}
}
/* things are fine once the servers are brought back up */
assert_channel_connectivity(client, 1, GRPC_CHANNEL_READY);
/* We can assert that the first client channel state should be READY, since
* all servers were available at that point; the same holds for the last one,
* once the servers are brought back up. Somewhere in the middle there must be
* at least one TRANSIENT_FAILURE */
grpc_connectivity_state actual = sequences->connectivity_states[0];
grpc_connectivity_state expected = GRPC_CHANNEL_READY;
if (actual != expected) {
gpr_log(GPR_ERROR,
"CONNECTIVITY STATUS SEQUENCE FAILURE: expected '%s', got '%s' "
"at iteration #%d",
grpc_connectivity_state_name(expected),
grpc_connectivity_state_name(actual), 0);
abort();
}
actual = sequences->connectivity_states[num_iters - 1];
expected = GRPC_CHANNEL_READY;
if (actual != expected) {
gpr_log(GPR_ERROR,
"CONNECTIVITY STATUS SEQUENCE FAILURE: expected '%s', got '%s' "
"at iteration #%d",
grpc_connectivity_state_name(expected),
grpc_connectivity_state_name(actual), (int)num_iters - 1);
abort();
}
bool found_failure_status = false;
for (i = 1; i < sequences->n - 1; i++) {
if (sequences->connectivity_states[i] == GRPC_CHANNEL_TRANSIENT_FAILURE) {
found_failure_status = true;
break;
}
}
if (!found_failure_status) {
gpr_log(
GPR_ERROR,
"CONNECTIVITY STATUS SEQUENCE FAILURE: "
"GRPC_CHANNEL_TRANSIENT_FAILURE status not found. Got the following "
"instead:");
for (i = 0; i < num_iters; i++) {
gpr_log(GPR_ERROR, "[%d]: %s", (int)i,
grpc_connectivity_state_name(sequences->connectivity_states[i]));
}
}
gpr_free(expected_connection_sequence);
gpr_free(seen_elements);
}
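
Both rewritten verifiers consume a single request_sequences bundle instead of a bare int array. Judging from the fields used above (connections, connectivity_states, n), the struct and its lifecycle helpers plausibly look like the following sketch; the helper names are assumptions:

typedef struct request_sequences {
  /* server index that handled iteration i, or -1 if the request failed */
  int *connections;
  /* client channel connectivity state sampled at iteration i */
  grpc_connectivity_state *connectivity_states;
  /* number of iterations recorded */
  size_t n;
} request_sequences;

static request_sequences request_sequences_create(size_t n) {
  request_sequences res;
  res.n = n;
  res.connections = (int *)gpr_malloc(sizeof(*res.connections) * n);
  res.connectivity_states = (grpc_connectivity_state *)gpr_malloc(
      sizeof(*res.connectivity_states) * n);
  return res;
}

static void request_sequences_destroy(request_sequences *sequences) {
  gpr_free(sequences->connections);
  gpr_free(sequences->connectivity_states);
}
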
@ -949,7 +1020,7 @@ int main(int argc, char **argv) {
* This should knock down the server bound to be selected next */
test_spec_reset(spec);
spec->verifier = verify_vanishing_floor_round_robin;
spec->description = "test_kill_all_server_at_2nd_iteration";
spec->description = "test_kill_middle_servers_at_2nd_iteration";
for (i = 1; i < NUM_SERVERS - 1; i++) {
spec->kill_at[1][i] = 1;
}

@ -75,6 +75,8 @@ extern void filter_call_init_fails(grpc_end2end_test_config config);
extern void filter_call_init_fails_pre_init(void);
extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config);
@ -153,6 +155,7 @@ void grpc_end2end_tests_pre_init(void) {
empty_batch_pre_init();
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_latency_pre_init();
graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init();
hpack_size_pre_init();
@ -207,6 +210,7 @@ void grpc_end2end_tests(int argc, char **argv,
empty_batch(config);
filter_call_init_fails(config);
filter_causes_close(config);
filter_latency(config);
graceful_server_shutdown(config);
high_initial_seqno(config);
hpack_size(config);
@ -304,6 +308,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_causes_close(config);
continue;
}
if (0 == strcmp("filter_latency", argv[i])) {
filter_latency(config);
continue;
}
if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config);
continue;

@ -77,6 +77,8 @@ extern void filter_call_init_fails(grpc_end2end_test_config config);
extern void filter_call_init_fails_pre_init(void);
extern void filter_causes_close(grpc_end2end_test_config config);
extern void filter_causes_close_pre_init(void);
extern void filter_latency(grpc_end2end_test_config config);
extern void filter_latency_pre_init(void);
extern void graceful_server_shutdown(grpc_end2end_test_config config);
extern void graceful_server_shutdown_pre_init(void);
extern void high_initial_seqno(grpc_end2end_test_config config);
@ -156,6 +158,7 @@ void grpc_end2end_tests_pre_init(void) {
empty_batch_pre_init();
filter_call_init_fails_pre_init();
filter_causes_close_pre_init();
filter_latency_pre_init();
graceful_server_shutdown_pre_init();
high_initial_seqno_pre_init();
hpack_size_pre_init();
@ -211,6 +214,7 @@ void grpc_end2end_tests(int argc, char **argv,
empty_batch(config);
filter_call_init_fails(config);
filter_causes_close(config);
filter_latency(config);
graceful_server_shutdown(config);
high_initial_seqno(config);
hpack_size(config);
@ -312,6 +316,10 @@ void grpc_end2end_tests(int argc, char **argv,
filter_causes_close(config);
continue;
}
if (0 == strcmp("filter_latency", argv[i])) {
filter_latency(config);
continue;
}
if (0 == strcmp("graceful_server_shutdown", argv[i])) {
graceful_server_shutdown(config);
continue;

@ -111,6 +111,7 @@ END2END_TESTS = {
'empty_batch': default_test_options,
'filter_causes_close': default_test_options,
'filter_call_init_fails': default_test_options,
'filter_latency': default_test_options,
'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU),
'hpack_size': default_test_options._replace(proxyable=False,
traceable=False),

@ -0,0 +1,359 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/surface/channel_init.h"
#include "test/core/end2end/cq_verifier.h"
enum { TIMEOUT = 200000 };
static bool g_enable_filter = false;
static gpr_mu g_mu;
static gpr_timespec g_client_latency;
static gpr_timespec g_server_latency;
static void *tag(intptr_t t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_server(&f, server_args);
config.init_client(&f, client_args);
return f;
}
static gpr_timespec n_seconds_time(int n) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
}
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(
f->cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5), NULL)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
}
// Simple request via a server filter that saves the reported latency value.
static void test_request(grpc_end2end_test_config config) {
grpc_call *c;
grpc_call *s;
grpc_slice request_payload_slice =
grpc_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_end2end_test_fixture f =
begin_test(config, "filter_latency", NULL, NULL);
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op *op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_byte_buffer *request_payload_recv = NULL;
grpc_call_details call_details;
grpc_status_code status;
grpc_call_error error;
char *details = NULL;
size_t details_capacity = 0;
int was_cancelled = 2;
gpr_mu_lock(&g_mu);
g_client_latency = gpr_time_0(GPR_TIMESPAN);
g_server_latency = gpr_time_0(GPR_TIMESPAN);
gpr_mu_unlock(&g_mu);
const gpr_timespec start_time = gpr_now(GPR_CLOCK_MONOTONIC);
c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
"/foo", "foo.test.google.fr", deadline, NULL);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->data.send_initial_metadata.metadata = NULL;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
error =
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == strcmp(details, "xyz"));
gpr_free(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(s);
grpc_call_destroy(c);
const gpr_timespec end_time = gpr_now(GPR_CLOCK_MONOTONIC);
const gpr_timespec max_latency = gpr_time_sub(end_time, start_time);
gpr_mu_lock(&g_mu);
GPR_ASSERT(gpr_time_cmp(max_latency, g_client_latency) >= 0);
GPR_ASSERT(gpr_time_cmp(gpr_time_0(GPR_TIMESPAN), g_client_latency) < 0);
GPR_ASSERT(gpr_time_cmp(max_latency, g_server_latency) >= 0);
GPR_ASSERT(gpr_time_cmp(gpr_time_0(GPR_TIMESPAN), g_server_latency) < 0);
// Server latency should always be smaller than client latency.
GPR_ASSERT(gpr_time_cmp(g_server_latency, g_client_latency) < 0);
gpr_mu_unlock(&g_mu);
cq_verifier_destroy(cqv);
grpc_byte_buffer_destroy(request_payload);
grpc_byte_buffer_destroy(request_payload_recv);
end_test(&f);
config.tear_down_data(&f);
}
/*******************************************************************************
* Test latency filter
*/
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
return GRPC_ERROR_NONE;
}
static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
void *and_free_memory) {
gpr_mu_lock(&g_mu);
g_client_latency = final_info->stats.latency;
gpr_mu_unlock(&g_mu);
}
static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
void *and_free_memory) {
gpr_mu_lock(&g_mu);
g_server_latency = final_info->stats.latency;
gpr_mu_unlock(&g_mu);
}
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
static const grpc_channel_filter test_client_filter = {
grpc_call_next_op,
grpc_channel_next_op,
0,
init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
client_destroy_call_elem,
0,
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client_filter_latency"};
static const grpc_channel_filter test_server_filter = {
grpc_call_next_op,
grpc_channel_next_op,
0,
init_call_elem,
grpc_call_stack_ignore_set_pollset_or_pollset_set,
server_destroy_call_elem,
0,
init_channel_elem,
destroy_channel_elem,
grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server_filter_latency"};
/*******************************************************************************
* Registration
*/
static bool maybe_add_filter(grpc_channel_stack_builder *builder, void *arg) {
grpc_channel_filter *filter = arg;
if (g_enable_filter) {
// Want to add the filter as close to the end as possible, to make
// sure that all of the filters work well together. However, we
// can't add it at the very end, because the connected channel filter
// must be the last one. So we add it right before the last one.
grpc_channel_stack_builder_iterator *it =
grpc_channel_stack_builder_create_iterator_at_last(builder);
GPR_ASSERT(grpc_channel_stack_builder_move_prev(it));
const bool retval =
grpc_channel_stack_builder_add_filter_before(it, filter, NULL, NULL);
grpc_channel_stack_builder_iterator_destroy(it);
return retval;
} else {
return true;
}
}
static void init_plugin(void) {
gpr_mu_init(&g_mu);
grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
maybe_add_filter,
(void *)&test_client_filter);
grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
maybe_add_filter,
(void *)&test_client_filter);
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
maybe_add_filter,
(void *)&test_server_filter);
}
static void destroy_plugin(void) { gpr_mu_destroy(&g_mu); }
void filter_latency(grpc_end2end_test_config config) {
g_enable_filter = true;
test_request(config);
g_enable_filter = false;
}
void filter_latency_pre_init(void) {
grpc_register_plugin(init_plugin, destroy_plugin);
}

@ -85,7 +85,8 @@ static void test_code(void) {
grpc_endpoint_shutdown,
grpc_endpoint_destroy,
grpc_endpoint_get_resource_user,
grpc_endpoint_get_peer};
grpc_endpoint_get_peer,
grpc_endpoint_get_fd};
endpoint.vtable = &vtable;
grpc_endpoint_read(&exec_ctx, &endpoint, NULL, NULL);
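
This commit threads a get_fd slot through every grpc_endpoint_vtable; endpoints that have no underlying socket simply report -1, as the mock and passthru endpoints further down do. A sketch of the accessor pair, assuming the public wrapper just dispatches through the vtable (only the names appear in the diff):

/* Assumed shape of the new accessor in endpoint.c. */
int grpc_endpoint_get_fd(grpc_endpoint *ep) { return ep->vtable->get_fd(ep); }

/* Mock endpoints have no file descriptor to expose. */
static int me_get_fd(grpc_endpoint *ep) { return -1; }
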

@ -39,13 +39,57 @@
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include <errno.h>
#include <netinet/ip.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/socket_mutator.h"
#include "test/core/util/test_config.h"
struct test_socket_mutator {
grpc_socket_mutator base;
int option_value;
};
static bool mutate_fd(int fd, grpc_socket_mutator *mutator) {
int newval;
socklen_t intlen = sizeof(newval);
struct test_socket_mutator *m = (struct test_socket_mutator *)mutator;
if (0 != setsockopt(fd, IPPROTO_IP, IP_TOS, &m->option_value,
sizeof(m->option_value))) {
return false;
}
if (0 != getsockopt(fd, IPPROTO_IP, IP_TOS, &newval, &intlen)) {
return false;
}
if (newval != m->option_value) {
return false;
}
return true;
}
static void destroy_test_mutator(grpc_socket_mutator *mutator) {
struct test_socket_mutator *m = (struct test_socket_mutator *)mutator;
gpr_free(m);
}
static int compare_test_mutator(grpc_socket_mutator *a,
grpc_socket_mutator *b) {
struct test_socket_mutator *ma = (struct test_socket_mutator *)a;
struct test_socket_mutator *mb = (struct test_socket_mutator *)b;
return GPR_ICMP(ma->option_value, mb->option_value);
}
static const grpc_socket_mutator_vtable mutator_vtable = {
mutate_fd, compare_test_mutator, destroy_test_mutator};
int main(int argc, char **argv) {
int sock;
grpc_error *err;
grpc_test_init(argc, argv);
sock = socket(PF_INET, SOCK_STREAM, 0);
@ -68,6 +112,29 @@ int main(int argc, char **argv) {
GPR_ASSERT(GRPC_LOG_IF_ERROR("set_socket_low_latency",
grpc_set_socket_low_latency(sock, 0)));
struct test_socket_mutator mutator;
grpc_socket_mutator_init(&mutator.base, &mutator_vtable);
mutator.option_value = IPTOS_LOWDELAY;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"set_socket_with_mutator",
grpc_set_socket_with_mutator(sock, (grpc_socket_mutator *)&mutator)));
mutator.option_value = IPTOS_THROUGHPUT;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"set_socket_with_mutator",
grpc_set_socket_with_mutator(sock, (grpc_socket_mutator *)&mutator)));
mutator.option_value = IPTOS_RELIABILITY;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"set_socket_with_mutator",
grpc_set_socket_with_mutator(sock, (grpc_socket_mutator *)&mutator)));
mutator.option_value = -1;
err = grpc_set_socket_with_mutator(sock, (grpc_socket_mutator *)&mutator);
GPR_ASSERT(err != GRPC_ERROR_NONE);
GRPC_ERROR_UNREF(err);
close(sock);
return 0;
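
A mutator like the one above is ultimately meant to ride along in channel args so the TCP connect path can apply it to every new socket. A minimal wiring sketch, assuming the grpc_socket_mutator_to_arg helper (the same one the C++ ChannelArguments test below uses) and an insecure channel for brevity; the target address is a placeholder:

/* Sketch: hand a custom mutator to a client channel via channel args. */
static grpc_channel *create_channel_with_tos(int tos) {
  struct test_socket_mutator *m =
      (struct test_socket_mutator *)gpr_malloc(sizeof(*m));
  grpc_socket_mutator_init(&m->base, &mutator_vtable);
  m->option_value = tos;
  grpc_arg mutator_arg = grpc_socket_mutator_to_arg(&m->base);
  grpc_channel_args args = {1, &mutator_arg};
  return grpc_insecure_channel_create("localhost:50051", &args, NULL);
}
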

@ -56,6 +56,7 @@
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
typedef struct fd_pair {
@ -229,12 +230,12 @@ static int blocking_write_bytes(struct thread_args *args, char *buf) {
on the scenario we're using.
*/
static int set_socket_nonblocking(thread_args *args) {
if (!grpc_set_socket_nonblocking(args->fds.read_fd, 1)) {
gpr_log(GPR_ERROR, "Unable to set socket nonblocking: %s", strerror(errno));
if (!GRPC_LOG_IF_ERROR("Unable to set read socket nonblocking",
grpc_set_socket_nonblocking(args->fds.read_fd, 1))) {
return -1;
}
if (!grpc_set_socket_nonblocking(args->fds.write_fd, 1)) {
gpr_log(GPR_ERROR, "Unable to set socket nonblocking: %s", strerror(errno));
if (!GRPC_LOG_IF_ERROR("Unable to set write socket nonblocking",
grpc_set_socket_nonblocking(args->fds.write_fd, 1))) {
return -1;
}
return 0;
@ -347,10 +348,16 @@ static int create_listening_socket(struct sockaddr *port, socklen_t len) {
goto error;
}
if (!grpc_set_socket_cloexec(fd, 1) || !grpc_set_socket_low_latency(fd, 1) ||
!grpc_set_socket_reuse_addr(fd, 1)) {
gpr_log(GPR_ERROR, "Unable to configure socket %d: %s", fd,
strerror(errno));
if (!GRPC_LOG_IF_ERROR("Failed to set listening socket cloexec",
grpc_set_socket_cloexec(fd, 1))) {
goto error;
}
if (!GRPC_LOG_IF_ERROR("Failed to set listening socket low latency",
grpc_set_socket_low_latency(fd, 1))) {
goto error;
}
if (!GRPC_LOG_IF_ERROR("Failed to set listening socket reuse addr",
grpc_set_socket_reuse_addr(fd, 1))) {
goto error;
}
@ -386,8 +393,12 @@ static int connect_client(struct sockaddr *addr, socklen_t len) {
goto error;
}
if (!grpc_set_socket_cloexec(fd, 1) || !grpc_set_socket_low_latency(fd, 1)) {
gpr_log(GPR_ERROR, "Failed to configure socket");
if (!GRPC_LOG_IF_ERROR("Failed to set connecting socket cloexec",
grpc_set_socket_cloexec(fd, 1))) {
goto error;
}
if (!GRPC_LOG_IF_ERROR("Failed to set connecting socket low latency",
grpc_set_socket_low_latency(fd, 1))) {
goto error;
}
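
All of these hunks track one migration: the socket option setters now return a grpc_error* instead of a bool (note the new error.h include above), so call sites wrap them in GRPC_LOG_IF_ERROR rather than logging errno by hand. A sketch of what the new setter shape implies, using the GRPC_OS_ERROR helper; treat the exact body as an assumption:

#include <errno.h>
#include <fcntl.h>

#include "src/core/lib/iomgr/error.h"

grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking) {
  int oldflags = fcntl(fd, F_GETFL, 0);
  if (oldflags < 0) return GRPC_OS_ERROR(errno, "fcntl");
  if (non_blocking) {
    oldflags |= O_NONBLOCK;
  } else {
    oldflags &= ~O_NONBLOCK;
  }
  if (fcntl(fd, F_SETFL, oldflags) != 0) {
    return GRPC_OS_ERROR(errno, "fcntl");
  }
  return GRPC_ERROR_NONE;
}
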

@ -2,7 +2,7 @@
* probe definition.
*
* For a statically built binary, that'd be the name of the binary itself.
* For dinamically built ones, point to the location of the libgprc.so being
* For dynamically built ones, point to the location of the libgrpc.so being
* used. */
global starts, times, times_per_tag

@ -37,6 +37,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/sockaddr.h"
typedef struct grpc_mock_endpoint {
grpc_endpoint base;
@ -105,6 +106,8 @@ static grpc_resource_user *me_get_resource_user(grpc_endpoint *ep) {
return m->resource_user;
}
static int me_get_fd(grpc_endpoint *ep) { return -1; }
static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; }
static const grpc_endpoint_vtable vtable = {
@ -117,6 +120,7 @@ static const grpc_endpoint_vtable vtable = {
me_destroy,
me_get_resource_user,
me_get_peer,
me_get_fd,
};
grpc_endpoint *grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),

@ -37,6 +37,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/sockaddr.h"
typedef struct passthru_endpoint passthru_endpoint;
@ -146,6 +147,8 @@ static char *me_get_peer(grpc_endpoint *ep) {
return gpr_strdup("fake:mock_endpoint");
}
static int me_get_fd(grpc_endpoint *ep) { return -1; }
static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; }
static grpc_resource_user *me_get_resource_user(grpc_endpoint *ep) {
@ -163,6 +166,7 @@ static const grpc_endpoint_vtable vtable = {
me_destroy,
me_get_resource_user,
me_get_peer,
me_get_fd,
};
static void half_init(half *m, passthru_endpoint *parent,

@ -35,11 +35,56 @@
#include <grpc++/grpc++.h>
#include <grpc/grpc.h>
#include <grpc/support/useful.h>
#include <gtest/gtest.h>
#include "src/core/lib/iomgr/socket_mutator.h"
namespace grpc {
namespace testing {
namespace {
// A simple grpc_socket_mutator to be used to test SetSocketMutator
class TestSocketMutator : public grpc_socket_mutator {
public:
TestSocketMutator();
bool MutateFd(int fd) {
// Do nothing on the fd
return true;
}
};
//
// C API for TestSocketMutator
//
bool test_mutator_mutate_fd(int fd, grpc_socket_mutator* mutator) {
TestSocketMutator* tsm = (TestSocketMutator*)mutator;
return tsm->MutateFd(fd);
}
int test_mutator_compare(grpc_socket_mutator* a, grpc_socket_mutator* b) {
return GPR_ICMP(a, b);
}
void test_mutator_destroy(grpc_socket_mutator* mutator) {
TestSocketMutator* tsm = (TestSocketMutator*)mutator;
delete tsm;
}
grpc_socket_mutator_vtable test_mutator_vtable = {
test_mutator_mutate_fd, test_mutator_compare, test_mutator_destroy};
//
// TestSocketMutator implementation
//
TestSocketMutator::TestSocketMutator() {
grpc_socket_mutator_init(this, &test_mutator_vtable);
}
}
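
The wrapper class above leans on a useful bridging trick: publicly inheriting from the C struct keeps the object layout compatible, so this can be handed straight to the C API and recovered by a downcast inside the vtable thunks. In miniature, with all names hypothetical:

#include <cstdio>

struct c_vtable;
struct c_base {
  const c_vtable* vtable;
};
struct c_vtable {
  void (*poke)(c_base* self);
};

class CppWrapper : public c_base {
 public:
  CppWrapper();
  void Poke() { std::printf("poked\n"); }
};

static void poke_thunk(c_base* self) {
  // The downcast is safe because every c_base we register is a CppWrapper.
  static_cast<CppWrapper*>(self)->Poke();
}
static const c_vtable kVtable = {poke_thunk};

CppWrapper::CppWrapper() { vtable = &kVtable; }

int main() {
  CppWrapper w;
  w.vtable->poke(&w);  // C-side dispatch lands in the C++ method
}
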
class ChannelArgumentsTest : public ::testing::Test {
protected:
ChannelArgumentsTest()
@ -166,6 +211,26 @@ TEST_F(ChannelArgumentsTest, SetPointer) {
EXPECT_TRUE(HasArg(arg0));
}
TEST_F(ChannelArgumentsTest, SetSocketMutator) {
VerifyDefaultChannelArgs();
grpc_arg arg0, arg1;
TestSocketMutator* mutator0 = new TestSocketMutator();
TestSocketMutator* mutator1 = new TestSocketMutator();
arg0 = grpc_socket_mutator_to_arg(mutator0);
arg1 = grpc_socket_mutator_to_arg(mutator1);
channel_args_.SetSocketMutator(mutator0);
EXPECT_TRUE(HasArg(arg0));
channel_args_.SetSocketMutator(mutator1);
EXPECT_TRUE(HasArg(arg1));
// arg0 is replaced by arg1
EXPECT_FALSE(HasArg(arg0));
// mutator0 was destroyed when arg0 was replaced; mutator1 still needs to be
// destroyed explicitly through arg1's vtable
arg1.value.pointer.vtable->destroy(arg1.value.pointer.p);
}
TEST_F(ChannelArgumentsTest, SetUserAgentPrefix) {
VerifyDefaultChannelArgs();
grpc::string prefix("prefix");

@ -352,15 +352,13 @@ void ServerWait(Server* server, int* notify) {
}
TEST_P(AsyncEnd2endTest, WaitAndShutdownTest) {
int notify = 0;
std::thread* wait_thread =
new std::thread(&ServerWait, server_.get(), &notify);
std::thread wait_thread(&ServerWait, server_.get(), &notify);
ResetStub();
SendRpc(1);
EXPECT_EQ(0, notify);
server_->Shutdown();
wait_thread->join();
wait_thread.join();
EXPECT_EQ(1, notify);
delete wait_thread;
}
TEST_P(AsyncEnd2endTest, ShutdownThenWait) {
@ -991,7 +989,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
expected_server_cq_result = false;
}
std::thread* server_try_cancel_thd = NULL;
std::thread* server_try_cancel_thd = nullptr;
auto verif = Verifier(GetParam().disable_blocking);
@ -1027,7 +1025,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
}
}
if (server_try_cancel_thd != NULL) {
if (server_try_cancel_thd != nullptr) {
server_try_cancel_thd->join();
delete server_try_cancel_thd;
}
@ -1112,7 +1110,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
expected_cq_result = false;
}
std::thread* server_try_cancel_thd = NULL;
std::thread* server_try_cancel_thd = nullptr;
auto verif = Verifier(GetParam().disable_blocking);
@ -1150,7 +1148,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
}
}
if (server_try_cancel_thd != NULL) {
if (server_try_cancel_thd != nullptr) {
server_try_cancel_thd->join();
delete server_try_cancel_thd;
}
@ -1252,7 +1250,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
expected_cq_result = false;
}
std::thread* server_try_cancel_thd = NULL;
std::thread* server_try_cancel_thd = nullptr;
auto verif = Verifier(GetParam().disable_blocking);
@ -1332,7 +1330,7 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 8);
}
if (server_try_cancel_thd != NULL) {
if (server_try_cancel_thd != nullptr) {
server_try_cancel_thd->join();
delete server_try_cancel_thd;
}

@ -656,25 +656,23 @@ TEST_P(End2endTest, SimpleRpcWithCustomeUserAgentPrefix) {
TEST_P(End2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
ResetStub();
std::vector<std::thread*> threads;
std::vector<std::thread> threads;
for (int i = 0; i < 10; ++i) {
threads.push_back(new std::thread(SendRpc, stub_.get(), 10, true));
threads.emplace_back(SendRpc, stub_.get(), 10, true);
}
for (int i = 0; i < 10; ++i) {
threads[i]->join();
delete threads[i];
threads[i].join();
}
}
TEST_P(End2endTest, MultipleRpcs) {
ResetStub();
std::vector<std::thread*> threads;
std::vector<std::thread> threads;
for (int i = 0; i < 10; ++i) {
threads.push_back(new std::thread(SendRpc, stub_.get(), 10, false));
threads.emplace_back(SendRpc, stub_.get(), 10, false);
}
for (int i = 0; i < 10; ++i) {
threads[i]->join();
delete threads[i];
threads[i].join();
}
}
@ -1058,13 +1056,12 @@ TEST_P(ProxyEnd2endTest, SimpleRpcWithEmptyMessages) {
TEST_P(ProxyEnd2endTest, MultipleRpcs) {
ResetStub();
std::vector<std::thread*> threads;
std::vector<std::thread> threads;
for (int i = 0; i < 10; ++i) {
threads.push_back(new std::thread(SendRpc, stub_.get(), 10, false));
threads.emplace_back(SendRpc, stub_.get(), 10, false);
}
for (int i = 0; i < 10; ++i) {
threads[i]->join();
delete threads[i];
threads[i].join();
}
}
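
This hunk and the neighboring C++ test changes all apply one idiom: std::thread is movable but not copyable, so with C++11 containers the threads can live in the vector directly and the new/delete bookkeeping disappears. A self-contained sketch:

#include <thread>
#include <vector>

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 10; ++i) {
    // emplace_back constructs each thread in place inside the vector.
    threads.emplace_back([i] { (void)i; /* per-thread work goes here */ });
  }
  for (auto& t : threads) {
    t.join();  // every joinable thread must be joined before destruction
  }
}
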

@ -194,7 +194,7 @@ Status TestServiceImpl::RequestStream(ServerContext* context,
return Status::CANCELLED;
}
std::thread* server_try_cancel_thd = NULL;
std::thread* server_try_cancel_thd = nullptr;
if (server_try_cancel == CANCEL_DURING_PROCESSING) {
server_try_cancel_thd =
new std::thread(&TestServiceImpl::ServerTryCancel, this, context);
@ -212,7 +212,7 @@ Status TestServiceImpl::RequestStream(ServerContext* context,
}
gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read);
if (server_try_cancel_thd != NULL) {
if (server_try_cancel_thd != nullptr) {
server_try_cancel_thd->join();
delete server_try_cancel_thd;
return Status::CANCELLED;
@ -248,7 +248,7 @@ Status TestServiceImpl::ResponseStream(ServerContext* context,
}
EchoResponse response;
std::thread* server_try_cancel_thd = NULL;
std::thread* server_try_cancel_thd = nullptr;
if (server_try_cancel == CANCEL_DURING_PROCESSING) {
server_try_cancel_thd =
new std::thread(&TestServiceImpl::ServerTryCancel, this, context);
@ -259,7 +259,7 @@ Status TestServiceImpl::ResponseStream(ServerContext* context,
writer->Write(response);
}
if (server_try_cancel_thd != NULL) {
if (server_try_cancel_thd != nullptr) {
server_try_cancel_thd->join();
delete server_try_cancel_thd;
return Status::CANCELLED;
@ -295,7 +295,7 @@ Status TestServiceImpl::BidiStream(
return Status::CANCELLED;
}
std::thread* server_try_cancel_thd = NULL;
std::thread* server_try_cancel_thd = nullptr;
if (server_try_cancel == CANCEL_DURING_PROCESSING) {
server_try_cancel_thd =
new std::thread(&TestServiceImpl::ServerTryCancel, this, context);
@ -307,7 +307,7 @@ Status TestServiceImpl::BidiStream(
stream->Write(response);
}
if (server_try_cancel_thd != NULL) {
if (server_try_cancel_thd != nullptr) {
server_try_cancel_thd->join();
delete server_try_cancel_thd;
return Status::CANCELLED;

@ -232,19 +232,19 @@ class CommonStressTestSyncServer : public CommonStressTest<TestServiceImpl> {
class CommonStressTestAsyncServer
: public CommonStressTest<grpc::testing::EchoTestService::AsyncService> {
public:
CommonStressTestAsyncServer() : contexts_(kNumAsyncServerThreads * 100) {}
void SetUp() override {
shutting_down_ = false;
ServerBuilder builder;
SetUpStart(&builder, &service_);
cq_ = builder.AddCompletionQueue();
SetUpEnd(&builder);
contexts_ = new Context[kNumAsyncServerThreads * 100];
for (int i = 0; i < kNumAsyncServerThreads * 100; i++) {
RefreshContext(i);
}
for (int i = 0; i < kNumAsyncServerThreads; i++) {
server_threads_.push_back(
new std::thread(&CommonStressTestAsyncServer::ProcessRpcs, this));
server_threads_.emplace_back(&CommonStressTestAsyncServer::ProcessRpcs,
this);
}
}
void TearDown() override {
@ -256,8 +256,7 @@ class CommonStressTestAsyncServer
}
for (int i = 0; i < kNumAsyncServerThreads; i++) {
server_threads_[i]->join();
delete server_threads_[i];
server_threads_[i].join();
}
void* ignored_tag;
@ -265,7 +264,6 @@ class CommonStressTestAsyncServer
while (cq_->Next(&ignored_tag, &ignored_ok))
;
TearDownEnd();
delete[] contexts_;
}
private:
@ -311,12 +309,13 @@ class CommonStressTestAsyncServer
response_writer;
EchoRequest recv_request;
enum { READY, DONE } state;
} * contexts_;
};
std::vector<Context> contexts_;
::grpc::testing::EchoTestService::AsyncService service_;
std::unique_ptr<ServerCompletionQueue> cq_;
bool shutting_down_;
std::mutex mu_;
std::vector<std::thread*> server_threads_;
std::vector<std::thread> server_threads_;
};
template <class Common>
@ -353,14 +352,12 @@ typedef ::testing::Types<CommonStressTestSyncServer,
TYPED_TEST_CASE(End2endTest, CommonTypes);
TYPED_TEST(End2endTest, ThreadStress) {
this->common_.ResetStub();
std::vector<std::thread*> threads;
std::vector<std::thread> threads;
for (int i = 0; i < kNumThreads; ++i) {
threads.push_back(
new std::thread(SendRpc, this->common_.GetStub(), kNumRpcs));
threads.emplace_back(SendRpc, this->common_.GetStub(), kNumRpcs);
}
for (int i = 0; i < kNumThreads; ++i) {
threads[i]->join();
delete threads[i];
threads[i].join();
}
}
@ -442,26 +439,24 @@ class AsyncClientEnd2endTest : public ::testing::Test {
TYPED_TEST_CASE(AsyncClientEnd2endTest, CommonTypes);
TYPED_TEST(AsyncClientEnd2endTest, ThreadStress) {
this->common_.ResetStub();
std::vector<std::thread *> send_threads, completion_threads;
std::vector<std::thread> send_threads, completion_threads;
for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
completion_threads.push_back(new std::thread(
completion_threads.emplace_back(
&AsyncClientEnd2endTest_ThreadStress_Test<TypeParam>::AsyncCompleteRpc,
this));
this);
}
for (int i = 0; i < kNumAsyncSendThreads; ++i) {
send_threads.push_back(new std::thread(
send_threads.emplace_back(
&AsyncClientEnd2endTest_ThreadStress_Test<TypeParam>::AsyncSendRpc,
this, kNumRpcs));
this, kNumRpcs);
}
for (int i = 0; i < kNumAsyncSendThreads; ++i) {
send_threads[i]->join();
delete send_threads[i];
send_threads[i].join();
}
this->Wait();
for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
completion_threads[i]->join();
delete completion_threads[i];
completion_threads[i].join();
}
}

@ -163,10 +163,9 @@ class Client {
MaybeStartRequests();
// avoid std::vector for old compilers that expect a copy constructor
if (reset) {
Histogram* to_merge = new Histogram[threads_.size()];
StatusHistogram* to_merge_status = new StatusHistogram[threads_.size()];
std::vector<Histogram> to_merge(threads_.size());
std::vector<StatusHistogram> to_merge_status(threads_.size());
for (size_t i = 0; i < threads_.size(); i++) {
threads_[i]->BeginSwap(&to_merge[i], &to_merge_status[i]);
@ -177,8 +176,6 @@ class Client {
latencies.Merge(to_merge[i]);
MergeStatusHistogram(to_merge_status[i], &statuses);
}
delete[] to_merge;
delete[] to_merge_status;
timer_result = timer->Mark();
} else {
// merge snapshots of each thread histogram

@ -177,7 +177,6 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
shutdown_state_.emplace_back(new PerThreadShutdownState());
}
using namespace std::placeholders;
int t = 0;
for (int ch = 0; ch < config.client_channels(); ch++) {
for (int i = 0; i < config.outstanding_rpcs_per_channel(); i++) {

@ -138,10 +138,9 @@ class SynchronousUnaryClient final : public SynchronousClient {
class SynchronousStreamingClient final : public SynchronousClient {
public:
SynchronousStreamingClient(const ClientConfig& config)
: SynchronousClient(config) {
context_ = new grpc::ClientContext[num_threads_];
stream_ = new std::unique_ptr<
grpc::ClientReaderWriter<SimpleRequest, SimpleResponse>>[num_threads_];
: SynchronousClient(config),
context_(num_threads_),
stream_(num_threads_) {
for (size_t thread_idx = 0; thread_idx < num_threads_; thread_idx++) {
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
@ -161,8 +160,6 @@ class SynchronousStreamingClient final : public SynchronousClient {
}
}
}
delete[] stream_;
delete[] context_;
}
bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) override {
@ -182,8 +179,9 @@ class SynchronousStreamingClient final : public SynchronousClient {
private:
// These are both conceptually std::vector but cannot be for old compilers
// that expect contained classes to support copy constructors
grpc::ClientContext* context_;
std::unique_ptr<grpc::ClientReaderWriter<SimpleRequest, SimpleResponse>>*
std::vector<grpc::ClientContext> context_;
std::vector<
std::unique_ptr<grpc::ClientReaderWriter<SimpleRequest, SimpleResponse>>>
stream_;
};

@ -192,30 +192,6 @@ static void postprocess_scenario_result(ScenarioResult* result) {
}
}
// Namespace for classes and functions used only in RunScenario
// Using this rather than local definitions to workaround gcc-4.4 limitations
// regarding using templates without linkage
namespace runsc {
// ClientContext allocator
static ClientContext* AllocContext(list<ClientContext>* contexts) {
contexts->emplace_back();
auto context = &contexts->back();
context->set_wait_for_ready(true);
return context;
}
struct ServerData {
unique_ptr<WorkerService::Stub> stub;
unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream;
};
struct ClientData {
unique_ptr<WorkerService::Stub> stub;
unique_ptr<ClientReaderWriter<ClientArgs, ClientStatus>> stream;
};
} // namespace runsc
std::unique_ptr<ScenarioResult> RunScenario(
const ClientConfig& initial_client_config, size_t num_clients,
const ServerConfig& initial_server_config, size_t num_servers,
@ -225,6 +201,12 @@ std::unique_ptr<ScenarioResult> RunScenario(
// ClientContext allocations (all are destroyed at scope exit)
list<ClientContext> contexts;
auto alloc_context = [](list<ClientContext>* contexts) {
contexts->emplace_back();
auto context = &contexts->back();
context->set_wait_for_ready(true);
return context;
};
// To be added to the result, containing the final configuration used for
// client and config (including host, etc.)
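
The deleted runsc::AllocContext existed only because gcc-4.4 rejected templates over types without linkage; with C++11 a local lambda does the same job next to its one caller. A reduced, self-contained sketch of the pattern, with Ctx standing in for ClientContext:

#include <list>

struct Ctx {
  bool wait_for_ready = false;
};

int main() {
  // Contexts live in a list so their addresses stay stable and all of them
  // are destroyed together at scope exit, as RunScenario relies on.
  std::list<Ctx> contexts;
  auto alloc_context = [](std::list<Ctx>* contexts) {
    contexts->emplace_back();
    Ctx* context = &contexts->back();
    context->wait_for_ready = true;
    return context;
  };
  Ctx* c = alloc_context(&contexts);
  return c->wait_for_ready ? 0 : 1;
}
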
@ -277,10 +259,11 @@ std::unique_ptr<ScenarioResult> RunScenario(
workers.resize(num_clients + num_servers);
// Start servers
using runsc::ServerData;
// servers is array rather than std::vector to avoid gcc-4.4 issues
// where class contained in std::vector must have a copy constructor
auto* servers = new ServerData[num_servers];
struct ServerData {
unique_ptr<WorkerService::Stub> stub;
unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream;
};
std::vector<ServerData> servers(num_servers);
for (size_t i = 0; i < num_servers; i++) {
gpr_log(GPR_INFO, "Starting server on %s (worker #%" PRIuPTR ")",
workers[i].c_str(), i);
@ -324,8 +307,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
ServerArgs args;
*args.mutable_setup() = server_config;
servers[i].stream =
servers[i].stub->RunServer(runsc::AllocContext(&contexts));
servers[i].stream = servers[i].stub->RunServer(alloc_context(&contexts));
if (!servers[i].stream->Write(args)) {
gpr_log(GPR_ERROR, "Could not write args to server %zu", i);
}
@ -343,10 +325,11 @@ std::unique_ptr<ScenarioResult> RunScenario(
// Targets are all set by now
result_client_config = client_config;
// Start clients
using runsc::ClientData;
// clients is array rather than std::vector to avoid gcc-4.4 issues
// where class contained in std::vector must have a copy constructor
auto* clients = new ClientData[num_clients];
struct ClientData {
unique_ptr<WorkerService::Stub> stub;
unique_ptr<ClientReaderWriter<ClientArgs, ClientStatus>> stream;
};
std::vector<ClientData> clients(num_clients);
size_t channels_allocated = 0;
for (size_t i = 0; i < num_clients; i++) {
const auto& worker = workers[i + num_servers];
@ -395,8 +378,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
ClientArgs args;
*args.mutable_setup() = per_client_config;
clients[i].stream =
clients[i].stub->RunClient(runsc::AllocContext(&contexts));
clients[i].stream = clients[i].stub->RunClient(alloc_context(&contexts));
if (!clients[i].stream->Write(args)) {
gpr_log(GPR_ERROR, "Could not write args to client %zu", i);
}
@ -516,7 +498,6 @@ std::unique_ptr<ScenarioResult> RunScenario(
s.error_message().c_str());
}
}
delete[] clients;
merged_latencies.FillProto(result->mutable_latencies());
for (std::unordered_map<int, int64_t>::iterator it = merged_statuses.begin();
@ -559,8 +540,6 @@ std::unique_ptr<ScenarioResult> RunScenario(
}
}
delete[] servers;
postprocess_scenario_result(result.get());
return result;
}
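
The remaining churn in this file has one cause: under C++03, local types could not be template arguments and std::vector elements had to be copy-constructible, which is exactly what the removed gcc-4.4 comments refer to. C++11 lifts both restrictions, so ServerData and ClientData move inside RunScenario and the raw new[] arrays become vectors. A minimal illustration, with Stub and Stream as hypothetical stand-ins:

#include <memory>
#include <vector>

struct Stub {};
struct Stream {};

int main() {
  struct ServerData {  // local struct: a legal template argument in C++11
    std::unique_ptr<Stub> stub;    // move-only members are fine in a vector
    std::unique_ptr<Stream> stream;
  };
  std::vector<ServerData> servers(3);  // no new[]/delete[] required
  return servers.empty() ? 1 : 0;
}
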

@ -77,7 +77,7 @@ namespace compiler {
typedef GRPC_CUSTOM_DISKSOURCETREE DiskSourceTree;
typedef GRPC_CUSTOM_IMPORTER Importer;
typedef GRPC_CUSTOM_MULTIFILEERRORCOLLECTOR MultiFileErrorCollector;
} // namespace importer
} // namespace compiler
} // namespace protobuf
} // namespace grpc

@ -112,8 +112,6 @@ size_t ArraySize(T& a) {
static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))));
}
} // namespace
class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
public:
Status Echo(ServerContext* context, const EchoRequest* request,
@ -132,6 +130,8 @@ class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
}
};
} // namespace
class GrpcToolTest : public ::testing::Test {
protected:
GrpcToolTest() {}

@ -28,6 +28,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import pkg_resources
import sys
import setuptools
@ -43,10 +44,14 @@ def build_package_protos(package_root):
if filename.endswith('.proto'):
proto_files.append(os.path.abspath(os.path.join(root, filename)))
well_known_protos_include = pkg_resources.resource_filename(
'grpc.tools', '_proto')
for proto_file in proto_files:
command = [
'grpc.tools.protoc',
'--proto_path={}'.format(inclusion_root),
'--proto_path={}'.format(well_known_protos_include),
'--python_out={}'.format(inclusion_root),
'--grpc_python_out={}'.format(inclusion_root),
] + [proto_file]

@ -811,7 +811,6 @@ src/core/lib/iomgr/endpoint.h \
src/core/lib/iomgr/endpoint_pair.h \
src/core/lib/iomgr/error.h \
src/core/lib/iomgr/ev_epoll_linux.h \
src/core/lib/iomgr/ev_poll_and_epoll_posix.h \
src/core/lib/iomgr/ev_poll_posix.h \
src/core/lib/iomgr/ev_posix.h \
src/core/lib/iomgr/exec_ctx.h \
@ -835,6 +834,7 @@ src/core/lib/iomgr/sockaddr.h \
src/core/lib/iomgr/sockaddr_posix.h \
src/core/lib/iomgr/sockaddr_utils.h \
src/core/lib/iomgr/sockaddr_windows.h \
src/core/lib/iomgr/socket_mutator.h \
src/core/lib/iomgr/socket_utils.h \
src/core/lib/iomgr/socket_utils_posix.h \
src/core/lib/iomgr/socket_windows.h \
@ -990,7 +990,6 @@ src/core/lib/iomgr/endpoint_pair_uv.c \
src/core/lib/iomgr/endpoint_pair_windows.c \
src/core/lib/iomgr/error.c \
src/core/lib/iomgr/ev_epoll_linux.c \
src/core/lib/iomgr/ev_poll_and_epoll_posix.c \
src/core/lib/iomgr/ev_poll_posix.c \
src/core/lib/iomgr/ev_posix.c \
src/core/lib/iomgr/exec_ctx.c \
@ -1012,6 +1011,7 @@ src/core/lib/iomgr/resolve_address_uv.c \
src/core/lib/iomgr/resolve_address_windows.c \
src/core/lib/iomgr/resource_quota.c \
src/core/lib/iomgr/sockaddr_utils.c \
src/core/lib/iomgr/socket_mutator.c \
src/core/lib/iomgr/socket_utils_common_posix.c \
src/core/lib/iomgr/socket_utils_linux.c \
src/core/lib/iomgr/socket_utils_posix.c \

@ -36,7 +36,7 @@ cd $(dirname $0)/../..
# run 8core client vs 8core server
tools/run_tests/run_performance_tests.py \
-l c++ csharp node ruby java python go \
-l c++ csharp node ruby java python go node_express \
--netperf \
--category scalable \
--bq_result_table performance_test.performance_experiment \

Some files were not shown because too many files have changed in this diff.