Merge branch 'master' into debug-timers

Sree Kuchibhotla · commit a27764e593 · pull/12663/head
Changed files (lines changed per file):

  1. BUILD (10)
  2. CMakeLists.txt (199)
  3. Makefile (203)
  4. bazel/generate_cc.bzl (2)
  5. bazel/grpc_build_system.bzl (16)
  6. build.yaml (2)
  7. config.m4 (2)
  8. doc/environment_variables.md (1)
  9. doc/service_config.md (5)
  10. examples/cpp/helloworld/greeter_async_client.cc (12)
  11. examples/cpp/helloworld/greeter_async_client2.cc (14)
  12. gRPC-Core.podspec (1)
  13. grpc.def (20)
  14. grpc.gemspec (2)
  15. include/grpc++/generic/generic_stub.h (7)
  16. include/grpc++/impl/codegen/async_stream.h (136)
  17. include/grpc++/impl/codegen/async_unary_call.h (41)
  18. include/grpc++/impl/codegen/byte_buffer.h (141)
  19. include/grpc++/impl/codegen/call.h (114)
  20. include/grpc++/impl/codegen/method_handler_impl.h (9)
  21. include/grpc++/impl/codegen/rpc_service_method.h (14)
  22. include/grpc++/impl/codegen/serialization_traits.h (17)
  23. include/grpc++/impl/codegen/slice.h (78)
  24. include/grpc++/support/byte_buffer.h (68)
  25. include/grpc++/support/channel_arguments.h (6)
  26. include/grpc++/support/slice.h (80)
  27. include/grpc/byte_buffer.h (64)
  28. include/grpc/compression.h (4)
  29. include/grpc/impl/codegen/byte_buffer.h (86)
  30. include/grpc/impl/codegen/grpc_types.h (19)
  31. include/grpc/slice.h (6)
  32. package.xml (6)
  33. setup.py (2)
  34. src/compiler/cpp_generator.cc (296)
  35. src/compiler/python_generator.cc (4)
  36. src/core/ext/census/base_resources.c (8)
  37. src/core/ext/filters/client_channel/client_channel.c (13)
  38. src/core/ext/filters/client_channel/client_channel_factory.c (2)
  39. src/core/ext/filters/client_channel/client_channel_plugin.c (4)
  40. src/core/ext/filters/client_channel/http_proxy.c (10)
  41. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (156)
  42. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c (2)
  43. src/core/ext/filters/client_channel/lb_policy_factory.c (4)
  44. src/core/ext/filters/client_channel/lb_policy_factory.h (2)
  45. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c (6)
  46. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c (25)
  47. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c (2)
  48. src/core/ext/filters/client_channel/subchannel.c (2)
  49. src/core/ext/filters/http/message_compress/message_compress_filter.c (4)
  50. src/core/ext/filters/load_reporting/server_load_reporting_plugin.c (3)
  51. src/core/ext/transport/chttp2/client/insecure/channel_create.c (2)
  52. src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c (2)
  53. src/core/ext/transport/chttp2/server/chttp2_server.c (3)
  54. src/core/ext/transport/chttp2/transport/chttp2_plugin.c (1)
  55. src/core/ext/transport/chttp2/transport/chttp2_transport.c (330)
  56. src/core/ext/transport/chttp2/transport/chttp2_transport.h (1)
  57. src/core/ext/transport/chttp2/transport/frame_ping.c (5)
  58. src/core/ext/transport/chttp2/transport/frame_window_update.c (10)
  59. src/core/ext/transport/chttp2/transport/hpack_encoder.c (35)
  60. src/core/ext/transport/chttp2/transport/hpack_parser.c (3)
  61. src/core/ext/transport/chttp2/transport/internal.h (39)
  62. src/core/ext/transport/chttp2/transport/parsing.c (7)
  63. src/core/ext/transport/chttp2/transport/stream_lists.c (52)
  64. src/core/ext/transport/chttp2/transport/writing.c (43)
  65. src/core/ext/transport/inproc/inproc_transport.c (4)
  66. src/core/lib/channel/channel_args.c (12)
  67. src/core/lib/channel/channel_stack.h (2)
  68. src/core/lib/compression/compression.c (4)
  69. src/core/lib/debug/stats_data.c (303)
  70. src/core/lib/debug/stats_data.h (187)
  71. src/core/lib/debug/stats_data.yaml (80)
  72. src/core/lib/debug/stats_data_bq_schema.sql (33)
  73. src/core/lib/debug/trace.h (2)
  74. src/core/lib/http/httpcli.c (2)
  75. src/core/lib/http/httpcli_security_connector.c (3)
  76. src/core/lib/iomgr/closure.c (18)
  77. src/core/lib/iomgr/error.c (2)
  78. src/core/lib/iomgr/ev_epoll1_linux.c (162)
  79. src/core/lib/iomgr/ev_epollex_linux.c (276)
  80. src/core/lib/iomgr/ev_epollsig_linux.c (64)
  81. src/core/lib/iomgr/ev_poll_posix.c (100)
  82. src/core/lib/iomgr/ev_posix.c (4)
  83. src/core/lib/iomgr/ev_posix.h (2)
  84. src/core/lib/iomgr/executor.c (43)
  85. src/core/lib/iomgr/iomgr.c (2)
  86. src/core/lib/iomgr/is_epollexclusive_available.c (6)
  87. src/core/lib/iomgr/pollset.h (2)
  88. src/core/lib/iomgr/pollset_uv.c (2)
  89. src/core/lib/iomgr/pollset_windows.c (6)
  90. src/core/lib/iomgr/resolve_address_posix.c (2)
  91. src/core/lib/iomgr/resource_quota.c (1)
  92. src/core/lib/iomgr/socket_factory_posix.c (4)
  93. src/core/lib/iomgr/socket_mutator.c (4)
  94. src/core/lib/iomgr/tcp_server_posix.c (4)
  95. src/core/lib/iomgr/timer_generic.c (6)
  96. src/core/lib/security/credentials/google_default/google_default_credentials.c (3)
  97. src/core/lib/security/transport/security_connector.c (22)
  98. src/core/lib/security/transport/security_handshaker.c (2)
  99. src/core/lib/support/log_linux.c (2)
  100. src/core/lib/support/string.c (2)

Some files were not shown because too many files have changed in this diff.

BUILD (10)

@@ -574,6 +574,8 @@ grpc_cc_library(
         "src/core/lib/compression/compression.c",
         "src/core/lib/compression/message_compress.c",
         "src/core/lib/compression/stream_compression.c",
+        "src/core/lib/debug/stats.c",
+        "src/core/lib/debug/stats_data.c",
         "src/core/lib/http/format_request.c",
         "src/core/lib/http/httpcli.c",
         "src/core/lib/http/parser.c",
@@ -690,8 +692,6 @@ grpc_cc_library(
         "src/core/lib/transport/timeout_encoding.c",
         "src/core/lib/transport/transport.c",
         "src/core/lib/transport/transport_op_string.c",
-        "src/core/lib/debug/stats.c",
-        "src/core/lib/debug/stats_data.c",
     ],
     hdrs = [
         "src/core/lib/channel/channel_args.h",
@@ -705,6 +705,8 @@ grpc_cc_library(
         "src/core/lib/compression/algorithm_metadata.h",
         "src/core/lib/compression/message_compress.h",
         "src/core/lib/compression/stream_compression.h",
+        "src/core/lib/debug/stats.h",
+        "src/core/lib/debug/stats_data.h",
         "src/core/lib/http/format_request.h",
         "src/core/lib/http/httpcli.h",
         "src/core/lib/http/parser.h",
@@ -807,8 +809,6 @@ grpc_cc_library(
         "src/core/lib/transport/timeout_encoding.h",
         "src/core/lib/transport/transport.h",
         "src/core/lib/transport/transport_impl.h",
-        "src/core/lib/debug/stats.h",
-        "src/core/lib/debug/stats_data.h",
     ],
     external_deps = [
         "zlib",
@@ -989,7 +989,6 @@ grpc_cc_library(
     name = "grpc_codegen",
     language = "c",
     public_hdrs = [
-        "include/grpc/impl/codegen/byte_buffer.h",
        "include/grpc/impl/codegen/byte_buffer_reader.h",
        "include/grpc/impl/codegen/compression_types.h",
        "include/grpc/impl/codegen/connectivity_state.h",
@@ -1488,7 +1487,6 @@ grpc_cc_library(
     public_hdrs = [
         "include/grpc++/impl/codegen/async_stream.h",
         "include/grpc++/impl/codegen/async_unary_call.h",
-        "include/grpc++/impl/codegen/byte_buffer.h",
         "include/grpc++/impl/codegen/call.h",
         "include/grpc++/impl/codegen/call_hook.h",
         "include/grpc++/impl/codegen/channel_interface.h",

CMakeLists.txt (199)

@@ -761,6 +761,18 @@ add_dependencies(buildtests_cxx thread_stress_test)
 if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
   add_dependencies(buildtests_cxx writes_per_rpc_test)
 endif()
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+  add_dependencies(buildtests_cxx resolver_component_test_unsecure)
+endif()
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+  add_dependencies(buildtests_cxx resolver_component_test)
+endif()
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+  add_dependencies(buildtests_cxx resolver_component_tests_runner_invoker_unsecure)
+endif()
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+  add_dependencies(buildtests_cxx resolver_component_tests_runner_invoker)
+endif()
 
 add_custom_target(buildtests
   DEPENDS buildtests_c buildtests_cxx)
@@ -1238,7 +1250,6 @@ target_link_libraries(grpc
 )
 
 foreach(_hdr
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -1545,7 +1556,6 @@ target_link_libraries(grpc_cronet
 )
 
 foreach(_hdr
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -1822,7 +1832,6 @@ target_link_libraries(grpc_test_util
 )
 
 foreach(_hdr
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -2083,7 +2092,6 @@ target_link_libraries(grpc_test_util_unsecure
 )
 
 foreach(_hdr
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -2379,7 +2387,6 @@ target_link_libraries(grpc_unsecure
 )
 
 foreach(_hdr
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -2689,7 +2696,6 @@ foreach(_hdr
   include/grpc/slice_buffer.h
   include/grpc/status.h
   include/grpc/support/workaround_list.h
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -2700,7 +2706,6 @@ foreach(_hdr
   include/grpc/impl/codegen/status.h
   include/grpc++/impl/codegen/async_stream.h
   include/grpc++/impl/codegen/async_unary_call.h
-  include/grpc++/impl/codegen/byte_buffer.h
   include/grpc++/impl/codegen/call.h
   include/grpc++/impl/codegen/call_hook.h
   include/grpc++/impl/codegen/channel_interface.h
@@ -3183,7 +3188,6 @@ foreach(_hdr
   include/grpc/slice_buffer.h
   include/grpc/status.h
   include/grpc/support/workaround_list.h
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -3194,7 +3198,6 @@ foreach(_hdr
   include/grpc/impl/codegen/status.h
   include/grpc++/impl/codegen/async_stream.h
   include/grpc++/impl/codegen/async_unary_call.h
-  include/grpc++/impl/codegen/byte_buffer.h
   include/grpc++/impl/codegen/call.h
   include/grpc++/impl/codegen/call_hook.h
   include/grpc++/impl/codegen/channel_interface.h
@@ -3555,7 +3558,6 @@ target_link_libraries(grpc++_test_util
 foreach(_hdr
   include/grpc++/impl/codegen/async_stream.h
   include/grpc++/impl/codegen/async_unary_call.h
-  include/grpc++/impl/codegen/byte_buffer.h
   include/grpc++/impl/codegen/call.h
   include/grpc++/impl/codegen/call_hook.h
   include/grpc++/impl/codegen/channel_interface.h
@@ -3583,7 +3585,6 @@ foreach(_hdr
   include/grpc++/impl/codegen/stub_options.h
   include/grpc++/impl/codegen/sync_stream.h
   include/grpc++/impl/codegen/time.h
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -3695,7 +3696,6 @@ target_link_libraries(grpc++_test_util_unsecure
 foreach(_hdr
   include/grpc++/impl/codegen/async_stream.h
   include/grpc++/impl/codegen/async_unary_call.h
-  include/grpc++/impl/codegen/byte_buffer.h
   include/grpc++/impl/codegen/call.h
   include/grpc++/impl/codegen/call_hook.h
   include/grpc++/impl/codegen/channel_interface.h
@@ -3723,7 +3723,6 @@ foreach(_hdr
   include/grpc++/impl/codegen/stub_options.h
   include/grpc++/impl/codegen/sync_stream.h
   include/grpc++/impl/codegen/time.h
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -3926,7 +3925,6 @@ foreach(_hdr
   include/grpc/slice_buffer.h
   include/grpc/status.h
   include/grpc/support/workaround_list.h
-  include/grpc/impl/codegen/byte_buffer.h
   include/grpc/impl/codegen/byte_buffer_reader.h
   include/grpc/impl/codegen/compression_types.h
   include/grpc/impl/codegen/connectivity_state.h
@@ -3937,7 +3935,6 @@ foreach(_hdr
   include/grpc/impl/codegen/status.h
   include/grpc++/impl/codegen/async_stream.h
   include/grpc++/impl/codegen/async_unary_call.h
-  include/grpc++/impl/codegen/byte_buffer.h
   include/grpc++/impl/codegen/call.h
   include/grpc++/impl/codegen/call_hook.h
   include/grpc++/impl/codegen/channel_interface.h
@@ -14129,6 +14126,178 @@ target_link_libraries(inproc_nosec_test
   gpr
 )
 
 endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+
+add_executable(resolver_component_test_unsecure
+  test/cpp/naming/resolver_component_test.cc
+  third_party/googletest/googletest/src/gtest-all.cc
+  third_party/googletest/googlemock/src/gmock-all.cc
+)
+
+target_include_directories(resolver_component_test_unsecure
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+  PRIVATE third_party/googletest/googletest/include
+  PRIVATE third_party/googletest/googletest
+  PRIVATE third_party/googletest/googlemock/include
+  PRIVATE third_party/googletest/googlemock
+  PRIVATE ${_gRPC_PROTO_GENS_DIR}
+)
+
+target_link_libraries(resolver_component_test_unsecure
+  ${_gRPC_PROTOBUF_LIBRARIES}
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc++_test_util_unsecure
+  grpc_test_util_unsecure
+  gpr_test_util
+  grpc++_unsecure
+  grpc_unsecure
+  gpr
+  grpc++_test_config
+  ${_gRPC_GFLAGS_LIBRARIES}
+)
+
+endif()
+endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+
+add_executable(resolver_component_test
+  test/cpp/naming/resolver_component_test.cc
+  third_party/googletest/googletest/src/gtest-all.cc
+  third_party/googletest/googlemock/src/gmock-all.cc
+)
+
+target_include_directories(resolver_component_test
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+  PRIVATE third_party/googletest/googletest/include
+  PRIVATE third_party/googletest/googletest
+  PRIVATE third_party/googletest/googlemock/include
+  PRIVATE third_party/googletest/googlemock
+  PRIVATE ${_gRPC_PROTO_GENS_DIR}
+)
+
+target_link_libraries(resolver_component_test
+  ${_gRPC_PROTOBUF_LIBRARIES}
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc++_test_util
+  grpc_test_util
+  gpr_test_util
+  grpc++
+  grpc
+  gpr
+  grpc++_test_config
+  ${_gRPC_GFLAGS_LIBRARIES}
+)
+
+endif()
+endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+
+add_executable(resolver_component_tests_runner_invoker_unsecure
+  test/cpp/naming/resolver_component_tests_runner_invoker.cc
+  third_party/googletest/googletest/src/gtest-all.cc
+  third_party/googletest/googlemock/src/gmock-all.cc
+)
+
+target_include_directories(resolver_component_tests_runner_invoker_unsecure
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+  PRIVATE third_party/googletest/googletest/include
+  PRIVATE third_party/googletest/googletest
+  PRIVATE third_party/googletest/googlemock/include
+  PRIVATE third_party/googletest/googlemock
+  PRIVATE ${_gRPC_PROTO_GENS_DIR}
+)
+
+target_link_libraries(resolver_component_tests_runner_invoker_unsecure
+  ${_gRPC_PROTOBUF_LIBRARIES}
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc++_test_util
+  grpc_test_util
+  gpr_test_util
+  grpc++
+  grpc
+  gpr
+  grpc++_test_config
+  ${_gRPC_GFLAGS_LIBRARIES}
+)
+
+endif()
+endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+
+add_executable(resolver_component_tests_runner_invoker
+  test/cpp/naming/resolver_component_tests_runner_invoker.cc
+  third_party/googletest/googletest/src/gtest-all.cc
+  third_party/googletest/googlemock/src/gmock-all.cc
+)
+
+target_include_directories(resolver_component_tests_runner_invoker
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+  PRIVATE third_party/googletest/googletest/include
+  PRIVATE third_party/googletest/googletest
+  PRIVATE third_party/googletest/googlemock/include
+  PRIVATE third_party/googletest/googlemock
+  PRIVATE ${_gRPC_PROTO_GENS_DIR}
+)
+
+target_link_libraries(resolver_component_tests_runner_invoker
+  ${_gRPC_PROTOBUF_LIBRARIES}
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc++_test_util
+  grpc_test_util
+  gpr_test_util
+  grpc++
+  grpc
+  gpr
+  grpc++_test_config
+  ${_gRPC_GFLAGS_LIBRARIES}
+)
+
+endif()
 endif (gRPC_BUILD_TESTS)
 if (gRPC_BUILD_TESTS)

Makefile (203)

@@ -1266,6 +1266,10 @@ h2_sockpair+trace_nosec_test: $(BINDIR)/$(CONFIG)/h2_sockpair+trace_nosec_test
 h2_sockpair_1byte_nosec_test: $(BINDIR)/$(CONFIG)/h2_sockpair_1byte_nosec_test
 h2_uds_nosec_test: $(BINDIR)/$(CONFIG)/h2_uds_nosec_test
 inproc_nosec_test: $(BINDIR)/$(CONFIG)/inproc_nosec_test
+resolver_component_test_unsecure: $(BINDIR)/$(CONFIG)/resolver_component_test_unsecure
+resolver_component_test: $(BINDIR)/$(CONFIG)/resolver_component_test
+resolver_component_tests_runner_invoker_unsecure: $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure
+resolver_component_tests_runner_invoker: $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker
 api_fuzzer_one_entry: $(BINDIR)/$(CONFIG)/api_fuzzer_one_entry
 client_fuzzer_one_entry: $(BINDIR)/$(CONFIG)/client_fuzzer_one_entry
 hpack_parser_fuzzer_test_one_entry: $(BINDIR)/$(CONFIG)/hpack_parser_fuzzer_test_one_entry
@@ -1652,6 +1656,10 @@ buildtests_cxx: privatelibs_cxx \
   $(BINDIR)/$(CONFIG)/boringssl_x509_test \
   $(BINDIR)/$(CONFIG)/boringssl_tab_test \
   $(BINDIR)/$(CONFIG)/boringssl_v3name_test \
+  $(BINDIR)/$(CONFIG)/resolver_component_test_unsecure \
+  $(BINDIR)/$(CONFIG)/resolver_component_test \
+  $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure \
+  $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker \
 
 else
 buildtests_cxx: privatelibs_cxx \
@@ -1730,6 +1738,10 @@ buildtests_cxx: privatelibs_cxx \
   $(BINDIR)/$(CONFIG)/thread_manager_test \
   $(BINDIR)/$(CONFIG)/thread_stress_test \
   $(BINDIR)/$(CONFIG)/writes_per_rpc_test \
+  $(BINDIR)/$(CONFIG)/resolver_component_test_unsecure \
+  $(BINDIR)/$(CONFIG)/resolver_component_test \
+  $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure \
+  $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker \
 
 endif
@@ -2141,6 +2153,10 @@ test_cxx: buildtests_cxx
	$(Q) $(BINDIR)/$(CONFIG)/thread_stress_test || ( echo test thread_stress_test failed ; exit 1 )
	$(E) "[RUN] Testing writes_per_rpc_test"
	$(Q) $(BINDIR)/$(CONFIG)/writes_per_rpc_test || ( echo test writes_per_rpc_test failed ; exit 1 )
+	$(E) "[RUN] Testing resolver_component_tests_runner_invoker_unsecure"
+	$(Q) $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure || ( echo test resolver_component_tests_runner_invoker_unsecure failed ; exit 1 )
+	$(E) "[RUN] Testing resolver_component_tests_runner_invoker"
+	$(Q) $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker || ( echo test resolver_component_tests_runner_invoker failed ; exit 1 )
 
 flaky_test_cxx: buildtests_cxx
@@ -3190,7 +3206,6 @@ LIBGRPC_SRC = \
     src/core/plugin_registry/grpc_plugin_registry.c \
 
 PUBLIC_HEADERS_C += \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -3497,7 +3512,6 @@ LIBGRPC_CRONET_SRC = \
     src/core/plugin_registry/grpc_cronet_plugin_registry.c \
 
 PUBLIC_HEADERS_C += \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -3775,7 +3789,6 @@ LIBGRPC_TEST_UTIL_SRC = \
     src/core/ext/filters/http/server/http_server_filter.c \
 
 PUBLIC_HEADERS_C += \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -4027,7 +4040,6 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
     src/core/ext/filters/http/server/http_server_filter.c \
 
 PUBLIC_HEADERS_C += \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -4300,7 +4312,6 @@ LIBGRPC_UNSECURE_SRC = \
     src/core/plugin_registry/grpc_unsecure_plugin_registry.c \
 
 PUBLIC_HEADERS_C += \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -4589,7 +4600,6 @@ PUBLIC_HEADERS_CXX += \
     include/grpc/slice_buffer.h \
     include/grpc/status.h \
     include/grpc/support/workaround_list.h \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -4600,7 +4610,6 @@ PUBLIC_HEADERS_CXX += \
     include/grpc/impl/codegen/status.h \
     include/grpc++/impl/codegen/async_stream.h \
     include/grpc++/impl/codegen/async_unary_call.h \
-    include/grpc++/impl/codegen/byte_buffer.h \
     include/grpc++/impl/codegen/call.h \
     include/grpc++/impl/codegen/call_hook.h \
     include/grpc++/impl/codegen/channel_interface.h \
@@ -5084,7 +5093,6 @@ PUBLIC_HEADERS_CXX += \
     include/grpc/slice_buffer.h \
     include/grpc/status.h \
     include/grpc/support/workaround_list.h \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -5095,7 +5103,6 @@ PUBLIC_HEADERS_CXX += \
     include/grpc/impl/codegen/status.h \
     include/grpc++/impl/codegen/async_stream.h \
     include/grpc++/impl/codegen/async_unary_call.h \
-    include/grpc++/impl/codegen/byte_buffer.h \
     include/grpc++/impl/codegen/call.h \
     include/grpc++/impl/codegen/call_hook.h \
     include/grpc++/impl/codegen/channel_interface.h \
@@ -5449,7 +5456,6 @@ LIBGRPC++_TEST_UTIL_SRC = \
 PUBLIC_HEADERS_CXX += \
     include/grpc++/impl/codegen/async_stream.h \
     include/grpc++/impl/codegen/async_unary_call.h \
-    include/grpc++/impl/codegen/byte_buffer.h \
     include/grpc++/impl/codegen/call.h \
     include/grpc++/impl/codegen/call_hook.h \
     include/grpc++/impl/codegen/channel_interface.h \
@@ -5477,7 +5483,6 @@ PUBLIC_HEADERS_CXX += \
     include/grpc++/impl/codegen/stub_options.h \
     include/grpc++/impl/codegen/sync_stream.h \
     include/grpc++/impl/codegen/time.h \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -5566,7 +5571,6 @@ LIBGRPC++_TEST_UTIL_UNSECURE_SRC = \
 PUBLIC_HEADERS_CXX += \
     include/grpc++/impl/codegen/async_stream.h \
     include/grpc++/impl/codegen/async_unary_call.h \
-    include/grpc++/impl/codegen/byte_buffer.h \
     include/grpc++/impl/codegen/call.h \
     include/grpc++/impl/codegen/call_hook.h \
     include/grpc++/impl/codegen/channel_interface.h \
@@ -5594,7 +5598,6 @@ PUBLIC_HEADERS_CXX += \
     include/grpc++/impl/codegen/stub_options.h \
     include/grpc++/impl/codegen/sync_stream.h \
     include/grpc++/impl/codegen/time.h \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -5802,7 +5805,6 @@ PUBLIC_HEADERS_CXX += \
     include/grpc/slice_buffer.h \
     include/grpc/status.h \
     include/grpc/support/workaround_list.h \
-    include/grpc/impl/codegen/byte_buffer.h \
     include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
@@ -5813,7 +5815,6 @@ PUBLIC_HEADERS_CXX += \
     include/grpc/impl/codegen/status.h \
     include/grpc++/impl/codegen/async_stream.h \
     include/grpc++/impl/codegen/async_unary_call.h \
-    include/grpc++/impl/codegen/byte_buffer.h \
     include/grpc++/impl/codegen/call.h \
     include/grpc++/impl/codegen/call_hook.h \
     include/grpc++/impl/codegen/channel_interface.h \
@@ -19495,6 +19496,178 @@ ifneq ($(NO_DEPS),true)
 endif
+
+
+RESOLVER_COMPONENT_TEST_UNSECURE_SRC = \
+    test/cpp/naming/resolver_component_test.cc \
+
+RESOLVER_COMPONENT_TEST_UNSECURE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(RESOLVER_COMPONENT_TEST_UNSECURE_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure: openssl_dep_error
+
+else
+
+ifeq ($(NO_PROTOBUF),true)
+
+# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
+
+$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure: protobuf_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure: $(PROTOBUF_DEP) $(RESOLVER_COMPONENT_TEST_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
+	$(E) "[LD] Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(RESOLVER_COMPONENT_TEST_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/resolver_component_test_unsecure
+
+endif
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/cpp/naming/resolver_component_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
+
+deps_resolver_component_test_unsecure: $(RESOLVER_COMPONENT_TEST_UNSECURE_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(RESOLVER_COMPONENT_TEST_UNSECURE_OBJS:.o=.dep)
+endif
+endif
+
+
+RESOLVER_COMPONENT_TEST_SRC = \
+    test/cpp/naming/resolver_component_test.cc \
+
+RESOLVER_COMPONENT_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(RESOLVER_COMPONENT_TEST_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/resolver_component_test: openssl_dep_error
+
+else
+
+ifeq ($(NO_PROTOBUF),true)
+
+# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
+
+$(BINDIR)/$(CONFIG)/resolver_component_test: protobuf_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/resolver_component_test: $(PROTOBUF_DEP) $(RESOLVER_COMPONENT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
+	$(E) "[LD] Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(RESOLVER_COMPONENT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/resolver_component_test
+
+endif
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/cpp/naming/resolver_component_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
+
+deps_resolver_component_test: $(RESOLVER_COMPONENT_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(RESOLVER_COMPONENT_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
+RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_SRC = \
+    test/cpp/naming/resolver_component_tests_runner_invoker.cc \
+
+RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure: openssl_dep_error
+
+else
+
+ifeq ($(NO_PROTOBUF),true)
+
+# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
+
+$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure: protobuf_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure: $(PROTOBUF_DEP) $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
+	$(E) "[LD] Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure
+
+endif
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/cpp/naming/resolver_component_tests_runner_invoker.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
+
+deps_resolver_component_tests_runner_invoker_unsecure: $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_UNSECURE_OBJS:.o=.dep)
+endif
+endif
+
+
+RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_SRC = \
+    test/cpp/naming/resolver_component_tests_runner_invoker.cc \
+
+RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker: openssl_dep_error
+
+else
+
+ifeq ($(NO_PROTOBUF),true)
+
+# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
+
+$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker: protobuf_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker: $(PROTOBUF_DEP) $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
+	$(E) "[LD] Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker
+
+endif
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/cpp/naming/resolver_component_tests_runner_invoker.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a
+
+deps_resolver_component_tests_runner_invoker: $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(RESOLVER_COMPONENT_TESTS_RUNNER_INVOKER_OBJS:.o=.dep)
+endif
+endif
+
 
 API_FUZZER_ONE_ENTRY_SRC = \
     test/core/end2end/fuzzers/api_fuzzer.c \
     test/core/util/one_corpus_entry_fuzzer.c \

bazel/generate_cc.bzl (2)

@@ -55,7 +55,7 @@ def generate_cc_impl(ctx):
     arguments = arguments,
   )
-  return struct(files=set(out_files))
+  return struct(files=depset(out_files))
 
 _generate_cc = rule(
   attrs = {

bazel/grpc_build_system.bzl (16)

@@ -106,6 +106,22 @@ def grpc_sh_test(name, srcs, args = [], data = []):
     args = args,
     data = data)
 
+def grpc_sh_binary(name, srcs, data = []):
+  native.sh_test(
+    name = name,
+    srcs = srcs,
+    data = data)
+
+def grpc_py_binary(name, srcs, data = [], deps = []):
+  if name == "test_dns_server":
+    # TODO: allow running test_dns_server in oss bazel test suite
+    deps = []
+  native.py_binary(
+    name = name,
+    srcs = srcs,
+    data = data,
+    deps = deps)
+
 def grpc_package(name, visibility = "private", features = []):
   if visibility == "tests":
     visibility = ["//test:__subpackages__"]

build.yaml (2)

@@ -502,7 +502,6 @@ filegroups:
   - grpc_deadline_filter
 - name: grpc_codegen
   public_headers:
-  - include/grpc/impl/codegen/byte_buffer.h
   - include/grpc/impl/codegen/byte_buffer_reader.h
   - include/grpc/impl/codegen/compression_types.h
   - include/grpc/impl/codegen/connectivity_state.h
@@ -970,7 +969,6 @@ filegroups:
   public_headers:
   - include/grpc++/impl/codegen/async_stream.h
   - include/grpc++/impl/codegen/async_unary_call.h
-  - include/grpc++/impl/codegen/byte_buffer.h
   - include/grpc++/impl/codegen/call.h
   - include/grpc++/impl/codegen/call_hook.h
   - include/grpc++/impl/codegen/channel_interface.h

config.m4 (2)

@@ -12,7 +12,7 @@ if test "$PHP_GRPC" != "no"; then
   LIBS="-lpthread $LIBS"
 
   CFLAGS="-Wall -Werror -Wno-parentheses-equality -Wno-unused-value -std=c11"
-  CXXFLAGS="-std=c++11"
+  CXXFLAGS="-std=c++11 -fno-exceptions -fno-rtti"
   GRPC_SHARED_LIBADD="-lpthread $GRPC_SHARED_LIBADD"
   PHP_REQUIRE_CXX()
   PHP_ADD_LIBRARY(pthread)

doc/environment_variables.md (1)

@@ -50,6 +50,7 @@ some configuration as environment variables that can be set.
   - channel_stack_builder - traces information about channel stacks being built
   - executor - traces grpc's internal thread pool ('the executor')
   - http - traces state in the http2 transport engine
+  - http2_stream_state - traces all http2 stream state mutations.
   - http1 - traces HTTP/1.x operations performed by gRPC
   - inproc - traces the in-process transport
   - flowctl - traces http2 flow control
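As an aside, the tracer this diff adds is enabled the same way as the existing ones, via the GRPC_TRACE environment variable before gRPC initializes. A minimal sketch, assuming the variable is set from within the process itself (setenv-in-main and the choice of tracer are illustrative, not part of the diff):

    #include <cstdlib>      // setenv (POSIX)
    #include <grpc/grpc.h>

    int main() {
      // GRPC_TRACE is consulted when gRPC initializes, so it must be set
      // before grpc_init(); "http2_stream_state" is the tracer added here.
      setenv("GRPC_TRACE", "http2_stream_state", 1 /* overwrite */);
      grpc_init();
      // ... run client/server work; stream state mutations are now logged ...
      grpc_shutdown();
      return 0;
    }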

doc/service_config.md (5)

@@ -24,10 +24,7 @@ The service config is a JSON string of the following form:
     // opposed to backend addresses), gRPC will use grpclb (see
     // https://github.com/grpc/grpc/blob/master/doc/load-balancing.md),
     // regardless of what LB policy is requested either here or via the
-    // client API. However, if the resolver returns at least one backend
-    // address in addition to the balancer address(es), the client may fall
-    // back to the requested policy if it is unable to reach any of the
-    // grpclb load balancers.
+    // client API.
     'loadBalancingPolicy': string,
 
     // Per-method configuration. Optional.

examples/cpp/helloworld/greeter_async_client.cc (12)

@@ -60,11 +60,15 @@ class GreeterClient {
     // Storage for the status of the RPC upon completion.
     Status status;
 
-    // stub_->AsyncSayHello() performs the RPC call, returning an instance we
-    // store in "rpc". Because we are using the asynchronous API, we need to
-    // hold on to the "rpc" instance in order to get updates on the ongoing RPC.
+    // stub_->PrepareAsyncSayHello() creates an RPC object, returning
+    // an instance to store in "call" but does not actually start the RPC
+    // Because we are using the asynchronous API, we need to hold on to
+    // the "call" instance in order to get updates on the ongoing RPC.
     std::unique_ptr<ClientAsyncResponseReader<HelloReply> > rpc(
-        stub_->AsyncSayHello(&context, request, &cq));
+        stub_->PrepareAsyncSayHello(&context, request, &cq));
+
+    // StartCall initiates the RPC call
+    rpc->StartCall();
 
     // Request that, upon completion of the RPC, "reply" be updated with the
     // server's response; "status" with the indication of whether the operation

examples/cpp/helloworld/greeter_async_client2.cc (14)

@@ -49,11 +49,15 @@ class GreeterClient {
     // Call object to store rpc data
     AsyncClientCall* call = new AsyncClientCall;
 
-    // stub_->AsyncSayHello() performs the RPC call, returning an instance to
-    // store in "call". Because we are using the asynchronous API, we need to
-    // hold on to the "call" instance in order to get updates on the ongoing RPC.
-    call->response_reader = stub_->AsyncSayHello(&call->context, request, &cq_);
+    // stub_->PrepareAsyncSayHello() creates an RPC object, returning
+    // an instance to store in "call" but does not actually start the RPC
+    // Because we are using the asynchronous API, we need to hold on to
+    // the "call" instance in order to get updates on the ongoing RPC.
+    call->response_reader =
+        stub_->PrepareAsyncSayHello(&call->context, request, &cq_);
+
+    // StartCall initiates the RPC call
+    call->response_reader->StartCall();
 
     // Request that, upon completion of the RPC, "reply" be updated with the
     // server's response; "status" with the indication of whether the operation

gRPC-Core.podspec (1)

@@ -141,7 +141,6 @@ Pod::Spec.new do |s|
                       'include/grpc/impl/codegen/sync_generic.h',
                       'include/grpc/impl/codegen/sync_posix.h',
                       'include/grpc/impl/codegen/sync_windows.h',
-                      'include/grpc/impl/codegen/byte_buffer.h',
                       'include/grpc/impl/codegen/byte_buffer_reader.h',
                       'include/grpc/impl/codegen/compression_types.h',
                       'include/grpc/impl/codegen/connectivity_state.h',

grpc.def (20)

@@ -1,4 +1,14 @@
 EXPORTS
+    grpc_raw_byte_buffer_create
+    grpc_raw_compressed_byte_buffer_create
+    grpc_byte_buffer_copy
+    grpc_byte_buffer_length
+    grpc_byte_buffer_destroy
+    grpc_byte_buffer_reader_init
+    grpc_byte_buffer_reader_destroy
+    grpc_byte_buffer_reader_next
+    grpc_byte_buffer_reader_readall
+    grpc_raw_byte_buffer_from_reader
     census_initialize
     census_shutdown
     census_supported
@@ -135,16 +145,6 @@ EXPORTS
     grpc_server_add_secure_http2_port
     grpc_call_set_credentials
     grpc_server_credentials_set_auth_metadata_processor
-    grpc_raw_byte_buffer_create
-    grpc_raw_compressed_byte_buffer_create
-    grpc_byte_buffer_copy
-    grpc_byte_buffer_length
-    grpc_byte_buffer_destroy
-    grpc_byte_buffer_reader_init
-    grpc_byte_buffer_reader_destroy
-    grpc_byte_buffer_reader_next
-    grpc_byte_buffer_reader_readall
-    grpc_raw_byte_buffer_from_reader
     grpc_slice_ref
     grpc_slice_unref
     grpc_slice_copy
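These reordered exports are the core byte-buffer API that this commit moves out of codegen. For context, a brief round-trip through that API; this is a hedged sketch, not code from the diff:

    #include <grpc/byte_buffer.h>
    #include <grpc/byte_buffer_reader.h>
    #include <grpc/slice.h>

    // Wrap a string in a raw byte buffer, then read its contents back out.
    void ByteBufferRoundTrip() {
      grpc_slice slice = grpc_slice_from_copied_string("hello");
      grpc_byte_buffer* bb = grpc_raw_byte_buffer_create(&slice, 1);
      grpc_byte_buffer_reader reader;
      if (grpc_byte_buffer_reader_init(&reader, bb)) {  // nonzero on success
        grpc_slice all = grpc_byte_buffer_reader_readall(&reader);
        // ... inspect GRPC_SLICE_START_PTR(all) / GRPC_SLICE_LENGTH(all) ...
        grpc_slice_unref(all);
        grpc_byte_buffer_reader_destroy(&reader);
      }
      grpc_byte_buffer_destroy(bb);
      grpc_slice_unref(slice);
    }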

grpc.gemspec (2)

@@ -29,6 +29,7 @@ Gem::Specification.new do |s|
   s.add_dependency 'google-protobuf', '~> 3.1'
   s.add_dependency 'googleauth', '~> 0.5.1'
+  s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0'
 
   s.add_development_dependency 'bundler', '~> 1.9'
   s.add_development_dependency 'facter', '~> 2.4'
@@ -145,7 +146,6 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/support/tmpfile_posix.c )
   s.files += %w( src/core/lib/support/tmpfile_windows.c )
   s.files += %w( src/core/lib/support/wrap_memcpy.c )
-  s.files += %w( include/grpc/impl/codegen/byte_buffer.h )
   s.files += %w( include/grpc/impl/codegen/byte_buffer_reader.h )
   s.files += %w( include/grpc/impl/codegen/compression_types.h )
   s.files += %w( include/grpc/impl/codegen/connectivity_state.h )

include/grpc++/generic/generic_stub.h (7)

@@ -44,6 +44,13 @@ class GenericStub final {
       ClientContext* context, const grpc::string& method, CompletionQueue* cq,
       void* tag);
 
+  /// Setup a call to a named method \a method using \a context, but don't
+  /// start it. Let it be started explicitly with StartCall and a tag.
+  /// The return value only indicates whether or not registration of the call
+  /// succeeded (i.e. the call won't proceed if the return value is nullptr).
+  std::unique_ptr<GenericClientAsyncReaderWriter> PrepareCall(
+      ClientContext* context, const grpc::string& method, CompletionQueue* cq);
+
  private:
   std::shared_ptr<ChannelInterface> channel_;
 };
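Since the new doc comment only states the contract, here is a usage sketch of the PrepareCall/StartCall pairing it describes; the method path and tag value are illustrative assumptions, not part of the diff:

    #include <memory>
    #include <grpc++/generic/generic_stub.h>
    #include <grpc++/grpc++.h>

    // Sketch: register a generic call without starting it, then start it
    // explicitly. "/helloworld.Greeter/SayHello" is a placeholder method.
    void PreparedGenericCall(std::shared_ptr<grpc::Channel> channel,
                             grpc::CompletionQueue* cq) {
      grpc::GenericStub stub(channel);
      grpc::ClientContext ctx;
      // PrepareCall only registers the call; nullptr means registration failed.
      std::unique_ptr<grpc::GenericClientAsyncReaderWriter> call =
          stub.PrepareCall(&ctx, "/helloworld.Greeter/SayHello", cq);
      if (call == nullptr) return;
      // StartCall actually initiates the RPC; the tag surfaces on *cq once
      // the call is underway.
      call->StartCall(reinterpret_cast<void*>(1));
    }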

@ -35,6 +35,11 @@ class ClientAsyncStreamingInterface {
public: public:
virtual ~ClientAsyncStreamingInterface() {} virtual ~ClientAsyncStreamingInterface() {}
/// Start the call that was set up by the constructor, but only if the
/// constructor was invoked through the "Prepare" API which doesn't actually
/// start the call
virtual void StartCall(void* tag) = 0;
/// Request notification of the reading of the initial metadata. Completion /// Request notification of the reading of the initial metadata. Completion
/// will be notified by \a tag on the associated completion queue. /// will be notified by \a tag on the associated completion queue.
/// This call is optional, but if it is used, it cannot be used concurrently /// This call is optional, but if it is used, it cannot be used concurrently
@ -156,20 +161,22 @@ class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface,
template <class R> template <class R>
class ClientAsyncReader final : public ClientAsyncReaderInterface<R> { class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
public: public:
/// Create a stream and write the first request out. /// Create a stream object.
/// Write the first request out if \a start is set.
/// \a tag will be notified on \a cq when the call has been started and /// \a tag will be notified on \a cq when the call has been started and
/// \a request has been written out. /// \a request has been written out. If \a start is not set, \a tag must be
/// nullptr and the actual call must be initiated by StartCall
/// Note that \a context will be used to fill in custom initial metadata /// Note that \a context will be used to fill in custom initial metadata
/// used to send to the server when starting the call. /// used to send to the server when starting the call.
template <class W> template <class W>
static ClientAsyncReader* Create(ChannelInterface* channel, static ClientAsyncReader* Create(ChannelInterface* channel,
CompletionQueue* cq, const RpcMethod& method, CompletionQueue* cq, const RpcMethod& method,
ClientContext* context, const W& request, ClientContext* context, const W& request,
void* tag) { bool start, void* tag) {
Call call = channel->CreateCall(method, context, cq); Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc( return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncReader))) call.call(), sizeof(ClientAsyncReader)))
ClientAsyncReader(call, context, request, tag); ClientAsyncReader(call, context, request, start, tag);
} }
// always allocated against a call arena, no memory free required // always allocated against a call arena, no memory free required
@@ -177,6 +184,12 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
     assert(size == sizeof(ClientAsyncReader));
   }
 
+  void StartCall(void* tag) override {
+    assert(!started_);
+    started_ = true;
+    StartCallInternal(tag);
+  }
+
   /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata
   /// method for semantics.
   ///
@@ -186,6 +199,7 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
   /// calling code can access the received metadata through the
   /// \a ClientContext.
   void ReadInitialMetadata(void* tag) override {
+    assert(started_);
     GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
 
     meta_ops_.set_output_tag(tag);
@@ -194,6 +208,7 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
   }
 
   void Read(R* msg, void* tag) override {
+    assert(started_);
     read_ops_.set_output_tag(tag);
     if (!context_->initial_metadata_received_) {
       read_ops_.RecvInitialMetadata(context_);
@@ -208,6 +223,7 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
   ///   - the \a ClientContext associated with this call is updated with
   ///     possible initial and trailing metadata received from the server.
   void Finish(Status* status, void* tag) override {
+    assert(started_);
     finish_ops_.set_output_tag(tag);
     if (!context_->initial_metadata_received_) {
       finish_ops_.RecvInitialMetadata(context_);
@@ -219,19 +235,28 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
  private:
   template <class W>
   ClientAsyncReader(Call call, ClientContext* context, const W& request,
-                    void* tag)
-      : context_(context), call_(call) {
-    init_ops_.set_output_tag(tag);
-    init_ops_.SendInitialMetadata(context->send_initial_metadata_,
-                                  context->initial_metadata_flags());
+                    bool start, void* tag)
+      : context_(context), call_(call), started_(start) {
     // TODO(ctiller): don't assert
     GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok());
     init_ops_.ClientSendClose();
+    if (start) {
+      StartCallInternal(tag);
+    } else {
+      assert(tag == nullptr);
+    }
+  }
+
+  void StartCallInternal(void* tag) {
+    init_ops_.SendInitialMetadata(context_->send_initial_metadata_,
+                                  context_->initial_metadata_flags());
+    init_ops_.set_output_tag(tag);
     call_.PerformOps(&init_ops_);
   }
 
   ClientContext* context_;
   Call call_;
+  bool started_;
   CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
       init_ops_;
   CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
@@ -257,9 +282,12 @@ class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
 template <class W>
 class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
  public:
-  /// Create a stream and write the first request out.
+  /// Create a stream object.
+  /// Start the RPC if \a start is set
   /// \a tag will be notified on \a cq when the call has been started (i.e.
   /// initial metadata sent) and \a request has been written out.
+  /// If \a start is not set, \a tag must be nullptr and the actual call
+  /// must be initiated by StartCall
   /// Note that \a context will be used to fill in custom initial metadata
   /// used to send to the server when starting the call.
   /// \a response will be filled in with the single expected response
@@ -269,11 +297,11 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
   static ClientAsyncWriter* Create(ChannelInterface* channel,
                                    CompletionQueue* cq, const RpcMethod& method,
                                    ClientContext* context, R* response,
-                                   void* tag) {
+                                   bool start, void* tag) {
     Call call = channel->CreateCall(method, context, cq);
     return new (g_core_codegen_interface->grpc_call_arena_alloc(
         call.call(), sizeof(ClientAsyncWriter)))
-        ClientAsyncWriter(call, context, response, tag);
+        ClientAsyncWriter(call, context, response, start, tag);
   }
 
   // always allocated against a call arena, no memory free required
@@ -281,6 +309,12 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
     assert(size == sizeof(ClientAsyncWriter));
   }
 
+  void StartCall(void* tag) override {
+    assert(!started_);
+    started_ = true;
+    StartCallInternal(tag);
+  }
+
   /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method for
   /// semantics.
   ///
@@ -289,6 +323,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
   /// associated with this call is updated, and the calling code can access
   /// the received metadata through the \a ClientContext.
   void ReadInitialMetadata(void* tag) override {
+    assert(started_);
     GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
 
     meta_ops_.set_output_tag(tag);
@@ -297,6 +332,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
   }
 
   void Write(const W& msg, void* tag) override {
+    assert(started_);
     write_ops_.set_output_tag(tag);
     // TODO(ctiller): don't assert
     GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
@@ -304,6 +340,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
   }
 
   void Write(const W& msg, WriteOptions options, void* tag) override {
+    assert(started_);
     write_ops_.set_output_tag(tag);
     if (options.is_last_message()) {
       options.set_buffer_hint();
@@ -315,6 +352,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
   }
 
   void WritesDone(void* tag) override {
+    assert(started_);
     write_ops_.set_output_tag(tag);
     write_ops_.ClientSendClose();
     call_.PerformOps(&write_ops_);
@@ -328,6 +366,7 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
   ///   - attempts to fill in the \a response parameter passed to this class's
   ///     constructor with the server's response message.
   void Finish(Status* status, void* tag) override {
+    assert(started_);
     finish_ops_.set_output_tag(tag);
     if (!context_->initial_metadata_received_) {
       finish_ops_.RecvInitialMetadata(context_);
@@ -338,25 +377,32 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
  private:
   template <class R>
-  ClientAsyncWriter(Call call, ClientContext* context, R* response, void* tag)
-      : context_(context), call_(call) {
+  ClientAsyncWriter(Call call, ClientContext* context, R* response, bool start,
+                    void* tag)
+      : context_(context), call_(call), started_(start) {
     finish_ops_.RecvMessage(response);
     finish_ops_.AllowNoMessage();
-    // if corked bit is set in context, we buffer up the initial metadata to
-    // coalesce with later message to be sent. No op is performed.
-    if (context_->initial_metadata_corked_) {
-      write_ops_.SendInitialMetadata(context->send_initial_metadata_,
-                                     context->initial_metadata_flags());
+    if (start) {
+      StartCallInternal(tag);
     } else {
+      assert(tag == nullptr);
+    }
+  }
+
+  void StartCallInternal(void* tag) {
+    write_ops_.SendInitialMetadata(context_->send_initial_metadata_,
+                                   context_->initial_metadata_flags());
+    // if corked bit is set in context, we just keep the initial metadata
+    // buffered up to coalesce with later message send. No op is performed.
+    if (!context_->initial_metadata_corked_) {
       write_ops_.set_output_tag(tag);
-      write_ops_.SendInitialMetadata(context->send_initial_metadata_,
-                                     context->initial_metadata_flags());
       call_.PerformOps(&write_ops_);
     }
   }
 
   ClientContext* context_;
   Call call_;
+  bool started_;
   CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
   CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
       write_ops_;
@@ -388,20 +434,23 @@ template <class W, class R>
 class ClientAsyncReaderWriter final
     : public ClientAsyncReaderWriterInterface<W, R> {
  public:
-  /// Create a stream and write the first request out.
+  /// Create a stream object.
+  /// Start the RPC request if \a start is set.
   /// \a tag will be notified on \a cq when the call has been started (i.e.
-  /// initial metadata sent).
+  /// initial metadata sent). If \a start is not set, \a tag must be
+  /// nullptr and the actual call must be initiated by StartCall
   /// Note that \a context will be used to fill in custom initial metadata
   /// used to send to the server when starting the call.
   static ClientAsyncReaderWriter* Create(ChannelInterface* channel,
                                          CompletionQueue* cq,
                                          const RpcMethod& method,
-                                         ClientContext* context, void* tag) {
+                                         ClientContext* context, bool start,
+                                         void* tag) {
     Call call = channel->CreateCall(method, context, cq);
     return new (g_core_codegen_interface->grpc_call_arena_alloc(
         call.call(), sizeof(ClientAsyncReaderWriter)))
-        ClientAsyncReaderWriter(call, context, tag);
+        ClientAsyncReaderWriter(call, context, start, tag);
   }
 
   // always allocated against a call arena, no memory free required
@@ -409,6 +458,12 @@ class ClientAsyncReaderWriter final
     assert(size == sizeof(ClientAsyncReaderWriter));
   }
 
+  void StartCall(void* tag) override {
+    assert(!started_);
+    started_ = true;
+    StartCallInternal(tag);
+  }
+
   /// See the \a ClientAsyncStreamingInterface.ReadInitialMetadata method
   /// for semantics of this method.
   ///
@@ -417,6 +472,7 @@ class ClientAsyncReaderWriter final
   /// is updated with it, and then the receiving initial metadata can
   /// be accessed through this \a ClientContext.
   void ReadInitialMetadata(void* tag) override {
+    assert(started_);
     GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
 
     meta_ops_.set_output_tag(tag);
@@ -425,6 +481,7 @@ class ClientAsyncReaderWriter final
   }
 
   void Read(R* msg, void* tag) override {
+    assert(started_);
     read_ops_.set_output_tag(tag);
     if (!context_->initial_metadata_received_) {
       read_ops_.RecvInitialMetadata(context_);
@@ -434,6 +491,7 @@ class ClientAsyncReaderWriter final
   }
 
   void Write(const W& msg, void* tag) override {
+    assert(started_);
     write_ops_.set_output_tag(tag);
     // TODO(ctiller): don't assert
     GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
@@ -441,6 +499,7 @@ class ClientAsyncReaderWriter final
   }
 
   void Write(const W& msg, WriteOptions options, void* tag) override {
+    assert(started_);
     write_ops_.set_output_tag(tag);
     if (options.is_last_message()) {
       options.set_buffer_hint();
@@ -452,6 +511,7 @@ class ClientAsyncReaderWriter final
   }
 
   void WritesDone(void* tag) override {
+    assert(started_);
     write_ops_.set_output_tag(tag);
     write_ops_.ClientSendClose();
     call_.PerformOps(&write_ops_);
@@ -462,6 +522,7 @@ class ClientAsyncReaderWriter final
   ///   - the \a ClientContext associated with this call is updated with
   ///     possible initial and trailing metadata sent from the server.
   void Finish(Status* status, void* tag) override {
+    assert(started_);
     finish_ops_.set_output_tag(tag);
     if (!context_->initial_metadata_received_) {
       finish_ops_.RecvInitialMetadata(context_);
@@ -471,23 +532,30 @@ class ClientAsyncReaderWriter final
   }
 
  private:
-  ClientAsyncReaderWriter(Call call, ClientContext* context, void* tag)
-      : context_(context), call_(call) {
-    if (context_->initial_metadata_corked_) {
-      // if corked bit is set in context, we buffer up the initial metadata to
-      // coalesce with later message to be sent. No op is performed.
-      write_ops_.SendInitialMetadata(context->send_initial_metadata_,
-                                     context->initial_metadata_flags());
+  ClientAsyncReaderWriter(Call call, ClientContext* context, bool start,
+                          void* tag)
+      : context_(context), call_(call), started_(start) {
+    if (start) {
+      StartCallInternal(tag);
     } else {
+      assert(tag == nullptr);
+    }
+  }
+
+  void StartCallInternal(void* tag) {
+    write_ops_.SendInitialMetadata(context_->send_initial_metadata_,
+                                   context_->initial_metadata_flags());
+    // if corked bit is set in context, we just keep the initial metadata
+    // buffered up to coalesce with later message send. No op is performed.
+    if (!context_->initial_metadata_corked_) {
       write_ops_.set_output_tag(tag);
-      write_ops_.SendInitialMetadata(context->send_initial_metadata_,
-                                     context->initial_metadata_flags());
       call_.PerformOps(&write_ops_);
     }
   }
 
   ClientContext* context_;
   Call call_;
+  bool started_;
   CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
   CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
   CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>

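For the streaming classes above, the new bool start parameter is what a
"prepare" entry point in generated code would pass as false. A minimal sketch
of the deferred-start flow, assuming a hypothetical generated stub with a
server-streaming method ListFeatures and a PrepareAsyncListFeatures wrapper
(the method and message names are illustrative):

    grpc::ClientContext ctx;
    grpc::CompletionQueue cq;
    FeatureRequest request;
    Feature feature;
    // Created with start == false inside: nothing is sent yet.
    std::unique_ptr<grpc::ClientAsyncReader<Feature>> reader =
        stub->PrepareAsyncListFeatures(&ctx, request, &cq);
    reader->StartCall(reinterpret_cast<void*>(1));  // metadata + request go out
    reader->Read(&feature, reinterpret_cast<void*>(2));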
@@ -32,13 +32,18 @@ namespace grpc {
 class CompletionQueue;
 extern CoreCodegenInterface* g_core_codegen_interface;
 
-/// An interface relevant for async client side unary RPCS (which send
+/// An interface relevant for async client side unary RPCs (which send
 /// one request message to a server and receive one response message).
 template <class R>
 class ClientAsyncResponseReaderInterface {
  public:
   virtual ~ClientAsyncResponseReaderInterface() {}
 
+  /// Start the call that was set up by the constructor, but only if the
+  /// constructor was invoked through the "Prepare" API which doesn't actually
+  /// start the call
+  virtual void StartCall() = 0;
+
   /// Request notification of the reading of initial metadata. Completion
   /// will be notified by \a tag on the associated completion queue.
   /// This call is optional, but if it is used, it cannot be used concurrently
@@ -70,9 +75,10 @@ template <class R>
 class ClientAsyncResponseReader final
     : public ClientAsyncResponseReaderInterface<R> {
  public:
-  /// Start a call and write the request out.
+  /// Start a call and write the request out if \a start is set.
   /// \a tag will be notified on \a cq when the call has been started (i.e.
   /// initial metadata sent) and \a request has been written out.
+  /// If \a start is not set, the actual call must be initiated by StartCall
   /// Note that \a context will be used to fill in custom initial metadata
   /// used to send to the server when starting the call.
   template <class W>
@ -80,11 +86,11 @@ class ClientAsyncResponseReader final
CompletionQueue* cq, CompletionQueue* cq,
const RpcMethod& method, const RpcMethod& method,
ClientContext* context, ClientContext* context,
const W& request) { const W& request, bool start) {
Call call = channel->CreateCall(method, context, cq); Call call = channel->CreateCall(method, context, cq);
return new (g_core_codegen_interface->grpc_call_arena_alloc( return new (g_core_codegen_interface->grpc_call_arena_alloc(
call.call(), sizeof(ClientAsyncResponseReader))) call.call(), sizeof(ClientAsyncResponseReader)))
ClientAsyncResponseReader(call, context, request); ClientAsyncResponseReader(call, context, request, start);
} }
// always allocated against a call arena, no memory free required // always allocated against a call arena, no memory free required
@@ -92,13 +98,20 @@ class ClientAsyncResponseReader final
     assert(size == sizeof(ClientAsyncResponseReader));
   }
 
+  void StartCall() override {
+    assert(!started_);
+    started_ = true;
+    StartCallInternal();
+  }
+
   /// See \a ClientAsyncResponseReaderInterface::ReadInitialMetadata for
   /// semantics.
   ///
   /// Side effect:
   ///   - the \a ClientContext associated with this call is updated with
   ///     possible initial and trailing metadata sent from the server.
-  void ReadInitialMetadata(void* tag) {
+  void ReadInitialMetadata(void* tag) override {
+    assert(started_);
     GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
 
     meta_buf.set_output_tag(tag);
@@ -111,7 +124,8 @@ class ClientAsyncResponseReader final
   /// Side effect:
   ///   - the \a ClientContext associated with this call is updated with
   ///     possible initial and trailing metadata sent from the server.
-  void Finish(R* msg, Status* status, void* tag) {
+  void Finish(R* msg, Status* status, void* tag) override {
+    assert(started_);
     finish_buf.set_output_tag(tag);
     if (!context_->initial_metadata_received_) {
       finish_buf.RecvInitialMetadata(context_);
@@ -125,15 +139,22 @@ class ClientAsyncResponseReader final
  private:
   ClientContext* const context_;
   Call call_;
+  bool started_;
 
   template <class W>
-  ClientAsyncResponseReader(Call call, ClientContext* context, const W& request)
-      : context_(context), call_(call) {
-    init_buf.SendInitialMetadata(context->send_initial_metadata_,
-                                 context->initial_metadata_flags());
+  ClientAsyncResponseReader(Call call, ClientContext* context, const W& request,
+                            bool start)
+      : context_(context), call_(call), started_(start) {
+    // Bind the metadata at time of StartCallInternal but set up the rest here
     // TODO(ctiller): don't assert
     GPR_CODEGEN_ASSERT(init_buf.SendMessage(request).ok());
     init_buf.ClientSendClose();
+    if (start) StartCallInternal();
+  }
+
+  void StartCallInternal() {
+    init_buf.SendInitialMetadata(context_->send_initial_metadata_,
+                                 context_->initial_metadata_flags());
     call_.PerformOps(&init_buf);
   }

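The unary flow works the same way. A sketch, assuming a generated Greeter
stub that exposes a PrepareAsyncSayHello wrapper passing start == false (the
service and message names are illustrative):

    grpc::ClientContext ctx;
    grpc::CompletionQueue cq;
    HelloReply reply;
    grpc::Status status;
    std::unique_ptr<grpc::ClientAsyncResponseReader<HelloReply>> rpc(
        stub->PrepareAsyncSayHello(&ctx, request, &cq));
    rpc->StartCall();  // without this, Finish would trip assert(started_)
    rpc->Finish(&reply, &status, reinterpret_cast<void*>(1));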
@@ -1,141 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPCXX_IMPL_CODEGEN_BYTE_BUFFER_H
-#define GRPCXX_IMPL_CODEGEN_BYTE_BUFFER_H
-
-#include <grpc/impl/codegen/byte_buffer.h>
-
-#include <grpc++/impl/codegen/config.h>
-#include <grpc++/impl/codegen/core_codegen_interface.h>
-#include <grpc++/impl/codegen/serialization_traits.h>
-#include <grpc++/impl/codegen/slice.h>
-#include <grpc++/impl/codegen/status.h>
-
-#include <vector>
-
-namespace grpc {
-
-template <class R>
-class CallOpRecvMessage;
-class MethodHandler;
-namespace internal {
-template <class M, class T>
-class MessageDeserializer;
-}
-
-/// A sequence of bytes.
-class ByteBuffer final {
- public:
-  /// Construct an empty buffer.
-  ByteBuffer() : buffer_(nullptr) {}
-
-  /// Construct buffer from \a slices, of which there are \a nslices.
-  ByteBuffer(const Slice* slices, size_t nslices);
-
-  /// Construct a byte buffer by referencing elements of existing buffer
-  /// \a buf. Wrapper of core function grpc_byte_buffer_copy
-  ByteBuffer(const ByteBuffer& buf);
-
-  ~ByteBuffer() {
-    if (buffer_) {
-      g_core_codegen_interface->grpc_byte_buffer_destroy(buffer_);
-    }
-  }
-
-  ByteBuffer& operator=(const ByteBuffer&);
-
-  /// Dump (read) the buffer contents into \a slices.
-  Status Dump(std::vector<Slice>* slices) const;
-
-  /// Remove all data.
-  void Clear() {
-    if (buffer_) {
-      g_core_codegen_interface->grpc_byte_buffer_destroy(buffer_);
-      buffer_ = nullptr;
-    }
-  }
-
-  /// Make a duplicate copy of the internals of this byte
-  /// buffer so that we have our own owned version of it.
-  /// bbuf.Duplicate(); is equivalent to bbuf=bbuf; but is actually readable
-  void Duplicate() {
-    buffer_ = g_core_codegen_interface->grpc_byte_buffer_copy(buffer_);
-  }
-
-  /// Forget underlying byte buffer without destroying
-  /// Use this only for un-owned byte buffers
-  void Release() { buffer_ = nullptr; }
-
-  /// Buffer size in bytes.
-  size_t Length() const;
-
-  /// Swap the state of *this and *other.
-  void Swap(ByteBuffer* other);
-
-  /// Is this ByteBuffer valid?
-  bool Valid() const { return (buffer_ != nullptr); }
-
- private:
-  friend class SerializationTraits<ByteBuffer, void>;
-  friend class CallOpSendMessage;
-  template <class R>
-  friend class CallOpRecvMessage;
-  friend class CallOpGenericRecvMessage;
-  friend class MethodHandler;
-  template <class M, class T>
-  friend class internal::MessageDeserializer;
-
-  // takes ownership
-  void set_buffer(grpc_byte_buffer* buf) {
-    if (buffer_) {
-      Clear();
-    }
-    buffer_ = buf;
-  }
-
-  grpc_byte_buffer* c_buffer() { return buffer_; }
-  grpc_byte_buffer** c_buffer_ptr() { return &buffer_; }
-
-  // DEPRECATED: Implicit conversion to transparently
-  // support deprecated SerializationTraits API
-  // No need to inline since deprecated
-  operator grpc_byte_buffer*();
-  operator const grpc_byte_buffer*() const;
-
-  grpc_byte_buffer* buffer_;
-};
-
-template <>
-class SerializationTraits<ByteBuffer, void> {
- public:
-  static Status Deserialize(const ByteBuffer& byte_buffer, ByteBuffer* dest) {
-    dest->set_buffer(byte_buffer.buffer_);
-    return Status::OK;
-  }
-  static Status Serialize(const ByteBuffer& source, ByteBuffer* buffer,
-                          bool* own_buffer) {
-    *buffer = source;
-    *own_buffer = true;
-    return Status::OK;
-  }
-};
-
-}  // namespace grpc
-
-#endif  // GRPCXX_IMPL_CODEGEN_BYTE_BUFFER_H

@@ -25,7 +25,6 @@
 #include <map>
 #include <memory>
 
-#include <grpc++/impl/codegen/byte_buffer.h>
 #include <grpc++/impl/codegen/call_hook.h>
 #include <grpc++/impl/codegen/client_context.h>
 #include <grpc++/impl/codegen/completion_queue_tag.h>
@@ -40,6 +39,8 @@
 #include <grpc/impl/codegen/compression_types.h>
 #include <grpc/impl/codegen/grpc_types.h>
 
+struct grpc_byte_buffer;
+
 namespace grpc {
 
 class ByteBuffer;
@@ -280,7 +281,7 @@ class CallOpSendInitialMetadata {
 
 class CallOpSendMessage {
  public:
-  CallOpSendMessage() : send_buf_() {}
+  CallOpSendMessage() : send_buf_(nullptr) {}
 
   /// Send \a message using \a options for the write. The \a options are cleared
   /// after use.
@@ -293,67 +294,33 @@ class CallOpSendMessage {
 
  protected:
   void AddOp(grpc_op* ops, size_t* nops) {
-    if (!send_buf_.Valid()) return;
+    if (send_buf_ == nullptr) return;
     grpc_op* op = &ops[(*nops)++];
     op->op = GRPC_OP_SEND_MESSAGE;
     op->flags = write_options_.flags();
     op->reserved = NULL;
-    op->data.send_message.send_message = send_buf_.c_buffer();
+    op->data.send_message.send_message = send_buf_;
     // Flags are per-message: clear them after use.
     write_options_.Clear();
   }
-  void FinishOp(bool* status) { send_buf_.Clear(); }
+  void FinishOp(bool* status) {
+    g_core_codegen_interface->grpc_byte_buffer_destroy(send_buf_);
+    send_buf_ = nullptr;
+  }
 
  private:
-  template <class M, class T = void>
-  class MessageSerializer;
-
-  ByteBuffer send_buf_;
+  grpc_byte_buffer* send_buf_;
   WriteOptions write_options_;
 };
 
-namespace internal {
-template <class T>
-T Example();
-}  // namespace internal
-
-template <class M>
-class CallOpSendMessage::MessageSerializer<
-    M, typename std::enable_if<std::is_same<
-           ::grpc::Status, decltype(SerializationTraits<M>::Serialize(
-                               internal::Example<const M&>(),
-                               internal::Example<grpc_byte_buffer**>(),
-                               internal::Example<bool*>()))>::value>::type> {
- public:
-  static Status SendMessageInternal(const M& message, ByteBuffer* bbuf,
-                                    bool* own_buf) {
-    return SerializationTraits<M>::Serialize(message, bbuf->c_buffer_ptr(),
-                                             own_buf);
-  }
-};
-
-template <class M>
-class CallOpSendMessage::MessageSerializer<
-    M, typename std::enable_if<std::is_same<
-           ::grpc::Status, decltype(SerializationTraits<M>::Serialize(
-                               internal::Example<const M&>(),
-                               internal::Example<::grpc::ByteBuffer*>(),
-                               internal::Example<bool*>()))>::value>::type> {
- public:
-  static Status SendMessageInternal(const M& message, ByteBuffer* bbuf,
-                                    bool* own_buf) {
-    return SerializationTraits<M>::Serialize(message, bbuf, own_buf);
-  }
-};
-
 template <class M>
 Status CallOpSendMessage::SendMessage(const M& message, WriteOptions options) {
   write_options_ = options;
   bool own_buf;
   Status result =
-      MessageSerializer<M>::SendMessageInternal(message, &send_buf_, &own_buf);
+      SerializationTraits<M>::Serialize(message, &send_buf_, &own_buf);
   if (!own_buf) {
-    send_buf_.Duplicate();
+    send_buf_ = g_core_codegen_interface->grpc_byte_buffer_copy(send_buf_);
   }
   return result;
 }
@@ -363,36 +330,6 @@ Status CallOpSendMessage::SendMessage(const M& message) {
   return SendMessage(message, WriteOptions());
 }
 
-namespace internal {
-template <class M, class T = void>
-class MessageDeserializer;
-
-template <class M>
-class MessageDeserializer<
-    M, typename std::enable_if<std::is_same<
-           ::grpc::Status, decltype(SerializationTraits<M>::Deserialize(
-                               internal::Example<const ::grpc::ByteBuffer&>(),
-                               internal::Example<M*>()))>::value>::type> {
- public:
-  static Status Deserialize(const ByteBuffer& bbuf, M* message) {
-    return SerializationTraits<M>::Deserialize(bbuf, message);
-  }
-};
-
-template <class M>
-class MessageDeserializer<
-    M, typename std::enable_if<std::is_same<
-           ::grpc::Status, decltype(SerializationTraits<M>::Deserialize(
-                               internal::Example<grpc_byte_buffer*>(),
-                               internal::Example<M*>()))>::value>::type> {
- public:
-  static Status Deserialize(const ByteBuffer& bbuf, M* message) {
-    return SerializationTraits<M>::Deserialize(
-        const_cast<ByteBuffer&>(bbuf).c_buffer(), message);
-  }
-};
-}  // namespace internal
-
 template <class R>
 class CallOpRecvMessage {
  public:
@@ -415,20 +352,18 @@ class CallOpRecvMessage {
     op->op = GRPC_OP_RECV_MESSAGE;
     op->flags = 0;
     op->reserved = NULL;
-    op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr();
+    op->data.recv_message.recv_message = &recv_buf_;
   }
 
   void FinishOp(bool* status) {
     if (message_ == nullptr) return;
-    if (recv_buf_.Valid()) {
+    if (recv_buf_) {
       if (*status) {
         got_message = *status =
-            internal::MessageDeserializer<R>::Deserialize(recv_buf_, message_)
-                .ok();
-        recv_buf_.Release();
+            SerializationTraits<R>::Deserialize(recv_buf_, message_).ok();
       } else {
         got_message = false;
-        recv_buf_.Clear();
+        g_core_codegen_interface->grpc_byte_buffer_destroy(recv_buf_);
       }
     } else {
       got_message = false;
@@ -441,14 +376,14 @@ class CallOpRecvMessage {
 
  private:
   R* message_;
-  ByteBuffer recv_buf_;
+  grpc_byte_buffer* recv_buf_;
   bool allow_not_getting_message_;
 };
 
 namespace CallOpGenericRecvMessageHelper {
 class DeserializeFunc {
  public:
-  virtual Status Deserialize(const ByteBuffer& buf) = 0;
+  virtual Status Deserialize(grpc_byte_buffer* buf) = 0;
   virtual ~DeserializeFunc() {}
 };
@@ -456,8 +391,8 @@ template <class R>
 class DeserializeFuncType final : public DeserializeFunc {
  public:
   DeserializeFuncType(R* message) : message_(message) {}
-  Status Deserialize(const ByteBuffer& buf) override {
-    return grpc::internal::MessageDeserializer<R>::Deserialize(buf, message_);
+  Status Deserialize(grpc_byte_buffer* buf) override {
+    return SerializationTraits<R>::Deserialize(buf, message_);
   }
 
   ~DeserializeFuncType() override {}
@@ -493,19 +428,18 @@ class CallOpGenericRecvMessage {
     op->op = GRPC_OP_RECV_MESSAGE;
     op->flags = 0;
     op->reserved = NULL;
-    op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr();
+    op->data.recv_message.recv_message = &recv_buf_;
   }
 
   void FinishOp(bool* status) {
     if (!deserialize_) return;
-    if (recv_buf_.Valid()) {
+    if (recv_buf_) {
       if (*status) {
         got_message = true;
         *status = deserialize_->Deserialize(recv_buf_).ok();
-        recv_buf_.Release();
       } else {
         got_message = false;
-        recv_buf_.Clear();
+        g_core_codegen_interface->grpc_byte_buffer_destroy(recv_buf_);
       }
     } else {
       got_message = false;
@@ -518,7 +452,7 @@ class CallOpGenericRecvMessage {
 
  private:
   std::unique_ptr<CallOpGenericRecvMessageHelper::DeserializeFunc> deserialize_;
-  ByteBuffer recv_buf_;
+  grpc_byte_buffer* recv_buf_;
   bool allow_not_getting_message_;
 };

@@ -19,7 +19,6 @@
 #ifndef GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H
 #define GRPCXX_IMPL_CODEGEN_METHOD_HANDLER_IMPL_H
 
-#include <grpc++/impl/codegen/byte_buffer.h>
 #include <grpc++/impl/codegen/core_codegen_interface.h>
 #include <grpc++/impl/codegen/rpc_service_method.h>
 #include <grpc++/impl/codegen/sync_stream.h>
@@ -38,8 +37,8 @@ class RpcMethodHandler : public MethodHandler {
   void RunHandler(const HandlerParameter& param) final {
     RequestType req;
-    Status status = internal::MessageDeserializer<RequestType>::Deserialize(
-        param.request, &req);
+    Status status =
+        SerializationTraits<RequestType>::Deserialize(param.request, &req);
     ResponseType rsp;
     if (status.ok()) {
       status = func_(service_, param.server_context, &req, &rsp);
@@ -124,8 +123,8 @@ class ServerStreamingHandler : public MethodHandler {
   void RunHandler(const HandlerParameter& param) final {
     RequestType req;
-    Status status = internal::MessageDeserializer<RequestType>::Deserialize(
-        param.request, &req);
+    Status status =
+        SerializationTraits<RequestType>::Deserialize(param.request, &req);
     if (status.ok()) {
       ServerWriter<ResponseType> writer(param.call, param.server_context);

@@ -25,11 +25,14 @@
 #include <memory>
 #include <vector>
 
-#include <grpc++/impl/codegen/byte_buffer.h>
 #include <grpc++/impl/codegen/config.h>
 #include <grpc++/impl/codegen/rpc_method.h>
 #include <grpc++/impl/codegen/status.h>
 
+extern "C" {
+struct grpc_byte_buffer;
+}
+
 namespace grpc {
 class ServerContext;
 class StreamContextInterface;
@@ -40,14 +43,11 @@ class MethodHandler {
   virtual ~MethodHandler() {}
   struct HandlerParameter {
     HandlerParameter(Call* c, ServerContext* context, grpc_byte_buffer* req)
-        : call(c), server_context(context) {
-      request.set_buffer(req);
-    }
-    ~HandlerParameter() { request.Release(); }
+        : call(c), server_context(context), request(req) {}
     Call* call;
     ServerContext* server_context;
-    // Handler required to destroy these contents
-    ByteBuffer request;
+    // Handler required to grpc_byte_buffer_destroy this
+    grpc_byte_buffer* request;
   };
   virtual void RunHandler(const HandlerParameter& param) = 0;
 };

@@ -26,24 +26,15 @@ namespace grpc {
 /// Used for hooking different message serialization API's into GRPC.
 /// Each SerializationTraits implementation must provide the following
 /// functions:
-/// 1. static Status Serialize(const Message& msg,
-///                            ByteBuffer* buffer,
-///                            bool* own_buffer);
-///    AND/OR
-///    static Status Serialize(const Message& msg,
-///                            grpc_byte_buffer** buffer,
-///                            bool* own_buffer);
-///    The former is preferred; the latter is deprecated
-///
-/// 2. static Status Deserialize(const ByteBuffer& buffer,
-///                              Message* msg);
-///    AND/OR
-///    static Status Deserialize(grpc_byte_buffer* buffer,
-///                              Message* msg);
-///    The former is preferred; the latter is deprecated
+///   static Status Serialize(const Message& msg,
+///                           grpc_byte_buffer** buffer,
+///                           bool* own_buffer);
+///   static Status Deserialize(grpc_byte_buffer* buffer,
+///                             Message* msg,
+///                             int max_receive_message_size);
 ///
-/// Serialize is required to convert message to a ByteBuffer, and
-/// return that byte buffer through *buffer. *own_buffer should
+/// Serialize is required to convert message to a grpc_byte_buffer, and
+/// to store a pointer to that byte buffer at *buffer. *own_buffer should
 /// be set to true if the caller owns said byte buffer, or false if
 /// ownership is retained elsewhere.
 ///

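A custom type can hook into this API by specializing SerializationTraits
against the grpc_byte_buffer signatures shown above. A minimal sketch for
std::string (an illustration, not part of this merge; error handling is kept
deliberately thin, the overload without max_receive_message_size is used, and
the received buffer is destroyed once consumed, matching how the call ops in
this merge dispose of buffers):

    namespace grpc {
    template <>
    class SerializationTraits<std::string, void> {
     public:
      static Status Serialize(const std::string& msg, grpc_byte_buffer** bp,
                              bool* own_buffer) {
        grpc_slice s = grpc_slice_from_copied_buffer(msg.data(), msg.size());
        *bp = grpc_raw_byte_buffer_create(&s, 1);  // takes its own ref on s
        grpc_slice_unref(s);
        *own_buffer = true;
        return Status::OK;
      }
      static Status Deserialize(grpc_byte_buffer* buffer, std::string* msg) {
        if (buffer == nullptr) {
          return Status(StatusCode::INTERNAL, "no payload");
        }
        grpc_byte_buffer_reader reader;
        grpc_byte_buffer_reader_init(&reader, buffer);
        grpc_slice all = grpc_byte_buffer_reader_readall(&reader);
        msg->assign(reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(all)),
                    GRPC_SLICE_LENGTH(all));
        grpc_slice_unref(all);
        grpc_byte_buffer_reader_destroy(&reader);
        grpc_byte_buffer_destroy(buffer);  // deserializer owns the buffer
        return Status::OK;
      }
    };
    }  // namespace grpc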
@@ -19,89 +19,11 @@
 #ifndef GRPCXX_IMPL_CODEGEN_SLICE_H
 #define GRPCXX_IMPL_CODEGEN_SLICE_H
 
-#include <grpc++/impl/codegen/config.h>
 #include <grpc++/impl/codegen/core_codegen_interface.h>
 #include <grpc++/impl/codegen/string_ref.h>
 
-#include <grpc/impl/codegen/slice.h>
-
 namespace grpc {
 
-/// A wrapper around \a grpc_slice.
-///
-/// A slice represents a contiguous reference counted array of bytes.
-/// It is cheap to take references to a slice, and it is cheap to create a
-/// slice pointing to a subset of another slice.
-class Slice final {
- public:
-  /// Construct an empty slice.
-  Slice();
-  /// Destructor - drops one reference.
-  ~Slice();
-
-  enum AddRef { ADD_REF };
-  /// Construct a slice from \a slice, adding a reference.
-  Slice(grpc_slice slice, AddRef);
-
-  enum StealRef { STEAL_REF };
-  /// Construct a slice from \a slice, stealing a reference.
-  Slice(grpc_slice slice, StealRef);
-
-  /// Allocate a slice of specified size
-  Slice(size_t len);
-
-  /// Construct a slice from a copied buffer
-  Slice(const void* buf, size_t len);
-
-  /// Construct a slice from a copied string
-  Slice(const grpc::string& str);
-
-  enum StaticSlice { STATIC_SLICE };
-  /// Construct a slice from a static buffer
-  Slice(const void* buf, size_t len, StaticSlice);
-
-  /// Copy constructor, adds a reference.
-  Slice(const Slice& other);
-
-  /// Assignment, reference count is unchanged.
-  Slice& operator=(Slice other) {
-    std::swap(slice_, other.slice_);
-    return *this;
-  }
-
-  /// Create a slice pointing at some data. Calls malloc to allocate a refcount
-  /// for the object, and arranges that destroy will be called with the
-  /// user data pointer passed in at destruction. Can be the same as buf or
-  /// different (e.g., if data is part of a larger structure that must be
-  /// destroyed when the data is no longer needed)
-  Slice(void* buf, size_t len, void (*destroy)(void*), void* user_data);
-
-  /// Specialization of above for common case where buf == user_data
-  Slice(void* buf, size_t len, void (*destroy)(void*))
-      : Slice(buf, len, destroy, buf) {}
-
-  /// Similar to the above but has a destroy that also takes slice length
-  Slice(void* buf, size_t len, void (*destroy)(void*, size_t));
-
-  /// Byte size.
-  size_t size() const { return GRPC_SLICE_LENGTH(slice_); }
-
-  /// Raw pointer to the beginning (first element) of the slice.
-  const uint8_t* begin() const { return GRPC_SLICE_START_PTR(slice_); }
-
-  /// Raw pointer to the end (one byte \em past the last element) of the slice.
-  const uint8_t* end() const { return GRPC_SLICE_END_PTR(slice_); }
-
-  /// Raw C slice. Caller needs to call grpc_slice_unref when done.
-  grpc_slice c_slice() const;
-
- private:
-  friend class ByteBuffer;
-
-  grpc_slice slice_;
-};
-
 inline grpc::string_ref StringRefFromSlice(const grpc_slice* slice) {
   return grpc::string_ref(
       reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(*slice)),

@@ -19,7 +19,6 @@
 #ifndef GRPCXX_SUPPORT_BYTE_BUFFER_H
 #define GRPCXX_SUPPORT_BYTE_BUFFER_H
 
-#include <grpc++/impl/codegen/byte_buffer.h>
 #include <grpc++/impl/serialization_traits.h>
 #include <grpc++/support/config.h>
 #include <grpc++/support/slice.h>
@@ -28,4 +27,71 @@
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
 
+#include <vector>
+
+namespace grpc {
+
+/// A sequence of bytes.
+class ByteBuffer final {
+ public:
+  /// Construct an empty buffer.
+  ByteBuffer() : buffer_(nullptr) {}
+
+  /// Construct buffer from \a slices, of which there are \a nslices.
+  ByteBuffer(const Slice* slices, size_t nslices);
+
+  /// Construct a byte buffer by referencing elements of existing buffer
+  /// \a buf. Wrapper of core function grpc_byte_buffer_copy
+  ByteBuffer(const ByteBuffer& buf);
+
+  ~ByteBuffer();
+
+  ByteBuffer& operator=(const ByteBuffer&);
+
+  /// Dump (read) the buffer contents into \a slices.
+  Status Dump(std::vector<Slice>* slices) const;
+
+  /// Remove all data.
+  void Clear();
+
+  /// Buffer size in bytes.
+  size_t Length() const;
+
+  /// Swap the state of *this and *other.
+  void Swap(ByteBuffer* other);
+
+ private:
+  friend class SerializationTraits<ByteBuffer, void>;
+
+  // takes ownership
+  void set_buffer(grpc_byte_buffer* buf) {
+    if (buffer_) {
+      Clear();
+    }
+    buffer_ = buf;
+  }
+
+  // For \a SerializationTraits's usage.
+  grpc_byte_buffer* buffer() const { return buffer_; }
+
+  grpc_byte_buffer* buffer_;
+};
+
+template <>
+class SerializationTraits<ByteBuffer, void> {
+ public:
+  static Status Deserialize(grpc_byte_buffer* byte_buffer, ByteBuffer* dest) {
+    dest->set_buffer(byte_buffer);
+    return Status::OK;
+  }
+  static Status Serialize(const ByteBuffer& source, grpc_byte_buffer** buffer,
+                          bool* own_buffer) {
+    *buffer = grpc_byte_buffer_copy(source.buffer());
+    *own_buffer = true;
+    return Status::OK;
+  }
+};
+
+}  // namespace grpc
+
 #endif  // GRPCXX_SUPPORT_BYTE_BUFFER_H

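With the ByteBuffer class now living in this support header, typical usage is
a straightforward round trip (a short sketch; the string content is
illustrative):

    grpc::Slice slice("hello world");     // copies the string into a slice
    grpc::ByteBuffer buffer(&slice, 1);   // one-slice buffer
    size_t len = buffer.Length();         // 11 bytes here
    std::vector<grpc::Slice> contents;
    grpc::Status s = buffer.Dump(&contents);  // read the bytes back out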
@@ -64,12 +64,6 @@ class ChannelArguments {
   /// Set the compression algorithm for the channel.
   void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
 
-  /// Set the grpclb fallback timeout (in ms) for the channel. If this amount
-  /// of time has passed but we have not gotten any non-empty \a serverlist from
-  /// the balancer, we will fall back to use the backend address(es) returned by
-  /// the resolver.
-  void SetGrpclbFallbackTimeout(int fallback_timeout);
-
   /// Set the socket mutator for the channel.
   void SetSocketMutator(grpc_socket_mutator* mutator);

@@ -19,8 +19,86 @@
 #ifndef GRPCXX_SUPPORT_SLICE_H
 #define GRPCXX_SUPPORT_SLICE_H
 
-#include <grpc++/impl/codegen/slice.h>
 #include <grpc++/support/config.h>
 #include <grpc/slice.h>
 
+namespace grpc {
+
+/// A wrapper around \a grpc_slice.
+///
+/// A slice represents a contiguous reference counted array of bytes.
+/// It is cheap to take references to a slice, and it is cheap to create a
+/// slice pointing to a subset of another slice.
+class Slice final {
+ public:
+  /// Construct an empty slice.
+  Slice();
+  /// Destructor - drops one reference.
+  ~Slice();
+
+  enum AddRef { ADD_REF };
+  /// Construct a slice from \a slice, adding a reference.
+  Slice(grpc_slice slice, AddRef);
+
+  enum StealRef { STEAL_REF };
+  /// Construct a slice from \a slice, stealing a reference.
+  Slice(grpc_slice slice, StealRef);
+
+  /// Allocate a slice of specified size
+  Slice(size_t len);
+
+  /// Construct a slice from a copied buffer
+  Slice(const void* buf, size_t len);
+
+  /// Construct a slice from a copied string
+  Slice(const grpc::string& str);
+
+  enum StaticSlice { STATIC_SLICE };
+  /// Construct a slice from a static buffer
+  Slice(const void* buf, size_t len, StaticSlice);
+
+  /// Copy constructor, adds a reference.
+  Slice(const Slice& other);
+
+  /// Assignment, reference count is unchanged.
+  Slice& operator=(Slice other) {
+    std::swap(slice_, other.slice_);
+    return *this;
+  }
+
+  /// Create a slice pointing at some data. Calls malloc to allocate a refcount
+  /// for the object, and arranges that destroy will be called with the
+  /// user data pointer passed in at destruction. Can be the same as buf or
+  /// different (e.g., if data is part of a larger structure that must be
+  /// destroyed when the data is no longer needed)
+  Slice(void* buf, size_t len, void (*destroy)(void*), void* user_data);
+
+  /// Specialization of above for common case where buf == user_data
+  Slice(void* buf, size_t len, void (*destroy)(void*))
+      : Slice(buf, len, destroy, buf) {}
+
+  /// Similar to the above but has a destroy that also takes slice length
+  Slice(void* buf, size_t len, void (*destroy)(void*, size_t));
+
+  /// Byte size.
+  size_t size() const { return GRPC_SLICE_LENGTH(slice_); }
+
+  /// Raw pointer to the beginning (first element) of the slice.
+  const uint8_t* begin() const { return GRPC_SLICE_START_PTR(slice_); }
+
+  /// Raw pointer to the end (one byte \em past the last element) of the slice.
+  const uint8_t* end() const { return GRPC_SLICE_END_PTR(slice_); }
+
+  /// Raw C slice. Caller needs to call grpc_slice_unref when done.
+  grpc_slice c_slice() const { return grpc_slice_ref(slice_); }
+
+ private:
+  friend class ByteBuffer;
+
+  grpc_slice slice_;
+};
+
+}  // namespace grpc
+
 #endif  // GRPCXX_SUPPORT_SLICE_H

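A short usage sketch of the relocated Slice wrapper (the string value is
illustrative):

    grpc::Slice copy("abc");  // copied-buffer constructor
    grpc::string round_trip(reinterpret_cast<const char*>(copy.begin()),
                            copy.size());
    grpc_slice raw = copy.c_slice();  // adds a reference...
    grpc_slice_unref(raw);            // ...so the caller must unref it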
@@ -19,7 +19,69 @@
 #ifndef GRPC_BYTE_BUFFER_H
 #define GRPC_BYTE_BUFFER_H
 
-#include <grpc/impl/codegen/byte_buffer.h>
+#include <grpc/impl/codegen/grpc_types.h>
 #include <grpc/slice_buffer.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Returns a RAW byte buffer instance over the given slices (up to \a nslices).
+ *
+ * Increases the reference count for all \a slices processed. The user is
+ * responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
+GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
+                                                      size_t nslices);
+
+/** Returns a *compressed* RAW byte buffer instance over the given slices (up to
+ * \a nslices). The \a compression argument defines the compression algorithm
+ * used to generate the data in \a slices.
+ *
+ * Increases the reference count for all \a slices processed. The user is
+ * responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
+GRPCAPI grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
+    grpc_slice *slices, size_t nslices, grpc_compression_algorithm compression);
+
+/** Copies input byte buffer \a bb.
+ *
+ * Increases the reference count of all the source slices. The user is
+ * responsible for calling grpc_byte_buffer_destroy over the returned copy. */
+GRPCAPI grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb);
+
+/** Returns the size of the given byte buffer, in bytes. */
+GRPCAPI size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
+
+/** Destroys \a byte_buffer deallocating all its memory. */
+GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
+
+/** Reader for byte buffers. Iterates over slices in the byte buffer */
+struct grpc_byte_buffer_reader;
+typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
+
+/** Initialize \a reader to read over \a buffer.
+ * Returns 1 upon success, 0 otherwise. */
+GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
+                                         grpc_byte_buffer *buffer);
+
+/** Cleanup and destroy \a reader */
+GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
+
+/** Updates \a slice with the next piece of data from \a reader and returns
+ * 1. Returns 0 at the end of the stream. Caller is responsible for calling
+ * grpc_slice_unref on the result. */
+GRPCAPI int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
+                                         grpc_slice *slice);
+
+/** Merge all data from \a reader into single slice */
+GRPCAPI grpc_slice
+grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader);
+
+/** Returns a RAW byte buffer instance from the output of \a reader. */
+GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
+    grpc_byte_buffer_reader *reader);
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* GRPC_BYTE_BUFFER_H */

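Round-tripping data through this C-core surface looks like the following
sketch (the payload content is illustrative):

    grpc_slice s = grpc_slice_from_copied_string("payload");
    grpc_byte_buffer *bb = grpc_raw_byte_buffer_create(&s, 1);
    grpc_slice_unref(s);  // the buffer holds its own reference now

    grpc_byte_buffer_reader reader;
    if (grpc_byte_buffer_reader_init(&reader, bb)) {
      grpc_slice all = grpc_byte_buffer_reader_readall(&reader);
      /* use GRPC_SLICE_START_PTR(all) / GRPC_SLICE_LENGTH(all) here */
      grpc_slice_unref(all);
      grpc_byte_buffer_reader_destroy(&reader);
    }
    grpc_byte_buffer_destroy(bb);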
@@ -44,13 +44,13 @@ int grpc_stream_compression_algorithm_parse(
  * algorithm. Note that \a name is statically allocated and must *not* be freed.
  * Returns 1 upon success, 0 otherwise. */
 GRPCAPI int grpc_compression_algorithm_name(
-    grpc_compression_algorithm algorithm, char **name);
+    grpc_compression_algorithm algorithm, const char **name);
 
 /** Updates \a name with the encoding name corresponding to a valid \a
  * algorithm. Note that \a name is statically allocated and must *not* be freed.
  * Returns 1 upon success, 0 otherwise. */
 GRPCAPI int grpc_stream_compression_algorithm_name(
-    grpc_stream_compression_algorithm algorithm, char **name);
+    grpc_stream_compression_algorithm algorithm, const char **name);
 
 /** Returns the compression algorithm corresponding to \a level for the
  * compression algorithms encoded in the \a accepted_encodings bitset.

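After this signature change callers receive the name through a const pointer;
since the string is statically allocated it must not be freed. A sketch:

    const char *name = nullptr;
    if (grpc_compression_algorithm_name(GRPC_COMPRESS_GZIP, &name)) {
      /* name now points at a static string such as "gzip"; do not free it */
    }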
@@ -1,86 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_IMPL_CODEGEN_BYTE_BUFFER_H
-#define GRPC_IMPL_CODEGEN_BYTE_BUFFER_H
-
-#include <grpc/impl/codegen/grpc_types.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** Returns a RAW byte buffer instance over the given slices (up to \a nslices).
- *
- * Increases the reference count for all \a slices processed. The user is
- * responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
-GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
-                                                      size_t nslices);
-
-/** Returns a *compressed* RAW byte buffer instance over the given slices (up to
- * \a nslices). The \a compression argument defines the compression algorithm
- * used to generate the data in \a slices.
- *
- * Increases the reference count for all \a slices processed. The user is
- * responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
-GRPCAPI grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
-    grpc_slice *slices, size_t nslices, grpc_compression_algorithm compression);
-
-/** Copies input byte buffer \a bb.
- *
- * Increases the reference count of all the source slices. The user is
- * responsible for calling grpc_byte_buffer_destroy over the returned copy. */
-GRPCAPI grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb);
-
-/** Returns the size of the given byte buffer, in bytes. */
-GRPCAPI size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
-
-/** Destroys \a byte_buffer deallocating all its memory. */
-GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
-
-/** Reader for byte buffers. Iterates over slices in the byte buffer */
-struct grpc_byte_buffer_reader;
-typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
-
-/** Initialize \a reader to read over \a buffer.
- * Returns 1 upon success, 0 otherwise. */
-GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
-                                         grpc_byte_buffer *buffer);
-
-/** Cleanup and destroy \a reader */
-GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
-
-/** Updates \a slice with the next piece of data from \a reader and returns
- * 1. Returns 0 at the end of the stream. Caller is responsible for calling
- * grpc_slice_unref on the result. */
-GRPCAPI int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
-                                         grpc_slice *slice);
-
-/** Merge all data from \a reader into single slice */
-GRPCAPI grpc_slice
-grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader);
-
-/** Returns a RAW byte buffer instance from the output of \a reader. */
-GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
-    grpc_byte_buffer_reader *reader);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_IMPL_CODEGEN_BYTE_BUFFER_H */

@@ -188,9 +188,14 @@ typedef struct {
 #define GRPC_ARG_HTTP2_MAX_FRAME_SIZE "grpc.http2.max_frame_size"
 /** Should BDP probing be performed? */
 #define GRPC_ARG_HTTP2_BDP_PROBE "grpc.http2.bdp_probe"
-/** Minimum time (in milliseconds) between successive ping frames being sent */
-#define GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS \
+/** Minimum time between sending successive ping frames without receiving any
+    data frame, Int valued, milliseconds. */
+#define GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS \
   "grpc.http2.min_time_between_pings_ms"
+/** Minimum allowed time between receiving successive ping frames without
+    sending any data frame. Int valued, milliseconds */
+#define GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS \
+  "grpc.http2.min_ping_interval_without_data_ms"
 /** Channel arg to override the http2 :scheme header */
 #define GRPC_ARG_HTTP2_SCHEME "grpc.http2_scheme"
 /** How many pings can we send before needing to send a data frame or header
@ -202,10 +207,6 @@ typedef struct {
closing the transport? (0 indicates that the server can bear an infinite closing the transport? (0 indicates that the server can bear an infinite
number of misbehaving pings) */ number of misbehaving pings) */
#define GRPC_ARG_HTTP2_MAX_PING_STRIKES "grpc.http2.max_ping_strikes" #define GRPC_ARG_HTTP2_MAX_PING_STRIKES "grpc.http2.max_ping_strikes"
/** Minimum allowed time between two pings without sending any data frame. Int
valued, seconds */
#define GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS \
"grpc.http2.min_ping_interval_without_data_ms"
/** How much data are we willing to queue up per stream if /** How much data are we willing to queue up per stream if
GRPC_WRITE_BUFFER_HINT is set? This is an upper bound */ GRPC_WRITE_BUFFER_HINT is set? This is an upper bound */
#define GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE "grpc.http2.write_buffer_size" #define GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE "grpc.http2.write_buffer_size"
@ -287,11 +288,7 @@ typedef struct {
"grpc.experimental.tcp_max_read_chunk_size" "grpc.experimental.tcp_max_read_chunk_size"
/* Timeout in milliseconds to use for calls to the grpclb load balancer. /* Timeout in milliseconds to use for calls to the grpclb load balancer.
If 0 or unset, the balancer calls will have no deadline. */ If 0 or unset, the balancer calls will have no deadline. */
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_call_timeout_ms" #define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the grpclb load
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
/** If non-zero, grpc server's cronet compression workaround will be enabled */ /** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \ #define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
"grpc.workaround.cronet_compression" "grpc.workaround.cronet_compression"

@@ -65,11 +65,7 @@ GPRAPI grpc_slice grpc_slice_new_with_len(void *p, size_t len,
 GPRAPI grpc_slice grpc_slice_malloc(size_t length);
 GPRAPI grpc_slice grpc_slice_malloc_large(size_t length);
-#define GRPC_SLICE_MALLOC(len) \
-  ((len) <= GRPC_SLICE_INLINED_SIZE \
-       ? (grpc_slice){.refcount = NULL, \
-                      .data.inlined = {.length = (uint8_t)(len)}} \
-       : grpc_slice_malloc_large((len)))
+#define GRPC_SLICE_MALLOC(len) grpc_slice_malloc(len)
 /** Intern a slice:
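The old expansion used a C99 compound literal, which is not valid C++; routing everything through grpc_slice_malloc() presumably keeps the macro usable from C++ translation units (the function can still pick an inlined representation internally). Usage is unchanged; a small sketch:

#include <string.h>
#include <grpc/slice.h>

static void slice_malloc_example(void) {
  grpc_slice s = GRPC_SLICE_MALLOC(32); /* now always a function call */
  memset(GRPC_SLICE_START_PTR(s), 0, GRPC_SLICE_LENGTH(s));
  grpc_slice_unref(s);
}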

@@ -10,7 +10,7 @@
   <email>grpc-packages@google.com</email>
   <active>yes</active>
  </lead>
- <date>2017-05-22</date>
+ <date>2017-08-24</date>
 <time>16:06:07</time>
 <version>
  <release>1.7.0dev</release>
@@ -25,6 +25,9 @@
 - Channel are now by default persistent #11878
 - Some bug fixes from 1.4 branch #12109, #12123
 - Fixed hang bug when fork() was used #11814
+- License changed to Apache 2.0
+- Added support for php_namespace option in codegen plugin #11886
+- Updated gRPC C Core library version 1.6
 </notes>
 <contents>
  <dir baseinstalldir="/" name="/">
@@ -155,7 +158,6 @@
    <file baseinstalldir="/" name="src/core/lib/support/tmpfile_posix.c" role="src" />
    <file baseinstalldir="/" name="src/core/lib/support/tmpfile_windows.c" role="src" />
    <file baseinstalldir="/" name="src/core/lib/support/wrap_memcpy.c" role="src" />
-   <file baseinstalldir="/" name="include/grpc/impl/codegen/byte_buffer.h" role="src" />
    <file baseinstalldir="/" name="include/grpc/impl/codegen/byte_buffer_reader.h" role="src" />
    <file baseinstalldir="/" name="include/grpc/impl/codegen/compression_types.h" role="src" />
    <file baseinstalldir="/" name="include/grpc/impl/codegen/connectivity_state.h" role="src" />

@@ -70,7 +70,7 @@ CLASSIFIERS = [
     'Programming Language :: Python :: 3.5',
     'Programming Language :: Python :: 3.6',
     'License :: OSI Approved :: Apache Software License',
-],
+]
 # Environment variable to determine whether or not the Cython extension should
 # *use* Cython or use the generated C files. Note that this requires the C files

@@ -165,25 +165,37 @@ void PrintHeaderClientMethodInterfaces(
   (*vars)["Request"] = method->input_type_name();
   (*vars)["Response"] = method->output_type_name();
+  struct {
+    grpc::string prefix;
+    grpc::string method_params;  // extra arguments to method
+    grpc::string raw_args;       // extra arguments to raw version of method
+  } async_prefixes[] = {{"Async", ", void* tag", ", tag"},
+                        {"PrepareAsync", "", ""}};
   if (is_public) {
     if (method->NoStreaming()) {
       printer->Print(
           *vars,
           "virtual ::grpc::Status $Method$(::grpc::ClientContext* context, "
          "const $Request$& request, $Response$* response) = 0;\n");
-      printer->Print(*vars,
-                     "std::unique_ptr< "
-                     "::grpc::ClientAsyncResponseReaderInterface< $Response$>> "
-                     "Async$Method$(::grpc::ClientContext* context, "
-                     "const $Request$& request, "
-                     "::grpc::CompletionQueue* cq) {\n");
-      printer->Indent();
-      printer->Print(*vars,
-                     "return std::unique_ptr< "
-                     "::grpc::ClientAsyncResponseReaderInterface< $Response$>>("
-                     "Async$Method$Raw(context, request, cq));\n");
-      printer->Outdent();
-      printer->Print("}\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        printer->Print(
+            *vars,
+            "std::unique_ptr< "
+            "::grpc::ClientAsyncResponseReaderInterface< $Response$>> "
+            "$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+            "const $Request$& request, "
+            "::grpc::CompletionQueue* cq) {\n");
+        printer->Indent();
+        printer->Print(
+            *vars,
+            "return std::unique_ptr< "
+            "::grpc::ClientAsyncResponseReaderInterface< $Response$>>("
+            "$AsyncPrefix$$Method$Raw(context, request, cq));\n");
+        printer->Outdent();
+        printer->Print("}\n");
+      }
     } else if (ClientOnlyStreaming(method)) {
       printer->Print(
           *vars,
@@ -197,19 +209,26 @@ void PrintHeaderClientMethodInterfaces(
           "($Method$Raw(context, response));\n");
       printer->Outdent();
       printer->Print("}\n");
-      printer->Print(
-          *vars,
-          "std::unique_ptr< ::grpc::ClientAsyncWriterInterface< $Request$>>"
-          " Async$Method$(::grpc::ClientContext* context, $Response$* "
-          "response, "
-          "::grpc::CompletionQueue* cq, void* tag) {\n");
-      printer->Indent();
-      printer->Print(*vars,
-                     "return std::unique_ptr< "
-                     "::grpc::ClientAsyncWriterInterface< $Request$>>("
-                     "Async$Method$Raw(context, response, cq, tag));\n");
-      printer->Outdent();
-      printer->Print("}\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+        printer->Print(
+            *vars,
+            "std::unique_ptr< ::grpc::ClientAsyncWriterInterface< $Request$>>"
+            " $AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+            "$Response$* "
+            "response, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+        printer->Indent();
+        printer->Print(*vars,
+                       "return std::unique_ptr< "
+                       "::grpc::ClientAsyncWriterInterface< $Request$>>("
+                       "$AsyncPrefix$$Method$Raw(context, response, "
+                       "cq$AsyncRawArgs$));\n");
+        printer->Outdent();
+        printer->Print("}\n");
+      }
     } else if (ServerOnlyStreaming(method)) {
       printer->Print(
           *vars,
@@ -223,19 +242,25 @@ void PrintHeaderClientMethodInterfaces(
           "($Method$Raw(context, request));\n");
       printer->Outdent();
       printer->Print("}\n");
-      printer->Print(
-          *vars,
-          "std::unique_ptr< ::grpc::ClientAsyncReaderInterface< $Response$>> "
-          "Async$Method$("
-          "::grpc::ClientContext* context, const $Request$& request, "
-          "::grpc::CompletionQueue* cq, void* tag) {\n");
-      printer->Indent();
-      printer->Print(*vars,
-                     "return std::unique_ptr< "
-                     "::grpc::ClientAsyncReaderInterface< $Response$>>("
-                     "Async$Method$Raw(context, request, cq, tag));\n");
-      printer->Outdent();
-      printer->Print("}\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+        printer->Print(
+            *vars,
+            "std::unique_ptr< ::grpc::ClientAsyncReaderInterface< $Response$>> "
+            "$AsyncPrefix$$Method$("
+            "::grpc::ClientContext* context, const $Request$& request, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+        printer->Indent();
+        printer->Print(
+            *vars,
+            "return std::unique_ptr< "
+            "::grpc::ClientAsyncReaderInterface< $Response$>>("
+            "$AsyncPrefix$$Method$Raw(context, request, cq$AsyncRawArgs$));\n");
+        printer->Outdent();
+        printer->Print("}\n");
+      }
     } else if (method->BidiStreaming()) {
       printer->Print(*vars,
                      "std::unique_ptr< ::grpc::ClientReaderWriterInterface< "
@@ -249,61 +274,83 @@ void PrintHeaderClientMethodInterfaces(
           "$Method$Raw(context));\n");
       printer->Outdent();
       printer->Print("}\n");
-      printer->Print(
-          *vars,
-          "std::unique_ptr< "
-          "::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>> "
-          "Async$Method$(::grpc::ClientContext* context, "
-          "::grpc::CompletionQueue* cq, void* tag) {\n");
-      printer->Indent();
-      printer->Print(
-          *vars,
-          "return std::unique_ptr< "
-          "::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>>("
-          "Async$Method$Raw(context, cq, tag));\n");
-      printer->Outdent();
-      printer->Print("}\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+        printer->Print(
+            *vars,
+            "std::unique_ptr< "
+            "::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>> "
+            "$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+        printer->Indent();
+        printer->Print(
+            *vars,
+            "return std::unique_ptr< "
+            "::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>>("
+            "$AsyncPrefix$$Method$Raw(context, cq$AsyncRawArgs$));\n");
+        printer->Outdent();
+        printer->Print("}\n");
+      }
     }
   } else {
     if (method->NoStreaming()) {
-      printer->Print(
-          *vars,
-          "virtual ::grpc::ClientAsyncResponseReaderInterface< $Response$>* "
-          "Async$Method$Raw(::grpc::ClientContext* context, "
-          "const $Request$& request, "
-          "::grpc::CompletionQueue* cq) = 0;\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        printer->Print(
+            *vars,
+            "virtual ::grpc::ClientAsyncResponseReaderInterface< $Response$>* "
+            "$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+            "const $Request$& request, "
+            "::grpc::CompletionQueue* cq) = 0;\n");
+      }
     } else if (ClientOnlyStreaming(method)) {
       printer->Print(
           *vars,
           "virtual ::grpc::ClientWriterInterface< $Request$>*"
           " $Method$Raw("
          "::grpc::ClientContext* context, $Response$* response) = 0;\n");
-      printer->Print(*vars,
-                     "virtual ::grpc::ClientAsyncWriterInterface< $Request$>*"
-                     " Async$Method$Raw(::grpc::ClientContext* context, "
-                     "$Response$* response, "
-                     "::grpc::CompletionQueue* cq, void* tag) = 0;\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        printer->Print(
+            *vars,
+            "virtual ::grpc::ClientAsyncWriterInterface< $Request$>*"
+            " $AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+            "$Response$* response, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) = 0;\n");
+      }
     } else if (ServerOnlyStreaming(method)) {
       printer->Print(
           *vars,
           "virtual ::grpc::ClientReaderInterface< $Response$>* $Method$Raw("
          "::grpc::ClientContext* context, const $Request$& request) = 0;\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
-      printer->Print(
-          *vars,
-          "virtual ::grpc::ClientAsyncReaderInterface< $Response$>* "
-          "Async$Method$Raw("
-          "::grpc::ClientContext* context, const $Request$& request, "
-          "::grpc::CompletionQueue* cq, void* tag) = 0;\n");
+        printer->Print(
+            *vars,
+            "virtual ::grpc::ClientAsyncReaderInterface< $Response$>* "
+            "$AsyncPrefix$$Method$Raw("
+            "::grpc::ClientContext* context, const $Request$& request, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) = 0;\n");
+      }
     } else if (method->BidiStreaming()) {
       printer->Print(*vars,
                      "virtual ::grpc::ClientReaderWriterInterface< $Request$, "
                      "$Response$>* "
                      "$Method$Raw(::grpc::ClientContext* context) = 0;\n");
-      printer->Print(*vars,
-                     "virtual ::grpc::ClientAsyncReaderWriterInterface< "
-                     "$Request$, $Response$>* "
-                     "Async$Method$Raw(::grpc::ClientContext* context, "
-                     "::grpc::CompletionQueue* cq, void* tag) = 0;\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        printer->Print(
+            *vars,
+            "virtual ::grpc::ClientAsyncReaderWriterInterface< "
+            "$Request$, $Response$>* "
+            "$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) = 0;\n");
+      }
     }
   }
 }
@@ -315,25 +362,35 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
   (*vars)["Method"] = method->name();
   (*vars)["Request"] = method->input_type_name();
   (*vars)["Response"] = method->output_type_name();
+  struct {
+    grpc::string prefix;
+    grpc::string method_params;  // extra arguments to method
+    grpc::string raw_args;       // extra arguments to raw version of method
+  } async_prefixes[] = {{"Async", ", void* tag", ", tag"},
+                        {"PrepareAsync", "", ""}};
   if (is_public) {
     if (method->NoStreaming()) {
       printer->Print(
           *vars,
           "::grpc::Status $Method$(::grpc::ClientContext* context, "
          "const $Request$& request, $Response$* response) override;\n");
-      printer->Print(
-          *vars,
-          "std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> "
-          "Async$Method$(::grpc::ClientContext* context, "
-          "const $Request$& request, "
-          "::grpc::CompletionQueue* cq) {\n");
-      printer->Indent();
-      printer->Print(*vars,
-                     "return std::unique_ptr< "
-                     "::grpc::ClientAsyncResponseReader< $Response$>>("
-                     "Async$Method$Raw(context, request, cq));\n");
-      printer->Outdent();
-      printer->Print("}\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        printer->Print(
+            *vars,
+            "std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> "
+            "$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+            "const $Request$& request, "
+            "::grpc::CompletionQueue* cq) {\n");
+        printer->Indent();
+        printer->Print(*vars,
+                       "return std::unique_ptr< "
+                       "::grpc::ClientAsyncResponseReader< $Response$>>("
+                       "$AsyncPrefix$$Method$Raw(context, request, cq));\n");
+        printer->Outdent();
+        printer->Print("}\n");
+      }
     } else if (ClientOnlyStreaming(method)) {
       printer->Print(
           *vars,
@@ -346,18 +403,24 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
           "($Method$Raw(context, response));\n");
       printer->Outdent();
       printer->Print("}\n");
-      printer->Print(*vars,
-                     "std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>"
-                     " Async$Method$(::grpc::ClientContext* context, "
-                     "$Response$* response, "
-                     "::grpc::CompletionQueue* cq, void* tag) {\n");
-      printer->Indent();
-      printer->Print(
-          *vars,
-          "return std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>("
-          "Async$Method$Raw(context, response, cq, tag));\n");
-      printer->Outdent();
-      printer->Print("}\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+        printer->Print(*vars,
+                       "std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>"
+                       " $AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+                       "$Response$* response, "
+                       "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+        printer->Indent();
+        printer->Print(
+            *vars,
+            "return std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>("
+            "$AsyncPrefix$$Method$Raw(context, response, "
+            "cq$AsyncRawArgs$));\n");
+        printer->Outdent();
+        printer->Print("}\n");
+      }
     } else if (ServerOnlyStreaming(method)) {
       printer->Print(
           *vars,
@@ -371,19 +434,24 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
           "($Method$Raw(context, request));\n");
       printer->Outdent();
       printer->Print("}\n");
-      printer->Print(
-          *vars,
-          "std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>> "
-          "Async$Method$("
-          "::grpc::ClientContext* context, const $Request$& request, "
-          "::grpc::CompletionQueue* cq, void* tag) {\n");
-      printer->Indent();
-      printer->Print(
-          *vars,
-          "return std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>>("
-          "Async$Method$Raw(context, request, cq, tag));\n");
-      printer->Outdent();
-      printer->Print("}\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+        printer->Print(
+            *vars,
+            "std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>> "
+            "$AsyncPrefix$$Method$("
+            "::grpc::ClientContext* context, const $Request$& request, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+        printer->Indent();
+        printer->Print(
+            *vars,
+            "return std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>>("
+            "$AsyncPrefix$$Method$Raw(context, request, cq$AsyncRawArgs$));\n");
+        printer->Outdent();
+        printer->Print("}\n");
+      }
     } else if (method->BidiStreaming()) {
       printer->Print(
           *vars,
@@ -396,53 +464,80 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
           "$Method$Raw(context));\n");
       printer->Outdent();
       printer->Print("}\n");
-      printer->Print(*vars,
-                     "std::unique_ptr< ::grpc::ClientAsyncReaderWriter< "
-                     "$Request$, $Response$>> "
-                     "Async$Method$(::grpc::ClientContext* context, "
-                     "::grpc::CompletionQueue* cq, void* tag) {\n");
-      printer->Indent();
-      printer->Print(*vars,
-                     "return std::unique_ptr< "
-                     "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>>("
-                     "Async$Method$Raw(context, cq, tag));\n");
-      printer->Outdent();
-      printer->Print("}\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+        printer->Print(*vars,
+                       "std::unique_ptr< ::grpc::ClientAsyncReaderWriter< "
+                       "$Request$, $Response$>> "
+                       "$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+                       "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+        printer->Indent();
+        printer->Print(
+            *vars,
+            "return std::unique_ptr< "
+            "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>>("
+            "$AsyncPrefix$$Method$Raw(context, cq$AsyncRawArgs$));\n");
+        printer->Outdent();
+        printer->Print("}\n");
+      }
     }
   } else {
     if (method->NoStreaming()) {
-      printer->Print(*vars,
-                     "::grpc::ClientAsyncResponseReader< $Response$>* "
-                     "Async$Method$Raw(::grpc::ClientContext* context, "
-                     "const $Request$& request, "
-                     "::grpc::CompletionQueue* cq) override;\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        printer->Print(
+            *vars,
+            "::grpc::ClientAsyncResponseReader< $Response$>* "
+            "$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+            "const $Request$& request, "
+            "::grpc::CompletionQueue* cq) override;\n");
+      }
     } else if (ClientOnlyStreaming(method)) {
       printer->Print(*vars,
                      "::grpc::ClientWriter< $Request$>* $Method$Raw("
                      "::grpc::ClientContext* context, $Response$* response) "
                      "override;\n");
-      printer->Print(*vars,
-                     "::grpc::ClientAsyncWriter< $Request$>* Async$Method$Raw("
-                     "::grpc::ClientContext* context, $Response$* response, "
-                     "::grpc::CompletionQueue* cq, void* tag) override;\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+        printer->Print(
+            *vars,
+            "::grpc::ClientAsyncWriter< $Request$>* $AsyncPrefix$$Method$Raw("
+            "::grpc::ClientContext* context, $Response$* response, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) override;\n");
+      }
     } else if (ServerOnlyStreaming(method)) {
       printer->Print(*vars,
                      "::grpc::ClientReader< $Response$>* $Method$Raw("
                      "::grpc::ClientContext* context, const $Request$& request)"
                      " override;\n");
-      printer->Print(
-          *vars,
-          "::grpc::ClientAsyncReader< $Response$>* Async$Method$Raw("
-          "::grpc::ClientContext* context, const $Request$& request, "
-          "::grpc::CompletionQueue* cq, void* tag) override;\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+        printer->Print(
+            *vars,
+            "::grpc::ClientAsyncReader< $Response$>* $AsyncPrefix$$Method$Raw("
+            "::grpc::ClientContext* context, const $Request$& request, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) override;\n");
+      }
     } else if (method->BidiStreaming()) {
       printer->Print(*vars,
                      "::grpc::ClientReaderWriter< $Request$, $Response$>* "
                      "$Method$Raw(::grpc::ClientContext* context) override;\n");
-      printer->Print(*vars,
-                     "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
-                     "Async$Method$Raw(::grpc::ClientContext* context, "
-                     "::grpc::CompletionQueue* cq, void* tag) override;\n");
+      for (auto async_prefix : async_prefixes) {
+        (*vars)["AsyncPrefix"] = async_prefix.prefix;
+        (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+        (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+        printer->Print(
+            *vars,
+            "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
+            "$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+            "::grpc::CompletionQueue* cq$AsyncMethodParams$) override;\n");
+      }
     }
   }
 }
@@ -1077,6 +1172,13 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
   (*vars)["Method"] = method->name();
   (*vars)["Request"] = method->input_type_name();
   (*vars)["Response"] = method->output_type_name();
+  struct {
+    grpc::string prefix;
+    grpc::string start;          // bool literal expressed as string
+    grpc::string method_params;  // extra arguments to method
+    grpc::string create_args;    // extra arguments to creator
+  } async_prefixes[] = {{"Async", "true", ", void* tag", ", tag"},
+                        {"PrepareAsync", "false", "", ", nullptr"}};
   if (method->NoStreaming()) {
     printer->Print(*vars,
                    "::grpc::Status $ns$$Service$::Stub::$Method$("
@@ -1087,10 +1189,13 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
                  "rpcmethod_$Method$_, "
                  "context, request, response);\n"
                  "}\n\n");
-  printer->Print(
-      *vars,
-      "::grpc::ClientAsyncResponseReader< $Response$>* "
-      "$ns$$Service$::Stub::Async$Method$Raw(::grpc::ClientContext* context, "
-      "const $Request$& request, "
-      "::grpc::CompletionQueue* cq) {\n");
-  printer->Print(*vars,
+  for (auto async_prefix : async_prefixes) {
+    (*vars)["AsyncPrefix"] = async_prefix.prefix;
+    (*vars)["AsyncStart"] = async_prefix.start;
+    printer->Print(*vars,
+                   "::grpc::ClientAsyncResponseReader< $Response$>* "
+                   "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(::grpc::"
+                   "ClientContext* context, "
+                   "const $Request$& request, "
+                   "::grpc::CompletionQueue* cq) {\n");
+    printer->Print(*vars,
@@ -1098,8 +1203,9 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
                    "::grpc::ClientAsyncResponseReader< $Response$>::Create("
                    "channel_.get(), cq, "
                    "rpcmethod_$Method$_, "
-                   "context, request);\n"
+                   "context, request, $AsyncStart$);\n"
                    "}\n\n");
+  }
 } else if (ClientOnlyStreaming(method)) {
   printer->Print(*vars,
                  "::grpc::ClientWriter< $Request$>* "
@@ -1111,17 +1217,23 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
                  "rpcmethod_$Method$_, "
                  "context, response);\n"
                  "}\n\n");
-  printer->Print(*vars,
-                 "::grpc::ClientAsyncWriter< $Request$>* "
-                 "$ns$$Service$::Stub::Async$Method$Raw("
-                 "::grpc::ClientContext* context, $Response$* response, "
-                 "::grpc::CompletionQueue* cq, void* tag) {\n");
-  printer->Print(*vars,
-                 " return ::grpc::ClientAsyncWriter< $Request$>::Create("
-                 "channel_.get(), cq, "
-                 "rpcmethod_$Method$_, "
-                 "context, response, tag);\n"
-                 "}\n\n");
+  for (auto async_prefix : async_prefixes) {
+    (*vars)["AsyncPrefix"] = async_prefix.prefix;
+    (*vars)["AsyncStart"] = async_prefix.start;
+    (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+    (*vars)["AsyncCreateArgs"] = async_prefix.create_args;
+    printer->Print(*vars,
+                   "::grpc::ClientAsyncWriter< $Request$>* "
+                   "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
+                   "::grpc::ClientContext* context, $Response$* response, "
+                   "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+    printer->Print(*vars,
+                   " return ::grpc::ClientAsyncWriter< $Request$>::Create("
+                   "channel_.get(), cq, "
+                   "rpcmethod_$Method$_, "
+                   "context, response, $AsyncStart$$AsyncCreateArgs$);\n"
+                   "}\n\n");
+  }
 } else if (ServerOnlyStreaming(method)) {
   printer->Print(
       *vars,
@@ -1134,17 +1246,24 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
       "rpcmethod_$Method$_, "
       "context, request);\n"
       "}\n\n");
-  printer->Print(*vars,
-                 "::grpc::ClientAsyncReader< $Response$>* "
-                 "$ns$$Service$::Stub::Async$Method$Raw("
-                 "::grpc::ClientContext* context, const $Request$& request, "
-                 "::grpc::CompletionQueue* cq, void* tag) {\n");
-  printer->Print(*vars,
-                 " return ::grpc::ClientAsyncReader< $Response$>::Create("
-                 "channel_.get(), cq, "
-                 "rpcmethod_$Method$_, "
-                 "context, request, tag);\n"
-                 "}\n\n");
+  for (auto async_prefix : async_prefixes) {
+    (*vars)["AsyncPrefix"] = async_prefix.prefix;
+    (*vars)["AsyncStart"] = async_prefix.start;
+    (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+    (*vars)["AsyncCreateArgs"] = async_prefix.create_args;
+    printer->Print(
+        *vars,
+        "::grpc::ClientAsyncReader< $Response$>* "
+        "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
+        "::grpc::ClientContext* context, const $Request$& request, "
+        "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+    printer->Print(*vars,
+                   " return ::grpc::ClientAsyncReader< $Response$>::Create("
+                   "channel_.get(), cq, "
+                   "rpcmethod_$Method$_, "
+                   "context, request, $AsyncStart$$AsyncCreateArgs$);\n"
+                   "}\n\n");
+  }
 } else if (method->BidiStreaming()) {
   printer->Print(
       *vars,
@@ -1157,21 +1276,27 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
       "rpcmethod_$Method$_, "
       "context);\n"
       "}\n\n");
-  printer->Print(
-      *vars,
-      "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
-      "$ns$$Service$::Stub::Async$Method$Raw(::grpc::ClientContext* context, "
-      "::grpc::CompletionQueue* cq, void* tag) {\n");
-  printer->Print(
-      *vars,
-      " return "
-      "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create("
-      "channel_.get(), cq, "
-      "rpcmethod_$Method$_, "
-      "context, tag);\n"
-      "}\n\n");
+  for (auto async_prefix : async_prefixes) {
+    (*vars)["AsyncPrefix"] = async_prefix.prefix;
+    (*vars)["AsyncStart"] = async_prefix.start;
+    (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+    (*vars)["AsyncCreateArgs"] = async_prefix.create_args;
+    printer->Print(*vars,
+                   "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
+                   "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(::grpc::"
+                   "ClientContext* context, "
+                   "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+    printer->Print(
+        *vars,
+        " return "
+        "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create("
+        "channel_.get(), cq, "
+        "rpcmethod_$Method$_, "
+        "context, $AsyncStart$$AsyncCreateArgs$);\n"
+        "}\n\n");
+  }
 }
 }

 void PrintSourceServerMethod(grpc_generator::Printer *printer,
                              const grpc_generator::Method *method,
@@ -1460,50 +1585,79 @@ void PrintMockClientMethods(grpc_generator::Printer *printer,
   (*vars)["Request"] = method->input_type_name();
   (*vars)["Response"] = method->output_type_name();
+  struct {
+    grpc::string prefix;
+    grpc::string method_params;  // extra arguments to method
+    int extra_method_param_count;
+  } async_prefixes[] = {{"Async", ", void* tag", 1}, {"PrepareAsync", "", 0}};
   if (method->NoStreaming()) {
     printer->Print(
         *vars,
         "MOCK_METHOD3($Method$, ::grpc::Status(::grpc::ClientContext* context, "
        "const $Request$& request, $Response$* response));\n");
-    printer->Print(*vars,
-                   "MOCK_METHOD3(Async$Method$Raw, "
-                   "::grpc::ClientAsyncResponseReaderInterface< $Response$>*"
-                   "(::grpc::ClientContext* context, const $Request$& request, "
-                   "::grpc::CompletionQueue* cq));\n");
+    for (auto async_prefix : async_prefixes) {
+      (*vars)["AsyncPrefix"] = async_prefix.prefix;
+      printer->Print(
+          *vars,
+          "MOCK_METHOD3($AsyncPrefix$$Method$Raw, "
+          "::grpc::ClientAsyncResponseReaderInterface< $Response$>*"
+          "(::grpc::ClientContext* context, const $Request$& request, "
+          "::grpc::CompletionQueue* cq));\n");
+    }
   } else if (ClientOnlyStreaming(method)) {
     printer->Print(
         *vars,
         "MOCK_METHOD2($Method$Raw, "
         "::grpc::ClientWriterInterface< $Request$>*"
        "(::grpc::ClientContext* context, $Response$* response));\n");
-    printer->Print(*vars,
-                   "MOCK_METHOD4(Async$Method$Raw, "
-                   "::grpc::ClientAsyncWriterInterface< $Request$>*"
-                   "(::grpc::ClientContext* context, $Response$* response, "
-                   "::grpc::CompletionQueue* cq, void* tag));\n");
+    for (auto async_prefix : async_prefixes) {
+      (*vars)["AsyncPrefix"] = async_prefix.prefix;
+      (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+      (*vars)["MockArgs"] =
+          std::to_string(3 + async_prefix.extra_method_param_count);
+      printer->Print(*vars,
+                     "MOCK_METHOD$MockArgs$($AsyncPrefix$$Method$Raw, "
+                     "::grpc::ClientAsyncWriterInterface< $Request$>*"
+                     "(::grpc::ClientContext* context, $Response$* response, "
+                     "::grpc::CompletionQueue* cq$AsyncMethodParams$));\n");
+    }
   } else if (ServerOnlyStreaming(method)) {
     printer->Print(
         *vars,
         "MOCK_METHOD2($Method$Raw, "
         "::grpc::ClientReaderInterface< $Response$>*"
        "(::grpc::ClientContext* context, const $Request$& request));\n");
-    printer->Print(*vars,
-                   "MOCK_METHOD4(Async$Method$Raw, "
-                   "::grpc::ClientAsyncReaderInterface< $Response$>*"
-                   "(::grpc::ClientContext* context, const $Request$& request, "
-                   "::grpc::CompletionQueue* cq, void* tag));\n");
+    for (auto async_prefix : async_prefixes) {
+      (*vars)["AsyncPrefix"] = async_prefix.prefix;
+      (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+      (*vars)["MockArgs"] =
+          std::to_string(3 + async_prefix.extra_method_param_count);
+      printer->Print(
+          *vars,
+          "MOCK_METHOD$MockArgs$($AsyncPrefix$$Method$Raw, "
+          "::grpc::ClientAsyncReaderInterface< $Response$>*"
+          "(::grpc::ClientContext* context, const $Request$& request, "
+          "::grpc::CompletionQueue* cq$AsyncMethodParams$));\n");
+    }
   } else if (method->BidiStreaming()) {
     printer->Print(
         *vars,
         "MOCK_METHOD1($Method$Raw, "
         "::grpc::ClientReaderWriterInterface< $Request$, $Response$>*"
        "(::grpc::ClientContext* context));\n");
-    printer->Print(
-        *vars,
-        "MOCK_METHOD3(Async$Method$Raw, "
-        "::grpc::ClientAsyncReaderWriterInterface<$Request$, $Response$>*"
-        "(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, "
-        "void* tag));\n");
+    for (auto async_prefix : async_prefixes) {
+      (*vars)["AsyncPrefix"] = async_prefix.prefix;
+      (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+      (*vars)["MockArgs"] =
+          std::to_string(2 + async_prefix.extra_method_param_count);
+      printer->Print(
+          *vars,
+          "MOCK_METHOD$MockArgs$($AsyncPrefix$$Method$Raw, "
+          "::grpc::ClientAsyncReaderWriterInterface<$Request$, $Response$>*"
+          "(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq"
+          "$AsyncMethodParams$));\n");
+    }
   }
 }
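The net effect on generated stubs: every Async$Method$ gains a PrepareAsync$Method$ sibling that creates the call without starting it (the `start` bool threaded into Create above). A hedged usage sketch against a hypothetical Greeter service; the generated names and the StartCall step are assumed from the matching async-stream changes in this merge:

#include <memory>
#include <grpc++/grpc++.h>
#include "helloworld.grpc.pb.h"  // hypothetical generated header

// Issue a unary call via the new two-step PrepareAsync path.
void prepare_async_example(helloworld::Greeter::Stub *stub,
                           grpc::CompletionQueue *cq) {
  grpc::ClientContext context;
  helloworld::HelloRequest request;
  helloworld::HelloReply reply;
  grpc::Status status;
  auto rpc = stub->PrepareAsyncSayHello(&context, request, cq);
  rpc->StartCall();  // Async* variants start implicitly and skip this step
  rpc->Finish(&reply, &status, (void *)1);
  // ... then drive *cq until the (void*)1 tag completes ...
}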

@@ -767,9 +767,9 @@ bool PythonGrpcGenerator::Generate(const FileDescriptor* file,
   ProtoBufFile pbfile(file);
   PrivateGenerator generator(config_, &pbfile);
-  if (parameter == "grpc_2_0") {
+  if (parameter == "" || parameter == "grpc_2_0") {
     return GenerateGrpc(context, generator, pb2_grpc_file_name, true);
-  } else if (parameter == "") {
+  } else if (parameter == "grpc_1_0") {
     return GenerateGrpc(context, generator, pb2_grpc_file_name, true) &&
            GenerateGrpc(context, generator, pb2_file_name, false);
   } else {

@@ -37,16 +37,16 @@
 void define_base_resources() {
   google_census_Resource_BasicUnit numerator =
       google_census_Resource_BasicUnit_SECS;
-  resource r = {"client_rpc_latency",             // name
-                "Client RPC latency in seconds",  // description
+  resource r = {(char *)"client_rpc_latency",             // name
+                (char *)"Client RPC latency in seconds",  // description
                 0,           // prefix
                 1,           // n_numerators
                 &numerator,  // numerators
                 0,           // n_denominators
                 NULL};       // denominators
   define_resource(&r);
-  r = (resource){"server_rpc_latency",             // name
-                 "Server RPC latency in seconds",  // description
+  r = (resource){(char *)"server_rpc_latency",             // name
+                 (char *)"Server RPC latency in seconds",  // description
                  0,           // prefix
                  1,           // n_numerators
                  &numerator,  // numerators

@@ -375,7 +375,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
   }
   // Extract the following fields from the resolver result, if non-NULL.
   bool lb_policy_updated = false;
-  char *lb_policy_name = NULL;
+  char *lb_policy_name_dup = NULL;
   bool lb_policy_name_changed = false;
   grpc_lb_policy *new_lb_policy = NULL;
   char *service_config_json = NULL;
@@ -383,6 +383,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
   grpc_slice_hash_table *method_params_table = NULL;
   if (chand->resolver_result != NULL) {
     // Find LB policy name.
+    const char *lb_policy_name = NULL;
     const grpc_arg *channel_arg =
         grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
     if (channel_arg != NULL) {
@@ -473,7 +474,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
     // Before we clean up, save a copy of lb_policy_name, since it might
     // be pointing to data inside chand->resolver_result.
     // The copy will be saved in chand->lb_policy_name below.
-    lb_policy_name = gpr_strdup(lb_policy_name);
+    lb_policy_name_dup = gpr_strdup(lb_policy_name);
     grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
     chand->resolver_result = NULL;
   }
@@ -481,8 +482,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
     gpr_log(GPR_DEBUG,
             "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
             "service_config=\"%s\"",
-            chand, lb_policy_name, lb_policy_name_changed ? " (changed)" : "",
-            service_config_json);
+            chand, lb_policy_name_dup,
+            lb_policy_name_changed ? " (changed)" : "", service_config_json);
   }
   // Now swap out fields in chand. Note that the new values may still
   // be NULL if (e.g.) the resolver failed to return results or the
@@ -490,9 +491,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
   //
   // First, swap out the data used by cc_get_channel_info().
   gpr_mu_lock(&chand->info_mu);
-  if (lb_policy_name != NULL) {
+  if (lb_policy_name_dup != NULL) {
     gpr_free(chand->info_lb_policy_name);
-    chand->info_lb_policy_name = lb_policy_name;
+    chand->info_lb_policy_name = lb_policy_name_dup;
   }
   if (service_config_json != NULL) {
     gpr_free(chand->info_service_config_json);
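The rename separates the borrowed pointer (into resolver_result) from the owned duplicate that outlives it. The ownership pattern it clarifies, in isolation (a sketch; the parameter is a stand-in for data owned by a container about to be destroyed):

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>

static void ownership_example(const char *borrowed) {
  const char *lb_policy_name = borrowed;                  // borrowed view
  char *lb_policy_name_dup = gpr_strdup(lb_policy_name);  // owned copy
  /* ... destroy the owning container; lb_policy_name now dangles,
     lb_policy_name_dup remains valid ... */
  gpr_free(lb_policy_name_dup);  // the owner frees the copy
}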

@@ -63,6 +63,6 @@ static const grpc_arg_pointer_vtable factory_arg_vtable = {
 grpc_arg grpc_client_channel_factory_create_channel_arg(
     grpc_client_channel_factory* factory) {
-  return grpc_channel_arg_pointer_create(GRPC_ARG_CLIENT_CHANNEL_FACTORY,
+  return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CLIENT_CHANNEL_FACTORY,
                                          factory, &factory_arg_vtable);
 }

@@ -54,8 +54,8 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
   char *default_authority = grpc_get_default_authority(
       exec_ctx, grpc_channel_stack_builder_get_target(builder));
   if (default_authority != NULL) {
-    grpc_arg arg = grpc_channel_arg_string_create(GRPC_ARG_DEFAULT_AUTHORITY,
-                                                  default_authority);
+    grpc_arg arg = grpc_channel_arg_string_create(
+        (char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
     grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
     grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
                                                      new_args);

@@ -44,6 +44,8 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
   GPR_ASSERT(user_cred != NULL);
   char* proxy_name = NULL;
   char* uri_str = gpr_getenv("http_proxy");
+  char** authority_strs = NULL;
+  size_t authority_nstrs;
   if (uri_str == NULL) return NULL;
   grpc_uri* uri =
       grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */);
@@ -56,8 +58,6 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
     goto done;
   }
   /* Split on '@' to separate user credentials from host */
-  char** authority_strs = NULL;
-  size_t authority_nstrs;
   gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs);
   GPR_ASSERT(authority_nstrs != 0); /* should have at least 1 string */
   if (authority_nstrs == 1) {
@@ -157,7 +157,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
   }
   grpc_arg args_to_add[2];
   args_to_add[0] = grpc_channel_arg_string_create(
-      GRPC_ARG_HTTP_CONNECT_SERVER,
+      (char*)GRPC_ARG_HTTP_CONNECT_SERVER,
       uri->path[0] == '/' ? uri->path + 1 : uri->path);
   if (user_cred != NULL) {
     /* Use base64 encoding for user credentials as stated in RFC 7617 */
@@ -166,8 +166,8 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
     char* header;
     gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
     gpr_free(encoded_user_cred);
-    args_to_add[1] =
-        grpc_channel_arg_string_create(GRPC_ARG_HTTP_CONNECT_HEADERS, header);
+    args_to_add[1] = grpc_channel_arg_string_create(
+        (char*)GRPC_ARG_HTTP_CONNECT_HEADERS, header);
     *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2);
     gpr_free(header);
   } else {
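For reference, this proxy mapper is driven entirely by the http_proxy environment variable. A hedged sketch (host and credentials are placeholders); the variable must be set before the channel's first name resolution, since that is when the mapper reads it:

#include <stdlib.h>

// With credentials in the URI, the client CONNECTs through the proxy and
// attaches "Proxy-Authorization: Basic base64(user:pass)" as shown above.
static void use_authenticated_proxy(void) {
  setenv("http_proxy", "http://user:pass@proxy.example.com:3128", 1);
}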

@@ -123,7 +123,6 @@
 #define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
 #define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
 #define GRPC_GRPCLB_RECONNECT_JITTER 0.2
-#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000

 grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
@@ -300,10 +299,6 @@ typedef struct glb_lb_policy {
   /** timeout in milliseconds for the LB call. 0 means no deadline. */
   int lb_call_timeout_ms;

-  /** timeout in milliseconds for before using fallback backend addresses.
-   * 0 means not using fallback. */
-  int lb_fallback_timeout_ms;
-
   /** for communicating with the LB server */
   grpc_channel *lb_channel;
@@ -330,9 +325,6 @@ typedef struct glb_lb_policy {
    * Otherwise, we delegate to the RR policy. */
   size_t serverlist_index;

-  /** stores the backend addresses from the resolver */
-  grpc_lb_addresses *fallback_backend_addresses;
-
   /** list of picks that are waiting on RR's policy connectivity */
   pending_pick *pending_picks;
@@ -353,9 +345,6 @@ typedef struct glb_lb_policy {
   /** is \a lb_call_retry_timer active? */
   bool retry_timer_active;

-  /** is \a lb_fallback_timer active? */
-  bool fallback_timer_active;
-
   /** called upon changes to the LB channel's connectivity. */
   grpc_closure lb_channel_on_connectivity_changed;
@@ -378,9 +367,6 @@ typedef struct glb_lb_policy {
   /* LB call retry timer callback. */
   grpc_closure lb_on_call_retry;

-  /* LB fallback timer callback. */
-  grpc_closure lb_on_fallback;
-
   grpc_call *lb_call; /* streaming call to the LB server, */
   grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
@@ -404,9 +390,6 @@ typedef struct glb_lb_policy {
   /** LB call retry timer */
   grpc_timer lb_call_retry_timer;

-  /** LB fallback timer */
-  grpc_timer lb_fallback_timer;
-
   bool initial_request_sent;
   bool seen_initial_response;
@@ -553,32 +536,6 @@ static grpc_lb_addresses *process_serverlist_locked(
   return lb_addresses;
 }

-/* Returns the backend addresses extracted from the given addresses */
-static grpc_lb_addresses *extract_backend_addresses_locked(
-    grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
-  /* first pass: count the number of backend addresses */
-  size_t num_backends = 0;
-  for (size_t i = 0; i < addresses->num_addresses; ++i) {
-    if (!addresses->addresses[i].is_balancer) {
-      ++num_backends;
-    }
-  }
-  /* second pass: actually populate the addresses and (empty) LB tokens */
-  grpc_lb_addresses *backend_addresses =
-      grpc_lb_addresses_create(num_backends, &lb_token_vtable);
-  size_t num_copied = 0;
-  for (size_t i = 0; i < addresses->num_addresses; ++i) {
-    if (addresses->addresses[i].is_balancer) continue;
-    const grpc_resolved_address *addr = &addresses->addresses[i].address;
-    grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
-                                  addr->len, false /* is_balancer */,
-                                  NULL /* balancer_name */,
-                                  (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
-    ++num_copied;
-  }
-  return backend_addresses;
-}
-
 static void update_lb_connectivity_status_locked(
     grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
     grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
@@ -646,8 +603,6 @@ static bool pick_from_internal_rr_locked(
     grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
     const grpc_lb_policy_pick_args *pick_args, bool force_async,
     grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
-  // Check for drops if we are not using fallback backend addresses.
-  if (glb_policy->serverlist != NULL) {
   // Look at the index into the serverlist to see if we should drop this call.
   grpc_grpclb_server *server =
       glb_policy->serverlist->servers[glb_policy->serverlist_index++];
@@ -666,8 +621,8 @@ static bool pick_from_internal_rr_locked(
     // the client_load_reporting filter, because we do not create a
     // subchannel call (and therefore no client_load_reporting filter)
     // for dropped calls.
-    grpc_grpclb_client_stats_add_call_dropped_locked(
-        server->load_balance_token, wc_arg->client_stats);
+    grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token,
+                                                     wc_arg->client_stats);
     grpc_grpclb_client_stats_unref(wc_arg->client_stats);
     if (force_async) {
       GPR_ASSERT(wc_arg->wrapped_closure != NULL);
@@ -678,7 +633,6 @@ static bool pick_from_internal_rr_locked(
     gpr_free(wc_arg->free_when_done);
     return true;
   }
-  }
   // Pick via the RR policy.
   const bool pick_done = grpc_lb_policy_pick_locked(
       exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
@@ -715,18 +669,8 @@ static bool pick_from_internal_rr_locked(
 static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
                                                   glb_lb_policy *glb_policy) {
-  grpc_lb_addresses *addresses;
-  if (glb_policy->serverlist != NULL) {
-    GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
-    addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
-  } else {
-    // If rr_handover_locked() is invoked when we haven't received any
-    // serverlist from the balancer, we use the fallback backends returned by
-    // the resolver. Note that the fallback backend list may be empty, in which
-    // case the new round_robin policy will keep the requested picks pending.
-    GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
-    addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
-  }
+  grpc_lb_addresses *addresses =
+      process_serverlist_locked(exec_ctx, glb_policy->serverlist);
   GPR_ASSERT(addresses != NULL);
   grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
   args->client_channel_factory = glb_policy->cc_factory;
@@ -832,6 +776,8 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
 /* glb_policy->rr_policy may be NULL (initial handover) */
 static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
                                glb_lb_policy *glb_policy) {
+  GPR_ASSERT(glb_policy->serverlist != NULL &&
+             glb_policy->serverlist->num_servers > 0);
   if (glb_policy->shutting_down) return;
   grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
   GPR_ASSERT(args != NULL);
@@ -980,9 +926,6 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   if (glb_policy->serverlist != NULL) {
     grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
   }
-  if (glb_policy->fallback_backend_addresses != NULL) {
-    grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
-  }
   grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
   grpc_subchannel_index_unref();
   if (glb_policy->pending_update_args != NULL) {
@@ -1124,28 +1067,10 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
   GRPC_ERROR_UNREF(error);
 }

-static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                        grpc_error *error);
-
 static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
                                       glb_lb_policy *glb_policy);

 static void start_picking_locked(grpc_exec_ctx *exec_ctx,
                                  glb_lb_policy *glb_policy) {
-  /* start a timer to fall back */
-  if (glb_policy->lb_fallback_timeout_ms > 0 &&
-      glb_policy->serverlist == NULL) {
-    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
-    gpr_timespec deadline = gpr_time_add(
-        now,
-        gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
-    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
-    GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
-                      glb_policy,
-                      grpc_combiner_scheduler(glb_policy->base.combiner));
-    glb_policy->fallback_timer_active = true;
-    grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
-                    &glb_policy->lb_on_fallback, now);
-  }
   glb_policy->started_picking = true;
   gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
   query_for_backends_locked(exec_ctx, glb_policy);
@@ -1600,15 +1525,6 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
         if (glb_policy->serverlist != NULL) {
           /* dispose of the old serverlist */
           grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
-        } else {
-          /* or dispose of the fallback */
-          grpc_lb_addresses_destroy(exec_ctx,
-                                    glb_policy->fallback_backend_addresses);
-          glb_policy->fallback_backend_addresses = NULL;
-          if (glb_policy->fallback_timer_active) {
-            grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
-            glb_policy->fallback_timer_active = false;
-          }
         }
         /* and update the copy in the glb_lb_policy instance. This
          * serverlist instance will be destroyed either upon the next
@@ -1619,7 +1535,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
       }
     } else {
       if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-        gpr_log(GPR_INFO, "Received empty server list, ignoring.");
+        gpr_log(GPR_INFO,
+                "Received empty server list. Picks will stay pending until "
+                "a response with > 0 servers is received");
       }
       grpc_grpclb_destroy_serverlist(serverlist);
     }
@@ -1642,6 +1560,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
           exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
           &glb_policy->lb_on_response_received); /* loop */
       GPR_ASSERT(GRPC_CALL_OK == call_error);
+    } else {
+      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+                                "lb_on_response_received_locked_shutdown");
     }
   } else { /* empty payload: call cancelled. */
     /* dispose of the "lb_on_response_received_locked" weak ref taken in
@ -1666,26 +1587,6 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer"); GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
} }
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't do anything. */
if (glb_policy->serverlist != NULL) return;
glb_policy->fallback_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Falling back to use backends from resolver (grpclb %p)",
(void *)glb_policy);
}
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
rr_handover_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_fallback_timer");
}
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) { void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg; glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
@ -1806,17 +1707,6 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
&glb_policy->lb_channel_connectivity, &glb_policy->lb_channel_connectivity,
&glb_policy->lb_channel_on_connectivity_changed, NULL); &glb_policy->lb_channel_on_connectivity_changed, NULL);
} }
// Propagate update to fallback_backend_addresses if a non-empty serverlist
// hasn't been received from the balancer.
if (glb_policy->serverlist == NULL) {
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
if (glb_policy->rr_policy != NULL) {
rr_handover_locked(exec_ctx, glb_policy);
}
}
} }
// Invoked as part of the update process. It continues watching the LB channel // Invoked as part of the update process. It continues watching the LB channel
@ -1899,7 +1789,13 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory, grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) { grpc_lb_policy_args *args) {
/* Count the number of gRPC-LB addresses. There must be at least one. */ /* Count the number of gRPC-LB addresses. There must be at least one.
* TODO(roth): For now, we ignore non-balancer addresses, but in the
* future, we may change the behavior such that we fall back to using
* the non-balancer addresses if we cannot reach any balancers. In the
* fallback case, we should use the LB policy indicated by
* GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
* unset, we should default to pick_first). */
const grpc_arg *arg = const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) { if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@ -1935,24 +1831,14 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
glb_policy->lb_call_timeout_ms = glb_policy->lb_call_timeout_ms =
grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX}); grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0,
INT_MAX});
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args, // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter. // since we use this to trigger the client_load_reporting filter.
grpc_arg new_arg = grpc_arg new_arg = grpc_channel_arg_string_create(
grpc_channel_arg_string_create(GRPC_ARG_LB_POLICY_NAME, "grpclb"); (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME}; static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
glb_policy->args = grpc_channel_args_copy_and_add_and_remove( glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1); args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
/* Extract the backend addresses (may be empty) from the resolver for
* fallback. */
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
/* Create a client channel over them to communicate with a LB service */ /* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator = glb_policy->response_generator =
grpc_fake_resolver_response_generator_create(); grpc_fake_resolver_response_generator_create();
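Note: the casts added above (and in several hunks below) all serve the same goal: grpc_channel_arg_string_create() takes non-const char * parameters, so #define'd keys and string literals now need explicit casts. A minimal sketch of the resulting call-site pattern, using only the helpers already visible in these hunks (the wrapper function itself is hypothetical, not part of this diff):

    /* Hypothetical helper: forces the LB policy name channel arg the way
       glb_create() does above. */
    static grpc_channel_args *args_with_grpclb_policy(
        const grpc_channel_args *base) {
      grpc_arg arg = grpc_channel_arg_string_create(
          (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
      return grpc_channel_args_copy_and_add(base, &arg, 1);
    }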

@@ -589,7 +589,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
   // Dispose of outdated subchannel lists.
   if (sd->subchannel_list != p->subchannel_list &&
       sd->subchannel_list != p->latest_pending_subchannel_list) {
-    char *reason = NULL;
+    const char *reason = NULL;
    if (sd->subchannel_list->shutting_down) {
      reason = "sl_outdated_straggler";
      rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, reason);

@@ -56,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
 }
 
 void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
-                                   const void* address, size_t address_len,
+                                   void* address, size_t address_len,
                                    bool is_balancer, const char* balancer_name,
                                    void* user_data) {
   GPR_ASSERT(index < addresses->num_addresses);
@@ -141,7 +141,7 @@ static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
 grpc_arg grpc_lb_addresses_create_channel_arg(
     const grpc_lb_addresses* addresses) {
   return grpc_channel_arg_pointer_create(
-      GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable);
+      (char*)GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable);
 }
 
 grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(

@@ -73,7 +73,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
  * \a address is a socket address of length \a address_len.
  * Takes ownership of \a balancer_name. */
 void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
-                                   const void *address, size_t address_len,
+                                   void *address, size_t address_len,
                                    bool is_balancer, const char *balancer_name,
                                    void *user_data);

@@ -204,7 +204,7 @@ static char *choose_service_config(char *service_config_choice_json) {
       int random_pct = rand() % 100;
       int percentage;
       if (sscanf(field->value, "%d", &percentage) != 1 ||
-          random_pct > percentage) {
+          random_pct > percentage || percentage == 0) {
         service_config_json = NULL;
         break;
       }
@@ -249,7 +249,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
             service_config_string);
     args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
     new_args[num_args_to_add++] = grpc_channel_arg_string_create(
-        GRPC_ARG_SERVICE_CONFIG, service_config_string);
+        (char *)GRPC_ARG_SERVICE_CONFIG, service_config_string);
     service_config = grpc_service_config_create(service_config_string);
     if (service_config != NULL) {
       const char *lb_policy_name =
@@ -257,7 +257,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
       if (lb_policy_name != NULL) {
         args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
         new_args[num_args_to_add++] = grpc_channel_arg_string_create(
-            GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
+            (char *)GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
       }
     }
   }
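Note: the added "|| percentage == 0" clause closes a subtle hole in the service-config selector: rand() % 100 yields a value in 0..99, so under the old test a choice published with percentage 0 was still accepted whenever random_pct happened to be 0, i.e. roughly 1% of the time. A worked check of the new predicate (illustrative helper, not part of the diff):

    /* Returns 1 if the service config choice is rejected, mirroring the
       condition in choose_service_config() above. */
    static int rejected(int random_pct, int percentage) {
      return random_pct > percentage || percentage == 0;
    }
    /* rejected(0, 0)   == 1  -- percentage 0 now really means "never"    */
    /* rejected(30, 50) == 0  -- a 50% choice passes for random_pct 0..50 */
    /* rejected(51, 50) == 1                                              */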

@@ -275,14 +275,15 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
   gpr_log(GPR_DEBUG, "on_txt_done_cb");
   char *error_msg;
   grpc_ares_request *r = (grpc_ares_request *)arg;
+  const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
+  struct ares_txt_ext *result = NULL;
+  struct ares_txt_ext *reply = NULL;
+  grpc_error *error = GRPC_ERROR_NONE;
   gpr_mu_lock(&r->mu);
   if (status != ARES_SUCCESS) goto fail;
-  struct ares_txt_ext *reply = NULL;
   status = ares_parse_txt_reply_ext(buf, len, &reply);
   if (status != ARES_SUCCESS) goto fail;
   // Find service config in TXT record.
-  const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
-  struct ares_txt_ext *result;
   for (result = reply; result != NULL; result = result->next) {
     if (result->record_start &&
         memcmp(result->txt, g_service_config_attribute_prefix, prefix_len) ==
@@ -313,7 +314,7 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
 fail:
   gpr_asprintf(&error_msg, "C-ares TXT lookup status is not ARES_SUCCESS: %s",
                ares_strerror(status));
-  grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+  error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
   gpr_free(error_msg);
   if (r->error == GRPC_ERROR_NONE) {
     r->error = error;
@@ -331,6 +332,9 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
     grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
     char **service_config_json) {
   grpc_error *error = GRPC_ERROR_NONE;
+  grpc_ares_hostbyname_request *hr = NULL;
+  grpc_ares_request *r = NULL;
+  ares_channel *channel = NULL;
   /* TODO(zyc): Enable tracing after #9603 is checked in */
   /* if (grpc_dns_trace) {
       gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
@@ -360,8 +364,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
   error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
   if (error != GRPC_ERROR_NONE) goto error_cleanup;
-  grpc_ares_request *r =
-      (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
+  r = (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
   gpr_mu_init(&r->mu);
   r->ev_driver = ev_driver;
   r->on_done = on_done;
@@ -369,7 +372,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
   r->service_config_json_out = service_config_json;
   r->success = false;
   r->error = GRPC_ERROR_NONE;
-  ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+  channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
   // If dns_server is specified, use it.
   if (dns_server != NULL) {
@@ -410,12 +413,12 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
   }
   gpr_ref_init(&r->pending_queries, 1);
   if (grpc_ipv6_loopback_available()) {
-    grpc_ares_hostbyname_request *hr = create_hostbyname_request(
-        r, host, strhtons(port), false /* is_balancer */);
+    hr = create_hostbyname_request(r, host, strhtons(port),
+                                   false /* is_balancer */);
     ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_cb, hr);
   }
-  grpc_ares_hostbyname_request *hr = create_hostbyname_request(
-      r, host, strhtons(port), false /* is_balancer */);
+  hr = create_hostbyname_request(r, host, strhtons(port),
+                                 false /* is_balancer */);
   ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb, hr);
   if (check_grpclb) {
     /* Query the SRV record */
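Note: both hunks above hoist local declarations ahead of the first goto fail / goto error_cleanup. Jumping over a declaration that carries an initializer is ill-formed when the file is also compiled as C++ (and rejected by some stricter C compilers), so the declarations move to the top and the initializations become plain assignments. A standalone illustration of the pattern, not tied to any gRPC API:

    #include <stdlib.h>

    int parse_example(int status) {
      char *buf = NULL; /* declared (and initialized) before any goto */
      if (status != 0) goto fail;
      buf = (char *)malloc(16); /* plain assignment: safe to jump past */
      if (buf == NULL) goto fail;
      free(buf);
      return 0;
    fail:
      free(buf); /* free(NULL) is a no-op */
      return -1;
    }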

@@ -210,7 +210,7 @@ grpc_arg grpc_fake_resolver_response_generator_arg(
     grpc_fake_resolver_response_generator* generator) {
   grpc_arg arg;
   arg.type = GRPC_ARG_POINTER;
-  arg.key = GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
+  arg.key = (char*)GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
   arg.value.pointer.p = generator;
   arg.value.pointer.vtable = &response_generator_arg_vtable;
   return arg;

@@ -811,6 +811,6 @@ const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args) {
 grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) {
   return grpc_channel_arg_string_create(
-      GRPC_ARG_SUBCHANNEL_ADDRESS,
+      (char *)GRPC_ARG_SUBCHANNEL_ADDRESS,
       addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
 }

@@ -244,7 +244,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
                                                  &calld->slices, &tmp);
   if (did_compress) {
     if (GRPC_TRACER_ON(grpc_compression_trace)) {
-      char *algo_name;
+      const char *algo_name;
       const size_t before_size = calld->slices.length;
       const size_t after_size = tmp.length;
       const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
@@ -258,7 +258,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
     send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
   } else {
     if (GRPC_TRACER_ON(grpc_compression_trace)) {
-      char *algo_name;
+      const char *algo_name;
       GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
                                                  &algo_name));
       gpr_log(GPR_DEBUG,

@@ -55,7 +55,8 @@ static bool maybe_add_server_load_reporting_filter(
 }
 
 grpc_arg grpc_load_reporting_enable_arg() {
-  return grpc_channel_arg_integer_create(GRPC_ARG_ENABLE_LOAD_REPORTING, 1);
+  return grpc_channel_arg_integer_create((char *)GRPC_ARG_ENABLE_LOAD_REPORTING,
+                                         1);
 }
 
 /* Plugin registration */

@@ -55,7 +55,7 @@ static grpc_channel *client_channel_factory_create_channel(
   }
   // Add channel arg containing the server URI.
   grpc_arg arg = grpc_channel_arg_string_create(
-      GRPC_ARG_SERVER_URI,
+      (char *)GRPC_ARG_SERVER_URI,
       grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
   const char *to_remove[] = {GRPC_ARG_SERVER_URI};
   grpc_channel_args *new_args =

@@ -42,7 +42,7 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
       (target, fd, args));
   grpc_arg default_authority_arg = grpc_channel_arg_string_create(
-      GRPC_ARG_DEFAULT_AUTHORITY, "test.authority");
+      (char *)GRPC_ARG_DEFAULT_AUTHORITY, (char *)"test.authority");
   grpc_channel_args *final_args =
       grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);

@@ -201,6 +201,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
   grpc_error *err = GRPC_ERROR_NONE;
   server_state *state = NULL;
   grpc_error **errors = NULL;
+  size_t naddrs = 0;
   *port_num = -1;
@@ -225,7 +226,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
   state->shutdown = true;
   gpr_mu_init(&state->mu);
-  const size_t naddrs = resolved->naddrs;
+  naddrs = resolved->naddrs;
   errors = (grpc_error **)gpr_malloc(sizeof(*errors) * naddrs);
   for (i = 0; i < naddrs; i++) {
     errors[i] =

@@ -23,6 +23,7 @@
 void grpc_chttp2_plugin_init(void) {
   grpc_register_tracer(&grpc_http_trace);
   grpc_register_tracer(&grpc_flowctl_trace);
+  grpc_register_tracer(&grpc_trace_http2_stream_state);
 #ifndef NDEBUG
   grpc_register_tracer(&grpc_trace_chttp2_refcount);
 #endif
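Note: the tracer registered here is defined in stream_lists.c below under the name "http2_stream_state" (see its GRPC_TRACER_INITIALIZER), so it can presumably be enabled at runtime like the other core tracers, via the comma-separated GRPC_TRACE environment variable, e.g.:

    GRPC_TRACE=http2_stream_state ./my_grpc_server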

@@ -64,6 +64,11 @@
 #define DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS false
 #define KEEPALIVE_TIME_BACKOFF_MULTIPLIER 2
 
+#define DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
+#define DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
+#define DEFAULT_MAX_PINGS_BETWEEN_DATA 0 /* unlimited */
+#define DEFAULT_MAX_PING_STRIKES 2
+
 static int g_default_client_keepalive_time_ms =
     DEFAULT_CLIENT_KEEPALIVE_TIME_MS;
 static int g_default_client_keepalive_timeout_ms =
@@ -75,6 +80,13 @@ static int g_default_server_keepalive_timeout_ms =
 static bool g_default_keepalive_permit_without_calls =
     DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS;
 
+static int g_default_min_sent_ping_interval_without_data_ms =
+    DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS;
+static int g_default_min_recv_ping_interval_without_data_ms =
+    DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS;
+static int g_default_max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA;
+static int g_default_max_ping_strikes = DEFAULT_MAX_PING_STRIKES;
+
 #define MAX_CLIENT_STREAM_ID 0x7fffffffu
 grpc_tracer_flag grpc_http_trace = GRPC_TRACER_INITIALIZER(false, "http");
 grpc_tracer_flag grpc_flowctl_trace = GRPC_TRACER_INITIALIZER(false, "flowctl");
@@ -144,18 +156,14 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
 static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                          grpc_error *error);
-static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
-                             grpc_chttp2_ping_type ping_type,
-                             grpc_closure *on_initiate,
-                             grpc_closure *on_complete);
+static void send_ping_locked(
+    grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+    grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate,
+    grpc_closure *on_complete,
+    grpc_chttp2_initiate_write_reason initiate_write_reason);
 static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
                                        grpc_error *error);
 
-#define DEFAULT_MIN_TIME_BETWEEN_PINGS_MS 0
-#define DEFAULT_MAX_PINGS_BETWEEN_DATA 3
-#define DEFAULT_MAX_PING_STRIKES 2
-#define DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
-
 /** keepalive-relevant functions */
 static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                        grpc_error *error);
@@ -346,7 +354,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   if (is_client) {
     grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
                                           GRPC_CHTTP2_CLIENT_CONNECT_STRING));
-    grpc_chttp2_initiate_write(exec_ctx, t, "initial_write");
   }
 
   /* configure http2 the way we like it */
@@ -362,14 +369,12 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   queue_setting_update(exec_ctx, t,
                        GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
 
-  t->ping_policy = (grpc_chttp2_repeated_ping_policy){
-      .max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA,
-      .min_time_between_pings =
-          gpr_time_from_millis(DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, GPR_TIMESPAN),
-      .max_ping_strikes = DEFAULT_MAX_PING_STRIKES,
-      .min_ping_interval_without_data = gpr_time_from_millis(
-          DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, GPR_TIMESPAN),
-  };
+  t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
+  t->ping_policy.min_sent_ping_interval_without_data = gpr_time_from_millis(
+      g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN);
+  t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
+  t->ping_policy.min_recv_ping_interval_without_data = gpr_time_from_millis(
+      g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN);
 
   /* Keepalive setting */
   if (t->is_client) {
@@ -428,28 +433,36 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                            GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
       t->ping_policy.max_pings_without_data = grpc_channel_arg_get_integer(
           &channel_args->args[i],
-          (grpc_integer_options){DEFAULT_MAX_PINGS_BETWEEN_DATA, 0, INT_MAX});
+          (grpc_integer_options){g_default_max_pings_without_data, 0,
+                                 INT_MAX});
     } else if (0 == strcmp(channel_args->args[i].key,
                            GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
       t->ping_policy.max_ping_strikes = grpc_channel_arg_get_integer(
           &channel_args->args[i],
-          (grpc_integer_options){DEFAULT_MAX_PING_STRIKES, 0, INT_MAX});
-    } else if (0 == strcmp(channel_args->args[i].key,
-                           GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS)) {
-      t->ping_policy.min_time_between_pings = gpr_time_from_millis(
-          grpc_channel_arg_get_integer(
-              &channel_args->args[i],
-              (grpc_integer_options){DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, 0,
-                                     INT_MAX}),
-          GPR_TIMESPAN);
+          (grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX});
+    } else if (0 ==
+               strcmp(
+                   channel_args->args[i].key,
+                   GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
+      t->ping_policy.min_sent_ping_interval_without_data =
+          gpr_time_from_millis(
+              grpc_channel_arg_get_integer(
+                  &channel_args->args[i],
+                  (grpc_integer_options){
+                      g_default_min_sent_ping_interval_without_data_ms, 0,
+                      INT_MAX}),
+              GPR_TIMESPAN);
     } else if (0 ==
-               strcmp(channel_args->args[i].key,
-                      GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS)) {
-      t->ping_policy.min_ping_interval_without_data = gpr_time_from_millis(
-          grpc_channel_arg_get_integer(
-              &channel_args->args[i],
-              (grpc_integer_options){
-                  DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, 0, INT_MAX}),
-          GPR_TIMESPAN);
+               strcmp(
+                   channel_args->args[i].key,
+                   GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
+      t->ping_policy.min_recv_ping_interval_without_data =
+          gpr_time_from_millis(
+              grpc_channel_arg_get_integer(
+                  &channel_args->args[i],
+                  (grpc_integer_options){
+                      g_default_min_recv_ping_interval_without_data_ms, 0,
+                      INT_MAX}),
+              GPR_TIMESPAN);
     } else if (0 == strcmp(channel_args->args[i].key,
                            GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
@@ -557,8 +570,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
     }
   }
 
-  t->ping_state.pings_before_data_required =
-      t->ping_policy.max_pings_without_data;
+  /* No pings allowed before receiving a header or data frame. */
+  t->ping_state.pings_before_data_required = 0;
   t->ping_state.is_delayed_ping_timer_set = false;
   t->ping_recv_state.last_ping_recv_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -578,7 +591,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
     t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
   }
 
-  grpc_chttp2_initiate_write(exec_ctx, t, "init");
+  grpc_chttp2_initiate_write(exec_ctx, t,
+                             GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
   post_benign_reclaimer(exec_ctx, t);
 }
@@ -624,6 +638,9 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
     connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
                            GRPC_ERROR_REF(error), "close_transport");
     grpc_endpoint_shutdown(exec_ctx, t->ep, GRPC_ERROR_REF(error));
+    if (t->ping_state.is_delayed_ping_timer_set) {
+      grpc_timer_cancel(exec_ctx, &t->ping_state.delayed_ping_timer);
+    }
     switch (t->keepalive_state) {
       case GRPC_CHTTP2_KEEPALIVE_STATE_WAITING:
         grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
@@ -846,13 +863,91 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   }
 }
 
+static void inc_initiate_write_reason(
+    grpc_exec_ctx *exec_ctx, grpc_chttp2_initiate_write_reason reason) {
+  switch (reason) {
+    case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA(
+          exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA(
+          exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL(
+          exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING(
+          exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE(
+          exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED(
+          exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx);
+      break;
+    case GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM:
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx);
+      break;
+  }
+}
+
 void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
-                                grpc_chttp2_transport *t, const char *reason) {
+                                grpc_chttp2_transport *t,
+                                grpc_chttp2_initiate_write_reason reason) {
   GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0);
 
   switch (t->write_state) {
     case GRPC_CHTTP2_WRITE_STATE_IDLE:
-      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
+      inc_initiate_write_reason(exec_ctx, reason);
+      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+                      grpc_chttp2_initiate_write_reason_string(reason));
       t->is_first_write_in_batch = true;
       GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
       GRPC_CLOSURE_SCHED(
@@ -864,7 +959,7 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
       break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING:
       set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
-                      reason);
+                      grpc_chttp2_initiate_write_reason_string(reason));
       break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
       break;
@@ -872,16 +967,12 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
   GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
 }
 
-void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
-                                 grpc_chttp2_transport *t,
-                                 grpc_chttp2_stream *s,
-                                 bool also_initiate_write, const char *reason) {
+void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx,
+                                      grpc_chttp2_transport *t,
+                                      grpc_chttp2_stream *s) {
   if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
     GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
   }
-  if (also_initiate_write) {
-    grpc_chttp2_initiate_write(exec_ctx, t, reason);
-  }
 }
 
 static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t,
@@ -1105,7 +1196,9 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
     grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
     post_destructive_reclaimer(exec_ctx, t);
-    grpc_chttp2_become_writable(exec_ctx, t, s, true, "new_stream");
+    grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+    grpc_chttp2_initiate_write(exec_ctx, t,
+                               GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM);
   }
   /* cancel out streams that will never be started */
   while (t->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@@ -1202,7 +1295,9 @@ static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx *exec_ctx,
                                                   grpc_chttp2_stream *s) {
   if (s->id != 0 && (!s->write_buffering ||
                      s->flow_controlled_buffer.length > t->write_buffer_size)) {
-    grpc_chttp2_become_writable(exec_ctx, t, s, true, "op.send_message");
+    grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+    grpc_chttp2_initiate_write(exec_ctx, t,
+                               GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE);
   }
 }
@@ -1404,14 +1499,13 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
         }
       } else {
         GPR_ASSERT(s->id != 0);
-        bool initiate_write = true;
-        if (op->send_message &&
-            (op->payload->send_message.send_message->flags &
-             GRPC_WRITE_BUFFER_HINT)) {
-          initiate_write = false;
+        grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+        if (!(op->send_message &&
+              (op->payload->send_message.send_message->flags &
+               GRPC_WRITE_BUFFER_HINT))) {
+          grpc_chttp2_initiate_write(
+              exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA);
         }
-        grpc_chttp2_become_writable(exec_ctx, t, s, initiate_write,
-                                    "op.send_initial_metadata");
       }
     } else {
       s->send_initial_metadata = NULL;
@@ -1519,8 +1613,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
       } else if (s->id != 0) {
         /* TODO(ctiller): check if there's flow control for any outstanding
            bytes before going writable */
-        grpc_chttp2_become_writable(exec_ctx, t, s, true,
-                                    "op.send_trailing_metadata");
+        grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+        grpc_chttp2_initiate_write(
+            exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA);
       }
     }
   }
@@ -1632,15 +1727,17 @@ static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   GRPC_ERROR_UNREF(error);
 }
 
-static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
-                             grpc_chttp2_ping_type ping_type,
-                             grpc_closure *on_initiate, grpc_closure *on_ack) {
+static void send_ping_locked(
+    grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+    grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate,
+    grpc_closure *on_ack,
+    grpc_chttp2_initiate_write_reason initiate_write_reason) {
   grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type];
   grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE], on_initiate,
                            GRPC_ERROR_NONE);
   if (grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
                                GRPC_ERROR_NONE)) {
-    grpc_chttp2_initiate_write(exec_ctx, t, "send_ping");
+    grpc_chttp2_initiate_write(exec_ctx, t, initiate_write_reason);
   }
 }
@@ -1648,7 +1745,10 @@ static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
                                        grpc_error *error) {
   grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
   t->ping_state.is_delayed_ping_timer_set = false;
-  grpc_chttp2_initiate_write(exec_ctx, t, "retry_send_ping");
+  if (error == GRPC_ERROR_NONE) {
+    grpc_chttp2_initiate_write(exec_ctx, t,
+                               GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
+  }
 }
 
 void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -1663,7 +1763,8 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
   }
   GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
   if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
-    grpc_chttp2_initiate_write(exec_ctx, t, "continue_pings");
+    grpc_chttp2_initiate_write(exec_ctx, t,
+                               GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS);
   }
 }
@@ -1676,7 +1777,8 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                         &slice, &http_error);
   grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
                             grpc_slice_ref_internal(slice), &t->qbuf);
-  grpc_chttp2_initiate_write(exec_ctx, t, "goaway_sent");
+  grpc_chttp2_initiate_write(exec_ctx, t,
+                             GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT);
   GRPC_ERROR_UNREF(error);
 }
@@ -1723,7 +1825,8 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
   if (op->send_ping) {
     send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, NULL,
-                     op->send_ping);
+                     op->send_ping,
+                     GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
   }
 
   if (op->on_connectivity_state_change != NULL) {
@@ -1968,7 +2071,8 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
       grpc_slice_buffer_add(
           &t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
                                                   &s->stats.outgoing));
-      grpc_chttp2_initiate_write(exec_ctx, t, "rst_stream");
+      grpc_chttp2_initiate_write(exec_ctx, t,
+                                 GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
     }
   }
   if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) {
@@ -2289,7 +2393,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                                               &s->stats.outgoing));
   grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error);
-  grpc_chttp2_initiate_write(exec_ctx, t, "close_from_api");
+  grpc_chttp2_initiate_write(exec_ctx, t,
+                             GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API);
 }
 
 typedef struct {
@@ -2324,19 +2429,20 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
     case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
       break;
     case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
-      grpc_chttp2_become_writable(exec_ctx, t, s, true,
-                                  "immediate stream flowctl");
+      grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+      grpc_chttp2_initiate_write(
+          exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL);
       break;
     case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
-      grpc_chttp2_become_writable(exec_ctx, t, s, false,
-                                  "queue stream flowctl");
+      grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
       break;
   }
   switch (action.send_transport_update) {
     case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
       break;
     case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
-      grpc_chttp2_initiate_write(exec_ctx, t, "immediate transport flowctl");
+      grpc_chttp2_initiate_write(
+          exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL);
       break;
     // this is the same as no action b/c every time the transport enters the
     // writing path it will maybe do an update
@@ -2354,7 +2460,8 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
                                  (uint32_t)action.max_frame_size);
     }
     if (action.send_setting_update == GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY) {
-      grpc_chttp2_initiate_write(exec_ctx, t, "immediate setting update");
+      grpc_chttp2_initiate_write(exec_ctx, t,
+                                 GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS);
     }
   }
   if (action.need_ping) {
@@ -2362,7 +2469,8 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
     grpc_bdp_estimator_schedule_ping(&t->flow_control.bdp_estimator);
     send_ping_locked(exec_ctx, t,
                      GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE,
-                     &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked);
+                     &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked,
+                     GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING);
   }
 }
@@ -2441,7 +2549,10 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
       if (t->flow_control.initial_window_update > 0) {
         grpc_chttp2_stream *s;
         while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
-          grpc_chttp2_become_writable(exec_ctx, t, s, true, "unstalled");
+          grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+          grpc_chttp2_initiate_write(
+              exec_ctx, t,
+              GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING);
         }
       }
       t->flow_control.initial_window_update = 0;
@@ -2538,6 +2649,36 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
               &args->args[i],
               (grpc_integer_options){g_default_keepalive_permit_without_calls,
                                      0, 1});
+    } else if (0 ==
+               strcmp(args->args[i].key, GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
+      g_default_max_ping_strikes = grpc_channel_arg_get_integer(
+          &args->args[i],
+          (grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX});
+    } else if (0 == strcmp(args->args[i].key,
+                           GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
+      g_default_max_pings_without_data = grpc_channel_arg_get_integer(
+          &args->args[i], (grpc_integer_options){
+                              g_default_max_pings_without_data, 0, INT_MAX});
+    } else if (0 ==
+               strcmp(
+                   args->args[i].key,
+                   GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
+      g_default_min_sent_ping_interval_without_data_ms =
+          grpc_channel_arg_get_integer(
+              &args->args[i],
+              (grpc_integer_options){
+                  g_default_min_sent_ping_interval_without_data_ms, 0,
+                  INT_MAX});
+    } else if (0 ==
+               strcmp(
+                   args->args[i].key,
+                   GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
+      g_default_min_recv_ping_interval_without_data_ms =
+          grpc_channel_arg_get_integer(
+              &args->args[i],
+              (grpc_integer_options){
+                  g_default_min_recv_ping_interval_without_data_ms, 0,
+                  INT_MAX});
     }
   }
 }
@@ -2556,7 +2697,8 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
       GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
       send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE,
                        &t->start_keepalive_ping_locked,
-                       &t->finish_keepalive_ping_locked);
+                       &t->finish_keepalive_ping_locked,
+                       GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
     } else {
       GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
       grpc_timer_init(
@@ -3017,6 +3159,56 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
 /*******************************************************************************
  * MONITORING
  */
+
+const char *grpc_chttp2_initiate_write_reason_string(
+    grpc_chttp2_initiate_write_reason reason) {
+  switch (reason) {
+    case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
+      return "INITIAL_WRITE";
+    case GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM:
+      return "START_NEW_STREAM";
+    case GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE:
+      return "SEND_MESSAGE";
+    case GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA:
+      return "SEND_INITIAL_METADATA";
+    case GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA:
+      return "SEND_TRAILING_METADATA";
+    case GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING:
+      return "RETRY_SEND_PING";
+    case GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS:
+      return "CONTINUE_PINGS";
+    case GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT:
+      return "GOAWAY_SENT";
+    case GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM:
+      return "RST_STREAM";
+    case GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API:
+      return "CLOSE_FROM_API";
+    case GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL:
+      return "STREAM_FLOW_CONTROL";
+    case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL:
+      return "TRANSPORT_FLOW_CONTROL";
+    case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
+      return "SEND_SETTINGS";
+    case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING:
+      return "BDP_ESTIMATOR_PING";
+    case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
+      return "FLOW_CONTROL_UNSTALLED_BY_SETTING";
+    case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:
+      return "FLOW_CONTROL_UNSTALLED_BY_UPDATE";
+    case GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING:
+      return "APPLICATION_PING";
+    case GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING:
+      return "KEEPALIVE_PING";
+    case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED:
+      return "TRANSPORT_FLOW_CONTROL_UNSTALLED";
+    case GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE:
+      return "PING_RESPONSE";
+    case GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM:
+      return "FORCE_RST_STREAM";
+  }
+  GPR_UNREACHABLE_CODE(return "unknown");
+}
+
 static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx,
                                           grpc_transport *t) {
   return ((grpc_chttp2_transport *)t)->ep;
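Note: taken together, the chttp2_transport.c changes replace the compile-time ping-policy constants with process-wide defaults (settable through grpc_chttp2_config_default_keepalive_args) that per-channel args can still override, and they thread a typed grpc_chttp2_initiate_write_reason through every write so each write can be attributed in stats counters and trace strings. A sketch of how a server might adjust the new receive-side ping limits through the channel args parsed above — the wrapper function and the concrete values are illustrative, not from this diff:

    #include <grpc/grpc.h>
    #include <grpc/support/useful.h>

    grpc_server *server_with_lenient_ping_limits(void) {
      grpc_arg args[2];
      args[0].type = GRPC_ARG_INTEGER;
      args[0].key = (char *)GRPC_ARG_HTTP2_MAX_PING_STRIKES;
      args[0].value.integer = 5; /* default above is 2 */
      args[1].type = GRPC_ARG_INTEGER;
      args[1].key =
          (char *)GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS;
      args[1].value.integer = 10000; /* default above is 300000 (5 minutes) */
      grpc_channel_args channel_args = {GPR_ARRAY_SIZE(args), args};
      return grpc_server_create(&channel_args, NULL);
    }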

@@ -25,6 +25,7 @@
 extern grpc_tracer_flag grpc_http_trace;
 extern grpc_tracer_flag grpc_flowctl_trace;
+extern grpc_tracer_flag grpc_trace_http2_stream_state;
 #ifndef NDEBUG
 extern grpc_tracer_flag grpc_trace_chttp2_refcount;

@@ -92,7 +92,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
     gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
     gpr_timespec next_allowed_ping =
         gpr_time_add(t->ping_recv_state.last_ping_recv_time,
-                     t->ping_policy.min_ping_interval_without_data);
+                     t->ping_policy.min_recv_ping_interval_without_data);
 
     if (t->keepalive_permit_without_calls == 0 &&
         grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
@@ -117,7 +117,8 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
             t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks));
       }
       t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
-      grpc_chttp2_initiate_write(exec_ctx, t, "ping response");
+      grpc_chttp2_initiate_write(exec_ctx, t,
+                                 GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE);
     }
   }
 }
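Note: the rename makes the receive-side check explicit — a ping is "too early" if it arrives before last_ping_recv_time plus min_recv_ping_interval_without_data, which is now a separate knob from the sender-side interval. A self-contained sketch of the arithmetic, using the new 5-minute default from chttp2_transport.c:

    #include <grpc/support/time.h>

    /* Mirrors the next_allowed_ping computation above. */
    static gpr_timespec next_allowed_ping_example(gpr_timespec last_ping_recv) {
      return gpr_time_add(last_ping_recv,
                          gpr_time_from_millis(300000, GPR_TIMESPAN));
    }

So a ping received 100 seconds after the previous one falls well before next_allowed_ping and counts as a strike; with the new DEFAULT_MAX_PING_STRIKES of 2, repeated early pings presumably lead the server to treat the peer as misbehaving and tear the transport down (see grpc_chttp2_add_ping_strike, referenced in the header hunk below).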

@@ -99,8 +99,10 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
       grpc_chttp2_flowctl_recv_stream_update(
           &t->flow_control, &s->flow_control, received_update);
       if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) {
-        grpc_chttp2_become_writable(exec_ctx, t, s, true,
-                                    "stream.read_flow_control");
+        grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+        grpc_chttp2_initiate_write(
+            exec_ctx, t,
+            GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE);
       }
     }
   } else {
@@ -109,7 +111,9 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
                                             received_update);
     bool is_zero = t->flow_control.remote_window <= 0;
     if (was_zero && !is_zero) {
-      grpc_chttp2_initiate_write(exec_ctx, t, "new_global_flow_control");
+      grpc_chttp2_initiate_write(
+          exec_ctx, t,
+          GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED);
     }
   }
 }

@@ -51,8 +51,10 @@
 #define MAX_DECODER_SPACE_USAGE 512
 
 static grpc_slice_refcount terminal_slice_refcount = {NULL, NULL};
-static const grpc_slice terminal_slice = {&terminal_slice_refcount,
-                                          .data.refcounted = {0, 0}};
+static const grpc_slice terminal_slice = {
+    &terminal_slice_refcount, /* refcount */
+    {{0, 0}}                  /* data.refcounted */
+};
 
 extern grpc_tracer_flag grpc_http_trace;
@@ -283,29 +285,26 @@ typedef struct {
 } wire_value;
 
 static wire_value get_wire_value(grpc_mdelem elem, bool true_binary_enabled) {
+  wire_value wire_val;
   if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
     if (true_binary_enabled) {
-      return (wire_value){
-          .huffman_prefix = 0x00,
-          .insert_null_before_wire_value = true,
-          .data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)),
-      };
+      wire_val.huffman_prefix = 0x00;
+      wire_val.insert_null_before_wire_value = true;
+      wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
     } else {
-      return (wire_value){
-          .huffman_prefix = 0x80,
-          .insert_null_before_wire_value = false,
-          .data = grpc_chttp2_base64_encode_and_huffman_compress(
-              GRPC_MDVALUE(elem)),
-      };
+      wire_val.huffman_prefix = 0x80;
+      wire_val.insert_null_before_wire_value = false;
+      wire_val.data =
+          grpc_chttp2_base64_encode_and_huffman_compress(GRPC_MDVALUE(elem));
     }
   } else {
     /* TODO(ctiller): opportunistically compress non-binary headers */
-    return (wire_value){
-        .huffman_prefix = 0x00,
-        .insert_null_before_wire_value = false,
-        .data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)),
-    };
+    wire_val.huffman_prefix = 0x00;
+    wire_val.insert_null_before_wire_value = false;
+    wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
   }
+  return wire_val;
 }
 
 static size_t wire_value_length(wire_value v) {
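Note: both hpack_encoder.c hunks trade C99-only syntax — designated initializers and compound literals returned by value — for explicit field assignments that also compile as C++ and under MSVC, in line with the const-correctness casts elsewhere in this merge. The pattern, reduced to a standalone sketch with a made-up struct:

    typedef struct { int prefix; int flag; } wire;

    /* C99 style being removed:
         return (wire){.prefix = 0x80, .flag = 1};            */

    /* Portable replacement, as in get_wire_value() above: */
    static wire make_wire(void) {
      wire w;
      w.prefix = 0x80;
      w.flag = 1;
      return w;
    }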

@@ -1649,7 +1649,8 @@ static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
     grpc_slice_buffer_add(
         &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
                                                 &s->stats.outgoing));
-    grpc_chttp2_initiate_write(exec_ctx, t, "force_rst_stream");
+    grpc_chttp2_initiate_write(exec_ctx, t,
+                               GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM);
     grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
   }
   GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");

@ -79,16 +79,43 @@ typedef enum {
GRPC_CHTTP2_PCL_COUNT /* must be last */ GRPC_CHTTP2_PCL_COUNT /* must be last */
} grpc_chttp2_ping_closure_list; } grpc_chttp2_ping_closure_list;
typedef enum {
GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE,
GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM,
GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE,
GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA,
GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA,
GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING,
GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS,
GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT,
GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM,
GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API,
GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL,
GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS,
GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING,
GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING,
GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING,
GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING,
GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED,
GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE,
GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM,
} grpc_chttp2_initiate_write_reason;
const char *grpc_chttp2_initiate_write_reason_string(
grpc_chttp2_initiate_write_reason reason);
typedef struct { typedef struct {
grpc_closure_list lists[GRPC_CHTTP2_PCL_COUNT]; grpc_closure_list lists[GRPC_CHTTP2_PCL_COUNT];
uint64_t inflight_id; uint64_t inflight_id;
} grpc_chttp2_ping_queue; } grpc_chttp2_ping_queue;
typedef struct { typedef struct {
gpr_timespec min_time_between_pings;
int max_pings_without_data; int max_pings_without_data;
int max_ping_strikes; int max_ping_strikes;
gpr_timespec min_ping_interval_without_data; gpr_timespec min_sent_ping_interval_without_data;
gpr_timespec min_recv_ping_interval_without_data;
} grpc_chttp2_repeated_ping_policy; } grpc_chttp2_repeated_ping_policy;
typedef struct { typedef struct {
@@ -599,7 +626,8 @@ struct grpc_chttp2_stream {
    The actual call chain is documented in the implementation of this function.
    */
 void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
-                                grpc_chttp2_transport *t, const char *reason);
+                                grpc_chttp2_transport *t,
+                                grpc_chttp2_initiate_write_reason reason);

 typedef struct {
   /** are we writing? */
@@ -851,10 +879,9 @@ void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
 /** add a ref to the stream and add it to the writable list;
     ref will be dropped in writing.c */
-void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
-                                 grpc_chttp2_transport *t,
-                                 grpc_chttp2_stream *s,
-                                 bool also_initiate_write, const char *reason);
+void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx,
+                                      grpc_chttp2_transport *t,
+                                      grpc_chttp2_stream *s);

 void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
                                grpc_chttp2_transport *t, grpc_chttp2_stream *s,

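The rename from grpc_chttp2_become_writable to grpc_chttp2_mark_stream_writable also drops the also_initiate_write and reason parameters: queueing a stream on the writable list and kicking off a write become two separate, explicit calls at each call site. A self-contained toy sketch of the split, with hypothetical types and names:

#include <stdio.h>

typedef struct { int writable; } stream;
typedef struct { int write_scheduled; } transport;

static void mark_stream_writable(transport *t, stream *s) {
  (void)t;
  s->writable = 1; /* list management only */
}

static void initiate_write(transport *t, const char *why) {
  if (!t->write_scheduled) {
    t->write_scheduled = 1; /* write scheduling only */
    printf("write initiated: %s\n", why);
  }
}

int main(void) {
  transport t = {0};
  stream s = {0};
  mark_stream_writable(&t, &s);       /* step 1: queue the stream */
  initiate_write(&t, "send_message"); /* step 2: kick the writer */
  return 0;
}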
@@ -383,6 +383,9 @@ error_handler:
     /* t->parser = grpc_chttp2_data_parser_parse;*/
     t->parser = grpc_chttp2_data_parser_parse;
     t->parser_data = &s->data_parser;
+    t->ping_state.pings_before_data_required =
+        t->ping_policy.max_pings_without_data;
+    t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
     return GRPC_ERROR_NONE;
   } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
     /* handle stream errors by closing the stream */
@@ -559,6 +562,10 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
         (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
   }
+  t->ping_state.pings_before_data_required =
+      t->ping_policy.max_pings_without_data;
+  t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+
   /* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
   s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
   if (s == NULL) {

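Both parser hunks above re-arm the ping throttle whenever substantive traffic (a DATA or HEADERS frame) is parsed: the remaining ping budget is refilled from the policy, and the last-sent timestamp is pushed into the distant past so the next ping is not rejected as too soon. A simplified, self-contained sketch of that bookkeeping (plain doubles in place of gpr_timespec; -DBL_MAX plays the role of gpr_inf_past):

#include <float.h>

typedef struct {
  int pings_before_data_required;
  double last_ping_sent_time; /* seconds since some epoch */
} ping_state;

typedef struct {
  int max_pings_without_data;
} ping_policy;

/* Called when DATA or HEADERS arrives, per the hunks above. */
static void reset_ping_budget(ping_state *ps, const ping_policy *pp) {
  ps->pings_before_data_required = pp->max_pings_without_data;
  ps->last_ping_sent_time = -DBL_MAX; /* next ping immediately allowed */
}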
@@ -20,6 +20,27 @@

 #include <grpc/support/log.h>

+static char *stream_list_id_string(grpc_chttp2_stream_list_id id) {
+  switch (id) {
+    case GRPC_CHTTP2_LIST_WRITABLE:
+      return "writable";
+    case GRPC_CHTTP2_LIST_WRITING:
+      return "writing";
+    case GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT:
+      return "stalled_by_transport";
+    case GRPC_CHTTP2_LIST_STALLED_BY_STREAM:
+      return "stalled_by_stream";
+    case GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY:
+      return "waiting_for_concurrency";
+    case STREAM_LIST_COUNT:
+      GPR_UNREACHABLE_CODE(return "unknown");
+  }
+  GPR_UNREACHABLE_CODE(return "unknown");
+}
+
+grpc_tracer_flag grpc_trace_http2_stream_state =
+    GRPC_TRACER_INITIALIZER(false, "http2_stream_state");
+
 /* core list management */

 static bool stream_list_empty(grpc_chttp2_transport *t,
@@ -44,6 +65,10 @@ static bool stream_list_pop(grpc_chttp2_transport *t,
     s->included[id] = 0;
   }
   *stream = s;
+  if (s && GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+    gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id,
+            t->is_client ? "cli" : "svr", stream_list_id_string(id));
+  }
   return s != 0;
 }
@@ -62,6 +87,10 @@ static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
   } else {
     t->lists[id].tail = s->links[id].prev;
   }
+  if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+    gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id,
+            t->is_client ? "cli" : "svr", stream_list_id_string(id));
+  }
 }

 static bool stream_list_maybe_remove(grpc_chttp2_transport *t,
@@ -90,6 +119,10 @@ static void stream_list_add_tail(grpc_chttp2_transport *t,
   }
   t->lists[id].tail = s;
   s->included[id] = 1;
+  if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+    gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id,
+            t->is_client ? "cli" : "svr", stream_list_id_string(id));
+  }
 }

 static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
@@ -150,17 +183,12 @@ void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t,

 void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
                                                grpc_chttp2_stream *s) {
-  GRPC_FLOW_CONTROL_IF_TRACING(
-      gpr_log(GPR_DEBUG, "stream %u stalled by transport", s->id));
   stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
 }

 bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
                                                grpc_chttp2_stream **s) {
-  bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
-  GRPC_FLOW_CONTROL_IF_TRACING(if (ret) gpr_log(
-      GPR_DEBUG, "stream %u un-stalled by transport", (*s)->id));
-  return ret;
+  return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
 }

 void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
@@ -170,23 +198,15 @@ void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,

 void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t,
                                             grpc_chttp2_stream *s) {
-  GRPC_FLOW_CONTROL_IF_TRACING(
-      gpr_log(GPR_DEBUG, "stream %u stalled by stream", s->id));
   stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
 }

 bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t,
                                             grpc_chttp2_stream **s) {
-  bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
-  GRPC_FLOW_CONTROL_IF_TRACING(
-      if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", (*s)->id));
-  return ret;
+  return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
 }

 bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
                                                grpc_chttp2_stream *s) {
-  bool ret = stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
-  GRPC_FLOW_CONTROL_IF_TRACING(
-      if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", s->id));
-  return ret;
+  return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
 }

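stream_lists.c gains an http2_stream_state tracer: every add, remove, and successful pop on a stream list now logs the transport pointer, stream id, client/server role, and list name, but only when the flag is enabled, while the older per-list flow-control trace messages are deleted in favor of this uniform scheme. A minimal sketch of the guard pattern, using a plain bool in place of gRPC's tracer machinery:

#include <stdbool.h>
#include <stdio.h>

static bool g_trace_stream_state = false; /* flipped at runtime in the real code */

static void trace_list_op(void *t, unsigned stream_id, bool is_client,
                          const char *op, const char *list_name) {
  if (!g_trace_stream_state) return; /* near-zero cost when tracing is off */
  fprintf(stderr, "%p[%u][%s]: %s %s\n", t, stream_id,
          is_client ? "cli" : "svr", op, list_name);
}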
@@ -68,7 +68,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
   }
   if (t->ping_state.pings_before_data_required == 0 &&
       t->ping_policy.max_pings_without_data != 0) {
-    /* need to send something of substance before sending a ping again */
+    /* need to receive something of substance before sending a ping again */
     if (GRPC_TRACER_ON(grpc_http_trace) ||
         GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
       gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d",
@@ -78,11 +78,18 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
     return;
   }
   gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
-  gpr_timespec elapsed = gpr_time_sub(now, t->ping_state.last_ping_sent_time);
-  /*gpr_log(GPR_DEBUG, "elapsed:%d.%09d min:%d.%09d", (int)elapsed.tv_sec,
-          elapsed.tv_nsec, (int)t->ping_policy.min_time_between_pings.tv_sec,
-          (int)t->ping_policy.min_time_between_pings.tv_nsec);*/
-  if (gpr_time_cmp(elapsed, t->ping_policy.min_time_between_pings) < 0) {
+  gpr_timespec next_allowed_ping =
+      gpr_time_add(t->ping_state.last_ping_sent_time,
+                   t->ping_policy.min_sent_ping_interval_without_data);
+  if (t->keepalive_permit_without_calls == 0 &&
+      grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+    next_allowed_ping = gpr_time_add(t->ping_recv_state.last_ping_recv_time,
+                                     gpr_time_from_seconds(7200, GPR_TIMESPAN));
+  }
+  /* gpr_log(GPR_DEBUG, "next_allowed_ping:%d.%09d now:%d.%09d",
+            (int)next_allowed_ping.tv_sec, (int)next_allowed_ping.tv_nsec,
+            (int)now.tv_sec, (int)now.tv_nsec); */
+  if (gpr_time_cmp(next_allowed_ping, now) > 0) {
     /* not enough elapsed time between successive pings */
     if (GRPC_TRACER_ON(grpc_http_trace) ||
         GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
@@ -93,9 +100,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
     if (!t->ping_state.is_delayed_ping_timer_set) {
       t->ping_state.is_delayed_ping_timer_set = true;
       grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer,
-                      gpr_time_add(t->ping_state.last_ping_sent_time,
-                                   t->ping_policy.min_time_between_pings),
-                      &t->retry_initiate_ping_locked,
+                      next_allowed_ping, &t->retry_initiate_ping_locked,
                       gpr_now(GPR_CLOCK_MONOTONIC));
     }
     return;
@@ -119,6 +124,12 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
                        grpc_chttp2_ping_create(false, pq->inflight_id));
   GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx);
   t->ping_state.last_ping_sent_time = now;
+  if (GRPC_TRACER_ON(grpc_http_trace) ||
+      GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+    gpr_log(GPR_DEBUG, "Ping sent [%p]: %d/%d", t->peer_string,
+            t->ping_state.pings_before_data_required,
+            t->ping_policy.max_pings_without_data);
+  }
   t->ping_state.pings_before_data_required -=
       (t->ping_state.pings_before_data_required != 0);
 }
@@ -201,9 +212,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
   if (t->flow_control.remote_window > 0) {
     while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
-      if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s) &&
-          stream_ref_if_not_destroyed(&s->refcount->refs)) {
-        grpc_chttp2_initiate_write(exec_ctx, t, "transport.read_flow_control");
+      if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
+        stream_ref_if_not_destroyed(&s->refcount->refs);
       }
     }
   }
@@ -258,8 +268,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
                             .stats = &s->stats.outgoing};
       grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
                                 s->send_initial_metadata, &hopt, &t->outbuf);
-      t->ping_state.pings_before_data_required =
-          t->ping_policy.max_pings_without_data;
+      now_writing = true;
       if (!t->is_client) {
         t->ping_recv_state.last_ping_recv_time =
             gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -298,8 +307,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
       grpc_slice_buffer_add(
           &t->outbuf, grpc_chttp2_window_update_create(s->id, stream_announce,
                                                        &s->stats.outgoing));
-      t->ping_state.pings_before_data_required =
-          t->ping_policy.max_pings_without_data;
      if (!t->is_client) {
        t->ping_recv_state.last_ping_recv_time =
            gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -376,8 +383,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
                                  send_bytes);
         s->sending_bytes += send_bytes;
       }
-      t->ping_state.pings_before_data_required =
-          t->ping_policy.max_pings_without_data;
       if (!t->is_client) {
         t->ping_recv_state.last_ping_recv_time =
             gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -488,8 +493,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
     grpc_slice_buffer_add(
         &t->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
                                                      &throwaway_stats));
-    t->ping_state.pings_before_data_required =
-        t->ping_policy.max_pings_without_data;
     if (!t->is_client) {
       t->ping_recv_state.last_ping_recv_time =
           gpr_inf_past(GPR_CLOCK_MONOTONIC);

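The maybe_initiate_ping changes above recast the throttle as a single next-allowed-ping deadline: normally last_ping_sent_time plus min_sent_ping_interval_without_data, but when keepalive is not permitted without calls and no streams are open, the deadline moves to two hours after the last ping was received. The delayed-ping timer then fires at exactly that deadline instead of recomputing it. A sketch of the decision, with gpr_timespec reduced to seconds:

#include <stdbool.h>
#include <stddef.h>

/* Earliest time (in seconds) at which the next ping may be sent. */
static double next_allowed_ping(double last_ping_sent, double min_sent_interval,
                                double last_ping_recv,
                                bool keepalive_permit_without_calls,
                                size_t open_streams) {
  double next = last_ping_sent + min_sent_interval;
  if (!keepalive_permit_without_calls && open_streams == 0) {
    next = last_ping_recv + 7200.0; /* the 2-hour backoff in the hunk above */
  }
  return next; /* send now if next <= current time, else arm a timer for it */
}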
@@ -1263,8 +1263,8 @@ grpc_channel *grpc_inproc_channel_create(grpc_server *server,
   grpc_arg default_authority_arg;
   default_authority_arg.type = GRPC_ARG_STRING;
-  default_authority_arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
-  default_authority_arg.value.string = "inproc.authority";
+  default_authority_arg.key = (char *)GRPC_ARG_DEFAULT_AUTHORITY;
+  default_authority_arg.value.string = (char *)"inproc.authority";
   grpc_channel_args *client_args =
       grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);

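The two casts here are needed because grpc_arg.key and grpc_arg.value.string are declared as plain char *, while GRPC_ARG_DEFAULT_AUTHORITY and "inproc.authority" are constant character data; C++ builds (and C builds with -Wwrite-strings) reject the unqualified assignment. A small self-contained sketch of the same situation with simplified types:

typedef struct {
  char *key;   /* not const in the public struct, hence the casts */
  char *value;
} string_arg;

static string_arg make_default_authority_arg(void) {
  string_arg a;
  a.key = (char *)"grpc.default_authority"; /* literal assigned to non-const field */
  a.value = (char *)"inproc.authority";
  return a;
}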
@@ -243,7 +243,7 @@ grpc_channel_args *grpc_channel_args_set_compression_algorithm(
   GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT);
   grpc_arg tmp;
   tmp.type = GRPC_ARG_INTEGER;
-  tmp.key = GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+  tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
   tmp.value.integer = algorithm;
   return grpc_channel_args_copy_and_add(a, &tmp, 1);
 }
@@ -253,7 +253,7 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
   GPR_ASSERT(algorithm < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
   grpc_arg tmp;
   tmp.type = GRPC_ARG_INTEGER;
-  tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+  tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
   tmp.value.integer = algorithm;
   return grpc_channel_args_copy_and_add(a, &tmp, 1);
 }
@@ -308,7 +308,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
   if (grpc_channel_args_get_compression_algorithm(*a) == algorithm &&
       state == 0) {
-    char *algo_name = NULL;
+    const char *algo_name = NULL;
     GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algo_name) != 0);
     gpr_log(GPR_ERROR,
             "Tried to disable default compression algorithm '%s'. The "
@@ -324,7 +324,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
     /* create a new arg */
     grpc_arg tmp;
     tmp.type = GRPC_ARG_INTEGER;
-    tmp.key = GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+    tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
     /* all enabled by default */
     tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
     if (state != 0) {
@@ -349,7 +349,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
   if (grpc_channel_args_get_stream_compression_algorithm(*a) == algorithm &&
       state == 0) {
-    char *algo_name = NULL;
+    const char *algo_name = NULL;
     GPR_ASSERT(grpc_stream_compression_algorithm_name(algorithm, &algo_name) !=
                0);
     gpr_log(GPR_ERROR,
@@ -366,7 +366,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
     /* create a new arg */
     grpc_arg tmp;
     tmp.type = GRPC_ARG_INTEGER;
-    tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+    tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
     /* all enabled by default */
     tmp.value.integer = (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1;
     if (state != 0) {

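Every hunk in channel_args.c above follows the same recipe: build a temporary grpc_arg on the stack, point its key at a constant, and append it with grpc_channel_args_copy_and_add; only the key assignments changed, picking up (char *) casts for the const-correctness reason noted earlier. A sketch of the construction pattern with simplified stand-in types:

typedef enum { ARG_STRING, ARG_INTEGER } arg_type;

typedef struct {
  arg_type type;
  char *key;
  union {
    int integer;
    char *string;
  } value;
} channel_arg;

static channel_arg make_int_arg(const char *key, int v) {
  channel_arg tmp;
  tmp.type = ARG_INTEGER;
  tmp.key = (char *)key; /* mirrors the casts added in the hunks above */
  tmp.value.integer = v;
  return tmp;
}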
@@ -281,7 +281,7 @@ grpc_channel_stack *grpc_channel_stack_from_top_element(
 /* Given the top element of a call stack, get the call stack itself */
 grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);

-void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
                       grpc_call_element *elem,
                       grpc_transport_stream_op_batch *op);

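grpc_call_log_op takes const char *file because its callers pass __FILE__, which names a string literal. A sketch of the usual wrapper shape (the macro name here is hypothetical):

#include <stdio.h>

static void log_op(const char *file, int line, const char *what) {
  fprintf(stderr, "%s:%d: %s\n", file, line, what);
}

/* __FILE__ expands to a string literal, so the parameter must be const. */
#define LOG_OP(what) log_op(__FILE__, __LINE__, (what))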
@@ -60,7 +60,7 @@ int grpc_stream_compression_algorithm_parse(
 }

 int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
-                                    char **name) {
+                                    const char **name) {
   GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
                  ((int)algorithm, name));
   switch (algorithm) {
@@ -80,7 +80,7 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
 }

 int grpc_stream_compression_algorithm_name(
-    grpc_stream_compression_algorithm algorithm, char **name) {
+    grpc_stream_compression_algorithm algorithm, const char **name) {
   GRPC_API_TRACE(
       "grpc_stream_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
       ((int)algorithm, name));

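The name-lookup functions now hand back their result through a const char ** out-parameter, making explicit that the returned strings are static data the caller must not modify. A self-contained sketch of that shape (values here are illustrative only):

/* Returns nonzero on success and points *name at a static string. */
static int algorithm_name_sketch(int algorithm, const char **name) {
  switch (algorithm) {
    case 0:
      *name = "identity";
      return 1;
    case 1:
      *name = "gzip";
      return 1;
    default:
      return 0; /* unknown algorithm; *name is left untouched */
  }
}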
@@ -31,6 +31,12 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
     "server_channels_created",
     "syscall_poll",
     "syscall_wait",
+    "pollset_kick",
+    "pollset_kicked_without_poller",
+    "pollset_kicked_again",
+    "pollset_kick_wakeup_fd",
+    "pollset_kick_wakeup_cv",
+    "pollset_kick_own_thread",
     "histogram_slow_lookups",
     "syscall_write",
     "syscall_read",
@@ -50,6 +56,27 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
     "http2_writes_offloaded",
     "http2_writes_continued",
     "http2_partial_writes",
+    "http2_initiate_write_due_to_initial_write",
+    "http2_initiate_write_due_to_start_new_stream",
+    "http2_initiate_write_due_to_send_message",
+    "http2_initiate_write_due_to_send_initial_metadata",
+    "http2_initiate_write_due_to_send_trailing_metadata",
+    "http2_initiate_write_due_to_retry_send_ping",
+    "http2_initiate_write_due_to_continue_pings",
+    "http2_initiate_write_due_to_goaway_sent",
+    "http2_initiate_write_due_to_rst_stream",
+    "http2_initiate_write_due_to_close_from_api",
+    "http2_initiate_write_due_to_stream_flow_control",
+    "http2_initiate_write_due_to_transport_flow_control",
+    "http2_initiate_write_due_to_send_settings",
+    "http2_initiate_write_due_to_bdp_estimator_ping",
+    "http2_initiate_write_due_to_flow_control_unstalled_by_setting",
+    "http2_initiate_write_due_to_flow_control_unstalled_by_update",
+    "http2_initiate_write_due_to_application_ping",
+    "http2_initiate_write_due_to_keepalive_ping",
+    "http2_initiate_write_due_to_transport_flow_control_unstalled",
+    "http2_initiate_write_due_to_ping_response",
+    "http2_initiate_write_due_to_force_rst_stream",
     "combiner_locks_initiated",
     "combiner_locks_scheduled_items",
     "combiner_locks_scheduled_final_items",
@@ -60,6 +87,8 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
     "executor_wakeup_initiated",
     "executor_queue_drained",
     "executor_push_retries",
+    "executor_threads_created",
+    "executor_threads_used",
     "server_requested_calls",
     "server_slowpath_requests_queued",
 };
@@ -70,6 +99,18 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
     "Number of client subchannels created", "Number of server channels created",
     "Number of polling syscalls (epoll_wait, poll, etc) made by this process",
     "Number of sleeping syscalls made by this process",
+    "How many polling wakeups were performed by the process (only valid for "
+    "epoll1 right now)",
+    "How many times was a polling wakeup requested without an active poller "
+    "(only valid for epoll1 right now)",
+    "How many times was the same polling worker awoken repeatedly before "
+    "waking up (only valid for epoll1 right now)",
+    "How many times was an eventfd used as the wakeup vector for a polling "
+    "wakeup (only valid for epoll1 right now)",
+    "How many times was a condition variable used as the wakeup vector for a "
+    "polling wakeup (only valid for epoll1 right now)",
+    "How many times could a polling wakeup be satisfied by keeping the waking "
+    "thread awake? (only valid for epoll1 right now)",
     "Number of times histogram increments went through the slow (binary "
     "search) path",
     "Number of write syscalls (or equivalent - eg sendmsg) made by this "
@@ -92,6 +133,30 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
     "written",
     "Number of HTTP2 writes that were made knowing there was still more data "
     "to be written (we cap maximum write size to syscall_write)",
+    "Number of HTTP2 writes initiated due to 'initial_write'",
+    "Number of HTTP2 writes initiated due to 'start_new_stream'",
+    "Number of HTTP2 writes initiated due to 'send_message'",
+    "Number of HTTP2 writes initiated due to 'send_initial_metadata'",
+    "Number of HTTP2 writes initiated due to 'send_trailing_metadata'",
+    "Number of HTTP2 writes initiated due to 'retry_send_ping'",
+    "Number of HTTP2 writes initiated due to 'continue_pings'",
+    "Number of HTTP2 writes initiated due to 'goaway_sent'",
+    "Number of HTTP2 writes initiated due to 'rst_stream'",
+    "Number of HTTP2 writes initiated due to 'close_from_api'",
+    "Number of HTTP2 writes initiated due to 'stream_flow_control'",
+    "Number of HTTP2 writes initiated due to 'transport_flow_control'",
+    "Number of HTTP2 writes initiated due to 'send_settings'",
+    "Number of HTTP2 writes initiated due to 'bdp_estimator_ping'",
+    "Number of HTTP2 writes initiated due to "
+    "'flow_control_unstalled_by_setting'",
+    "Number of HTTP2 writes initiated due to "
+    "'flow_control_unstalled_by_update'",
+    "Number of HTTP2 writes initiated due to 'application_ping'",
+    "Number of HTTP2 writes initiated due to 'keepalive_ping'",
+    "Number of HTTP2 writes initiated due to "
+    "'transport_flow_control_unstalled'",
+    "Number of HTTP2 writes initiated due to 'ping_response'",
+    "Number of HTTP2 writes initiated due to 'force_rst_stream'",
     "Number of combiner lock entries by process (first items queued to a "
     "combiner)",
     "Number of items scheduled against combiner locks",
@@ -106,11 +171,15 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
     "Number of times an executor queue was drained",
     "Number of times we raced and were forced to retry pushing a closure to "
     "the executor",
+    "Size of the backing thread pool for overflow gRPC Core work",
+    "How many executor threads actually got used",
     "How many calls were requested (not necessarily received) by the server",
     "How many times was the server slow path taken (indicates too few "
     "outstanding requests)",
 };
 const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
+    "call_initial_size",
+    "poll_events_returned",
     "tcp_write_size",
     "tcp_write_iov_size",
     "tcp_read_size",
@@ -121,9 +190,12 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
     "http2_send_message_per_write",
     "http2_send_trailing_metadata_per_write",
     "http2_send_flowctl_per_write",
+    "executor_closures_per_wakeup",
     "server_cqs_checked",
 };
 const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
+    "Initial size of the grpc_call arena created at call start",
+    "How many events are called for each syscall_poll",
     "Number of bytes offered to each syscall_write",
     "Number of byte segments offered to each syscall_write",
     "Number of bytes received by each syscall_read",
@@ -134,10 +206,47 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
     "Number of streams whose payload was written per TCP write",
     "Number of streams terminated per TCP write",
     "Number of flow control updates written per TCP write",
+    "Number of closures executed each time an executor wakes up",
     "How many completion queues were checked looking for a CQ that had "
     "requested the incoming call",
 };
 const int grpc_stats_table_0[65] = {
+    0, 1, 2, 3, 4, 5, 7, 9, 11, 14,
+    17, 21, 26, 32, 39, 47, 57, 68, 82, 98,
+    117, 140, 167, 199, 238, 284, 339, 404, 482, 575,
+    685, 816, 972, 1158, 1380, 1644, 1959, 2334, 2780, 3312,
+    3945, 4699, 5597, 6667, 7941, 9459, 11267, 13420, 15984, 19038,
+    22676, 27009, 32169, 38315, 45635, 54353, 64737, 77104, 91834, 109378,
+    130273, 155159, 184799, 220100, 262144};
+const uint8_t grpc_stats_table_1[124] = {
+    0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6,
+    7, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
+    15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 22, 23, 24,
+    24, 25, 25, 26, 26, 26, 27, 27, 28, 29, 29, 30, 30, 30, 31, 31, 32, 33,
+    33, 34, 34, 34, 35, 35, 36, 37, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+    42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50,
+    51, 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58};
+const int grpc_stats_table_2[129] = {
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+    15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 30,
+    32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60,
+    63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 94, 98, 102, 106, 110,
+    114, 118, 122, 126, 131, 136, 141, 146, 151, 156, 162, 168, 174, 180, 186,
+    192, 199, 206, 213, 220, 228, 236, 244, 252, 260, 269, 278, 287, 297, 307,
+    317, 327, 338, 349, 360, 372, 384, 396, 409, 422, 436, 450, 464, 479, 494,
+    510, 526, 543, 560, 578, 596, 615, 634, 654, 674, 695, 717, 739, 762, 785,
+    809, 834, 859, 885, 912, 939, 967, 996, 1024};
+const uint8_t grpc_stats_table_3[166] = {
+    0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+    8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16,
+    17, 17, 18, 19, 19, 20, 21, 21, 22, 23, 23, 24, 25, 25, 26, 26, 27, 27, 28,
+    28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 39,
+    40, 40, 41, 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51,
+    51, 52, 52, 53, 53, 54, 54, 55, 56, 57, 58, 59, 59, 60, 61, 62, 63, 63, 64,
+    65, 65, 66, 67, 67, 68, 69, 69, 70, 71, 71, 72, 72, 73, 73, 74, 75, 75, 76,
+    76, 77, 78, 79, 79, 80, 81, 82, 83, 84, 85, 85, 86, 87, 88, 88, 89, 90, 90,
+    91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97, 98, 98, 99};
+const int grpc_stats_table_4[65] = {
     0, 1, 2, 3, 4, 6, 8, 11,
     15, 20, 26, 34, 44, 57, 73, 94,
     121, 155, 199, 255, 327, 419, 537, 688,
@@ -147,28 +256,82 @@ const int grpc_stats_table_0[65] = {
     326126, 417200, 533707, 682750, 873414, 1117323, 1429345, 1828502,
     2339127, 2992348, 3827987, 4896985, 6264509, 8013925, 10251880, 13114801,
     16777216};
-const uint8_t grpc_stats_table_1[87] = {
+const uint8_t grpc_stats_table_5[87] = {
     0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11,
     11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23,
     24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36,
     36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 45, 46, 47, 48, 48,
     49, 50, 51, 51, 52, 53, 53, 54, 55, 56, 56, 57, 58, 58, 59};
-const int grpc_stats_table_2[65] = {
+const int grpc_stats_table_6[65] = {
     0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
     14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 43, 47,
     51, 56, 61, 66, 72, 78, 85, 92, 100, 109, 118, 128, 139,
     151, 164, 178, 193, 209, 226, 244, 264, 285, 308, 333, 359, 387,
     418, 451, 486, 524, 565, 609, 656, 707, 762, 821, 884, 952, 1024};
-const uint8_t grpc_stats_table_3[102] = {
+const uint8_t grpc_stats_table_7[102] = {
     0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
     6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14,
     14, 15, 15, 16, 16, 17, 17, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23,
     23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
     32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
     42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
-const int grpc_stats_table_4[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
-const uint8_t grpc_stats_table_5[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
+const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
+const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
+void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+  value = GPR_CLAMP(value, 0, 262144);
+  if (value < 6) {
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+                             value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4651092515166879744ull) {
+    int bucket =
+        grpc_stats_table_1[((_val.uint - 4618441417868443648ull) >> 49)] + 6;
+    _bkt.dbl = grpc_stats_table_0[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+                             bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_0, 64));
+}
+void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+  value = GPR_CLAMP(value, 0, 1024);
+  if (value < 29) {
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+                             GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4642789003353915392ull) {
+    int bucket =
+        grpc_stats_table_3[((_val.uint - 4628855992006737920ull) >> 47)] + 29;
+    _bkt.dbl = grpc_stats_table_2[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+                             GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+                           GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_2, 128));
+}
 void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 16777216);
   if (value < 5) {
     GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
@@ -182,8 +345,8 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
   _val.dbl = value;
   if (_val.uint < 4683743612465315840ull) {
     int bucket =
-        grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
-    _bkt.dbl = grpc_stats_table_0[bucket];
+        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_4[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
                              bucket);
@@ -191,9 +354,10 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
   }
   GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
                            grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_0, 64));
+                               (exec_ctx), value, grpc_stats_table_4, 64));
 }
 void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 1024);
   if (value < 13) {
     GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -207,8 +371,8 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
   _val.dbl = value;
   if (_val.uint < 4637863191261478912ull) {
     int bucket =
-        grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_2[bucket];
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM((exec_ctx),
                              GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
@@ -216,9 +380,10 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
   }
   GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
                            grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_2, 64));
+                               (exec_ctx), value, grpc_stats_table_6, 64));
 }
 void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 16777216);
   if (value < 5) {
     GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
@@ -232,8 +397,8 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
   _val.dbl = value;
   if (_val.uint < 4683743612465315840ull) {
     int bucket =
-        grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
-    _bkt.dbl = grpc_stats_table_0[bucket];
+        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_4[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
                              bucket);
@@ -241,9 +406,10 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
   }
   GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
                            grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_0, 64));
+                               (exec_ctx), value, grpc_stats_table_4, 64));
 }
 void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 16777216);
   if (value < 5) {
     GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
@@ -257,8 +423,8 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
   _val.dbl = value;
   if (_val.uint < 4683743612465315840ull) {
     int bucket =
-        grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
-    _bkt.dbl = grpc_stats_table_0[bucket];
+        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_4[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
                              bucket);
@@ -266,10 +432,11 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
   }
   GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
                            grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_0, 64));
+                               (exec_ctx), value, grpc_stats_table_4, 64));
 }
 void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
                                             int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 1024);
   if (value < 13) {
     GRPC_STATS_INC_HISTOGRAM(
@@ -283,8 +450,8 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
   _val.dbl = value;
   if (_val.uint < 4637863191261478912ull) {
     int bucket =
-        grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_2[bucket];
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM(
         (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, bucket);
@@ -293,10 +460,11 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
   GRPC_STATS_INC_HISTOGRAM((exec_ctx),
                            GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
                            grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_2, 64));
+                               (exec_ctx), value, grpc_stats_table_6, 64));
 }
 void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
                                             int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 16777216);
   if (value < 5) {
     GRPC_STATS_INC_HISTOGRAM(
@@ -310,8 +478,8 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
   _val.dbl = value;
   if (_val.uint < 4683743612465315840ull) {
     int bucket =
-        grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
-    _bkt.dbl = grpc_stats_table_0[bucket];
+        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_4[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM(
         (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket);
@@ -320,10 +488,11 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
   GRPC_STATS_INC_HISTOGRAM((exec_ctx),
                            GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
                            grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_0, 64));
+                               (exec_ctx), value, grpc_stats_table_4, 64));
 }
 void grpc_stats_inc_http2_send_initial_metadata_per_write(
     grpc_exec_ctx *exec_ctx, int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 1024);
   if (value < 13) {
     GRPC_STATS_INC_HISTOGRAM(
@@ -338,8 +507,8 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write(
   _val.dbl = value;
   if (_val.uint < 4637863191261478912ull) {
     int bucket =
-        grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_2[bucket];
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM(
         (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
@@ -348,11 +517,12 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write(
   }
   GRPC_STATS_INC_HISTOGRAM(
       (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
-      grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_2,
+      grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
                                         64));
 }
 void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
                                                  int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 1024);
   if (value < 13) {
     GRPC_STATS_INC_HISTOGRAM(
@@ -366,8 +536,8 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
   _val.dbl = value;
   if (_val.uint < 4637863191261478912ull) {
     int bucket =
-        grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_2[bucket];
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM(
         (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket);
@@ -376,10 +546,11 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
   GRPC_STATS_INC_HISTOGRAM((exec_ctx),
                            GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
                            grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_2, 64));
+                               (exec_ctx), value, grpc_stats_table_6, 64));
 }
 void grpc_stats_inc_http2_send_trailing_metadata_per_write(
     grpc_exec_ctx *exec_ctx, int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 1024);
   if (value < 13) {
     GRPC_STATS_INC_HISTOGRAM(
@@ -394,8 +565,8 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
   _val.dbl = value;
   if (_val.uint < 4637863191261478912ull) {
     int bucket =
-        grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_2[bucket];
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM(
         (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
@@ -404,11 +575,12 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
   }
   GRPC_STATS_INC_HISTOGRAM(
       (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
-      grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_2,
+      grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
                                         64));
 }
 void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
                                                  int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 1024);
   if (value < 13) {
     GRPC_STATS_INC_HISTOGRAM(
@@ -422,8 +594,8 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
   _val.dbl = value;
   if (_val.uint < 4637863191261478912ull) {
     int bucket =
-        grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_2[bucket];
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM(
         (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket);
@@ -432,9 +604,38 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
   GRPC_STATS_INC_HISTOGRAM((exec_ctx),
                            GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
                            grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_2, 64));
+                               (exec_ctx), value, grpc_stats_table_6, 64));
 }
+void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
+                                                 int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+  value = GPR_CLAMP(value, 0, 1024);
+  if (value < 13) {
+    GRPC_STATS_INC_HISTOGRAM(
+        (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4637863191261478912ull) {
+    int bucket =
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(
+        (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+                           GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_6, 64));
+}
 void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
+  /* Automatically generated by tools/codegen/core/gen_stats_data.py */
   value = GPR_CLAMP(value, 0, 64);
   if (value < 3) {
     GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -448,8 +649,8 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
   _val.dbl = value;
   if (_val.uint < 4625196817309499392ull) {
     int bucket =
-        grpc_stats_table_5[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
-    _bkt.dbl = grpc_stats_table_4[bucket];
+        grpc_stats_table_9[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
+    _bkt.dbl = grpc_stats_table_8[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM((exec_ctx),
                              GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
@@ -457,18 +658,21 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
   }
   GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
                            grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_4, 8));
+                               (exec_ctx), value, grpc_stats_table_8, 8));
 }
-const int grpc_stats_histo_buckets[11] = {64, 64, 64, 64, 64, 64,
-                                          64, 64, 64, 64, 8};
-const int grpc_stats_histo_start[11] = {0, 64, 128, 192, 256, 320,
-                                        384, 448, 512, 576, 640};
-const int *const grpc_stats_histo_bucket_boundaries[11] = {
-    grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
-    grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
-    grpc_stats_table_2, grpc_stats_table_2, grpc_stats_table_2,
-    grpc_stats_table_2, grpc_stats_table_4};
-void (*const grpc_stats_inc_histogram[11])(grpc_exec_ctx *exec_ctx, int x) = {
+const int grpc_stats_histo_buckets[14] = {64, 128, 64, 64, 64, 64, 64,
+                                          64, 64, 64, 64, 64, 64, 8};
+const int grpc_stats_histo_start[14] = {0, 64, 192, 256, 320, 384, 448,
+                                        512, 576, 640, 704, 768, 832, 896};
+const int *const grpc_stats_histo_bucket_boundaries[14] = {
+    grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
+    grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
+    grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
+    grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
+    grpc_stats_table_6, grpc_stats_table_8};
+void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
+    grpc_stats_inc_call_initial_size,
+    grpc_stats_inc_poll_events_returned,
     grpc_stats_inc_tcp_write_size,
     grpc_stats_inc_tcp_write_iov_size,
     grpc_stats_inc_tcp_read_size,
@@ -479,4 +683,5 @@ void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
     grpc_stats_inc_http2_send_message_per_write,
     grpc_stats_inc_http2_send_trailing_metadata_per_write,
     grpc_stats_inc_http2_send_flowctl_per_write,
+    grpc_stats_inc_executor_closures_per_wakeup,
     grpc_stats_inc_server_cqs_checked};

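All of the generated grpc_stats_inc_* histogram functions above share one fast path: the sample is converted to a double, and its raw 64-bit pattern, whose exponent field encodes floor(log2(value)), is shifted to index a precomputed table that maps exponents to bucket numbers. The subsequent `bucket -= (_val.uint < _bkt.uint)` line corrects the guess downward by one when the value falls just below the chosen bucket's lower bound; comparing positive doubles as unsigned integers preserves their ordering, which is what makes that one-line fixup valid. A hedged, self-contained sketch of the trick with illustrative parameters (the real tables and magic constants are emitted by tools/codegen/core/gen_stats_data.py):

#include <stdint.h>
#include <string.h>

/* Map value -> bucket via the exponent bits of its double representation.
   The caller is assumed to have range-checked value first, as the generated
   code does with its `if (_val.uint < ...)` guard. */
static int fast_bucket(int value, const uint8_t *shift_table,
                       uint64_t first_bucket_bits, unsigned mantissa_shift,
                       int num_linear_buckets) {
  double d = (double)value;
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits)); /* same effect as the union in the code */
  return shift_table[(bits - first_bucket_bits) >> mantissa_shift] +
         num_linear_buckets;
}

This gives O(1) bucket lookup in the common case, falling back to grpc_stats_histo_find_bucket_slow (a binary search) only for out-of-range samples.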
@ -33,6 +33,12 @@ typedef enum {
GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED, GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED,
GRPC_STATS_COUNTER_SYSCALL_POLL, GRPC_STATS_COUNTER_SYSCALL_POLL,
GRPC_STATS_COUNTER_SYSCALL_WAIT, GRPC_STATS_COUNTER_SYSCALL_WAIT,
GRPC_STATS_COUNTER_POLLSET_KICK,
GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER,
GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN,
GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD,
GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV,
GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD,
GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS, GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
GRPC_STATS_COUNTER_SYSCALL_WRITE, GRPC_STATS_COUNTER_SYSCALL_WRITE,
GRPC_STATS_COUNTER_SYSCALL_READ, GRPC_STATS_COUNTER_SYSCALL_READ,
@ -52,6 +58,27 @@ typedef enum {
GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED, GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED,
GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED, GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED,
GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES, GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM,
  GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
  GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
  GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
@@ -62,6 +89,8 @@ typedef enum {
  GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
  GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
  GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED,
GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED,
  GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
  GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
  GRPC_STATS_COUNTER_COUNT
@@ -69,6 +98,8 @@ typedef enum {
extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
extern const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT];
typedef enum {
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
  GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
@@ -79,35 +110,42 @@ typedef enum {
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
  GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
  GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
extern const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT];
typedef enum {
  GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_FIRST_SLOT = 0,
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_FIRST_SLOT = 64,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_BUCKETS = 128,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 192,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 256,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 320,
  GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 384,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 448,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 512,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 576,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 640,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 704,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 832,
  GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64,
  GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 896,
  GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
  GRPC_STATS_HISTOGRAM_BUCKETS = 904
} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
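
Each `_FIRST_SLOT` value above is the running sum of every preceding histogram's `_BUCKETS`, so all histograms share one flat 904-slot array. A sketch of reading a single bucket back out, assuming the `grpc_stats_data` layout with a `histograms` array as declared in stats.h:

/* Sketch: bucket i of tcp_write_size lives at flat index 192 + i, because
   call_initial_size (64 slots) and poll_events_returned (128 slots)
   precede it in the flat array. */
static gpr_atm tcp_write_size_bucket(const grpc_stats_data *stats, int i) {
  GPR_ASSERT(i >= 0 && i < GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS);
  return stats->histograms[GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT + i];
}
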
@@ -126,6 +164,19 @@ typedef enum {
  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL)
#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \
  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT)
#define GRPC_STATS_INC_POLLSET_KICK(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK)
#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER)
#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN)
#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD)
#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV)
#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD)
#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \
  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \
@@ -169,6 +220,95 @@ typedef enum {
  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \
  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA( \
exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA( \
exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL( \
exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL( \
exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING( \
exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING( \
exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE( \
exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED( \
exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx) \
GRPC_STATS_INC_COUNTER( \
(exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
                         GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
@@ -197,11 +337,22 @@ typedef enum {
  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
#define GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED)
#define GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED)
#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
  GRPC_STATS_INC_COUNTER((exec_ctx),                             \
                         GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \
grpc_stats_inc_call_initial_size((exec_ctx), (int)(value))
void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, value) \
grpc_stats_inc_poll_events_returned((exec_ctx), (int)(value))
void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
  grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
@@ -237,13 +388,17 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
  grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
                                                 int x);
#define GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, value) \
grpc_stats_inc_executor_closures_per_wakeup((exec_ctx), (int)(value))
void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
int x);
#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
  grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
extern const int grpc_stats_histo_buckets[14];
extern const int grpc_stats_histo_start[14];
extern const int *const grpc_stats_histo_bucket_boundaries[14];
extern void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx,
                                                  int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
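
For orientation, a sketch of how call sites use this generated header: counter macros take no value, histogram macros take one, and the macro hides the int cast. The surrounding function is illustrative only:

static void record_call_started(grpc_exec_ctx *exec_ctx, size_t arena_bytes) {
  GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx);           /* plain counter */
  GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, arena_bytes); /* histogram     */
}
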

@@ -20,6 +20,10 @@
  doc: Number of client side calls created by this process
- counter: server_calls_created
  doc: Number of server side calls created by this process
- histogram: call_initial_size
max: 262144
buckets: 64
doc: Initial size of the grpc_call arena created at call start
- counter: cqs_created
  doc: Number of completion queues created
- counter: client_channels_created
@@ -33,6 +37,32 @@
  doc: Number of polling syscalls (epoll_wait, poll, etc) made by this process
- counter: syscall_wait
  doc: Number of sleeping syscalls made by this process
- histogram: poll_events_returned
max: 1024
buckets: 128
doc: How many events are called for each syscall_poll
- counter: pollset_kick
doc: How many polling wakeups were performed by the process
(only valid for epoll1 right now)
- counter: pollset_kicked_without_poller
doc: How many times was a polling wakeup requested without an active poller
(only valid for epoll1 right now)
- counter: pollset_kicked_again
doc: How many times was the same polling worker awoken repeatedly before
waking up
(only valid for epoll1 right now)
- counter: pollset_kick_wakeup_fd
doc: How many times was an eventfd used as the wakeup vector for a polling
wakeup
(only valid for epoll1 right now)
- counter: pollset_kick_wakeup_cv
doc: How many times was a condition variable used as the wakeup vector for a
polling wakeup
(only valid for epoll1 right now)
- counter: pollset_kick_own_thread
doc: How many times could a polling wakeup be satisfied by keeping the waking
thread awake?
(only valid for epoll1 right now)
# stats system
- counter: histogram_slow_lookups
  doc: Number of times histogram increments went through the slow
@@ -117,6 +147,48 @@
- counter: http2_partial_writes
  doc: Number of HTTP2 writes that were made knowing there was still more data
       to be written (we cap maximum write size to syscall_write)
- counter: http2_initiate_write_due_to_initial_write
doc: Number of HTTP2 writes initiated due to 'initial_write'
- counter: http2_initiate_write_due_to_start_new_stream
doc: Number of HTTP2 writes initiated due to 'start_new_stream'
- counter: http2_initiate_write_due_to_send_message
doc: Number of HTTP2 writes initiated due to 'send_message'
- counter: http2_initiate_write_due_to_send_initial_metadata
doc: Number of HTTP2 writes initiated due to 'send_initial_metadata'
- counter: http2_initiate_write_due_to_send_trailing_metadata
doc: Number of HTTP2 writes initiated due to 'send_trailing_metadata'
- counter: http2_initiate_write_due_to_retry_send_ping
doc: Number of HTTP2 writes initiated due to 'retry_send_ping'
- counter: http2_initiate_write_due_to_continue_pings
doc: Number of HTTP2 writes initiated due to 'continue_pings'
- counter: http2_initiate_write_due_to_goaway_sent
doc: Number of HTTP2 writes initiated due to 'goaway_sent'
- counter: http2_initiate_write_due_to_rst_stream
doc: Number of HTTP2 writes initiated due to 'rst_stream'
- counter: http2_initiate_write_due_to_close_from_api
doc: Number of HTTP2 writes initiated due to 'close_from_api'
- counter: http2_initiate_write_due_to_stream_flow_control
doc: Number of HTTP2 writes initiated due to 'stream_flow_control'
- counter: http2_initiate_write_due_to_transport_flow_control
doc: Number of HTTP2 writes initiated due to 'transport_flow_control'
- counter: http2_initiate_write_due_to_send_settings
doc: Number of HTTP2 writes initiated due to 'send_settings'
- counter: http2_initiate_write_due_to_bdp_estimator_ping
doc: Number of HTTP2 writes initiated due to 'bdp_estimator_ping'
- counter: http2_initiate_write_due_to_flow_control_unstalled_by_setting
doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_setting'
- counter: http2_initiate_write_due_to_flow_control_unstalled_by_update
doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_update'
- counter: http2_initiate_write_due_to_application_ping
doc: Number of HTTP2 writes initiated due to 'application_ping'
- counter: http2_initiate_write_due_to_keepalive_ping
doc: Number of HTTP2 writes initiated due to 'keepalive_ping'
- counter: http2_initiate_write_due_to_transport_flow_control_unstalled
doc: Number of HTTP2 writes initiated due to 'transport_flow_control_unstalled'
- counter: http2_initiate_write_due_to_ping_response
doc: Number of HTTP2 writes initiated due to 'ping_response'
- counter: http2_initiate_write_due_to_force_rst_stream
doc: Number of HTTP2 writes initiated due to 'force_rst_stream'
# combiner locks
- counter: combiner_locks_initiated
  doc: Number of combiner lock entries by process
@@ -143,6 +215,14 @@
- counter: executor_push_retries
  doc: Number of times we raced and were forced to retry pushing a closure to
       the executor
- counter: executor_threads_created
doc: Size of the backing thread pool for overflow gRPC Core work
- counter: executor_threads_used
doc: How many executor threads actually got used
- histogram: executor_closures_per_wakeup
max: 1024
buckets: 64
doc: Number of closures executed each time an executor wakes up
# server
- counter: server_requested_calls
  doc: How many calls were requested (not necessarily received) by the server
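
Each stats.yaml entry above fans out into the generated code earlier in this diff. A sketch of the correspondence for one counter, with all identifiers taken from the generated stats_data.h shown before:

/* "- counter: server_requested_calls" (stats.yaml, above) generates:
 *   GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS    (enum value)
 *   GRPC_STATS_INC_SERVER_REQUESTED_CALLS(...)   (inc macro)
 * plus the server_requested_calls_per_iteration column in the
 * massage-script schema that follows. */
static void on_call_requested(grpc_exec_ctx *exec_ctx) {
  GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx);
}
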

@@ -1,7 +1,17 @@
client_calls_created_per_iteration:FLOAT,
server_calls_created_per_iteration:FLOAT,
cqs_created_per_iteration:FLOAT,
client_channels_created_per_iteration:FLOAT,
client_subchannels_created_per_iteration:FLOAT,
server_channels_created_per_iteration:FLOAT,
syscall_poll_per_iteration:FLOAT,
syscall_wait_per_iteration:FLOAT,
pollset_kick_per_iteration:FLOAT,
pollset_kicked_without_poller_per_iteration:FLOAT,
pollset_kicked_again_per_iteration:FLOAT,
pollset_kick_wakeup_fd_per_iteration:FLOAT,
pollset_kick_wakeup_cv_per_iteration:FLOAT,
pollset_kick_own_thread_per_iteration:FLOAT,
histogram_slow_lookups_per_iteration:FLOAT,
syscall_write_per_iteration:FLOAT,
syscall_read_per_iteration:FLOAT,
@@ -21,6 +31,27 @@ http2_writes_begun_per_iteration:FLOAT,
http2_writes_offloaded_per_iteration:FLOAT,
http2_writes_continued_per_iteration:FLOAT,
http2_partial_writes_per_iteration:FLOAT,
http2_initiate_write_due_to_initial_write_per_iteration:FLOAT,
http2_initiate_write_due_to_start_new_stream_per_iteration:FLOAT,
http2_initiate_write_due_to_send_message_per_iteration:FLOAT,
http2_initiate_write_due_to_send_initial_metadata_per_iteration:FLOAT,
http2_initiate_write_due_to_send_trailing_metadata_per_iteration:FLOAT,
http2_initiate_write_due_to_retry_send_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_continue_pings_per_iteration:FLOAT,
http2_initiate_write_due_to_goaway_sent_per_iteration:FLOAT,
http2_initiate_write_due_to_rst_stream_per_iteration:FLOAT,
http2_initiate_write_due_to_close_from_api_per_iteration:FLOAT,
http2_initiate_write_due_to_stream_flow_control_per_iteration:FLOAT,
http2_initiate_write_due_to_transport_flow_control_per_iteration:FLOAT,
http2_initiate_write_due_to_send_settings_per_iteration:FLOAT,
http2_initiate_write_due_to_bdp_estimator_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_flow_control_unstalled_by_setting_per_iteration:FLOAT,
http2_initiate_write_due_to_flow_control_unstalled_by_update_per_iteration:FLOAT,
http2_initiate_write_due_to_application_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_keepalive_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_transport_flow_control_unstalled_per_iteration:FLOAT,
http2_initiate_write_due_to_ping_response_per_iteration:FLOAT,
http2_initiate_write_due_to_force_rst_stream_per_iteration:FLOAT,
combiner_locks_initiated_per_iteration:FLOAT,
combiner_locks_scheduled_items_per_iteration:FLOAT,
combiner_locks_scheduled_final_items_per_iteration:FLOAT,
@@ -31,5 +62,7 @@ executor_scheduled_to_self_per_iteration:FLOAT,
executor_wakeup_initiated_per_iteration:FLOAT,
executor_queue_drained_per_iteration:FLOAT,
executor_push_retries_per_iteration:FLOAT,
executor_threads_created_per_iteration:FLOAT,
executor_threads_used_per_iteration:FLOAT,
server_requested_calls_per_iteration:FLOAT,
server_slowpath_requests_queued_per_iteration:FLOAT

@@ -35,7 +35,7 @@ typedef struct {
#else
  bool value;
#endif
  const char *name;
} grpc_tracer_flag;
#ifdef GRPC_THREADSAFE_TRACER
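
With name now const char *, tracer flags can point at string literals without a cast. A typical definition site as a sketch, assuming the two-argument GRPC_TRACER_INITIALIZER form used elsewhere in the tree; the specific flag shown is illustrative:

/* Illustrative tracer-flag definition using the const name field: */
grpc_tracer_flag grpc_polling_trace =
    GRPC_TRACER_INITIALIZER(false, "polling");
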

@@ -217,7 +217,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
  GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
                    grpc_schedule_on_exec_ctx);
  grpc_arg arg = grpc_channel_arg_pointer_create(
      (char *)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
      grpc_resource_quota_arg_vtable());
  grpc_channel_args args = {1, &arg};
  grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
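
The new cast exists because grpc_arg keys are declared as mutable char * while GRPC_ARG_RESOURCE_QUOTA expands to a string literal (const data). A condensed restatement, with quota standing in for the request's resource quota:

/* grpc_channel_arg_pointer_create takes a (char *) key, so the const
   string-literal macro needs the explicit cast: */
grpc_arg arg = grpc_channel_arg_pointer_create(
    (char *)GRPC_ARG_RESOURCE_QUOTA, quota, grpc_resource_quota_arg_vtable());
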

@@ -43,7 +43,8 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx,
  grpc_httpcli_ssl_channel_security_connector *c =
      (grpc_httpcli_ssl_channel_security_connector *)sc;
  if (c->handshaker_factory != NULL) {
    tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory);
    c->handshaker_factory = NULL;
  }
  if (c->secure_peer_name != NULL) gpr_free(c->secure_peer_name);
  gpr_free(sc);

@@ -167,7 +167,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
  GPR_TIMER_BEGIN("grpc_closure_sched", 0);
  if (c != NULL) {
#ifndef NDEBUG
    if (c->scheduled) {
gpr_log(GPR_ERROR,
"Closure already scheduled. (closure: %p, created: [%s:%d], "
"previously scheduled at: [%s: %d] run?: %s",
c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated, c->run ? "true" : "false");
abort();
}
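
The richer failure path above reads debug-only bookkeeping that GRPC_CLOSURE_INIT records. Roughly the fields involved, as a sketch of the !NDEBUG view rather than the full grpc_closure definition:

#ifndef NDEBUG
/* Debug-only fields consulted by the log message above (sketch): */
struct closure_debug_fields {
  bool scheduled;             /* set when queued, cleared before it runs */
  bool run;                   /* queued to run rather than to schedule   */
  const char *file_created;   /* call site recorded by GRPC_CLOSURE_INIT */
  int line_created;
  const char *file_initiated; /* last scheduling site                    */
  int line_initiated;
};
#endif
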
    c->scheduled = true;
    c->file_initiated = file;
    c->line_initiated = line;
@@ -191,7 +198,14 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
  while (c != NULL) {
    grpc_closure *next = c->next_data.next;
#ifndef NDEBUG
    if (c->scheduled) {
gpr_log(GPR_ERROR,
"Closure already scheduled. (closure: %p, created: [%s:%d], "
"previously scheduled at: [%s: %d] run?: %s",
c, c->file_created, c->line_created, c->file_initiated,
c->line_initiated, c->run ? "true" : "false");
abort();
}
    c->scheduled = true;
    c->file_initiated = file;
    c->line_initiated = line;

@@ -641,7 +641,7 @@ static char *key_time(grpc_error_times which) {
static char *fmt_time(gpr_timespec tm) {
  char *out;
  const char *pfx = "!!";
  switch (tm.clock_type) {
    case GPR_CLOCK_MONOTONIC:
      pfx = "@monotonic:";

@@ -130,9 +130,9 @@ static void fd_global_shutdown(void);
 * Pollset Declarations
 */
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;
static const char *kick_state_string(kick_state st) {
  switch (st) {
    case UNKICKED:
      return "UNKICKED";
@@ -145,7 +145,7 @@ static const char *kick_state_string(kick_state_t st) {
}
struct grpc_pollset_worker {
  kick_state state;
  int kick_state_mutator;  // which line of code last changed kick state
  bool initialized_cv;
  grpc_pollset_worker *next;
@@ -154,9 +154,9 @@ struct grpc_pollset_worker {
  grpc_closure_list schedule_on_end_work;
};
#define SET_KICK_STATE(worker, kick_state)   \
  do {                                       \
    (worker)->state = (kick_state);          \
    (worker)->kick_state_mutator = __LINE__; \
  } while (false)
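
The macro's parameter becomes kick_state because the struct field is now plain state: the old kick_state_t type dropped its POSIX-reserved _t suffix and took over the kick_state identifier, so the field had to move out of its way. Every mutation funnels through the macro so the last mutation site is recorded; a usage sketch:

SET_KICK_STATE(worker, KICKED); /* also stores this __LINE__ in            */
                                /* worker->kick_state_mutator for debugging */
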
@@ -280,8 +280,9 @@ static grpc_fd *fd_create(int fd, const char *name) {
#endif
  gpr_free(fd_name);
  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
  ev.data.ptr = new_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }
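
This hunk and the matching ones below trade C99 designated initializers for plain member assignments, presumably so the file also builds under compilers without designated-initializer support; both spellings set the same two fields. A side-by-side sketch, with p standing in for the callback payload pointer:

/* removed spelling:
     struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
                              .data.ptr = p};
   portable spelling kept by this diff: */
struct epoll_event ev;
ev.events = (uint32_t)(EPOLLIN | EPOLLET);
ev.data.ptr = p;
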
@@ -435,8 +436,9 @@ static grpc_error *pollset_global_init(void) {
  global_wakeup_fd.read_fd = -1;
  grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLET);
  ev.data.ptr = &global_wakeup_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
                &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
@@ -502,22 +504,27 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
  gpr_mu_destroy(&pollset->mu);
}
static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset *pollset) {
  GPR_TIMER_BEGIN("pollset_kick_all", 0);
  grpc_error *error = GRPC_ERROR_NONE;
  if (pollset->root_worker != NULL) {
    grpc_pollset_worker *worker = pollset->root_worker;
    do {
      GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
      switch (worker->state) {
        case KICKED:
          GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
          break;
        case UNKICKED:
          SET_KICK_STATE(worker, KICKED);
          if (worker->initialized_cv) {
            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
            gpr_cv_signal(&worker->cv);
          }
          break;
        case DESIGNATED_POLLER:
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
          SET_KICK_STATE(worker, KICKED);
          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                       "pollset_kick_all");
@@ -550,7 +557,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutdown_closure = closure;
  pollset->shutting_down = true;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
  GPR_TIMER_END("pollset_shutdown", 0);
}
@@ -567,7 +574,10 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
  }
  static const gpr_timespec round_up = {
      0,                 /* tv_sec */
      GPR_NS_PER_MS - 1, /* tv_nsec */
      GPR_TIMESPAN       /* clock_type */
  };
  timeout = gpr_time_sub(deadline, now);
  int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
  return millis >= 1 ? millis : 1;
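
The rewritten initializer lists gpr_timespec members positionally (tv_sec, tv_nsec, clock_type); the value itself is one nanosecond short of a millisecond so the truncating gpr_time_to_millis rounds up instead of down. A self-contained restatement of the same arithmetic in a hypothetical helper:

/* Adding (1ms - 1ns) before truncation turns it into a ceiling: a deadline
   0.2ms away still yields a 1ms poll timeout rather than a 0ms busy loop. */
static int deadline_to_millis_ceil(gpr_timespec deadline, gpr_timespec now) {
  gpr_timespec round_up = gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN);
  int millis = gpr_time_to_millis(
      gpr_time_add(gpr_time_sub(deadline, now), round_up));
  return millis >= 1 ? millis : 1;
}
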
@@ -646,6 +656,8 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
  GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
  }
@@ -688,7 +700,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
  gpr_mu_lock(&pollset->mu);
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
            pollset, worker, kick_state_string(worker->state),
            is_reassigning);
  }
  if (pollset->seen_inactive) {
@@ -708,12 +720,12 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
     at this point is if it were "kicked specifically". Since the worker has
     not added itself to the pollset yet (by calling worker_insert()), it is
     not visible in the "kick any" path yet */
  if (worker->state == UNKICKED) {
    pollset->seen_inactive = false;
    if (neighborhood->active_root == NULL) {
      neighborhood->active_root = pollset->next = pollset->prev = pollset;
      /* Make this the designated poller if there isn't one already */
      if (worker->state == UNKICKED &&
          gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
        SET_KICK_STATE(worker, DESIGNATED_POLLER);
      }
@@ -733,19 +745,19 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
  worker_insert(pollset, worker);
  pollset->begin_refs--;
  if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (worker->state == UNKICKED && !pollset->shutting_down) {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
                pollset, worker, kick_state_string(worker->state),
                pollset->shutting_down);
      }
      if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
          worker->state == UNKICKED) {
        /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
           received a kick */
        SET_KICK_STATE(worker, KICKED);
@@ -758,7 +770,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
    gpr_log(GPR_ERROR,
            "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
            "kicked_without_poller: %d",
            pollset, worker, kick_state_string(worker->state),
            pollset->shutting_down, pollset->kicked_without_poller);
  }
@@ -778,11 +790,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
  }
  GPR_TIMER_END("begin_worker", 0);
  return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}
static bool check_neighborhood_for_available_poller(
    grpc_exec_ctx *exec_ctx, pollset_neighborhood *neighborhood) {
  GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
  bool found_worker = false;
  do {
@@ -795,7 +807,7 @@ static bool check_neighborhood_for_available_poller(
    grpc_pollset_worker *inspect_worker = inspect->root_worker;
    if (inspect_worker != NULL) {
      do {
        switch (inspect_worker->state) {
          case UNKICKED:
            if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                       (gpr_atm)inspect_worker)) {
@@ -806,6 +818,7 @@ static bool check_neighborhood_for_available_poller(
            SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
            if (inspect_worker->initialized_cv) {
              GPR_TIMER_MARK("signal worker", 0);
              GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
              gpr_cv_signal(&inspect_worker->cv);
            }
          } else {
@@ -858,13 +871,14 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
  grpc_closure_list_move(&worker->schedule_on_end_work,
                         &exec_ctx->closure_list);
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    if (worker->next != worker && worker->next->state == UNKICKED) {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
      }
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
      gpr_cv_signal(&worker->next->cv);
      if (grpc_exec_ctx_has_work(exec_ctx)) {
        gpr_mu_unlock(&pollset->mu);
@@ -883,7 +897,8 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        if (gpr_mu_trylock(&neighborhood->mu)) {
          found_worker =
              check_neighborhood_for_available_poller(exec_ctx, neighborhood);
          gpr_mu_unlock(&neighborhood->mu);
          scan_state[i] = true;
        } else {
@@ -896,7 +911,8 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        gpr_mu_lock(&neighborhood->mu);
        found_worker =
            check_neighborhood_for_available_poller(exec_ctx, neighborhood);
        gpr_mu_unlock(&neighborhood->mu);
      }
      grpc_exec_ctx_flush(exec_ctx);
@@ -978,9 +994,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
  return error;
}
static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker *specific_worker) {
  GPR_TIMER_BEGIN("pollset_kick", 0);
  GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
  grpc_error *ret_err = GRPC_ERROR_NONE;
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_strvec log;
@@ -993,14 +1010,14 @@
    gpr_strvec_add(&log, tmp);
    if (pollset->root_worker != NULL) {
      gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
                   kick_state_string(pollset->root_worker->state),
                   pollset->root_worker->next,
                   kick_state_string(pollset->root_worker->next->state));
      gpr_strvec_add(&log, tmp);
    }
    if (specific_worker != NULL) {
      gpr_asprintf(&tmp, " worker_kick_state=%s",
                   kick_state_string(specific_worker->state));
      gpr_strvec_add(&log, tmp);
    }
    tmp = gpr_strvec_flatten(&log, NULL);
@@ -1013,6 +1030,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
  if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
    grpc_pollset_worker *root_worker = pollset->root_worker;
    if (root_worker == NULL) {
      GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
      pollset->kicked_without_poller = true;
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_ERROR, " .. kicked_without_poller");
@@ -1020,13 +1038,15 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
      goto done;
    }
    grpc_pollset_worker *next_worker = root_worker->next;
    if (root_worker->state == KICKED) {
      GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
      }
      SET_KICK_STATE(root_worker, KICKED);
      goto done;
    } else if (next_worker->state == KICKED) {
      GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
      }
@@ -1037,13 +1057,15 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
               // there is no next worker
               root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
                                  &g_active_poller)) {
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
      }
      SET_KICK_STATE(root_worker, KICKED);
      ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
      goto done;
    } else if (next_worker->state == UNKICKED) {
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
      }
@@ -1051,8 +1073,8 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
      SET_KICK_STATE(next_worker, KICKED);
      gpr_cv_signal(&next_worker->cv);
      goto done;
    } else if (next_worker->state == DESIGNATED_POLLER) {
      if (root_worker->state != DESIGNATED_POLLER) {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(
              GPR_ERROR,
@@ -1061,10 +1083,12 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
        }
        SET_KICK_STATE(root_worker, KICKED);
        if (root_worker->initialized_cv) {
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
          gpr_cv_signal(&root_worker->cv);
        }
        goto done;
      } else {
        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
                  root_worker);
@@ -1074,11 +1098,13 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
        goto done;
      }
    } else {
      GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
      GPR_ASSERT(next_worker->state == KICKED);
      SET_KICK_STATE(next_worker, KICKED);
      goto done;
    }
  } else {
    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. kicked while waking up");
    }
@@ -1088,13 +1114,14 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
    GPR_UNREACHABLE_CODE(goto done);
  }
  if (specific_worker->state == KICKED) {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. specific worker already kicked");
    }
    goto done;
  } else if (gpr_tls_get(&g_current_thread_worker) ==
             (intptr_t)specific_worker) {
    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
    }
@@ -1102,6 +1129,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
    goto done;
  } else if (specific_worker ==
             (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. kick active poller");
    }
@@ -1109,6 +1137,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
    ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
    goto done;
  } else if (specific_worker->initialized_cv) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. kick waiting worker");
    }
@@ -1116,6 +1145,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
    gpr_cv_signal(&specific_worker->cv);
    goto done;
  } else {
    GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. kick non-waiting worker");
    }
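
Taken together, the instrumentation added across pollset_kick and pollset_kick_all gives each kick one POLLSET_KICK increment plus, on most paths, exactly one outcome counter. A sketch of reading them as a rough partition; this is an interpretation of the hunks above, not code from this change:

/* Outcome counters should not exceed total kicks (a few paths record no
   outcome, hence '<=' rather than '=='). */
static bool kick_outcomes_bounded(const grpc_stats_data *s) {
  gpr_atm outcomes =
      s->counters[GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER] +
      s->counters[GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN] +
      s->counters[GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD] +
      s->counters[GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV] +
      s->counters[GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD];
  return outcomes <= s->counters[GRPC_STATS_COUNTER_POLLSET_KICK];
}
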
@@ -1172,34 +1202,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
    sizeof(grpc_pollset),
    fd_create,
    fd_wrapped_fd,
    fd_orphan,
    fd_shutdown,
    fd_notify_on_read,
    fd_notify_on_write,
    fd_is_shutdown,
    fd_get_read_notifier_pollset,
    pollset_init,
    pollset_shutdown,
    pollset_destroy,
    pollset_work,
    pollset_kick,
    pollset_add_fd,
    pollset_set_create,
    pollset_set_destroy,
    pollset_set_add_pollset,
    pollset_set_del_pollset,
    pollset_set_add_pollset_set,
    pollset_set_del_pollset_set,
    pollset_set_add_fd,
    pollset_set_del_fd,
    shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
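
Because the vtable now uses positional initialization, its entries must follow grpc_event_engine_vtable's member order exactly, which is why fd_notify_on_read/fd_notify_on_write moved ahead of fd_is_shutdown relative to the old designated-initializer form. A hypothetical spot check, not present in the source:

/* Illustrative sanity check: with positional init, a silently transposed
   entry is the failure mode to guard against. */
static void verify_vtable_wiring(void) {
  GPR_ASSERT(vtable.pollset_size == sizeof(grpc_pollset));
  GPR_ASSERT(vtable.fd_is_shutdown == fd_is_shutdown);
  GPR_ASSERT(vtable.pollset_kick == pollset_kick);
}
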

@@ -97,12 +97,12 @@ static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg,
 * pollable Declarations
 */
typedef struct pollable {
  polling_obj po;
  int epfd;
  grpc_wakeup_fd wakeup;
  grpc_pollset_worker *root_worker;
} pollable;
static const char *polling_obj_type_string(polling_obj_type t) {
  switch (t) {
@@ -122,7 +122,7 @@ static const char *polling_obj_type_string(polling_obj_type t) {
    return "<invalid>";
}
static char *pollable_desc(pollable *p) {
  char *out;
  gpr_asprintf(&out, "type=%s group=%p epfd=%d wakeup=%d",
               polling_obj_type_string(p->po.type), p->po.group, p->epfd,
@@ -130,19 +130,19 @@ static char *pollable_desc(pollable_t *p) {
  return out;
}
static pollable g_empty_pollable;
static void pollable_init(pollable *p, polling_obj_type type);
static void pollable_destroy(pollable *p);
/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
static grpc_error *pollable_materialize(pollable *p);
/*******************************************************************************
 * Fd Declarations
 */
struct grpc_fd {
  pollable pollable_obj;
  int fd;
  /* refst format:
       bit 0 : 1=Active / 0=Orphaned
@@ -193,15 +193,15 @@ struct grpc_pollset_worker {
  pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
  gpr_cv cv;
  grpc_pollset *pollset;
  pollable *pollable_obj;
};
#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5
struct grpc_pollset {
  pollable pollable_obj;
  pollable *current_pollable_obj;
  int kick_alls_pending;
  bool kicked_without_poller;
  grpc_closure *shutdown_closure;
@@ -282,7 +282,7 @@ static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  grpc_fd *fd = (grpc_fd *)arg;
  /* Add the fd to the freelist */
  grpc_iomgr_unregister_object(&fd->iomgr_object);
  pollable_destroy(&fd->pollable_obj);
  gpr_mu_destroy(&fd->orphaned_mu);
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
@@ -343,7 +343,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
    new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
  }
  pollable_init(&new_fd->pollable_obj, PO_FD);
  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
  new_fd->fd = fd;
@ -385,7 +385,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
bool is_fd_closed = already_closed; bool is_fd_closed = already_closed;
grpc_error *error = GRPC_ERROR_NONE; grpc_error *error = GRPC_ERROR_NONE;
gpr_mu_lock(&fd->pollable.po.mu); gpr_mu_lock(&fd->pollable_obj.po.mu);
gpr_mu_lock(&fd->orphaned_mu); gpr_mu_lock(&fd->orphaned_mu);
fd->on_done_closure = on_done; fd->on_done_closure = on_done;
@ -411,7 +411,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->orphaned_mu); gpr_mu_unlock(&fd->orphaned_mu);
gpr_mu_unlock(&fd->pollable.po.mu); gpr_mu_unlock(&fd->pollable_obj.po.mu);
UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */ UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error)); GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
@ -451,13 +451,13 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
* Pollable Definitions * Pollable Definitions
*/ */
static void pollable_init(pollable_t *p, polling_obj_type type) { static void pollable_init(pollable *p, polling_obj_type type) {
po_init(&p->po, type); po_init(&p->po, type);
p->root_worker = NULL; p->root_worker = NULL;
p->epfd = -1; p->epfd = -1;
} }
static void pollable_destroy(pollable_t *p) { static void pollable_destroy(pollable *p) {
po_destroy(&p->po); po_destroy(&p->po);
if (p->epfd != -1) { if (p->epfd != -1) {
close(p->epfd); close(p->epfd);
@ -466,7 +466,7 @@ static void pollable_destroy(pollable_t *p) {
} }
/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */ /* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
static grpc_error *pollable_materialize(pollable_t *p) { static grpc_error *pollable_materialize(pollable *p) {
if (p->epfd == -1) { if (p->epfd == -1) {
int new_epfd = epoll_create1(EPOLL_CLOEXEC); int new_epfd = epoll_create1(EPOLL_CLOEXEC);
if (new_epfd < 0) { if (new_epfd < 0) {
@ -477,8 +477,9 @@ static grpc_error *pollable_materialize(pollable_t *p) {
close(new_epfd); close(new_epfd);
return err; return err;
} }
struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET), struct epoll_event ev;
.data.ptr = (void *)(1 | (intptr_t)&p->wakeup)}; ev.events = (uint32_t)(EPOLLIN | EPOLLET);
ev.data.ptr = (void *)(1 | (intptr_t)&p->wakeup);
if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) { if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) {
err = GRPC_OS_ERROR(errno, "epoll_ctl"); err = GRPC_OS_ERROR(errno, "epoll_ctl");
close(new_epfd); close(new_epfd);
@ -492,7 +493,7 @@ static grpc_error *pollable_materialize(pollable_t *p) {
} }
/* pollable must be materialized */ /* pollable must be materialized */
static grpc_error *pollable_add_fd(pollable_t *p, grpc_fd *fd) { static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
grpc_error *error = GRPC_ERROR_NONE; grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "pollable_add_fd"; static const char *err_desc = "pollable_add_fd";
const int epfd = p->epfd; const int epfd = p->epfd;
@ -507,9 +508,9 @@ static grpc_error *pollable_add_fd(pollable_t *p, grpc_fd *fd) {
gpr_mu_unlock(&fd->orphaned_mu); gpr_mu_unlock(&fd->orphaned_mu);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
struct epoll_event ev_fd = { struct epoll_event ev_fd;
.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE), ev_fd.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
.data.ptr = fd}; ev_fd.data.ptr = fd;
if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) { if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
switch (errno) { switch (errno) {
case EEXIST: case EEXIST:
@ -557,30 +558,34 @@ static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_unused) { grpc_error *error_unused) {
grpc_error *error = GRPC_ERROR_NONE; grpc_error *error = GRPC_ERROR_NONE;
grpc_pollset *pollset = (grpc_pollset *)arg; grpc_pollset *pollset = (grpc_pollset *)arg;
gpr_mu_lock(&pollset->pollable.po.mu); gpr_mu_lock(&pollset->pollable_obj.po.mu);
if (pollset->root_worker != NULL) { if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker; grpc_pollset_worker *worker = pollset->root_worker;
do { do {
if (worker->pollable != &pollset->pollable) { GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
gpr_mu_lock(&worker->pollable->po.mu); if (worker->pollable_obj != &pollset->pollable_obj) {
gpr_mu_lock(&worker->pollable_obj->po.mu);
} }
if (worker->initialized_cv && worker != pollset->root_worker) { if (worker->initialized_cv && worker != pollset->root_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) { if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_cv %p (pollable %p vs %p)", gpr_log(GPR_DEBUG, "PS:%p kickall_via_cv %p (pollable %p vs %p)",
pollset, worker, &pollset->pollable, worker->pollable); pollset, worker, &pollset->pollable_obj,
worker->pollable_obj);
} }
worker->kicked = true; worker->kicked = true;
gpr_cv_signal(&worker->cv); gpr_cv_signal(&worker->cv);
} else { } else {
if (GRPC_TRACER_ON(grpc_polling_trace)) { if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_wakeup %p (pollable %p vs %p)", gpr_log(GPR_DEBUG, "PS:%p kickall_via_wakeup %p (pollable %p vs %p)",
pollset, worker, &pollset->pollable, worker->pollable); pollset, worker, &pollset->pollable_obj,
worker->pollable_obj);
} }
append_error(&error, grpc_wakeup_fd_wakeup(&worker->pollable->wakeup), append_error(&error,
grpc_wakeup_fd_wakeup(&worker->pollable_obj->wakeup),
"pollset_shutdown"); "pollset_shutdown");
} }
if (worker->pollable != &pollset->pollable) { if (worker->pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&worker->pollable->po.mu); gpr_mu_unlock(&worker->pollable_obj->po.mu);
} }
worker = worker->links[PWL_POLLSET].next; worker = worker->links[PWL_POLLSET].next;
@ -588,7 +593,7 @@ static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
} }
pollset->kick_alls_pending--; pollset->kick_alls_pending--;
pollset_maybe_finish_shutdown(exec_ctx, pollset); pollset_maybe_finish_shutdown(exec_ctx, pollset);
gpr_mu_unlock(&pollset->pollable.po.mu); gpr_mu_unlock(&pollset->pollable_obj.po.mu);
GRPC_LOG_IF_ERROR("kick_all", error); GRPC_LOG_IF_ERROR("kick_all", error);
} }
@ -599,7 +604,7 @@ static void pollset_kick_all(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
} }
static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable_t *p, static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p,
grpc_pollset_worker *specific_worker) { grpc_pollset_worker *specific_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) { if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, gpr_log(GPR_DEBUG,
@ -662,26 +667,27 @@ static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable_t *p,
} }
/* p->po.mu must be held before calling this function */ /* p->po.mu must be held before calling this function */
static grpc_error *pollset_kick(grpc_pollset *pollset, static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) { grpc_pollset_worker *specific_worker) {
pollable_t *p = pollset->current_pollable; pollable *p = pollset->current_pollable_obj;
if (p != &pollset->pollable) { GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
if (p != &pollset->pollable_obj) {
gpr_mu_lock(&p->po.mu); gpr_mu_lock(&p->po.mu);
} }
grpc_error *error = pollset_kick_inner(pollset, p, specific_worker); grpc_error *error = pollset_kick_inner(pollset, p, specific_worker);
if (p != &pollset->pollable) { if (p != &pollset->pollable_obj) {
gpr_mu_unlock(&p->po.mu); gpr_mu_unlock(&p->po.mu);
} }
return error; return error;
} }
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollable_init(&pollset->pollable, PO_POLLSET); pollable_init(&pollset->pollable_obj, PO_POLLSET);
pollset->current_pollable = &g_empty_pollable; pollset->current_pollable_obj = &g_empty_pollable;
pollset->kicked_without_poller = false; pollset->kicked_without_poller = false;
pollset->shutdown_closure = NULL; pollset->shutdown_closure = NULL;
pollset->root_worker = NULL; pollset->root_worker = NULL;
*mu = &pollset->pollable.po.mu; *mu = &pollset->pollable_obj.po.mu;
} }
/* Convert a timespec to milliseconds: /* Convert a timespec to milliseconds:
@ -703,7 +709,10 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
} }
static const gpr_timespec round_up = { static const gpr_timespec round_up = {
.clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1}; 0, /* tv_sec */
GPR_NS_PER_MS - 1, /* tv_nsec */
GPR_TIMESPAN /* clock_type */
};
timeout = gpr_time_sub(deadline, now); timeout = gpr_time_sub(deadline, now);
int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up)); int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
return millis >= 1 ? millis : 1; return millis >= 1 ? millis : 1;
@ -729,8 +738,8 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
static grpc_error *fd_become_pollable_locked(grpc_fd *fd) { static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
grpc_error *error = GRPC_ERROR_NONE; grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "fd_become_pollable"; static const char *err_desc = "fd_become_pollable";
if (append_error(&error, pollable_materialize(&fd->pollable), err_desc)) { if (append_error(&error, pollable_materialize(&fd->pollable_obj), err_desc)) {
append_error(&error, pollable_add_fd(&fd->pollable, fd), err_desc); append_error(&error, pollable_add_fd(&fd->pollable_obj, fd), err_desc);
} }
return error; return error;
} }
@ -744,8 +753,8 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset_maybe_finish_shutdown(exec_ctx, pollset); pollset_maybe_finish_shutdown(exec_ctx, pollset);
} }
static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable_t *p) { static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) {
return p != &g_empty_pollable && p != &pollset->pollable; return p != &g_empty_pollable && p != &pollset->pollable_obj;
} }
static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx, static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
@ -791,9 +800,9 @@ static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
/* pollset_shutdown is guaranteed to be called before pollset_destroy. */ /* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
pollable_destroy(&pollset->pollable); pollable_destroy(&pollset->pollable_obj);
if (pollset_is_pollable_fd(pollset, pollset->current_pollable)) { if (pollset_is_pollable_fd(pollset, pollset->current_pollable_obj)) {
UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable, 2, UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable_obj, 2,
"pollset_pollable"); "pollset_pollable");
} }
GRPC_LOG_IF_ERROR("pollset_process_events", GRPC_LOG_IF_ERROR("pollset_process_events",
@ -801,7 +810,7 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
} }
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollable_t *p, gpr_timespec now, pollable *p, gpr_timespec now,
gpr_timespec deadline) { gpr_timespec deadline) {
int timeout = poll_deadline_to_millis_timeout(deadline, now); int timeout = poll_deadline_to_millis_timeout(deadline, now);
@ -883,68 +892,69 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
worker->initialized_cv = false; worker->initialized_cv = false;
worker->kicked = false; worker->kicked = false;
worker->pollset = pollset; worker->pollset = pollset;
worker->pollable = pollset->current_pollable; worker->pollable_obj = pollset->current_pollable_obj;
if (pollset_is_pollable_fd(pollset, worker->pollable)) { if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) {
REF_BY((grpc_fd *)worker->pollable, 2, "one_poll"); REF_BY((grpc_fd *)worker->pollable_obj, 2, "one_poll");
} }
worker_insert(&pollset->root_worker, PWL_POLLSET, worker); worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
if (!worker_insert(&worker->pollable->root_worker, PWL_POLLABLE, worker)) { if (!worker_insert(&worker->pollable_obj->root_worker, PWL_POLLABLE,
worker)) {
worker->initialized_cv = true; worker->initialized_cv = true;
gpr_cv_init(&worker->cv); gpr_cv_init(&worker->cv);
if (worker->pollable != &pollset->pollable) { if (worker->pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&pollset->pollable.po.mu); gpr_mu_unlock(&pollset->pollable_obj.po.mu);
} }
if (GRPC_TRACER_ON(grpc_polling_trace) && if (GRPC_TRACER_ON(grpc_polling_trace) &&
worker->pollable->root_worker != worker) { worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset, gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
worker->pollable, worker, worker->pollable_obj, worker,
poll_deadline_to_millis_timeout(deadline, *now)); poll_deadline_to_millis_timeout(deadline, *now));
} }
while (do_poll && worker->pollable->root_worker != worker) { while (do_poll && worker->pollable_obj->root_worker != worker) {
if (gpr_cv_wait(&worker->cv, &worker->pollable->po.mu, deadline)) { if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu, deadline)) {
if (GRPC_TRACER_ON(grpc_polling_trace)) { if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset, gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
worker->pollable, worker); worker->pollable_obj, worker);
} }
do_poll = false; do_poll = false;
} else if (worker->kicked) { } else if (worker->kicked) {
if (GRPC_TRACER_ON(grpc_polling_trace)) { if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, worker->pollable, gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset,
worker); worker->pollable_obj, worker);
} }
do_poll = false; do_poll = false;
} else if (GRPC_TRACER_ON(grpc_polling_trace) && } else if (GRPC_TRACER_ON(grpc_polling_trace) &&
worker->pollable->root_worker != worker) { worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset, gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
worker->pollable, worker); worker->pollable_obj, worker);
} }
} }
if (worker->pollable != &pollset->pollable) { if (worker->pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&worker->pollable->po.mu); gpr_mu_unlock(&worker->pollable_obj->po.mu);
gpr_mu_lock(&pollset->pollable.po.mu); gpr_mu_lock(&pollset->pollable_obj.po.mu);
gpr_mu_lock(&worker->pollable->po.mu); gpr_mu_lock(&worker->pollable_obj->po.mu);
} }
*now = gpr_now(now->clock_type); *now = gpr_now(now->clock_type);
} }
return do_poll && pollset->shutdown_closure == NULL && return do_poll && pollset->shutdown_closure == NULL &&
pollset->current_pollable == worker->pollable; pollset->current_pollable_obj == worker->pollable_obj;
} }
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker, grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl) { grpc_pollset_worker **worker_hdl) {
if (NEW_ROOT == if (NEW_ROOT ==
worker_remove(&worker->pollable->root_worker, PWL_POLLABLE, worker)) { worker_remove(&worker->pollable_obj->root_worker, PWL_POLLABLE, worker)) {
gpr_cv_signal(&worker->pollable->root_worker->cv); gpr_cv_signal(&worker->pollable_obj->root_worker->cv);
} }
if (worker->initialized_cv) { if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv); gpr_cv_destroy(&worker->cv);
} }
if (pollset_is_pollable_fd(pollset, worker->pollable)) { if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) {
UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable, 2, "one_poll"); UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable_obj, 2, "one_poll");
} }
if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) { if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
pollset_maybe_finish_shutdown(exec_ctx, pollset); pollset_maybe_finish_shutdown(exec_ctx, pollset);
@ -972,41 +982,41 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->kicked_without_poller = false; pollset->kicked_without_poller = false;
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
if (pollset->current_pollable != &pollset->pollable) { if (pollset->current_pollable_obj != &pollset->pollable_obj) {
gpr_mu_lock(&pollset->current_pollable->po.mu); gpr_mu_lock(&pollset->current_pollable_obj->po.mu);
} }
if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) { if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!pollset->shutdown_closure); GPR_ASSERT(!pollset->shutdown_closure);
append_error(&error, pollable_materialize(worker.pollable), err_desc); append_error(&error, pollable_materialize(worker.pollable_obj), err_desc);
if (worker.pollable != &pollset->pollable) { if (worker.pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&worker.pollable->po.mu); gpr_mu_unlock(&worker.pollable_obj->po.mu);
} }
gpr_mu_unlock(&pollset->pollable.po.mu); gpr_mu_unlock(&pollset->pollable_obj.po.mu);
if (pollset->event_cursor == pollset->event_count) { if (pollset->event_cursor == pollset->event_count) {
append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable, append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable_obj,
now, deadline), now, deadline),
err_desc); err_desc);
} }
append_error(&error, pollset_process_events(exec_ctx, pollset, false), append_error(&error, pollset_process_events(exec_ctx, pollset, false),
err_desc); err_desc);
gpr_mu_lock(&pollset->pollable.po.mu); gpr_mu_lock(&pollset->pollable_obj.po.mu);
if (worker.pollable != &pollset->pollable) { if (worker.pollable_obj != &pollset->pollable_obj) {
gpr_mu_lock(&worker.pollable->po.mu); gpr_mu_lock(&worker.pollable_obj->po.mu);
} }
gpr_tls_set(&g_current_thread_pollset, 0); gpr_tls_set(&g_current_thread_pollset, 0);
gpr_tls_set(&g_current_thread_worker, 0); gpr_tls_set(&g_current_thread_worker, 0);
pollset_maybe_finish_shutdown(exec_ctx, pollset); pollset_maybe_finish_shutdown(exec_ctx, pollset);
} }
end_worker(exec_ctx, pollset, &worker, worker_hdl); end_worker(exec_ctx, pollset, &worker, worker_hdl);
if (worker.pollable != &pollset->pollable) { if (worker.pollable_obj != &pollset->pollable_obj) {
gpr_mu_unlock(&worker.pollable->po.mu); gpr_mu_unlock(&worker.pollable_obj->po.mu);
} }
if (grpc_exec_ctx_has_work(exec_ctx)) { if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->pollable.po.mu); gpr_mu_unlock(&pollset->pollable_obj.po.mu);
grpc_exec_ctx_flush(exec_ctx); grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->pollable.po.mu); gpr_mu_lock(&pollset->pollable_obj.po.mu);
} }
return error; return error;
} }
@ -1023,27 +1033,27 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
bool fd_locked) { bool fd_locked) {
static const char *err_desc = "pollset_add_fd"; static const char *err_desc = "pollset_add_fd";
grpc_error *error = GRPC_ERROR_NONE; grpc_error *error = GRPC_ERROR_NONE;
if (pollset->current_pollable == &g_empty_pollable) { if (pollset->current_pollable_obj == &g_empty_pollable) {
if (GRPC_TRACER_ON(grpc_polling_trace)) { if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from empty to fd", pollset, "PS:%p add fd %p; transition pollable from empty to fd", pollset,
fd); fd);
} }
/* empty pollable --> single fd pollable_t */ /* empty pollable --> single fd pollable */
pollset_kick_all(exec_ctx, pollset); pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &fd->pollable; pollset->current_pollable_obj = &fd->pollable_obj;
if (!fd_locked) gpr_mu_lock(&fd->pollable.po.mu); if (!fd_locked) gpr_mu_lock(&fd->pollable_obj.po.mu);
append_error(&error, fd_become_pollable_locked(fd), err_desc); append_error(&error, fd_become_pollable_locked(fd), err_desc);
if (!fd_locked) gpr_mu_unlock(&fd->pollable.po.mu); if (!fd_locked) gpr_mu_unlock(&fd->pollable_obj.po.mu);
REF_BY(fd, 2, "pollset_pollable"); REF_BY(fd, 2, "pollset_pollable");
} else if (pollset->current_pollable == &pollset->pollable) { } else if (pollset->current_pollable_obj == &pollset->pollable_obj) {
if (GRPC_TRACER_ON(grpc_polling_trace)) { if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p add fd %p; already multipolling", pollset, fd); gpr_log(GPR_DEBUG, "PS:%p add fd %p; already multipolling", pollset, fd);
} }
append_error(&error, pollable_add_fd(pollset->current_pollable, fd), append_error(&error, pollable_add_fd(pollset->current_pollable_obj, fd),
err_desc); err_desc);
} else if (pollset->current_pollable != &fd->pollable) { } else if (pollset->current_pollable_obj != &fd->pollable_obj) {
grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable; grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable_obj;
if (GRPC_TRACER_ON(grpc_polling_trace)) { if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from fd %p to multipoller", "PS:%p add fd %p; transition pollable from fd %p to multipoller",
@ -1055,11 +1065,11 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read"); grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read");
grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write"); grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write");
pollset_kick_all(exec_ctx, pollset); pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &pollset->pollable; pollset->current_pollable_obj = &pollset->pollable_obj;
if (append_error(&error, pollable_materialize(&pollset->pollable), if (append_error(&error, pollable_materialize(&pollset->pollable_obj),
err_desc)) { err_desc)) {
pollable_add_fd(&pollset->pollable, had_fd); pollable_add_fd(&pollset->pollable_obj, had_fd);
pollable_add_fd(&pollset->pollable, fd); pollable_add_fd(&pollset->pollable_obj, fd);
} }
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_SCHED(exec_ctx,
GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd, GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd,
@ -1071,9 +1081,9 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) { grpc_fd *fd) {
gpr_mu_lock(&pollset->pollable.po.mu); gpr_mu_lock(&pollset->pollable_obj.po.mu);
grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd, false); grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd, false);
gpr_mu_unlock(&pollset->pollable.po.mu); gpr_mu_unlock(&pollset->pollable_obj.po.mu);
GRPC_LOG_IF_ERROR("pollset_add_fd", error); GRPC_LOG_IF_ERROR("pollset_add_fd", error);
} }
@ -1095,7 +1105,7 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
grpc_fd *fd) { grpc_fd *fd) {
po_join(exec_ctx, &pss->po, &fd->pollable.po); po_join(exec_ctx, &pss->po, &fd->pollable_obj.po);
} }
static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
@ -1103,7 +1113,7 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pss, grpc_pollset *ps) { grpc_pollset_set *pss, grpc_pollset *ps) {
po_join(exec_ctx, &pss->po, &ps->pollable.po); po_join(exec_ctx, &pss->po, &ps->pollable_obj.po);
} }
static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
@ -1386,34 +1396,34 @@ static void shutdown_engine(void) {
} }
static const grpc_event_engine_vtable vtable = { static const grpc_event_engine_vtable vtable = {
.pollset_size = sizeof(grpc_pollset), sizeof(grpc_pollset),
.fd_create = fd_create, fd_create,
.fd_wrapped_fd = fd_wrapped_fd, fd_wrapped_fd,
.fd_orphan = fd_orphan, fd_orphan,
.fd_shutdown = fd_shutdown, fd_shutdown,
.fd_is_shutdown = fd_is_shutdown, fd_notify_on_read,
.fd_notify_on_read = fd_notify_on_read, fd_notify_on_write,
.fd_notify_on_write = fd_notify_on_write, fd_is_shutdown,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, fd_get_read_notifier_pollset,
.pollset_init = pollset_init, pollset_init,
.pollset_shutdown = pollset_shutdown, pollset_shutdown,
.pollset_destroy = pollset_destroy, pollset_destroy,
.pollset_work = pollset_work, pollset_work,
.pollset_kick = pollset_kick, pollset_kick,
.pollset_add_fd = pollset_add_fd, pollset_add_fd,
.pollset_set_create = pollset_set_create, pollset_set_create,
.pollset_set_destroy = pollset_set_destroy, pollset_set_destroy,
.pollset_set_add_pollset = pollset_set_add_pollset, pollset_set_add_pollset,
.pollset_set_del_pollset = pollset_set_del_pollset, pollset_set_del_pollset,
.pollset_set_add_pollset_set = pollset_set_add_pollset_set, pollset_set_add_pollset_set,
.pollset_set_del_pollset_set = pollset_set_del_pollset_set, pollset_set_del_pollset_set,
.pollset_set_add_fd = pollset_set_add_fd, pollset_set_add_fd,
.pollset_set_del_fd = pollset_set_del_fd, pollset_set_del_fd,
.shutdown_engine = shutdown_engine, shutdown_engine,
}; };
const grpc_event_engine_vtable *grpc_init_epollex_linux( const grpc_event_engine_vtable *grpc_init_epollex_linux(

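The `struct epoll_event` and `gpr_timespec` hunks above replace C99 designated initializers with declare-then-assign statements (or positional initializers with order comments), presumably so these files also compile under toolchains without designated-initializer support, such as C++ compilers before C++20; the commit itself does not state the motivation. A minimal standalone sketch of the pattern (`make_event_c99` and `make_event_portable` are hypothetical names, not part of the tree):

    #include <stdint.h>
    #include <sys/epoll.h>

    /* Before: C99 designated initializers -- not accepted by pre-C++20
       C++ front ends. Note the nested designator for the union member. */
    static struct epoll_event make_event_c99(void *tag) {
      struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
                               .data.ptr = tag};
      return ev;
    }

    /* After: declare first, then assign each member -- valid C89 and C++. */
    static struct epoll_event make_event_portable(void *tag) {
      struct epoll_event ev;
      ev.events = (uint32_t)(EPOLLIN | EPOLLET);
      ev.data.ptr = tag;
      return ev;
    }

    int main(void) {
      struct epoll_event a = make_event_c99(0);
      struct epoll_event b = make_event_portable(0);
      return (a.events == b.events) ? 0 : 1; /* both forms are equivalent */
    }
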
@@ -1021,10 +1021,11 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
 }
 /* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
                                 grpc_pollset_worker *specific_worker) {
   GPR_TIMER_BEGIN("pollset_kick", 0);
   grpc_error *error = GRPC_ERROR_NONE;
+  GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
   const char *err_desc = "Kick Failure";
   grpc_pollset_worker *worker = specific_worker;
   if (worker != NULL) {
@@ -1132,7 +1133,8 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
 }
 static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
-                                           grpc_pollset *ps, char *reason) {
+                                           grpc_pollset *ps,
+                                           const char *reason) {
   if (ps->po.pi != NULL) {
     PI_UNREF(exec_ctx, ps->po.pi, reason);
   }
@@ -1158,7 +1160,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   GPR_ASSERT(!pollset->shutting_down);
   pollset->shutting_down = true;
   pollset->shutdown_done = closure;
-  pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+  pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
   /* If the pollset has any workers, we cannot call finish_shutdown_locked()
      because it would release the underlying polling island. In such a case, we
@@ -1670,34 +1672,34 @@ static void shutdown_engine(void) {
 }
 static const grpc_event_engine_vtable vtable = {
-    .pollset_size = sizeof(grpc_pollset),
-    .fd_create = fd_create,
-    .fd_wrapped_fd = fd_wrapped_fd,
-    .fd_orphan = fd_orphan,
-    .fd_shutdown = fd_shutdown,
-    .fd_is_shutdown = fd_is_shutdown,
-    .fd_notify_on_read = fd_notify_on_read,
-    .fd_notify_on_write = fd_notify_on_write,
-    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-    .pollset_init = pollset_init,
-    .pollset_shutdown = pollset_shutdown,
-    .pollset_destroy = pollset_destroy,
-    .pollset_work = pollset_work,
-    .pollset_kick = pollset_kick,
-    .pollset_add_fd = pollset_add_fd,
-    .pollset_set_create = pollset_set_create,
-    .pollset_set_destroy = pollset_set_destroy,
-    .pollset_set_add_pollset = pollset_set_add_pollset,
-    .pollset_set_del_pollset = pollset_set_del_pollset,
-    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
-    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
-    .pollset_set_add_fd = pollset_set_add_fd,
-    .pollset_set_del_fd = pollset_set_del_fd,
-    .shutdown_engine = shutdown_engine,
+    sizeof(grpc_pollset),
+    fd_create,
+    fd_wrapped_fd,
+    fd_orphan,
+    fd_shutdown,
+    fd_notify_on_read,
+    fd_notify_on_write,
+    fd_is_shutdown,
+    fd_get_read_notifier_pollset,
+    pollset_init,
+    pollset_shutdown,
+    pollset_destroy,
+    pollset_work,
+    pollset_kick,
+    pollset_add_fd,
+    pollset_set_create,
+    pollset_set_destroy,
+    pollset_set_add_pollset,
+    pollset_set_del_pollset,
+    pollset_set_add_pollset_set,
+    pollset_set_del_pollset_set,
+    pollset_set_add_fd,
+    pollset_set_del_fd,
+    shutdown_engine,
 };
 /* It is possible that GLIBC has epoll but the underlying kernel doesn't.

@@ -209,7 +209,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
 #define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
 /* As per pollset_kick, with an extended set of flags (defined above)
    -- mostly for fd_posix's use. */
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
+static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
                                     grpc_pollset_worker *specific_worker,
                                     uint32_t flags) GRPC_MUST_USE_RESULT;
@@ -365,36 +365,39 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
   return notifier;
 }
-static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) {
+static grpc_error *pollset_kick_locked(grpc_exec_ctx *exec_ctx,
+                                       grpc_fd_watcher *watcher) {
   gpr_mu_lock(&watcher->pollset->mu);
   GPR_ASSERT(watcher->worker);
-  grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker,
+  grpc_error *err =
+      pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker,
                        GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
   gpr_mu_unlock(&watcher->pollset->mu);
   return err;
 }
-static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
+static void maybe_wake_one_watcher_locked(grpc_exec_ctx *exec_ctx,
+                                          grpc_fd *fd) {
   if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
-    pollset_kick_locked(fd->inactive_watcher_root.next);
+    pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next);
   } else if (fd->read_watcher) {
-    pollset_kick_locked(fd->read_watcher);
+    pollset_kick_locked(exec_ctx, fd->read_watcher);
   } else if (fd->write_watcher) {
-    pollset_kick_locked(fd->write_watcher);
+    pollset_kick_locked(exec_ctx, fd->write_watcher);
   }
 }
-static void wake_all_watchers_locked(grpc_fd *fd) {
+static void wake_all_watchers_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
   grpc_fd_watcher *watcher;
   for (watcher = fd->inactive_watcher_root.next;
        watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
-    pollset_kick_locked(watcher);
+    pollset_kick_locked(exec_ctx, watcher);
   }
   if (fd->read_watcher) {
-    pollset_kick_locked(fd->read_watcher);
+    pollset_kick_locked(exec_ctx, fd->read_watcher);
   }
   if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
-    pollset_kick_locked(fd->write_watcher);
+    pollset_kick_locked(exec_ctx, fd->write_watcher);
   }
 }
@@ -435,7 +438,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
   if (!has_watchers(fd)) {
     close_fd_locked(exec_ctx, fd);
   } else {
-    wake_all_watchers_locked(fd);
+    wake_all_watchers_locked(exec_ctx, fd);
   }
   gpr_mu_unlock(&fd->mu);
   UNREF_BY(fd, 2, reason); /* drop the reference */
@@ -479,7 +482,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
     /* already ready ==> queue the closure to run immediately */
     *st = CLOSURE_NOT_READY;
     GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd));
-    maybe_wake_one_watcher_locked(fd);
+    maybe_wake_one_watcher_locked(exec_ctx, fd);
   } else {
     /* upcallptr was set to a different closure. This is an error! */
     gpr_log(GPR_ERROR,
@@ -648,7 +651,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
     }
   }
   if (kick) {
-    maybe_wake_one_watcher_locked(fd);
+    maybe_wake_one_watcher_locked(exec_ctx, fd);
   }
   if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
     close_fd_locked(exec_ctx, fd);
@@ -712,11 +715,12 @@ static void kick_append_error(grpc_error **composite, grpc_error *error) {
   *composite = grpc_error_add_child(*composite, error);
 }
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
+static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
                                     grpc_pollset_worker *specific_worker,
                                     uint32_t flags) {
   GPR_TIMER_BEGIN("pollset_kick_ext", 0);
   grpc_error *error = GRPC_ERROR_NONE;
+  GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
   /* pollset->mu already held */
   if (specific_worker != NULL) {
@@ -782,9 +786,9 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p,
   return error;
 }
-static grpc_error *pollset_kick(grpc_pollset *p,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
                                 grpc_pollset_worker *specific_worker) {
-  return pollset_kick_ext(p, specific_worker, 0);
+  return pollset_kick_ext(exec_ctx, p, specific_worker, 0);
 }
 /* global state management */
@@ -847,7 +851,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   }
   pollset->fds[pollset->fd_count++] = fd;
   GRPC_FD_REF(fd, "multipoller");
-  pollset_kick(pollset, NULL);
+  pollset_kick(exec_ctx, pollset, NULL);
 exit:
   gpr_mu_unlock(&pollset->mu);
 }
@@ -1083,7 +1087,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   /* check shutdown conditions */
   if (pollset->shutting_down) {
     if (pollset_has_workers(pollset)) {
-      pollset_kick(pollset, NULL);
+      pollset_kick(exec_ctx, pollset, NULL);
     } else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
       pollset->called_shutdown = 1;
       gpr_mu_unlock(&pollset->mu);
@@ -1112,7 +1116,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   GPR_ASSERT(!pollset->shutting_down);
   pollset->shutting_down = 1;
   pollset->shutdown_done = closure;
-  pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+  pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
   if (!pollset_has_workers(pollset)) {
     GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
   }
@@ -1688,34 +1692,34 @@ static void shutdown_engine(void) {
 }
 static const grpc_event_engine_vtable vtable = {
-    .pollset_size = sizeof(grpc_pollset),
-    .fd_create = fd_create,
-    .fd_wrapped_fd = fd_wrapped_fd,
-    .fd_orphan = fd_orphan,
-    .fd_shutdown = fd_shutdown,
-    .fd_is_shutdown = fd_is_shutdown,
-    .fd_notify_on_read = fd_notify_on_read,
-    .fd_notify_on_write = fd_notify_on_write,
-    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-    .pollset_init = pollset_init,
-    .pollset_shutdown = pollset_shutdown,
-    .pollset_destroy = pollset_destroy,
-    .pollset_work = pollset_work,
-    .pollset_kick = pollset_kick,
-    .pollset_add_fd = pollset_add_fd,
-    .pollset_set_create = pollset_set_create,
-    .pollset_set_destroy = pollset_set_destroy,
-    .pollset_set_add_pollset = pollset_set_add_pollset,
-    .pollset_set_del_pollset = pollset_set_del_pollset,
-    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
-    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
-    .pollset_set_add_fd = pollset_set_add_fd,
-    .pollset_set_del_fd = pollset_set_del_fd,
-    .shutdown_engine = shutdown_engine,
+    sizeof(grpc_pollset),
+    fd_create,
+    fd_wrapped_fd,
+    fd_orphan,
+    fd_shutdown,
+    fd_notify_on_read,
+    fd_notify_on_write,
+    fd_is_shutdown,
+    fd_get_read_notifier_pollset,
+    pollset_init,
+    pollset_shutdown,
+    pollset_destroy,
+    pollset_work,
+    pollset_kick,
+    pollset_add_fd,
+    pollset_set_create,
+    pollset_set_destroy,
+    pollset_set_add_pollset,
+    pollset_set_del_pollset,
+    pollset_set_add_pollset_set,
+    pollset_set_del_pollset_set,
+    pollset_set_add_fd,
+    pollset_set_del_fd,
+    shutdown_engine,
 };
 const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request) {

@@ -210,9 +210,9 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   return g_event_engine->pollset_work(exec_ctx, pollset, worker, now, deadline);
 }
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                               grpc_pollset_worker *specific_worker) {
-  return g_event_engine->pollset_kick(pollset, specific_worker);
+  return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker);
 }
 void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,

@@ -54,7 +54,7 @@ typedef struct grpc_event_engine_vtable {
   grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                               grpc_pollset_worker **worker, gpr_timespec now,
                               gpr_timespec deadline);
-  grpc_error *(*pollset_kick)(grpc_pollset *pollset,
+  grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                               grpc_pollset_worker *specific_worker);
   void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                          struct grpc_fd *fd);

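The three `grpc_event_engine_vtable` hunks follow directly from dropping designated initializers: a positional initializer binds entries by order, not by name, which is why `fd_notify_on_read`/`fd_notify_on_write` now appear before `fd_is_shutdown`, matching the member order in the typedef above. A cut-down sketch of the hazard, using a hypothetical `demo_vtable` rather than the real struct:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for a function-pointer table like grpc_event_engine_vtable. */
    typedef struct {
      size_t pollset_size;
      void (*fd_notify_on_read)(int fd);
      void (*fd_notify_on_write)(int fd);
      int (*fd_is_shutdown)(int fd);
    } demo_vtable;

    static void notify_read(int fd) { printf("notify_on_read(%d)\n", fd); }
    static void notify_write(int fd) { printf("notify_on_write(%d)\n", fd); }
    static int is_shutdown(int fd) { (void)fd; return 0; }

    /* Positional form: this list must mirror the struct declaration
       exactly; swapping entries is a type error at best, and a silent
       misbinding if two slots happen to share a signature. */
    static const demo_vtable vtable = {
        sizeof(int), /* pollset_size */
        notify_read,
        notify_write,
        is_shutdown,
    };

    int main(void) {
      vtable.fd_notify_on_read(3);
      return vtable.fd_is_shutdown(3);
    }
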
@@ -32,16 +32,14 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/support/spinlock.h"
-#define MAX_DEPTH 2
 typedef struct {
   gpr_mu mu;
   gpr_cv cv;
   grpc_closure_list elems;
-  size_t depth;
   bool shutdown;
   bool queued_long_job;
   gpr_thd_id id;
+  grpc_closure_list local_elems;
 } thread_state;
 static thread_state *g_thread_state;
@@ -56,10 +54,12 @@ static grpc_tracer_flag executor_trace =
 static void executor_thread(void *arg);
-static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
-  size_t n = 0;
-  grpc_closure *c = list.head;
+static void run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+  int n = 0;  // number of closures executed
+  while (!grpc_closure_list_empty(*list)) {
+    grpc_closure *c = list->head;
+    grpc_closure_list_init(list);
     while (c != NULL) {
       grpc_closure *next = c->next_data.next;
       grpc_error *error = c->error_data.error;
@@ -74,14 +74,15 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
 #ifndef NDEBUG
       c->scheduled = false;
 #endif
+      n++;
       c->cb(exec_ctx, c->cb_arg, error);
       GRPC_ERROR_UNREF(error);
       c = next;
-    n++;
       grpc_exec_ctx_flush(exec_ctx);
     }
+  }
-  return n;
+  GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, n);
 }
 bool grpc_executor_is_threaded() {
@@ -126,7 +127,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
     for (size_t i = 0; i < g_max_threads; i++) {
       gpr_mu_destroy(&g_thread_state[i].mu);
       gpr_cv_destroy(&g_thread_state[i].cv);
-      run_closures(exec_ctx, g_thread_state[i].elems);
+      run_closures(exec_ctx, &g_thread_state[i].elems);
     }
     gpr_free(g_thread_state);
     gpr_tls_destroy(&g_this_thread_state);
@@ -150,14 +151,14 @@ static void executor_thread(void *arg) {
   grpc_exec_ctx exec_ctx =
       GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
-  size_t subtract_depth = 0;
+  GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(&exec_ctx);
+  bool used = false;
   for (;;) {
     if (GRPC_TRACER_ON(executor_trace)) {
-      gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
-              (int)(ts - g_thread_state), subtract_depth);
+      gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step", (int)(ts - g_thread_state));
     }
     gpr_mu_lock(&ts->mu);
-    ts->depth -= subtract_depth;
     while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
       ts->queued_long_job = false;
       gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
@@ -170,15 +171,20 @@ static void executor_thread(void *arg) {
       gpr_mu_unlock(&ts->mu);
       break;
     }
+    if (!used) {
+      GRPC_STATS_INC_EXECUTOR_THREADS_USED(&exec_ctx);
+      used = true;
+    }
     GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
-    grpc_closure_list exec = ts->elems;
+    GPR_ASSERT(grpc_closure_list_empty(ts->local_elems));
+    ts->local_elems = ts->elems;
     ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
     gpr_mu_unlock(&ts->mu);
     if (GRPC_TRACER_ON(executor_trace)) {
       gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
     }
-    subtract_depth = run_closures(&exec_ctx, exec);
+    run_closures(&exec_ctx, &ts->local_elems);
   }
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -211,6 +217,10 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
       ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
     } else {
       GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
+      if (is_short) {
+        grpc_closure_list_append(&ts->local_elems, closure, error);
+        return;
+      }
     }
     thread_state *orig_ts = ts;
@@ -250,8 +260,7 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
       gpr_cv_signal(&ts->cv);
     }
     grpc_closure_list_append(&ts->elems, closure, error);
-    ts->depth++;
-    try_new_thread = ts->depth > MAX_DEPTH &&
+    try_new_thread = ts->elems.head != closure &&
                      cur_thread_count < g_max_threads && !ts->shutdown;
     if (!is_short) ts->queued_long_job = true;
     gpr_mu_unlock(&ts->mu);

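The executor hunks replace the `depth > MAX_DEPTH` spawn heuristic with two cooperating ideas: each thread keeps a `local_elems` list so a short closure scheduled from an executor thread runs on that same thread without re-taking the lock, and a new thread is considered whenever an append lands behind existing work (`ts->elems.head != closure`). The reshaped `run_closures` loops until the list stays empty, because callbacks may enqueue more work while a batch drains. A minimal sketch of that drain loop under those assumptions (hypothetical list types, single-threaded for clarity):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct closure {
      struct closure *next;
      void (*cb)(struct closure *c);
    } closure;

    typedef struct {
      closure *head;
      closure *tail;
    } closure_list;

    static void list_init(closure_list *l) { l->head = l->tail = NULL; }
    static bool list_empty(closure_list l) { return l.head == NULL; }
    static void list_append(closure_list *l, closure *c) {
      c->next = NULL;
      if (l->tail != NULL) {
        l->tail->next = c;
      } else {
        l->head = c;
      }
      l->tail = c;
    }

    /* Mirrors the new run_closures() shape: the outer loop re-checks the
       list because a callback may have appended more work to it. */
    static void run_closures(closure_list *list) {
      int n = 0; /* number of closures executed */
      while (!list_empty(*list)) {
        closure *c = list->head;
        list_init(list); /* detach the batch; callbacks append afresh */
        while (c != NULL) {
          closure *next = c->next;
          n++;
          c->cb(c);
          c = next;
        }
      }
      printf("executed %d closures\n", n);
    }

    static closure_list g_list;
    static int g_spawned = 0;
    static void leaf(closure *c) { (void)c; }
    static closure g_leaf = {NULL, leaf};
    static void spawner(closure *c) {
      (void)c;
      if (!g_spawned) { /* enqueue more work mid-drain, once */
        g_spawned = 1;
        list_append(&g_list, &g_leaf);
      }
    }

    int main(void) {
      closure c1 = {NULL, spawner};
      list_init(&g_list);
      list_append(&g_list, &c1);
      run_closures(&g_list); /* prints "executed 2 closures" */
      return 0;
    }
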
@@ -50,7 +50,7 @@ void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
   grpc_executor_init(exec_ctx);
   grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
   g_root_object.next = g_root_object.prev = &g_root_object;
-  g_root_object.name = "root";
+  g_root_object.name = (char *)"root";
   grpc_network_status_init();
   grpc_iomgr_platform_init();
 }

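Two small hunks in this commit (the `(char *)"root"` cast above and the `const char *svc` table further below) address the same issue: a string literal is immutable, and binding it to a non-`const` `char *` is ill-formed in C++ and warns in C under `-Wwrite-strings`. The preferred fix is to const-qualify the pointer; the cast is the fallback when the field's type cannot change. A compact illustration (hypothetical `named_object` struct):

    #include <stdio.h>

    struct named_object {
      char *name; /* legacy field whose type is fixed elsewhere */
    };

    int main(void) {
      /* Preferred: const-qualified table, as in the resolver hunk below. */
      const char *svc[][2] = {{"http", "80"}, {"https", "443"}};

      /* Fallback: the explicit cast documents that "root" is never
         written through, as in the iomgr hunk above. */
      struct named_object root;
      root.name = (char *)"root";

      printf("%s -> %s, object=%s\n", svc[0][0], svc[0][1], root.name);
      return 0;
    }
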
@@ -57,12 +57,12 @@ bool grpc_is_epollexclusive_available(void) {
     close(fd);
     return false;
   }
-  struct epoll_event ev = {
-      /* choose events that should cause an error on
-         EPOLLEXCLUSIVE enabled kernels - specifically the combination of
-         EPOLLONESHOT and EPOLLEXCLUSIVE */
-      .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT),
-      .data.ptr = NULL};
+  struct epoll_event ev;
+  /* choose events that should cause an error on
+     EPOLLEXCLUSIVE enabled kernels - specifically the combination of
+     EPOLLONESHOT and EPOLLEXCLUSIVE */
+  ev.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT);
+  ev.data.ptr = NULL;
   if (epoll_ctl(fd, EPOLL_CTL_ADD, evfd, &ev) != 0) {
     if (errno != EINVAL) {
       if (!logged_why_not) {

@@ -76,7 +76,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
 /* Break one polling thread out of polling work for this pollset.
    If specific_worker is non-NULL, then kick that worker. */
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                               grpc_pollset_worker *specific_worker)
     GRPC_MUST_USE_RESULT;

@@ -145,7 +145,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   return GRPC_ERROR_NONE;
 }
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                               grpc_pollset_worker *specific_worker) {
   GRPC_UV_ASSERT_SAME_THREAD();
   uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);

@@ -98,7 +98,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
 void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                            grpc_closure *closure) {
   pollset->shutting_down = 1;
-  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+  grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
   if (!pollset->is_iocp_worker) {
     GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
   } else {
@@ -181,7 +181,7 @@ done:
   return GRPC_ERROR_NONE;
 }
-grpc_error *grpc_pollset_kick(grpc_pollset *p,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
                               grpc_pollset_worker *specific_worker) {
   if (specific_worker != NULL) {
     if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
@@ -209,7 +209,7 @@ grpc_error *grpc_pollset_kick(grpc_pollset *p,
       specific_worker =
           pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
       if (specific_worker != NULL) {
-        grpc_pollset_kick(p, specific_worker);
+        grpc_pollset_kick(exec_ctx, p, specific_worker);
       } else if (p->is_iocp_worker) {
         grpc_iocp_kick();
       } else {

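The dominant change across the polling files is mechanical: every `pollset_kick` variant, the `grpc_pollset_kick` wrapper, the vtable slot, and the header declaration all gain a leading `grpc_exec_ctx *exec_ctx` parameter, so the kick path can record `GRPC_STATS_INC_POLLSET_KICK(exec_ctx)` against the caller's execution context instead of global state; transitive callers must thread the context through as well. A toy version of the pattern (stand-in types, not the gRPC definitions):

    #include <stdio.h>

    /* Stand-in for grpc_exec_ctx carrying per-context counters. */
    typedef struct {
      long pollset_kicks;
    } exec_ctx;

    #define STATS_INC_POLLSET_KICK(ctx) ((ctx)->pollset_kicks++)

    /* After the change, the context rides along on every kick... */
    static int pollset_kick(exec_ctx *ctx /*, pollset, worker */) {
      STATS_INC_POLLSET_KICK(ctx);
      /* ... wake the poller here ... */
      return 0;
    }

    /* ...so every transitive caller threads it through too. */
    static void pollset_shutdown(exec_ctx *ctx) { pollset_kick(ctx); }

    int main(void) {
      exec_ctx ctx = {0};
      pollset_kick(&ctx);
      pollset_shutdown(&ctx);
      printf("kicks: %ld\n", ctx.pollset_kicks); /* kicks: 2 */
      return 0;
    }
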
@@ -85,7 +85,7 @@ static grpc_error *blocking_resolve_address_impl(
   if (s != 0) {
     /* Retry if well-known service name is recognized */
-    char *svc[][2] = {{"http", "80"}, {"https", "443"}};
+    const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
     for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
       if (strcmp(port, svc[i][0]) == 0) {
         GRPC_SCHEDULING_START_BLOCKING_REGION;

@@ -22,7 +22,6 @@
 #include <stdint.h>
 #include <string.h>
-#include <grpc/slice_buffer.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>

@@ -85,8 +85,8 @@ static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
     socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp};
 grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory) {
-  return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_FACTORY, factory,
-                                         &socket_factory_arg_vtable);
+  return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_FACTORY,
+                                         factory, &socket_factory_arg_vtable);
 }
 #endif

@@ -76,6 +76,6 @@ static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
     socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
 grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) {
-  return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_MUTATOR, mutator,
-                                         &socket_mutator_arg_vtable);
+  return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_MUTATOR,
+                                         mutator, &socket_mutator_arg_vtable);
 }

@@ -198,12 +198,12 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 /* event manager callback when reads are ready */
 static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
   grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
+  grpc_pollset *read_notifier_pollset;
   if (err != GRPC_ERROR_NONE) {
     goto error;
   }
-  grpc_pollset *read_notifier_pollset =
+  read_notifier_pollset =
       sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
                                &sp->server->next_pollset_to_assign, 1) %
                            sp->server->pollset_count];

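The `on_read` hunk hoists the `read_notifier_pollset` declaration above the `goto error`, so the jump no longer bypasses an initialized declaration, something C++ rejects outright and C compilers flag when the skipped variable is later used. The shape of the fix in isolation (hypothetical function, same control flow):

    #include <stdio.h>

    static int on_read_shaped(int err) {
      int value;    /* declaration hoisted above the goto */
      if (err != 0) {
        goto error; /* OK: the jump skips no initialization */
      }
      value = 42;   /* assignment happens after the early-out */
      printf("value=%d\n", value);
      return 0;
    error:
      return -1;
    }

    int main(void) {
      on_read_shaped(0);
      return on_read_shaped(1) == -1 ? 0 : 1;
    }
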
@@ -156,10 +156,7 @@ struct shared_mutables {
   gpr_mu mu;
 } GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);
-static struct shared_mutables g_shared_mutables = {
-    .checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER,
-    .initialized = false,
-};
+static struct shared_mutables g_shared_mutables;
 static gpr_clock_type g_clock_type;
 static gpr_timespec g_start_time;
@@ -217,6 +214,7 @@ void grpc_timer_list_init(gpr_timespec now) {
   uint32_t i;
   g_shared_mutables.initialized = true;
+  g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
   gpr_mu_init(&g_shared_mutables.mu);
   g_clock_type = now.clock_type;
   g_start_time = now;

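The timer hunk removes the designated static initializer for `g_shared_mutables` entirely: `initialized` is already `false` because objects with static storage duration are zero-initialized, and the spinlock is now seeded at runtime in `grpc_timer_list_init()`. A sketch of the pattern, with a hypothetical `spinlock` type standing in for `gpr_spinlock`:

    #include <stdbool.h>

    typedef struct { int locked; } spinlock;
    #define SPINLOCK_INITIALIZER \
      { 0 }

    static struct {
      spinlock checker_mu;
      bool initialized;
    } g_shared_mutables; /* zero-initialized: initialized == false */

    static void timer_list_init(void) {
      g_shared_mutables.initialized = true;
      /* seeded here instead of via a designated static initializer */
      g_shared_mutables.checker_mu = (spinlock)SPINLOCK_INITIALIZER;
    }

    int main(void) {
      timer_list_init();
      return g_shared_mutables.initialized ? 0 : 1;
    }
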
@@ -79,7 +79,8 @@ static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx,
   detector->is_done = 1;
   GRPC_LOG_IF_ERROR(
       "Pollset kick",
-      grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent), NULL));
+      grpc_pollset_kick(exec_ctx,
+                        grpc_polling_entity_pollset(&detector->pollent), NULL));
   gpr_mu_unlock(g_polling_mu);
 }

@ -455,14 +455,14 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create(
typedef struct { typedef struct {
grpc_channel_security_connector base; grpc_channel_security_connector base;
tsi_ssl_client_handshaker_factory *handshaker_factory; tsi_ssl_client_handshaker_factory *client_handshaker_factory;
char *target_name; char *target_name;
char *overridden_target_name; char *overridden_target_name;
} grpc_ssl_channel_security_connector; } grpc_ssl_channel_security_connector;
typedef struct { typedef struct {
grpc_server_security_connector base; grpc_server_security_connector base;
tsi_ssl_server_handshaker_factory *handshaker_factory; tsi_ssl_server_handshaker_factory *server_handshaker_factory;
} grpc_ssl_server_security_connector; } grpc_ssl_server_security_connector;
static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx, static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
@ -470,9 +470,8 @@ static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
grpc_ssl_channel_security_connector *c = grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc; (grpc_ssl_channel_security_connector *)sc;
grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds); grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
if (c->handshaker_factory != NULL) { tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory);
tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory); c->client_handshaker_factory = NULL;
}
if (c->target_name != NULL) gpr_free(c->target_name); if (c->target_name != NULL) gpr_free(c->target_name);
if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name); if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name);
gpr_free(sc); gpr_free(sc);
@@ -482,9 +481,8 @@ static void ssl_server_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_security_connector *sc) {
   grpc_ssl_server_security_connector *c =
       (grpc_ssl_server_security_connector *)sc;
-  if (c->handshaker_factory != NULL) {
-    tsi_ssl_server_handshaker_factory_destroy(c->handshaker_factory);
-  }
+  tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory);
+  c->server_handshaker_factory = NULL;
   gpr_free(sc);
 }
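Both destroy paths switch from an unconditional _destroy() behind a NULL check to an unguarded _unref(), which suggests the TSI handshaker factories are now reference-counted (shared with the handshakers they create) and that the unref is NULL-safe. An illustrative refcounting sketch under those assumptions (fake_factory is not a TSI type):

#include <stdlib.h>

typedef struct {
  int refcount;
} fake_factory; /* purely illustrative */

static fake_factory *fake_factory_create(void) {
  fake_factory *f = (fake_factory *)malloc(sizeof(*f));
  f->refcount = 1;
  return f;
}

/* Each handshaker that borrows the factory takes a ref... */
static void fake_factory_ref(fake_factory *f) { ++f->refcount; }

/* ...and the last unref frees it.  NULL-safe, which is why the callers
   above can drop their own NULL checks. */
static void fake_factory_unref(fake_factory *f) {
  if (f == NULL) return;
  if (--f->refcount == 0) free(f);
}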
@@ -496,7 +494,7 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
   // Instantiate TSI handshaker.
   tsi_handshaker *tsi_hs = NULL;
   tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
-      c->handshaker_factory,
+      c->client_handshaker_factory,
       c->overridden_target_name != NULL ? c->overridden_target_name
                                         : c->target_name,
       &tsi_hs);
@@ -521,7 +519,7 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
   // Instantiate TSI handshaker.
   tsi_handshaker *tsi_hs = NULL;
   tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
-      c->handshaker_factory, &tsi_hs);
+      c->server_handshaker_factory, &tsi_hs);
   if (result != TSI_OK) {
     gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
             tsi_result_to_string(result));
@@ -852,7 +850,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
   result = tsi_create_ssl_client_handshaker_factory(
       has_key_cert_pair ? &config->pem_key_cert_pair : NULL, pem_root_certs,
       ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
-      &c->handshaker_factory);
+      &c->client_handshaker_factory);
   if (result != TSI_OK) {
     gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
             tsi_result_to_string(result));
@@ -897,7 +895,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
       config->pem_root_certs, get_tsi_client_certificate_request_type(
                                   config->client_certificate_request),
       ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
-      &c->handshaker_factory);
+      &c->server_handshaker_factory);
   if (result != TSI_OK) {
     gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
             tsi_result_to_string(result));

@@ -137,7 +137,7 @@ static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
   // Create zero-copy frame protector, if implemented.
   tsi_zero_copy_grpc_protector *zero_copy_protector = NULL;
   tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector(
-      h->handshaker_result, NULL, &zero_copy_protector);
+      exec_ctx, h->handshaker_result, NULL, &zero_copy_protector);
   if (result != TSI_OK && result != TSI_UNIMPLEMENTED) {
     error = grpc_set_tsi_error_result(
         GRPC_ERROR_CREATE_FROM_STATIC_STRING(

@@ -57,7 +57,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
 }
 
 void gpr_default_log(gpr_log_func_args *args) {
-  char *final_slash;
+  const char *final_slash;
   char *prefix;
   const char *display_file;
   char time_buffer[64];
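final_slash becomes const char * because it holds the result of strrchr() over args->file, itself a const char *; keeping the const documents that nothing writes through the pointer. A small sketch of the idiom (basename_of is illustrative, not the gRPC function):

#include <string.h>

/* strrchr() takes a const char * but returns a plain char *; storing
   the result back into a const char * restores the constness of the
   underlying string. */
static const char *basename_of(const char *file) {
  const char *final_slash = strrchr(file, '/');
  return final_slash != NULL ? final_slash + 1 : file;
}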

@@ -276,7 +276,7 @@ static void add_string_to_split(const char *beg, const char *end, char ***strs,
 
 void gpr_string_split(const char *input, const char *sep, char ***strs,
                       size_t *nstrs) {
-  char *next;
+  const char *next;
   *strs = NULL;
   *nstrs = 0;
   size_t capstrs = 0;
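The same const fix as in gpr_default_log: next only walks the const input string. A minimal field-counting sketch in the same spirit (count_fields is illustrative, not gpr_string_split):

#include <stddef.h>
#include <string.h>

/* Counts sep-delimited fields in input; `next` never writes through
   the pointer, so const char * is the honest type.  Assumes a
   non-empty separator. */
static size_t count_fields(const char *input, const char *sep) {
  size_t n = 1;
  const char *next;
  while ((next = strstr(input, sep)) != NULL) {
    ++n;
    input = next + strlen(sep);
  }
  return n;
}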
