Merge branch 'master' of https://github.com/grpc/grpc into ruby_php_extension

pull/12496/head
ZhouyihaiDing (2017)
commit 422b61b279
Changed files, with per-file change counts in parentheses:

1. .github/CODEOWNERS (1)
2. .gitignore (2)
3. CMakeLists.txt (98)
4. cmake/gRPCConfig.cmake.in (0)
5. cmake/gRPCConfigVersion.cmake.in (0)
6. doc/c-style-guide.md (14)
7. doc/environment_variables.md (1)
8. doc/service_config.md (5)
9. examples/cpp/helloworld/CMakeLists.txt (6)
10. include/grpc++/impl/codegen/call.h (9)
11. include/grpc++/server_builder.h (6)
12. include/grpc++/support/channel_arguments.h (6)
13. include/grpc/impl/codegen/grpc_types.h (13)
14. include/grpc/impl/codegen/port_platform.h (11)
15. src/compiler/OWNERS (1)
16. src/core/ext/filters/client_channel/channel_connectivity.c (10)
17. src/core/ext/filters/client_channel/client_channel.c (319)
18. src/core/ext/filters/client_channel/client_channel_factory.c (7)
19. src/core/ext/filters/client_channel/http_connect_handshaker.c (2)
20. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c (3)
21. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (367)
22. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c (6)
23. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c (8)
24. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c (3)
25. src/core/ext/filters/client_channel/lb_policy_factory.c (11)
26. src/core/ext/filters/client_channel/lb_policy_factory.h (2)
27. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c (2)
28. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c (37)
29. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c (11)
30. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c (5)
31. src/core/ext/filters/client_channel/retry_throttle.c (9)
32. src/core/ext/filters/client_channel/subchannel.c (2)
33. src/core/ext/filters/client_channel/subchannel_index.c (38)
34. src/core/ext/filters/client_channel/subchannel_index.h (7)
35. src/core/ext/filters/http/server/http_server_filter.c (6)
36. src/core/ext/filters/max_age/max_age_filter.c (2)
37. src/core/ext/transport/chttp2/client/chttp2_connector.c (2)
38. src/core/ext/transport/chttp2/server/chttp2_server.c (18)
39. src/core/ext/transport/chttp2/transport/chttp2_transport.c (272)
40. src/core/ext/transport/chttp2/transport/flow_control.c (20)
41. src/core/ext/transport/chttp2/transport/frame_settings.c (17)
42. src/core/ext/transport/chttp2/transport/frame_window_update.c (5)
43. src/core/ext/transport/chttp2/transport/incoming_metadata.c (5)
44. src/core/ext/transport/chttp2/transport/internal.h (29)
45. src/core/ext/transport/chttp2/transport/parsing.c (3)
46. src/core/ext/transport/chttp2/transport/stream_map.c (6)
47. src/core/ext/transport/chttp2/transport/writing.c (76)
48. src/core/ext/transport/inproc/inproc_transport.c (101)
49. src/core/lib/channel/channel_args.c (22)
50. src/core/lib/channel/channel_stack_builder.c (22)
51. src/core/lib/channel/connected_channel.c (25)
52. src/core/lib/channel/handshaker.c (14)
53. src/core/lib/channel/handshaker_registry.c (2)
54. src/core/lib/compression/stream_compression.c (3)
55. src/core/lib/debug/stats.c (2)
56. src/core/lib/debug/stats_data.c (217)
57. src/core/lib/debug/stats_data.h (99)
58. src/core/lib/debug/stats_data.yaml (60)
59. src/core/lib/debug/stats_data_bq_schema.sql (35)
60. src/core/lib/http/format_request.c (2)
61. src/core/lib/http/httpcli.c (13)
62. src/core/lib/http/parser.c (7)
63. src/core/lib/iomgr/closure.c (4)
64. src/core/lib/iomgr/combiner.c (6)
65. src/core/lib/iomgr/error.c (48)
66. src/core/lib/iomgr/ev_epoll1_linux.c (151)
67. src/core/lib/iomgr/ev_epollex_linux.c (45)
68. src/core/lib/iomgr/ev_poll_posix.c (18)
69. src/core/lib/iomgr/executor.c (173)
70. src/core/lib/iomgr/executor.h (7)
71. src/core/lib/iomgr/polling_entity.c (18)
72. src/core/lib/iomgr/polling_entity.h (8)
73. src/core/lib/iomgr/resolve_address_posix.c (2)
74. src/core/lib/iomgr/resolve_address_windows.c (2)
75. src/core/lib/iomgr/resource_quota.c (6)
76. src/core/lib/iomgr/socket_factory_posix.c (4)
77. src/core/lib/iomgr/socket_mutator.c (4)
78. src/core/lib/iomgr/tcp_posix.c (177)
79. src/core/lib/iomgr/timer_manager.c (2)
80. src/core/lib/iomgr/udp_server.c (2)
81. src/core/lib/iomgr/wakeup_fd_cv.c (14)
82. src/core/lib/iomgr/wakeup_fd_cv.h (4)
83. src/core/lib/security/transport/security_handshaker.c (36)
84. src/core/lib/slice/slice.c (4)
85. src/core/lib/surface/call.c (56)
86. src/core/lib/surface/channel.c (6)
87. src/core/lib/surface/completion_queue.c (33)
88. src/core/lib/surface/init.c (9)
89. src/core/lib/surface/server.c (48)
90. src/core/lib/transport/metadata_batch.c (32)
91. src/core/lib/transport/transport.c (8)
92. src/cpp/common/channel_arguments.cc (4)
93. src/python/grpcio_tests/tests/tests.json (2)
94. src/python/grpcio_tests/tests/unit/_cython/_common.py (118)
95. src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py (131)
96. src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py (126)
97. templates/CMakeLists.txt.template (30)
98. test/core/bad_client/bad_client.c (8)
99. test/core/end2end/bad_server_response_test.c (6)
100. test/core/end2end/fixtures/proxy.c (36)

Some files were not shown because too many files have changed in this diff.

@@ -3,4 +3,5 @@
# repository as the source of truth for module ownership.
/**/OWNERS @markdroth @nicolasnoble @ctiller
/bazel/** @nicolasnoble @dgquintas @ctiller
/src/compiler/cpp_generator.cc @vjpai
/src/core/ext/filters/client_channel/** @markdroth @dgquintas @ctiller

.gitignore

@@ -16,7 +16,7 @@ htmlcov/
dist/
*.egg
py27/
py34/
py3[0-9]*/
# Node installation output
node_modules

@@ -123,10 +123,8 @@ if("${gRPC_ZLIB_PROVIDER}" STREQUAL "module")
set(gRPC_INSTALL FALSE)
endif()
elseif("${gRPC_ZLIB_PROVIDER}" STREQUAL "package")
find_package(ZLIB)
if(TARGET ZLIB::ZLIB)
set(_gRPC_ZLIB_LIBRARIES ZLIB::ZLIB)
endif()
find_package(ZLIB REQUIRED)
set(_gRPC_ZLIB_LIBRARIES ${ZLIB_LIBRARIES})
set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n find_package(ZLIB)\nendif()")
endif()
@@ -134,7 +132,8 @@ if("${gRPC_CARES_PROVIDER}" STREQUAL "module")
if(NOT CARES_ROOT_DIR)
set(CARES_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/cares/cares)
endif()
set(CARES_STATIC ON)
set(CARES_SHARED OFF CACHE BOOL "disable shared library")
set(CARES_STATIC ON CACHE BOOL "link cares statically")
set(CARES_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/cares/cares")
add_subdirectory(third_party/cares/cares)
if(TARGET c-ares)
@@ -145,7 +144,7 @@ if("${gRPC_CARES_PROVIDER}" STREQUAL "module")
set(gRPC_INSTALL FALSE)
endif()
elseif("${gRPC_CARES_PROVIDER}" STREQUAL "package")
find_package(c-ares CONFIG)
find_package(c-ares REQUIRED CONFIG)
if(TARGET c-ares::cares)
set(_gRPC_CARES_LIBRARIES c-ares::cares)
endif()
@@ -179,6 +178,7 @@ if("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "module")
endif()
if(TARGET protoc)
set(_gRPC_PROTOBUF_PROTOC protoc)
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
endif()
else()
message(WARNING "gRPC_PROTOBUF_PROVIDER is \"module\" but PROTOBUF_ROOT_DIR is wrong")
@@ -188,7 +188,7 @@ if("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "module")
set(gRPC_INSTALL FALSE)
endif()
elseif("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package")
find_package(Protobuf ${gRPC_PROTOBUF_PACKAGE_TYPE})
find_package(Protobuf REQUIRED ${gRPC_PROTOBUF_PACKAGE_TYPE})
if(Protobuf_FOUND OR PROTOBUF_FOUND)
if(TARGET protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME})
set(_gRPC_PROTOBUF_LIBRARIES protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME})
@@ -202,8 +202,10 @@ elseif("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package")
endif()
if(TARGET protobuf::protoc)
set(_gRPC_PROTOBUF_PROTOC protobuf::protoc)
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protobuf::protoc>)
else()
set(_gRPC_PROTOBUF_PROTOC ${PROTOBUF_PROTOC_EXECUTABLE})
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE})
endif()
set(_gRPC_FIND_PROTOBUF "if(NOT Protobuf_FOUND AND NOT PROTOBUF_FOUND)\n find_package(Protobuf ${gRPC_PROTOBUF_PACKAGE_TYPE})\nendif()")
endif()
@@ -231,11 +233,9 @@ if("${gRPC_SSL_PROVIDER}" STREQUAL "module")
set(gRPC_INSTALL FALSE)
endif()
elseif("${gRPC_SSL_PROVIDER}" STREQUAL "package")
find_package(OpenSSL)
if(TARGET OpenSSL::SSL)
set(_gRPC_SSL_LIBRARIES OpenSSL::SSL)
endif()
set(_gRPC_FIND_SSL "if(NOT OpenSSL_FOUND)\n find_package(OpenSSL)\nendif()")
find_package(OpenSSL REQUIRED)
set(_gRPC_SSL_LIBRARIES ${OPENSSL_LIBRARIES})
set(_gRPC_FIND_SSL "if(NOT OPENSSL_FOUND)\n find_package(OpenSSL)\nendif()")
endif()
if("${gRPC_GFLAGS_PROVIDER}" STREQUAL "module")
@@ -328,7 +328,7 @@ function(protobuf_generate_grpc_cpp)
"${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}_mock.grpc.pb.h"
"${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.pb.cc"
"${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.pb.h"
COMMAND $<TARGET_FILE:${_gRPC_PROTOBUF_PROTOC}>
COMMAND ${_gRPC_PROTOBUF_PROTOC_EXECUTABLE}
ARGS --grpc_out=generate_mock_code=true:${_gRPC_PROTO_GENS_DIR}
--cpp_out=${_gRPC_PROTO_GENS_DIR}
--plugin=protoc-gen-grpc=$<TARGET_FILE:grpc_cpp_plugin>
@@ -829,7 +829,7 @@ endif()
target_include_directories(gpr
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -921,7 +921,7 @@ endif()
target_include_directories(gpr_test_util
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -1216,7 +1216,7 @@ endif()
target_include_directories(grpc
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -1522,7 +1522,7 @@ endif()
target_include_directories(grpc_cronet
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -1800,7 +1800,7 @@ endif()
target_include_directories(grpc_test_util
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -2060,7 +2060,7 @@ endif()
target_include_directories(grpc_test_util_unsecure
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -2354,7 +2354,7 @@ endif()
target_include_directories(grpc_unsecure
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -2443,7 +2443,7 @@ endif()
target_include_directories(reconnect_server
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -2485,7 +2485,7 @@ endif()
target_include_directories(test_tcp_server
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -2566,7 +2566,7 @@ endif()
target_include_directories(grpc++
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -2766,7 +2766,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(grpc++_core_stats
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -3057,7 +3057,7 @@ endif()
target_include_directories(grpc++_cronet
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -3256,7 +3256,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(grpc++_error_details
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -3321,7 +3321,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(grpc++_proto_reflection_desc_db
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -3382,7 +3382,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(grpc++_reflection
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -3440,7 +3440,7 @@ endif()
target_include_directories(grpc++_test_config
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -3518,7 +3518,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(grpc++_test_util
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -3656,7 +3656,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(grpc++_test_util_unsecure
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -3796,7 +3796,7 @@ endif()
target_include_directories(grpc++_unsecure
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -3986,7 +3986,7 @@ endif()
target_include_directories(grpc_benchmark
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4045,7 +4045,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(grpc_cli_libs
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4105,7 +4105,7 @@ endif()
target_include_directories(grpc_plugin_support
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4183,7 +4183,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(http2_client_main
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4238,7 +4238,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(interop_client_helper
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4308,7 +4308,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(interop_client_main
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4359,7 +4359,7 @@ endif()
target_include_directories(interop_server_helper
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4428,7 +4428,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(interop_server_lib
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4479,7 +4479,7 @@ endif()
target_include_directories(interop_server_main
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4567,7 +4567,7 @@ protobuf_generate_grpc_cpp(
)
target_include_directories(qps
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4614,7 +4614,7 @@ endif()
target_include_directories(grpc_csharp_ext
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4709,7 +4709,7 @@ endif()
target_include_directories(ares
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4747,7 +4747,7 @@ endif()
target_include_directories(bad_client_test
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4788,7 +4788,7 @@ endif()
target_include_directories(bad_ssl_test_server
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4889,7 +4889,7 @@ endif()
target_include_directories(end2end_tests
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -4990,7 +4990,7 @@ endif()
target_include_directories(end2end_nosec_tests
PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
@@ -14520,7 +14520,7 @@ if (gRPC_INSTALL)
endif()
foreach(_config gRPCConfig gRPCConfigVersion)
configure_file(tools/cmake/${_config}.cmake.in
configure_file(cmake/${_config}.cmake.in
${_config}.cmake @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${_config}.cmake
DESTINATION ${gRPC_INSTALL_CMAKEDIR}

@@ -32,14 +32,14 @@ Header Files
# endif
```
- Header files should be self-contained and end in .h.
- All header files should have a #define guard to prevent multiple inclusion.
- All header files should have a `#define` guard to prevent multiple inclusion.
To guarantee uniqueness they should be based on the file's path.
For public headers: `include/grpc/grpc.h` → `GRPC_GRPC_H`
For private headers:
`src/core/channel/channel_stack.h` →
`GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H`
`src/core/lib/channel/channel_stack.h` →
`GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H`
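For illustration, a guard following the new convention for the private header above might look like this (a sketch of the pattern, not the file's actual contents):

```
/* src/core/lib/channel/channel_stack.h */
#ifndef GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H
#define GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H

/* declarations ... */

#endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H */
```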
Variable Initialization
-----------------------
@@ -72,8 +72,16 @@ Symbol Names
- Non-static functions must be prefixed by `grpc_`
- Static functions must *not* be prefixed by `grpc_`
- Typenames of `struct`s, `union`s, and `enum`s must be prefixed by `grpc_` if
they are declared in a header file. They must not be prefixed by `grpc_` if
they are declared in a source file.
- Enumeration values and `#define` names must be uppercase. All other values
must be lowercase.
- Enumeration values or `#define` names defined in a header file must be
prefixed with `GRPC_` (except for `#define` macros that are being used to
substitute functions; those should follow the general rules for
functions). Enumeration values or `#define`s defined in source files must not
be prefixed with `GRPC_`.
- Multiple word identifiers use underscore as a delimiter, *never* camel
case. E.g. `variable_name`.
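Taken together, a minimal sketch of these naming rules (every identifier below is hypothetical):

```
/* In a header file: */
typedef struct grpc_widget grpc_widget;      /* grpc_ prefix in headers */
#define GRPC_WIDGET_MAX_SIZE 16              /* GRPC_ prefix, uppercase */
grpc_widget *grpc_widget_create(void);       /* non-static: grpc_ prefix */

/* In a source file: */
#define WIDGET_DEFAULT_SIZE 4                /* no GRPC_ prefix in .c files */
static void widget_destroy(grpc_widget *w);  /* static: no grpc_ prefix */
```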

@@ -48,6 +48,7 @@ some configuration as environment variables that can be set.
- compression - traces compression operations
- connectivity_state - traces connectivity state changes to channels
- channel_stack_builder - traces information about channel stacks being built
- executor - traces grpc's internal thread pool ('the executor')
- http - traces state in the http2 transport engine
- http1 - traces HTTP/1.x operations performed by gRPC
- inproc - traces the in-process transport
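As a quick illustration, tracers can also be enabled programmatically by setting the variable before gRPC initializes; a sketch, assuming a POSIX `setenv` and that `GRPC_TRACE`/`GRPC_VERBOSITY` are read during `grpc_init()`:

```
#include <stdlib.h>

#include <grpc/grpc.h>

int main() {
  // Enable the executor and http tracers for this process (an arbitrary choice).
  setenv("GRPC_TRACE", "executor,http", 1 /* overwrite */);
  setenv("GRPC_VERBOSITY", "debug", 1);  // tracer output is logged at debug level
  grpc_init();
  // ... use gRPC ...
  grpc_shutdown();
  return 0;
}
```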

@@ -24,10 +24,7 @@ The service config is a JSON string of the following form:
// opposed to backend addresses), gRPC will use grpclb (see
// https://github.com/grpc/grpc/blob/master/doc/load-balancing.md),
// regardless of what LB policy is requested either here or via the
// client API. However, if the resolver returns at least one backend
// address in addition to the balancer address(es), the client may fall
// back to the requested policy if it is unable to reach any of the
// grpclb load balancers.
// client API.
'loadBalancingPolicy': string,
// Per-method configuration. Optional.

@@ -2,7 +2,11 @@
cmake_minimum_required(VERSION 2.8)
# Project
project(HelloWorld CXX)
project(HelloWorld C CXX)
if(NOT MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
endif()
# Protobuf
set(protobuf_MODULE_COMPATIBLE TRUE)

@@ -169,6 +169,15 @@ class WriteOptions {
return *this;
}
/// Guarantee that all bytes have been written to the wire before completing
/// this write (usually writes are completed when they pass flow control)
inline WriteOptions& set_write_through() {
SetBit(GRPC_WRITE_THROUGH);
return *this;
}
inline bool is_write_through() const { return GetBit(GRPC_WRITE_THROUGH); }
/// Get value for the flag indicating that this is the last message, and
/// should be coalesced with trailing metadata.
///
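A hypothetical use of the new write-through option on a synchronous client stream (`stream` and `request` are placeholders):

```
grpc::WriteOptions options;
options.set_write_through();      // complete the write only once bytes reach the wire
stream->Write(request, options);  // stream: e.g. a grpc::ClientWriter<Request>
```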

@@ -136,8 +136,10 @@ class ServerBuilder {
/// It can be invoked multiple times.
///
/// \param addr_uri The address to try to bind to the server in URI form. If
/// the scheme name is omitted, "dns:///" is assumed. Valid values include
/// dns:///localhost:1234, / 192.168.1.1:31416, dns:///[::1]:27182, etc.).
/// the scheme name is omitted, "dns:///" is assumed. To bind to any address,
/// please use IPv6 any, i.e., [::]:<port>, which also accepts IPv4
/// connections. Valid values include dns:///localhost:1234, /
/// 192.168.1.1:31416, dns:///[::1]:27182, etc.).
/// \params creds The credentials associated with the server.
/// \param selected_port[out] If not `nullptr`, gets populated with the port
/// number bound to the \a grpc::Server for the corresponding endpoint after
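Following the new guidance, a server that accepts both IPv4 and IPv6 connections might be built like this (a sketch; `my_service` is a placeholder for any generated service implementation):

```
#include <memory>

#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

std::unique_ptr<grpc::Server> StartServer(grpc::Service* my_service,
                                          int* selected_port) {
  grpc::ServerBuilder builder;
  // "[::]:0" binds the IPv6 wildcard (which also accepts IPv4 on most
  // platforms) and asks the OS for a free port, reported via selected_port.
  builder.AddListeningPort("[::]:0", grpc::InsecureServerCredentials(),
                           selected_port);
  builder.RegisterService(my_service);
  return builder.BuildAndStart();
}
```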

@@ -64,6 +64,12 @@ class ChannelArguments {
/// Set the compression algorithm for the channel.
void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
/// Set the grpclb fallback timeout (in ms) for the channel. If this amount
/// of time has passed but we have not gotten any non-empty \a serverlist from
/// the balancer, we will fall back to use the backend address(es) returned by
/// the resolver.
void SetGrpclbFallbackTimeout(int fallback_timeout);
/// Set the socket mutator for the channel.
void SetSocketMutator(grpc_socket_mutator* mutator);
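A sketch of configuring the new fallback timeout on a C++ channel (the target string is a placeholder):

```
#include <grpc++/create_channel.h>
#include <grpc++/security/credentials.h>
#include <grpc++/support/channel_arguments.h>

std::shared_ptr<grpc::Channel> MakeChannel() {
  grpc::ChannelArguments args;
  args.SetGrpclbFallbackTimeout(10000);  // fall back after 10s with no serverlist
  return grpc::CreateCustomChannel("dns:///my-service.example.com:443",
                                   grpc::InsecureChannelCredentials(), args);
}
```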

@@ -287,7 +287,11 @@ typedef struct {
"grpc.experimental.tcp_max_read_chunk_size"
/* Timeout in milliseconds to use for calls to the grpclb load balancer.
If 0 or unset, the balancer calls will have no deadline. */
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_timeout_ms"
#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_call_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the grpclb load
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
/** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
"grpc.workaround.cronet_compression"
@@ -355,8 +359,11 @@ typedef enum grpc_call_error {
/** Force compression to be disabled for a particular write
(start_write/add_metadata). Illegal on invoke/accept. */
#define GRPC_WRITE_NO_COMPRESS (0x00000002u)
/** Force this message to be written to the socket before completing it */
#define GRPC_WRITE_THROUGH (0x00000004u)
/** Mask of all valid flags. */
#define GRPC_WRITE_USED_MASK (GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS)
#define GRPC_WRITE_USED_MASK \
(GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_THROUGH)
/** Initial metadata flags */
/** Signal that the call is idempotent */
@@ -377,7 +384,7 @@ typedef enum grpc_call_error {
GRPC_INITIAL_METADATA_WAIT_FOR_READY | \
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST | \
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET | \
GRPC_INITIAL_METADATA_CORKED)
GRPC_INITIAL_METADATA_CORKED | GRPC_WRITE_THROUGH)
/** A single metadata element */
typedef struct grpc_metadata {

@@ -409,4 +409,15 @@ typedef unsigned __int64 uint64_t;
#define CENSUSAPI GRPCAPI
#endif
#ifndef GPR_ATTRIBUTE_NO_TSAN /* (1) */
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#define GPR_ATTRIBUTE_NO_TSAN __attribute__((no_sanitize("thread")))
#endif /* __has_feature(thread_sanitizer) */
#endif /* defined(__has_feature) */
#ifndef GPR_ATTRIBUTE_NO_TSAN /* (2) */
#define GPR_ATTRIBUTE_NO_TSAN
#endif /* GPR_ATTRIBUTE_NO_TSAN (2) */
#endif /* GPR_ATTRIBUTE_NO_TSAN (1) */
#endif /* GRPC_IMPL_CODEGEN_PORT_PLATFORM_H */
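Usage is a plain function annotation; for example (the function below is hypothetical):

```
#include <grpc/impl/codegen/port_platform.h>

// Deliberately racy fast-path read; the annotation suppresses TSAN reports
// for this function only, and expands to nothing on compilers without
// __has_feature(thread_sanitizer).
static int GPR_ATTRIBUTE_NO_TSAN racy_counter_read(const int *counter) {
  return *counter;
}
```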

@@ -0,0 +1 @@
@vjpai cpp_generator.cc

@@ -86,7 +86,7 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
grpc_cq_completion *ignored) {
int delete = 0;
bool should_delete = false;
state_watcher *w = (state_watcher *)pw;
gpr_mu_lock(&w->mu);
switch (w->phase) {
@@ -94,12 +94,12 @@ static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
case READY_TO_CALL_BACK:
GPR_UNREACHABLE_CODE(return );
case CALLING_BACK_AND_FINISHED:
delete = 1;
should_delete = true;
break;
}
gpr_mu_unlock(&w->mu);
if (delete) {
if (should_delete) {
delete_state_watcher(exec_ctx, w);
}
}
@@ -161,12 +161,12 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw,
grpc_error *error) {
partly_done(exec_ctx, pw, true, GRPC_ERROR_REF(error));
partly_done(exec_ctx, (state_watcher *)pw, true, GRPC_ERROR_REF(error));
}
static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw,
grpc_error *error) {
partly_done(exec_ctx, pw, false, GRPC_ERROR_REF(error));
partly_done(exec_ctx, (state_watcher *)pw, false, GRPC_ERROR_REF(error));
}
int grpc_channel_num_external_connectivity_watchers(grpc_channel *channel) {

@@ -85,7 +85,7 @@ static void method_parameters_unref(method_parameters *method_params) {
}
static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
method_parameters_unref(value);
method_parameters_unref((method_parameters *)value);
}
static bool parse_wait_for_ready(grpc_json *field,
@@ -717,7 +717,8 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"client channel factory arg must be a pointer");
}
grpc_client_channel_factory_ref(arg->value.pointer.p);
grpc_client_channel_factory_ref(
(grpc_client_channel_factory *)arg->value.pointer.p);
chand->client_channel_factory =
(grpc_client_channel_factory *)arg->value.pointer.p;
// Get server name to resolve, using proxy mapper if needed.
@@ -1016,13 +1017,11 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
// Invoked when a pick is completed, on both success or failure.
static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_error *error) {
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
if (calld->connected_subchannel == NULL) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
@@ -1044,12 +1043,116 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
/** Return true if subchannel is available immediately (in which case
subchannel_ready_locked() should not be called), or false otherwise (in
which case subchannel_ready_locked() should be called when the subchannel
is available). */
static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem);
// A wrapper around pick_done_locked() that is used in cases where
// either (a) the pick was deferred pending a resolver result or (b) the
// pick was done asynchronously. Removes the call's polling entity from
// chand->interested_parties before invoking pick_done_locked().
static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem, grpc_error *error) {
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
pick_done_locked(exec_ctx, elem, error);
}
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (calld->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, calld->lb_policy);
}
grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
&calld->connected_subchannel,
GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
}
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
// Unrefs the LB policy and invokes async_pick_done_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
GPR_ASSERT(calld->lb_policy != NULL);
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
// If the pick was completed synchronously, unrefs the LB policy and
// returns true.
static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
}
apply_service_config_to_call_locked(exec_ctx, elem);
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
uint32_t initial_metadata_flags =
calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata_flags;
const bool wait_for_ready_set_from_api =
initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
const bool wait_for_ready_set_from_service_config =
calld->method_params != NULL &&
calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
}
const grpc_lb_policy_pick_args inputs = {
calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata,
initial_metadata_flags, &calld->lb_token_mdelem};
// Keep a ref to the LB policy in calld while the pick is pending.
GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
calld->lb_policy = chand->lb_policy;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
const bool pick_done = grpc_lb_policy_pick_locked(
exec_ctx, chand->lb_policy, &inputs, &calld->connected_subchannel,
calld->subchannel_call_context, NULL, &calld->lb_pick_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
} else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
exec_ctx, calld->call_combiner,
GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
pick_callback_cancel_locked, elem,
grpc_combiner_scheduler(chand->combiner)));
}
return pick_done;
}
typedef struct {
grpc_call_element *elem;
@@ -1069,17 +1172,17 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
gpr_free(args);
return;
}
args->finished = true;
grpc_call_element *elem = args->elem;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
// If we don't yet have a resolver result, then a closure for
// pick_after_resolver_result_done_locked() will have been added to
// chand->waiting_for_resolver_result_closures, and it may not be invoked
// until after this call has been destroyed. We mark the operation as
// finished, so that when pick_after_resolver_result_done_locked()
// is called, it will be a no-op. We also immediately invoke
// subchannel_ready_locked() to propagate the error back to the caller.
// async_pick_done_locked() to propagate the error back to the caller.
args->finished = true;
grpc_call_element *elem = args->elem;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
@@ -1087,12 +1190,12 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
}
// Note: Although we are not in the call combiner here, we are
// basically stealing the call combiner from the pending pick, so
// it's safe to call subchannel_ready_locked() here -- we are
// it's safe to call async_pick_done_locked() here -- we are
// essentially calling it here instead of calling it in
// pick_after_resolver_result_done_locked().
subchannel_ready_locked(exec_ctx, elem,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
async_pick_done_locked(exec_ctx, elem,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
}
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
@@ -1117,14 +1220,19 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
chand, calld);
}
subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
} else {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
}
if (pick_subchannel_locked(exec_ctx, elem)) {
subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_NONE);
if (pick_callback_start_locked(exec_ctx, elem)) {
// Even if the LB policy returns a result synchronously, we have
// already added our polling entity to chand->interested_parties
// in order to wait for the resolver result, so we need to
// remove it here. Therefore, we call async_pick_done_locked()
// instead of pick_done_locked().
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
}
}
@@ -1152,154 +1260,38 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_combiner_scheduler(chand->combiner)));
}
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE && calld->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, calld->lb_policy);
}
grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
&calld->connected_subchannel,
GRPC_ERROR_REF(error));
}
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
}
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
// Unrefs the LB policy and invokes subchannel_ready_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *ignored) {
grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
GPR_ASSERT(calld->lb_policy != NULL);
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
// If the pick was completed synchronously, unrefs the LB policy and
// returns true.
static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_lb_policy_pick_args *inputs) {
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
}
// Keep a ref to the LB policy in calld while the pick is pending.
GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
calld->lb_policy = chand->lb_policy;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
const bool pick_done = grpc_lb_policy_pick_locked(
exec_ctx, chand->lb_policy, inputs, &calld->connected_subchannel,
calld->subchannel_call_context, NULL, &calld->lb_pick_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
GPR_ASSERT(calld->connected_subchannel == NULL);
if (chand->lb_policy != NULL) {
// We already have an LB policy, so ask it for a pick.
if (pick_callback_start_locked(exec_ctx, elem)) {
// Pick completed synchronously.
pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
return;
}
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
} else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
exec_ctx, calld->call_combiner,
GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
pick_callback_cancel_locked, elem,
grpc_combiner_scheduler(chand->combiner)));
}
return pick_done;
}
static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
GPR_TIMER_BEGIN("pick_subchannel", 0);
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
bool pick_done = false;
if (chand->lb_policy != NULL) {
apply_service_config_to_call_locked(exec_ctx, elem);
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
uint32_t initial_metadata_flags =
calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata_flags;
const bool wait_for_ready_set_from_api =
initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
const bool wait_for_ready_set_from_service_config =
calld->method_params != NULL &&
calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
if (!wait_for_ready_set_from_api &&
wait_for_ready_set_from_service_config) {
if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
} else {
initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
}
// We do not yet have an LB policy, so wait for a resolver result.
if (chand->resolver == NULL) {
pick_done_locked(exec_ctx, elem,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
return;
}
const grpc_lb_policy_pick_args inputs = {
calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata,
initial_metadata_flags, &calld->lb_token_mdelem};
pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
} else if (chand->resolver != NULL) {
if (!chand->started_resolving) {
start_resolving_locked(exec_ctx, chand);
}
pick_after_resolver_result_start_locked(exec_ctx, elem);
} else {
subchannel_ready_locked(
exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
}
GPR_TIMER_END("pick_subchannel", 0);
return pick_done;
}
static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("start_pick_locked", 0);
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(calld->connected_subchannel == NULL);
if (pick_subchannel_locked(exec_ctx, elem)) {
// Pick was returned synchronously.
if (calld->connected_subchannel == NULL) {
GRPC_ERROR_UNREF(calld->error);
calld->error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy");
waiting_for_pick_batches_fail(exec_ctx, elem,
GRPC_ERROR_REF(calld->error));
} else {
// Create subchannel call.
create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
} else {
// Pick will be done asynchronously. Add the call's polling entity to
// the channel's interested_parties, so that I/O for the resolver
// and LB policy can be done under it.
grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
}
GPR_TIMER_END("start_pick_locked", 0);
// We need to wait for either a resolver result or for an async result
// from the LB policy. Add the polling entity from call_data to the
// channel_data's interested_parties, so that the I/O of the LB policy
// and resolver can be done under it. The polling entity will be
// removed in async_pick_done_locked().
grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -1394,7 +1386,8 @@ static void cc_start_transport_stream_op_batch(
// combiner to start a pick.
if (batch->send_initial_metadata) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
chand, calld);
}
GRPC_CLOSURE_SCHED(
exec_ctx,

@@ -43,14 +43,13 @@ grpc_channel* grpc_client_channel_factory_create_channel(
}
static void* factory_arg_copy(void* factory) {
grpc_client_channel_factory_ref(factory);
grpc_client_channel_factory_ref((grpc_client_channel_factory*)factory);
return factory;
}
static void factory_arg_destroy(grpc_exec_ctx* exec_ctx, void* factory) {
// TODO(roth): Remove local exec_ctx when
// https://github.com/grpc/grpc/pull/8705 is merged.
grpc_client_channel_factory_unref(exec_ctx, factory);
grpc_client_channel_factory_unref(exec_ctx,
(grpc_client_channel_factory*)factory);
}
static int factory_arg_cmp(void* factory1, void* factory2) {

@@ -309,7 +309,7 @@ static void http_connect_handshaker_do_handshake(
grpc_httpcli_request request;
memset(&request, 0, sizeof(request));
request.host = server_name;
request.http.method = "CONNECT";
request.http.method = (char*)"CONNECT";
request.http.path = server_name;
request.http.hdrs = headers;
request.http.hdr_count = num_headers;

@@ -75,7 +75,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
calld->client_stats = grpc_grpclb_client_stats_ref(
args->context[GRPC_GRPCLB_CLIENT_STATS].value);
(grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
.value);
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
return GRPC_ERROR_NONE;

@@ -101,6 +101,7 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -122,6 +123,7 @@
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
@@ -137,7 +139,7 @@ static grpc_error *initial_metadata_add_lb_token(
}
static void destroy_client_stats(void *arg) {
grpc_grpclb_client_stats_unref(arg);
grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
}
typedef struct wrapped_rr_closure_arg {
@@ -285,7 +287,7 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
* glb_lb_policy
*/
typedef struct rr_connectivity_data rr_connectivity_data;
static const grpc_lb_policy_vtable glb_lb_policy_vtable;
typedef struct glb_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
@@ -298,6 +300,10 @@ typedef struct glb_lb_policy {
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
/** timeout in milliseconds before using fallback backend addresses.
* 0 means not using fallback. */
int lb_fallback_timeout_ms;
/** for communicating with the LB server */
grpc_channel *lb_channel;
@@ -324,6 +330,9 @@ typedef struct glb_lb_policy {
* Otherwise, we delegate to the RR policy. */
size_t serverlist_index;
/** stores the backend addresses from the resolver */
grpc_lb_addresses *fallback_backend_addresses;
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
@@ -344,6 +353,9 @@ typedef struct glb_lb_policy {
/** is \a lb_call_retry_timer active? */
bool retry_timer_active;
/** is \a lb_fallback_timer active? */
bool fallback_timer_active;
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
@@ -366,6 +378,9 @@ typedef struct glb_lb_policy {
/* LB call retry timer callback. */
grpc_closure lb_on_call_retry;
/* LB fallback timer callback. */
grpc_closure lb_on_fallback;
grpc_call *lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
@@ -389,6 +404,9 @@ typedef struct glb_lb_policy {
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
/** LB fallback timer */
grpc_timer lb_fallback_timer;
bool initial_request_sent;
bool seen_initial_response;
@@ -535,6 +553,32 @@ static grpc_lb_addresses *process_serverlist_locked(
return lb_addresses;
}
/* Returns the backend addresses extracted from the given addresses */
static grpc_lb_addresses *extract_backend_addresses_locked(
grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
/* first pass: count the number of backend addresses */
size_t num_backends = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (!addresses->addresses[i].is_balancer) {
++num_backends;
}
}
/* second pass: actually populate the addresses and (empty) LB tokens */
grpc_lb_addresses *backend_addresses =
grpc_lb_addresses_create(num_backends, &lb_token_vtable);
size_t num_copied = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) continue;
const grpc_resolved_address *addr = &addresses->addresses[i].address;
grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
addr->len, false /* is_balancer */,
NULL /* balancer_name */,
(void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
++num_copied;
}
return backend_addresses;
}
static void update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
@@ -602,35 +646,38 @@ static bool pick_from_internal_rr_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
const grpc_lb_policy_pick_args *pick_args, bool force_async,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server *server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
}
if (server->drop) {
// Not using the RR policy, so unref it.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
// Check for drops if we are not using fallback backend addresses.
if (glb_policy->serverlist != NULL) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server *server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token,
wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
if (server->drop) {
// Not using the RR policy, so unref it.
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
// Update client load reporting stats to indicate the number of
// dropped calls. Note that we have to do this here instead of in
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
grpc_grpclb_client_stats_add_call_dropped_locked(
server->load_balance_token, wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
}
gpr_free(wc_arg->free_when_done);
return true;
}
}
// Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
@ -668,8 +715,18 @@ static bool pick_from_internal_rr_locked(
static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
grpc_lb_addresses *addresses;
if (glb_policy->serverlist != NULL) {
GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
} else {
// If rr_handover_locked() is invoked when we haven't received any
// serverlist from the balancer, we use the fallback backends returned by
// the resolver. Note that the fallback backend list may be empty, in which
// case the new round_robin policy will keep the requested picks pending.
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
GPR_ASSERT(addresses != NULL);
grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
@ -727,7 +784,7 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* Allocate the data for the tracking of the new RR policy's connectivity.
* It'll be deallocated in glb_rr_connectivity_changed() */
rr_connectivity_data *rr_connectivity =
(rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
glb_rr_connectivity_changed_locked, rr_connectivity,
grpc_combiner_scheduler(glb_policy->base.combiner));
@ -775,8 +832,6 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
if (glb_policy->shutting_down) return;
grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
GPR_ASSERT(args != NULL);
@ -869,7 +924,8 @@ static grpc_channel_args *build_lb_channel_args(
grpc_lb_addresses *lb_addresses =
grpc_lb_addresses_create(num_grpclb_addrs, NULL);
grpc_slice_hash_table_entry *targets_info_entries =
(grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
num_grpclb_addrs);
size_t lb_addresses_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@ -911,92 +967,6 @@ static grpc_channel_args *build_lb_channel_args(
return result;
}
static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
GPR_ASSERT(glb_policy->pending_picks == NULL);
@ -1010,7 +980,11 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
if (glb_policy->fallback_backend_addresses != NULL) {
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
}
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
grpc_subchannel_index_unref();
if (glb_policy->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
@ -1150,10 +1124,28 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy);
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
glb_policy->serverlist == NULL) {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec deadline = gpr_time_add(
now,
gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
glb_policy->fallback_timer_active = true;
grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
&glb_policy->lb_on_fallback, now);
}
glb_policy->started_picking = true;
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
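/* Editor's note: a hedged sketch of the deadline arithmetic used when arming
 * the fallback timer above (now + timeout_ms on a monotonic clock). POSIX
 * clock_gettime stands in for gpr_now(GPR_CLOCK_MONOTONIC); the timer
 * machinery itself is omitted and deadline_from_millis is hypothetical. */
#include <stdio.h>
#include <time.h>

/* Add a millisecond offset to a timespec, normalizing the nanosecond field. */
static struct timespec deadline_from_millis(struct timespec now, long ms) {
  struct timespec d = now;
  d.tv_sec += ms / 1000;
  d.tv_nsec += (ms % 1000) * 1000000L;
  if (d.tv_nsec >= 1000000000L) {
    d.tv_sec += 1;
    d.tv_nsec -= 1000000000L;
  }
  return d;
}

int main(void) {
  struct timespec now;
  clock_gettime(CLOCK_MONOTONIC, &now);
  struct timespec deadline = deadline_from_millis(now, 10000 /* 10s */);
  printf("deadline is %ld.%09ld\n", (long)deadline.tv_sec, deadline.tv_nsec);
  return 0;
}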
@ -1303,7 +1295,8 @@ static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
grpc_grpclb_dropped_call_counts *drop_entries =
(grpc_grpclb_dropped_call_counts *)
request->client_stats.calls_finished_with_drop.arg;
return request->client_stats.num_calls_started == 0 &&
request->client_stats.num_calls_finished == 0 &&
request->client_stats.num_calls_finished_with_client_failed_to_send ==
@ -1607,6 +1600,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
} else {
/* or dispose of the fallback */
grpc_lb_addresses_destroy(exec_ctx,
glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses = NULL;
if (glb_policy->fallback_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
glb_policy->fallback_timer_active = false;
}
}
/* and update the copy in the glb_lb_policy instance. This
* serverlist instance will be destroyed either upon the next
@ -1617,9 +1619,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
} else {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Received empty server list, ignoring.");
}
grpc_grpclb_destroy_serverlist(serverlist);
}
@ -1666,6 +1666,26 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
}
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't do anything. */
if (glb_policy->serverlist != NULL) return;
glb_policy->fallback_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Falling back to use backends from resolver (grpclb %p)",
(void *)glb_policy);
}
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
rr_handover_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_fallback_timer");
}
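/* Editor's note: a minimal sketch of the guard pattern in
 * lb_on_fallback_timer_locked above: a timer callback may run after the event
 * it was guarding against has already happened (here, a serverlist arriving),
 * so it must re-check state before acting. All names below are hypothetical
 * stand-ins, not the real gRPC types. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool have_serverlist; /* set when the balancer responds */
  bool fallback_timer_active;
  bool shutting_down;
} policy_state;

static void on_fallback_timer(policy_state *p, bool cancelled) {
  /* The serverlist may have arrived between the timer firing and this
   * callback running; in that case the fallback must not be applied. */
  if (p->have_serverlist) return;
  p->fallback_timer_active = false;
  if (!p->shutting_down && !cancelled) {
    printf("falling back to resolver-provided backends\n");
  }
}

int main(void) {
  policy_state p = {false, true, false};
  on_fallback_timer(&p, false); /* applies the fallback */
  p.have_serverlist = true;
  on_fallback_timer(&p, false); /* no-op: the serverlist won the race */
  return 0;
}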
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
@ -1786,6 +1806,17 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
&glb_policy->lb_channel_connectivity,
&glb_policy->lb_channel_on_connectivity_changed, NULL);
}
// Propagate update to fallback_backend_addresses if a non-empty serverlist
// hasn't been received from the balancer.
if (glb_policy->serverlist == NULL) {
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
if (glb_policy->rr_policy != NULL) {
rr_handover_locked(exec_ctx, glb_policy);
}
}
}
// Invoked as part of the update process. It continues watching the LB channel
@ -1865,6 +1896,94 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
glb_notify_on_state_change_locked,
glb_update_locked};
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
/* Count the number of gRPC-LB addresses. There must be at least one. */
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
return NULL;
}
grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
if (num_grpclb_addrs == 0) return NULL;
glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
GPR_ASSERT(arg != NULL);
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
glb_policy->server_name =
gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
glb_policy->server_name);
}
grpc_uri_destroy(uri);
glb_policy->cc_factory = args->client_channel_factory;
GPR_ASSERT(glb_policy->cc_factory != NULL);
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
glb_policy->lb_call_timeout_ms =
grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0,
INT_MAX});
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
grpc_arg new_arg =
grpc_channel_arg_string_create(GRPC_ARG_LB_POLICY_NAME, "grpclb");
static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
/* Extract the backend addresses (may be empty) from the resolver for
* fallback. */
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
/* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator =
grpc_fake_resolver_response_generator_create();
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
char *uri_str;
gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
/* Propagate initial resolution */
grpc_fake_resolver_response_generator_set_response(
exec_ctx, glb_policy->response_generator, lb_channel_args);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
gpr_free(uri_str);
if (glb_policy->lb_channel == NULL) {
gpr_free((void *)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
gpr_free(glb_policy);
return NULL;
}
grpc_subchannel_index_ref();
GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
glb_lb_channel_on_connectivity_changed_cb, glb_policy,
grpc_combiner_scheduler(args->combiner));
grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
"grpclb");
return &glb_policy->base;
}
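/* Editor's note: a sketch of the bounded-integer option parsing glb_create
 * performs via grpc_channel_arg_get_integer above: a missing value falls back
 * to a default, and out-of-range values are coerced into [min, max]. The
 * int_options/get_integer_option names are hypothetical stand-ins; the real
 * helper's out-of-range handling (log and return default) may differ from the
 * clamping shown here. */
#include <limits.h>
#include <stdio.h>

typedef struct {
  int default_value;
  int min_value;
  int max_value;
} int_options;

/* present == 0 models a channel arg that was not set at all. */
static int get_integer_option(int present, int value, int_options opts) {
  if (!present) return opts.default_value;
  if (value < opts.min_value) return opts.min_value;
  if (value > opts.max_value) return opts.max_value;
  return value;
}

int main(void) {
  int_options fallback_ms = {10000 /* default */, 0, INT_MAX};
  printf("%d\n", get_integer_option(0, 0, fallback_ms));     /* 10000 */
  printf("%d\n", get_integer_option(1, 30000, fallback_ms)); /* 30000 */
  printf("%d\n", get_integer_option(1, -5, fallback_ms));    /* 0 (clamped) */
  return 0;
}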
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
static void glb_factory_unref(grpc_lb_policy_factory *factory) {}

@ -148,7 +148,8 @@ grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
if (request->has_client_stats) {
grpc_grpclb_dropped_call_counts *drop_entries =
(grpc_grpclb_dropped_call_counts *)
request->client_stats.calls_finished_with_drop.arg;
grpc_grpclb_dropped_call_counts_destroy(drop_entries);
}
gpr_free(request);
@ -170,7 +171,8 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
if (!res.has_initial_response) return NULL;
grpc_grpclb_initial_response *initial_res =
(grpc_grpclb_initial_response *)gpr_malloc(
sizeof(grpc_grpclb_initial_response));
memcpy(initial_res, &res.initial_response,
sizeof(grpc_grpclb_initial_response));

@ -89,6 +89,7 @@ static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
"picked_first_destroy");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
grpc_subchannel_index_unref();
if (p->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
gpr_free(p->pending_update_args);
@ -330,8 +331,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
(void *)p, (unsigned long)addresses->num_addresses);
}
grpc_subchannel_args *sc_args = (grpc_subchannel_args *)gpr_zalloc(
sizeof(*sc_args) * addresses->num_addresses);
/* We remove the following keys in order for subchannel keys belonging to
* subchannels that point to the same address to match. */
static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
@ -403,7 +404,7 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
/* Create the subchannels for the new subchannel args/addresses. */
grpc_subchannel **new_subchannels =
(grpc_subchannel **)gpr_zalloc(sizeof(*new_subchannels) * sc_args_count);
size_t num_new_subchannels = 0;
for (size_t i = 0; i < sc_args_count; i++) {
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
@ -686,6 +687,7 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
}
pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p,
grpc_combiner_scheduler(args->combiner));
return &p->base;

@ -30,6 +30,7 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/combiner.h"
@ -310,6 +311,7 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
(void *)pol, (void *)pol);
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
grpc_subchannel_index_unref();
gpr_free(p);
}
@ -890,6 +892,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(args->client_channel_factory != NULL);
round_robin_lb_policy *p = (round_robin_lb_policy *)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
rr_update_locked(exec_ctx, &p->base, args);

@ -56,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
}
void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
const void* address, size_t address_len,
bool is_balancer, const char* balancer_name,
void* user_data) {
GPR_ASSERT(index < addresses->num_addresses);
@ -126,13 +126,14 @@ void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx,
}
static void* lb_addresses_copy(void* addresses) {
return grpc_lb_addresses_copy((grpc_lb_addresses*)addresses);
}
static void lb_addresses_destroy(grpc_exec_ctx* exec_ctx, void* addresses) {
grpc_lb_addresses_destroy(exec_ctx, (grpc_lb_addresses*)addresses);
}
static int lb_addresses_cmp(void* addresses1, void* addresses2) {
return grpc_lb_addresses_cmp((grpc_lb_addresses*)addresses1,
(grpc_lb_addresses*)addresses2);
}
static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
lb_addresses_copy, lb_addresses_destroy, lb_addresses_cmp};
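/* Editor's note: the casts added to the thunks above exist because these
 * functions must match a void*-based vtable signature while operating on a
 * concrete type. A self-contained sketch of the same pattern, with
 * hypothetical names (widget, pointer_vtable) and no gRPC types: */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  void *(*copy)(void *p);
  void (*destroy)(void *p);
  int (*cmp)(void *a, void *b);
} pointer_vtable;

typedef struct { char name[32]; } widget;

/* Each thunk immediately casts the erased pointer back to the real type,
 * keeping the unsafe conversion in exactly one place per operation. */
static void *widget_copy(void *p) {
  widget *w = malloc(sizeof(widget));
  memcpy(w, (widget *)p, sizeof(widget));
  return w;
}
static void widget_destroy(void *p) { free((widget *)p); }
static int widget_cmp(void *a, void *b) {
  return strcmp(((widget *)a)->name, ((widget *)b)->name);
}

static const pointer_vtable widget_vtable = {widget_copy, widget_destroy,
                                             widget_cmp};

int main(void) {
  widget w = {"lb_addresses"};
  void *copy = widget_vtable.copy(&w);
  printf("cmp=%d\n", widget_vtable.cmp(&w, copy)); /* cmp=0 */
  widget_vtable.destroy(copy);
  return 0;
}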
@ -149,7 +150,7 @@ grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
grpc_channel_args_find(channel_args, GRPC_ARG_LB_ADDRESSES);
if (lb_addresses_arg == NULL || lb_addresses_arg->type != GRPC_ARG_POINTER)
return NULL;
return (grpc_lb_addresses*)lb_addresses_arg->value.pointer.p;
}
void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {

@ -73,7 +73,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
const void *address, size_t address_len,
bool is_balancer, const char *balancer_name,
void *user_data);

@ -204,7 +204,7 @@ static char *choose_service_config(char *service_config_choice_json) {
int random_pct = rand() % 100;
int percentage;
if (sscanf(field->value, "%d", &percentage) != 1 ||
random_pct > percentage || percentage == 0) {
service_config_json = NULL;
break;
}
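/* Editor's note: a sketch of why the "|| percentage == 0" guard above is
 * needed. rand() % 100 yields 0..99, so with percentage == 0 the old test
 * (random_pct > percentage) still admits random_pct == 0, i.e. a 0% choice
 * would be selected about 1% of the time. Hypothetical, self-contained: */
#include <stdbool.h>
#include <stdio.h>

static bool chosen(int random_pct, int percentage, bool fixed) {
  if (fixed) return !(random_pct > percentage || percentage == 0);
  return !(random_pct > percentage); /* old, buggy condition */
}

int main(void) {
  /* percentage == 0 should never be chosen; random_pct == 0 shows the bug. */
  printf("old: %d\n", chosen(0, 0, false)); /* old: 1 (wrongly chosen) */
  printf("new: %d\n", chosen(0, 0, true));  /* new: 0 (correctly skipped) */
  return 0;
}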

@ -38,7 +38,7 @@ typedef struct fd_node {
/** the owner of this fd node */
grpc_ares_ev_driver *ev_driver;
/** the grpc_fd owned by this fd node */
grpc_fd *fd;
/** a closure wrapping on_readable_cb, which should be invoked when the
grpc_fd in this node becomes readable. */
grpc_closure read_closure;
@ -96,15 +96,15 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
}
static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->fd));
GPR_ASSERT(!fdn->readable_registered);
GPR_ASSERT(!fdn->writable_registered);
gpr_mu_destroy(&fdn->mu);
grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->fd);
/* c-ares library has closed the fd inside grpc_fd. This fd may be picked up
immediately by another thread, and should not be closed by the following
grpc_fd_orphan. */
grpc_fd_orphan(exec_ctx, fdn->fd, NULL, NULL, true /* already_closed */,
"c-ares query finished");
gpr_free(fdn);
}
@ -150,9 +150,8 @@ void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
ev_driver->shutting_down = true;
fd_node *fn = ev_driver->fds;
while (fn != NULL) {
grpc_fd_shutdown(exec_ctx, fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"grpc_ares_ev_driver_shutdown"));
fn = fn->next;
}
gpr_mu_unlock(&ev_driver->mu);
@ -165,7 +164,7 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
dummy_head.next = *head;
fd_node *node = &dummy_head;
while (node->next != NULL) {
if (grpc_fd_wrapped_fd(node->next->fd) == fd) {
fd_node *ret = node->next;
node->next = node->next->next;
*head = dummy_head.next;
@ -184,9 +183,9 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
fdn->readable_registered = false;
gpr_mu_unlock(&fdn->mu);
gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->fd));
if (error == GRPC_ERROR_NONE) {
ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->fd),
ARES_SOCKET_BAD);
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
@ -211,10 +210,10 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
fdn->writable_registered = false;
gpr_mu_unlock(&fdn->mu);
gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->fd));
if (error == GRPC_ERROR_NONE) {
ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD,
grpc_fd_wrapped_fd(fdn->fd));
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
// timed out. The pending lookups made on this ev_driver will be cancelled
@ -253,7 +252,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
fdn = (fd_node *)gpr_malloc(sizeof(fd_node));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
fdn->fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;
fdn->readable_registered = false;
fdn->writable_registered = false;
@ -262,8 +261,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_cb, fdn,
grpc_schedule_on_exec_ctx);
grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set, fdn->fd);
gpr_free(fd_name);
}
fdn->next = new_list;
@ -274,9 +272,8 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (ARES_GETSOCK_READABLE(socks_bitmask, i) &&
!fdn->readable_registered) {
grpc_ares_ev_driver_ref(ev_driver);
gpr_log(GPR_DEBUG, "notify read on: %d",
grpc_fd_wrapped_fd(fdn->grpc_fd));
grpc_fd_notify_on_read(exec_ctx, fdn->grpc_fd, &fdn->read_closure);
gpr_log(GPR_DEBUG, "notify read on: %d", grpc_fd_wrapped_fd(fdn->fd));
grpc_fd_notify_on_read(exec_ctx, fdn->fd, &fdn->read_closure);
fdn->readable_registered = true;
}
// Register write_closure if the socket is writable and write_closure
@ -284,9 +281,9 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (ARES_GETSOCK_WRITABLE(socks_bitmask, i) &&
!fdn->writable_registered) {
gpr_log(GPR_DEBUG, "notify write on: %d",
grpc_fd_wrapped_fd(fdn->fd));
grpc_ares_ev_driver_ref(ev_driver);
grpc_fd_notify_on_write(exec_ctx, fdn->fd, &fdn->write_closure);
fdn->writable_registered = true;
}
gpr_mu_unlock(&fdn->mu);

@ -123,8 +123,8 @@ static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
static grpc_ares_hostbyname_request *create_hostbyname_request(
grpc_ares_request *parent_request, char *host, uint16_t port,
bool is_balancer) {
grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)gpr_zalloc(
sizeof(grpc_ares_hostbyname_request));
hr->parent_request = parent_request;
hr->host = gpr_strdup(host);
hr->port = port;
@ -174,7 +174,7 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
hr->is_balancer /* is_balancer */,
hr->is_balancer ? hr->host : NULL /* balancer_name */,
NULL /* user_data */);
char output[INET6_ADDRSTRLEN];
ares_inet_ntop(AF_INET6, &addr.sin6_addr, output, INET6_ADDRSTRLEN);
@ -195,7 +195,7 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
hr->is_balancer /* is_balancer */,
hr->is_balancer ? hr->host : NULL /* balancer_name */,
NULL /* user_data */);
char output[INET_ADDRSTRLEN];
ares_inet_ntop(AF_INET, &addr.sin_addr, output, INET_ADDRSTRLEN);
@ -527,7 +527,8 @@ static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
grpc_resolve_address_ares_request *r =
(grpc_resolve_address_ares_request *)gpr_zalloc(
sizeof(grpc_resolve_address_ares_request));
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,

@ -159,7 +159,7 @@ typedef struct set_response_closure_arg {
static void set_response_closure_fn(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
set_response_closure_arg* closure_arg = (set_response_closure_arg*)arg;
grpc_fake_resolver_response_generator* generator = closure_arg->generator;
fake_resolver* r = generator->resolver;
if (r->next_results != NULL) {
@ -178,7 +178,8 @@ void grpc_fake_resolver_response_generator_set_response(
grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator,
grpc_channel_args* next_response) {
GPR_ASSERT(generator->resolver != NULL);
set_response_closure_arg* closure_arg =
(set_response_closure_arg*)gpr_zalloc(sizeof(*closure_arg));
closure_arg->generator = generator;
closure_arg->next_response = grpc_channel_args_copy(next_response);
GRPC_CLOSURE_SCHED(exec_ctx,

@ -99,7 +99,7 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
int max_milli_tokens, int milli_token_ratio,
grpc_server_retry_throttle_data* old_throttle_data) {
grpc_server_retry_throttle_data* throttle_data =
(grpc_server_retry_throttle_data*)gpr_malloc(sizeof(*throttle_data));
memset(throttle_data, 0, sizeof(*throttle_data));
gpr_ref_init(&throttle_data->refs, 1);
throttle_data->max_milli_tokens = max_milli_tokens;
@ -131,11 +131,11 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
//
static void* copy_server_name(void* key, void* unused) {
return gpr_strdup((const char*)key);
}
static long compare_server_name(void* key1, void* key2, void* unused) {
return strcmp((const char*)key1, (const char*)key2);
}
static void destroy_server_retry_throttle_data(void* value, void* unused) {
@ -177,7 +177,8 @@ grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
const char* server_name, int max_milli_tokens, int milli_token_ratio) {
gpr_mu_lock(&g_mu);
grpc_server_retry_throttle_data* throttle_data =
(grpc_server_retry_throttle_data*)gpr_avl_get(g_avl, (char*)server_name,
NULL);
if (throttle_data == NULL) {
// Entry not found. Create a new one.
throttle_data = grpc_server_retry_throttle_data_create(

@ -32,6 +32,7 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
@ -290,6 +291,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
return c;
}
GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx);
c = (grpc_subchannel *)gpr_zalloc(sizeof(*c));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);

@ -34,6 +34,8 @@ static gpr_avl g_subchannel_index;
static gpr_mu g_mu;
static gpr_refcount g_refcount;
struct grpc_subchannel_key {
grpc_subchannel_args args;
};
@ -88,24 +90,26 @@ void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
static void sck_avl_destroy(void *p, void *user_data) {
grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
grpc_subchannel_key_destroy(exec_ctx, (grpc_subchannel_key *)p);
}
static void *sck_avl_copy(void *p, void *unused) {
return subchannel_key_copy((grpc_subchannel_key *)p);
}
static long sck_avl_compare(void *a, void *b, void *unused) {
return grpc_subchannel_key_compare((grpc_subchannel_key *)a,
(grpc_subchannel_key *)b);
}
static void scv_avl_destroy(void *p, void *user_data) {
grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel *)p,
"subchannel_index");
}
static void *scv_avl_copy(void *p, void *unused) {
GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel *)p, "subchannel_index");
return p;
}
@ -119,15 +123,27 @@ static const gpr_avl_vtable subchannel_avl_vtable = {
void grpc_subchannel_index_init(void) {
g_subchannel_index = gpr_avl_create(&subchannel_avl_vtable);
gpr_mu_init(&g_mu);
gpr_ref_init(&g_refcount, 1);
}
void grpc_subchannel_index_shutdown(void) {
// TODO(juanlishen): This refcounting mechanism may lead to memory leakage.
// To solve that, we should force polling to flush any pending callbacks, then
// shutdown safely.
grpc_subchannel_index_unref();
}
void grpc_subchannel_index_unref(void) {
if (gpr_unref(&g_refcount)) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_destroy(&g_mu);
gpr_avl_unref(g_subchannel_index, &exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
}
}
void grpc_subchannel_index_ref(void) { gpr_ref_non_zero(&g_refcount); }
grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
grpc_subchannel_key *key) {
// Lock, and take a reference to the subchannel index.
@ -136,8 +152,8 @@ grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx);
gpr_mu_unlock(&g_mu);
grpc_subchannel *c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
(grpc_subchannel *)gpr_avl_get(index, key, exec_ctx), "index_find");
gpr_avl_unref(index, exec_ctx);
return c;

@ -59,6 +59,13 @@ void grpc_subchannel_index_init(void);
/** Shutdown the subchannel index (global) */
void grpc_subchannel_index_shutdown(void);
/** Increment the refcount (non-zero) of subchannel index (global). */
void grpc_subchannel_index_ref(void);
/** Decrement the refcount of subchannel index (global). If the refcount drops
to zero, unref the subchannel index and destroy its mutex. */
void grpc_subchannel_index_unref(void);
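/* Editor's note: a sketch of the ref/unref discipline the two declarations
 * above introduce: the index is created with refcount 1, each policy takes a
 * ref for its lifetime, and the final unref tears the structure down. C11
 * atomics stand in for gpr_refcount; all names here are hypothetical. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int g_index_refs;
static int g_index_alive;

static void index_init(void) {
  atomic_store(&g_index_refs, 1); /* initial ref owned by init/shutdown */
  g_index_alive = 1;
}
static void index_ref(void) { atomic_fetch_add(&g_index_refs, 1); }
static void index_unref(void) {
  if (atomic_fetch_sub(&g_index_refs, 1) == 1) {
    g_index_alive = 0; /* last ref dropped: destroy */
    printf("index destroyed\n");
  }
}
/* Shutdown only drops the initial ref; a policy still holding a ref keeps
 * the index alive until that policy is destroyed. */
static void index_shutdown(void) { index_unref(); }

int main(void) {
  index_init();
  index_ref();      /* e.g. a pick_first policy is created */
  index_shutdown(); /* global shutdown: index survives */
  printf("alive=%d\n", g_index_alive); /* alive=1 */
  index_unref();    /* the policy is destroyed: index torn down */
  return 0;
}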
/** \em TEST ONLY.
* If \a force_creation is true, all key comparisons will be false, resulting in
* new subchannels always being created. Otherwise, the keys will be compared as

@ -83,12 +83,12 @@ static grpc_error *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx,
}
static void add_error(const char *error_name, grpc_error **cumulative,
grpc_error *new_err) {
if (new_err == GRPC_ERROR_NONE) return;
if (*cumulative == GRPC_ERROR_NONE) {
*cumulative = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_name);
}
*cumulative = grpc_error_add_child(*cumulative, new_err);
}
static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,

@ -402,7 +402,7 @@ static bool maybe_add_max_age_filter(grpc_exec_ctx* exec_ctx,
bool enable =
grpc_channel_arg_get_integer(
grpc_channel_args_find(channel_args, GRPC_ARG_MAX_CONNECTION_AGE_MS),
MAX_CONNECTION_AGE_INTEGER_OPTIONS) != INT_MAX ||
grpc_channel_arg_get_integer(
grpc_channel_args_find(channel_args, GRPC_ARG_MAX_CONNECTION_IDLE_MS),
MAX_CONNECTION_IDLE_INTEGER_OPTIONS) != INT_MAX;
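/* Editor's note: the one-character change above (&& -> ||) means the max_age
 * filter is added when EITHER limit is configured, since INT_MAX denotes
 * "unlimited" for both. A hypothetical, self-contained sketch of the
 * corrected gate: */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool maybe_add_max_age_filter(int max_age_ms, int max_idle_ms) {
  /* INT_MAX means the corresponding limit is disabled. */
  return max_age_ms != INT_MAX || max_idle_ms != INT_MAX;
}

int main(void) {
  /* With &&, setting only an idle limit would have left the filter off. */
  printf("%d\n", maybe_add_max_age_filter(INT_MAX, 30000));   /* 1: enabled */
  printf("%d\n", maybe_add_max_age_filter(INT_MAX, INT_MAX)); /* 0: disabled */
  return 0;
}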

@ -161,7 +161,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error));
}
gpr_mu_unlock(&c->mu);
chttp2_connector_unref(exec_ctx, (grpc_connector *)arg);
} else {
GPR_ASSERT(c->endpoint != NULL);
start_handshake_locked(exec_ctx, c);

@ -52,7 +52,7 @@ typedef struct {
} server_state;
typedef struct {
server_state *svr_state;
grpc_pollset *accepting_pollset;
grpc_tcp_server_acceptor *acceptor;
grpc_handshake_manager *handshake_mgr;
@ -63,8 +63,8 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
server_connection_state *connection_state =
(server_connection_state *)args->user_data;
gpr_mu_lock(&connection_state->svr_state->mu);
if (error != GRPC_ERROR_NONE || connection_state->svr_state->shutdown) {
const char *error_str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Handshaking failed: %s", error_str);
@ -89,7 +89,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport *transport =
grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0);
grpc_server_setup_transport(
exec_ctx, connection_state->svr_state->server, transport,
connection_state->accepting_pollset, args->args);
grpc_chttp2_transport_start_reading(exec_ctx, transport,
args->read_buffer);
@ -97,11 +97,11 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
}
grpc_handshake_manager_pending_list_remove(
&connection_state->svr_state->pending_handshake_mgrs,
connection_state->handshake_mgr);
gpr_mu_unlock(&connection_state->svr_state->mu);
grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
grpc_tcp_server_unref(exec_ctx, connection_state->svr_state->tcp_server);
gpr_free(connection_state->acceptor);
gpr_free(connection_state);
}
@ -124,8 +124,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
gpr_mu_unlock(&state->mu);
grpc_tcp_server_ref(state->tcp_server);
server_connection_state *connection_state =
(server_connection_state *)gpr_malloc(sizeof(*connection_state));
connection_state->svr_state = state;
connection_state->accepting_pollset = accepting_pollset;
connection_state->acceptor = acceptor;
connection_state->handshake_mgr = handshake_mgr;

@ -84,8 +84,6 @@ grpc_tracer_flag grpc_trace_chttp2_refcount =
GRPC_TRACER_INITIALIZER(false, "chttp2_refcount");
#endif
/* forward declarations of various callbacks that we'll build closures around */
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
@ -248,6 +246,8 @@ void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
#endif
static const grpc_transport_vtable *get_vtable(void);
static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const grpc_channel_args *channel_args,
grpc_endpoint *ep, bool is_client) {
@ -257,7 +257,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
t->base.vtable = get_vtable();
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
@ -557,11 +557,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
t->ping_state.is_delayed_ping_timer_set = false;
@ -589,7 +584,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->destroying = 1;
close_transport_locked(
exec_ctx, t,
@ -715,7 +710,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
grpc_chttp2_transport *t = s->t;
GPR_TIMER_BEGIN("destroy_stream", 0);
@ -799,7 +794,7 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
uint32_t id) {
return (grpc_chttp2_stream *)grpc_chttp2_stream_map_find(&t->stream_map, id);
}
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
@ -858,6 +853,7 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
switch (t->write_state) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
t->is_first_write_in_batch = true;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_SCHED(
exec_ctx,
@ -876,52 +872,100 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
}
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
bool also_initiate_write, const char *reason) {
if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
}
if (also_initiate_write) {
grpc_chttp2_initiate_write(exec_ctx, t, reason);
}
}
static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t,
bool early_results_scheduled,
bool partial_write) {
/* if it's not the first write in a batch, always offload to the executor:
we'll probably end up queuing against the kernel anyway, so we'll likely
get better latency overall if we switch writing work elsewhere and continue
with application work above */
if (!t->is_first_write_in_batch) {
return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
/* equivalently, if it's a partial write, we *know* we're going to be taking a
thread jump to write it because of the above, may as well do so
immediately */
if (partial_write) {
return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
switch (t->opt_target) {
case GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT:
/* executor gives us the largest probability of being able to batch a
* write with others on this transport */
return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
case GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY:
return grpc_schedule_on_exec_ctx;
}
GPR_UNREACHABLE_CODE(return NULL);
}
#define WRITE_STATE_TUPLE_TO_INT(p, i) (2 * (int)(p) + (int)(i))
static const char *begin_writing_desc(bool partial, bool inlined) {
switch (WRITE_STATE_TUPLE_TO_INT(partial, inlined)) {
case WRITE_STATE_TUPLE_TO_INT(false, false):
return "begin write in background";
case WRITE_STATE_TUPLE_TO_INT(false, true):
return "begin write in current thread";
case WRITE_STATE_TUPLE_TO_INT(true, false):
return "begin partial write in background";
case WRITE_STATE_TUPLE_TO_INT(true, true):
return "begin partial write in current thread";
}
GPR_UNREACHABLE_CODE(return "bad state tuple");
}
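/* Editor's note: a sketch of the WRITE_STATE_TUPLE_TO_INT trick above, which
 * packs two booleans into 0..3 so a switch can cover every (partial, inlined)
 * combination exhaustively. Self-contained, with hypothetical naming: */
#include <stdbool.h>
#include <stdio.h>

#define TUPLE_TO_INT(p, i) (2 * (int)(p) + (int)(i))

static const char *describe(bool partial, bool inlined) {
  switch (TUPLE_TO_INT(partial, inlined)) {
    case TUPLE_TO_INT(false, false): return "full write in background";
    case TUPLE_TO_INT(false, true):  return "full write in current thread";
    case TUPLE_TO_INT(true, false):  return "partial write in background";
    case TUPLE_TO_INT(true, true):   return "partial write in current thread";
  }
  return "unreachable";
}

int main(void) {
  printf("%s\n", describe(true, false)); /* partial write in background */
  return 0;
}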
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("write_action_begin_locked", 0);
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
grpc_chttp2_begin_write_result r;
if (t->closed) {
r.writing = false;
} else {
r = grpc_chttp2_begin_write(exec_ctx, t);
}
if (r.writing) {
if (r.partial) {
GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx);
}
if (!t->is_first_write_in_batch) {
GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx);
}
grpc_closure_scheduler *scheduler =
write_scheduler(t, r.early_results_scheduled, r.partial);
if (scheduler != grpc_schedule_on_exec_ctx) {
GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx);
}
set_write_state(
exec_ctx, t, r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
: GRPC_CHTTP2_WRITE_STATE_WRITING,
begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx));
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_INIT(&t->write_action,
write_action, t, scheduler),
GRPC_ERROR_NONE);
} else {
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
"begin writing nothing");
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
}
GPR_TIMER_END("write_action_begin_locked", 0);
}
static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_TIMER_BEGIN("write_action", 0);
grpc_endpoint_write(
exec_ctx, t->ep, &t->outbuf,
@ -933,7 +977,7 @@ static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (error != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
@ -958,7 +1002,8 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
GPR_TIMER_MARK("state=writing_stale_no_poller", 0);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"continue writing [!covered]");
"continue writing");
t->is_first_write_in_batch = false;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_RUN(
exec_ctx,
@ -1060,9 +1105,7 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
post_destructive_reclaimer(exec_ctx, t);
grpc_chttp2_become_writable(exec_ctx, t, s, true, "new_stream");
}
/* cancel out streams that will never be started */
while (t->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@ -1111,12 +1154,14 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACER_ON(grpc_http_trace)) {
const char *errstr = grpc_error_string(error);
gpr_log(
GPR_DEBUG,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s",
t, closure,
(int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
(int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT), desc,
errstr, write_state_name(t->write_state));
}
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
@ -1157,9 +1202,7 @@ static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s) {
if (s->id != 0 && (!s->write_buffering ||
s->flow_controlled_buffer.length > t->write_buffer_size)) {
grpc_chttp2_become_writable(exec_ctx, t, s, true, "op.send_message");
}
}
@ -1191,15 +1234,19 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
} else {
grpc_chttp2_write_cb *cb = t->write_cb_pool;
if (cb == NULL) {
cb = (grpc_chttp2_write_cb *)gpr_malloc(sizeof(*cb));
} else {
t->write_cb_pool = cb->next;
}
cb->call_at_byte = notify_offset;
cb->closure = s->fetching_send_message_finished;
s->fetching_send_message_finished = NULL;
grpc_chttp2_write_cb **list =
s->fetching_send_message->flags & GRPC_WRITE_THROUGH
? &s->on_write_finished_cbs
: &s->on_flow_controlled_cbs;
cb->next = *list;
*list = cb;
}
s->fetching_send_message = NULL;
return; /* early out */
@ -1219,7 +1266,7 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
grpc_error *error) {
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
grpc_chttp2_transport *t = s->t;
if (error == GRPC_ERROR_NONE) {
error = grpc_byte_stream_pull(exec_ctx, s->fetching_send_message,
@ -1254,8 +1301,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
grpc_transport_stream_op_batch *op =
(grpc_transport_stream_op_batch *)stream_op;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)op->handler_private.extra_arg;
grpc_transport_stream_op_batch_payload *op_payload = op->payload;
grpc_chttp2_transport *t = s->t;
@ -1308,7 +1356,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if ((s->stream_compression_send_enabled =
(op_payload->send_initial_metadata.send_initial_metadata->idx.named
.content_encoding != NULL)) == true) {
s->compressed_data_buffer =
(grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(s->compressed_data_buffer);
}
@ -1355,14 +1404,13 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
} else {
GPR_ASSERT(s->id != 0);
bool initiate_write = true;
if (op->send_message &&
(op->payload->send_message.send_message->flags &
GRPC_WRITE_BUFFER_HINT)) {
initiate_write = false;
}
grpc_chttp2_become_writable(exec_ctx, t, s, initiate_write,
"op.send_initial_metadata");
}
} else {
@ -1471,8 +1519,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
} else if (s->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
bytes before going writable */
grpc_chttp2_become_writable(exec_ctx, t, s, true,
"op.send_trailing_metadata");
}
}
@ -1599,7 +1646,7 @@ static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->ping_state.is_delayed_ping_timer_set = false;
grpc_chttp2_initiate_write(exec_ctx, t, "retry_send_ping");
}
@ -1651,8 +1698,9 @@ void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
void *stream_op,
grpc_error *error_ignored) {
grpc_transport_op *op = (grpc_transport_op *)stream_op;
grpc_chttp2_transport *t =
(grpc_chttp2_transport *)op->handler_private.extra_arg;
grpc_error *close_transport = op->disconnect_with_error;
if (op->goaway_error) {
@ -1864,7 +1912,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t id, grpc_error *error) {
grpc_chttp2_stream *s =
(grpc_chttp2_stream *)grpc_chttp2_stream_map_delete(&t->stream_map, id);
GPR_ASSERT(s);
if (t->incoming_stream == s) {
t->incoming_stream = NULL;
@ -1995,6 +2044,21 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
return error;
}
static void flush_write_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_chttp2_write_cb **list,
grpc_error *error) {
while (*list) {
grpc_chttp2_write_cb *cb = *list;
*list = cb->next;
grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
GRPC_ERROR_REF(error),
"on_write_finished_cb");
cb->next = t->write_cb_pool;
t->write_cb_pool = cb;
}
GRPC_ERROR_UNREF(error);
}
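/* Editor's note: a sketch of what flush_write_list above does: walk an
 * intrusive singly-linked list of pending callbacks, complete each one, and
 * return the nodes to a shared free pool for reuse. Hypothetical types and
 * names; printf stands in for completing a closure. */
#include <stdio.h>
#include <stdlib.h>

typedef struct write_cb {
  int id; /* stand-in for the closure to complete */
  struct write_cb *next;
} write_cb;

static write_cb *g_pool; /* stand-in for t->write_cb_pool */

static void flush_list(write_cb **list) {
  while (*list) {
    write_cb *cb = *list;
    *list = cb->next;               /* unlink from the pending list */
    printf("completing cb %d\n", cb->id);
    cb->next = g_pool;              /* recycle the node into the pool */
    g_pool = cb;
  }
}

int main(void) {
  write_cb *pending = NULL;
  for (int i = 0; i < 3; ++i) {
    write_cb *cb = malloc(sizeof(*cb));
    cb->id = i;
    cb->next = pending;
    pending = cb;
  }
  flush_list(&pending);                    /* completes 2, 1, 0 */
  printf("pool head id=%d\n", g_pool->id); /* pool head id=0 */
  while (g_pool) { write_cb *n = g_pool->next; free(g_pool); g_pool = n; }
  return 0;
}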
void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
@ -2014,16 +2078,9 @@ void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
"fetching_send_message_finished");
flush_write_list(exec_ctx, t, s, &s->on_write_finished_cbs,
GRPC_ERROR_REF(error));
flush_write_list(exec_ctx, t, s, &s->on_flow_controlled_cbs, error);
}
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
@ -2242,8 +2299,8 @@ typedef struct {
} cancel_stream_cb_args;
static void cancel_stream_cb(void *user_data, uint32_t key, void *stream) {
cancel_stream_cb_args *args = (cancel_stream_cb_args *)user_data;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)stream;
grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s,
GRPC_ERROR_REF(args->error));
}
@ -2267,13 +2324,11 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
break;
case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
grpc_chttp2_become_writable(exec_ctx, t, s, true,
"immediate stream flowctl");
break;
case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"queue stream flowctl");
break;
}
@ -2345,7 +2400,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("reading_action_locked", 0);
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
GRPC_ERROR_REF(error);
@ -2386,9 +2441,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (t->flow_control.initial_window_update > 0) {
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
grpc_chttp2_become_writable(exec_ctx, t, s, true, "unstalled");
}
}
t->flow_control.initial_window_update = 0;
@ -2430,7 +2483,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Start BDP ping", t->peer_string);
}
@ -2443,7 +2496,7 @@ static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping", t->peer_string);
}
@ -2492,7 +2545,7 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
if (t->destroying || t->closed) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@ -2524,7 +2577,7 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
grpc_timer_init(
exec_ctx, &t->keepalive_watchdog_timer,
@ -2534,7 +2587,7 @@ static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
@ -2551,7 +2604,7 @@ static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@ -2632,7 +2685,8 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
void *argp,
grpc_error *error_ignored) {
grpc_chttp2_incoming_byte_stream *bs = argp;
grpc_chttp2_incoming_byte_stream *bs =
(grpc_chttp2_incoming_byte_stream *)argp;
grpc_chttp2_transport *t = bs->transport;
grpc_chttp2_stream *s = bs->stream;
@ -2842,7 +2896,8 @@ static const grpc_byte_stream_vtable grpc_chttp2_incoming_byte_stream_vtable = {
static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
void *byte_stream,
grpc_error *error_ignored) {
grpc_chttp2_incoming_byte_stream *bs = byte_stream;
grpc_chttp2_incoming_byte_stream *bs =
(grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_chttp2_stream *s = bs->stream;
grpc_chttp2_transport *t = s->t;
@ -2857,7 +2912,8 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
uint32_t frame_size, uint32_t flags) {
grpc_chttp2_incoming_byte_stream *incoming_byte_stream =
gpr_malloc(sizeof(*incoming_byte_stream));
(grpc_chttp2_incoming_byte_stream *)gpr_malloc(
sizeof(*incoming_byte_stream));
incoming_byte_stream->base.length = frame_size;
incoming_byte_stream->remaining_bytes = frame_size;
incoming_byte_stream->base.flags = flags;
@ -2898,7 +2954,7 @@ static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (error == GRPC_ERROR_NONE &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
/* Channel with no active streams: send a goaway to try and make it
@ -2928,11 +2984,12 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
t->destructive_reclaimer_registered = false;
if (error == GRPC_ERROR_NONE && n > 0) {
grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
grpc_chttp2_stream *s =
(grpc_chttp2_stream *)grpc_chttp2_stream_map_rand(&t->stream_map);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id);
@ -2976,10 +3033,13 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
destroy_transport,
chttp2_get_endpoint};
static const grpc_transport_vtable *get_vtable(void) { return &vtable; }
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
grpc_endpoint *ep, int is_client) {
grpc_chttp2_transport *t = gpr_zalloc(sizeof(grpc_chttp2_transport));
grpc_chttp2_transport *t =
(grpc_chttp2_transport *)gpr_zalloc(sizeof(grpc_chttp2_transport));
init_transport(exec_ctx, t, channel_args, ep, is_client != 0);
return &t->base;
}

@ -18,6 +18,7 @@
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include <limits.h>
#include <math.h>
#include <string.h>
@ -59,24 +60,24 @@ static void pretrace(shadow_flow_control* shadow_fc,
#define TRACE_PADDING 30
static char* fmt_int64_diff_str(int64_t old, int64_t new) {
static char* fmt_int64_diff_str(int64_t old_val, int64_t new_val) {
char* str;
if (old != new) {
gpr_asprintf(&str, "%" PRId64 " -> %" PRId64 "", old, new);
if (old_val != new_val) {
gpr_asprintf(&str, "%" PRId64 " -> %" PRId64 "", old_val, new_val);
} else {
gpr_asprintf(&str, "%" PRId64 "", old);
gpr_asprintf(&str, "%" PRId64 "", old_val);
}
char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
gpr_free(str);
return str_lp;
}
static char* fmt_uint32_diff_str(uint32_t old, uint32_t new) {
static char* fmt_uint32_diff_str(uint32_t old_val, uint32_t new_val) {
char* str;
if (new > 0 && old != new) {
gpr_asprintf(&str, "%" PRIu32 " -> %" PRIu32 "", old, new);
if (new_val > 0 && old_val != new_val) {
gpr_asprintf(&str, "%" PRIu32 " -> %" PRIu32 "", old_val, new_val);
} else {
gpr_asprintf(&str, "%" PRIu32 "", old);
gpr_asprintf(&str, "%" PRIu32 "", old_val);
}
char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
gpr_free(str);
@ -483,7 +484,8 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
if (grpc_bdp_estimator_get_bw(&tfc->bdp_estimator, &bw_dbl)) {
// target the max of the BDP and the bytes sendable in one millisecond at
// the estimated bandwidth (hence bw_dbl / 1000).
int32_t frame_size = (int32_t)GPR_CLAMP(
GPR_MAX((int32_t)bw_dbl / 1000, bdp), 16384, 16777215);
GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000, bdp), 16384,
16777215);
grpc_chttp2_flowctl_urgency frame_size_urgency = delta_is_significant(
tfc, frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE);
if (frame_size_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
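The added inner GPR_CLAMP guards the double-to-int32_t conversion: casting a double larger than INT32_MAX (or far below zero) to a signed 32-bit integer is undefined behavior in C, so the bandwidth estimate is clamped in the double domain first and only then divided by 1000 to get bytes per millisecond. A standalone sketch of the guard, where CLAMP is a stand-in for gRPC's GPR_CLAMP:

#include <limits.h>
#include <stdint.h>

#define CLAMP(x, lo, hi) ((x) < (lo) ? (lo) : (x) > (hi) ? (hi) : (x))

/* Clamp in the double domain before narrowing: (int32_t)1e12 would be
   undefined behavior, while the clamped cast is always in range. */
static int32_t bytes_per_ms(double bw_dbl) {
  return (int32_t)CLAMP(bw_dbl, 0, INT_MAX) / 1000;
}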

@ -44,7 +44,8 @@ static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) {
return out;
}
grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
grpc_slice grpc_chttp2_settings_create(uint32_t *old_settings,
const uint32_t *new_settings,
uint32_t force_mask, size_t count) {
size_t i;
uint32_t n = 0;
@ -52,21 +53,21 @@ grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
uint8_t *p;
for (i = 0; i < count; i++) {
n += (new[i] != old[i] || (force_mask & (1u << i)) != 0);
n += (new_settings[i] != old_settings[i] || (force_mask & (1u << i)) != 0);
}
output = GRPC_SLICE_MALLOC(9 + 6 * n);
p = fill_header(GRPC_SLICE_START_PTR(output), 6 * n, 0);
for (i = 0; i < count; i++) {
if (new[i] != old[i] || (force_mask & (1u << i)) != 0) {
if (new_settings[i] != old_settings[i] || (force_mask & (1u << i)) != 0) {
*p++ = (uint8_t)(grpc_setting_id_to_wire_id[i] >> 8);
*p++ = (uint8_t)(grpc_setting_id_to_wire_id[i]);
*p++ = (uint8_t)(new[i] >> 24);
*p++ = (uint8_t)(new[i] >> 16);
*p++ = (uint8_t)(new[i] >> 8);
*p++ = (uint8_t)(new[i]);
old[i] = new[i];
*p++ = (uint8_t)(new_settings[i] >> 24);
*p++ = (uint8_t)(new_settings[i] >> 16);
*p++ = (uint8_t)(new_settings[i] >> 8);
*p++ = (uint8_t)(new_settings[i]);
old_settings[i] = new_settings[i];
}
}
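The old/new parameter renames in this file and in flow_control.c above are not cosmetic: new is a keyword in C++, so any prototype using it fails to parse there. For illustration:

/* Legal C, but fails to parse as C++, where `new` is a keyword. */
static int delta(int old, int new) { return new - old; }

/* The renamed form is accepted by both languages. */
static int delta2(int old_val, int new_val) { return new_val - old_val; }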

@ -99,9 +99,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_chttp2_flowctl_recv_stream_update(
&t->flow_control, &s->flow_control, received_update);
if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) {
grpc_chttp2_become_writable(
exec_ctx, t, s, GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
"stream.read_flow_control");
grpc_chttp2_become_writable(exec_ctx, t, s, true,
"stream.read_flow_control");
}
}
} else {

@ -42,8 +42,9 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
grpc_mdelem elem) {
buffer->size += GRPC_MDELEM_LENGTH(elem);
return grpc_metadata_batch_add_tail(
exec_ctx, &buffer->batch,
gpr_arena_alloc(buffer->arena, sizeof(grpc_linked_mdelem)), elem);
exec_ctx, &buffer->batch, (grpc_linked_mdelem *)gpr_arena_alloc(
buffer->arena, sizeof(grpc_linked_mdelem)),
elem);
}
grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(

@ -262,6 +262,10 @@ struct grpc_chttp2_transport {
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** is this the first write in a series of writes?
set when we initiate writing from idle, cleared when we
initiate writing from writing+more */
bool is_first_write_in_batch;
/** is the transport destroying itself? */
uint8_t destroying;
@ -483,6 +487,7 @@ struct grpc_chttp2_stream {
grpc_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
int64_t flow_controlled_bytes_flowed;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
@ -555,6 +560,7 @@ struct grpc_chttp2_stream {
grpc_slice_buffer flow_controlled_buffer;
grpc_chttp2_write_cb *on_flow_controlled_cbs;
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
@ -595,10 +601,13 @@ struct grpc_chttp2_stream {
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, const char *reason);
typedef enum {
GRPC_CHTTP2_NOTHING_TO_WRITE,
GRPC_CHTTP2_PARTIAL_WRITE,
GRPC_CHTTP2_FULL_WRITE,
typedef struct {
/** are we writing? */
bool writing;
/** if writing: was it a complete flush (false) or a partial flush (true) */
bool partial;
/** did we queue any completions as part of beginning the write */
bool early_results_scheduled;
} grpc_chttp2_begin_write_result;
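Replacing the old three-valued result enum with this struct lets grpc_chttp2_begin_write report two independent facts (whether anything was written, and whether the flush was partial) plus the new early_results_scheduled bit, which the writing.c hunks below set when closures such as send_initial_metadata_finished now complete during the write rather than in grpc_chttp2_end_write as before. A self-contained sketch of how a caller might branch on the result (the handler and its messages are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Local mirror of the result struct above, for a standalone sketch. */
typedef struct {
  bool writing;
  bool partial;
  bool early_results_scheduled;
} begin_write_result;

static void handle(begin_write_result r) {
  if (r.early_results_scheduled) {
    printf("flush completions queued during framing\n");
  }
  if (r.writing) {
    printf(r.partial ? "write, then schedule another round\n"
                     : "write, then return to idle\n");
  }
}

int main(void) {
  begin_write_result r = {true, true, false};
  handle(r);
  return 0;
}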
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
@ -840,22 +849,12 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
typedef enum {
/* don't initiate a transport write, but piggyback on the next one */
GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK,
/* initiate a covered write */
GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
/* initiate an uncovered write */
GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED
} grpc_chttp2_stream_write_type;
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_chttp2_stream_write_type type,
const char *reason);
bool also_initiate_write, const char *reason);
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,

@ -106,7 +106,8 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
return err;
}
++cur;
++t->deframe_state;
t->deframe_state =
(grpc_chttp2_deframe_transport_state)(1 + (int)t->deframe_state);
}
if (cur == end) {
return GRPC_ERROR_NONE;
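Same C++-compatibility theme as the casts elsewhere: ++ is not defined for a plain enum in C++, hence the explicit widen-increment-narrow on deframe_state. The equivalent pattern in isolation:

typedef enum { STATE_A, STATE_B, STATE_C } state_t;

static state_t advance(state_t s) {
  /* ++s would be rejected by a C++ compiler for a plain enum;
     widening to int and casting back is valid in both languages. */
  return (state_t)(1 + (int)s);
}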

@ -72,8 +72,10 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
/* resize when less than 25% of the table is free, because compaction
won't help much */
map->capacity = capacity = 3 * capacity / 2;
map->keys = keys = gpr_realloc(keys, capacity * sizeof(uint32_t));
map->values = values = gpr_realloc(values, capacity * sizeof(void *));
map->keys = keys =
(uint32_t *)gpr_realloc(keys, capacity * sizeof(uint32_t));
map->values = values =
(void **)gpr_realloc(values, capacity * sizeof(void *));
}
}
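For context on the hunk above: the map grows by a factor of 3/2 once less than a quarter of the table is free, since compacting deleted entries at that point would recover too little space; the casts on gpr_realloc are the same C++-compatibility change as elsewhere. A sketch of the growth policy (the trigger condition is an assumption paraphrased from the comment; only the 3/2 factor is verbatim):

#include <stddef.h>

/* Growth sketch: grow by 3/2 when free space drops below 25%
   (trigger condition assumed, not shown in the hunk). */
static size_t maybe_grow(size_t count, size_t capacity) {
  if (capacity - count < capacity / 4) {
    capacity = 3 * capacity / 2;
  }
  return capacity;
}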

@ -123,15 +123,18 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
(t->ping_state.pings_before_data_required != 0);
}
static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int64_t send_bytes,
grpc_chttp2_write_cb **list, grpc_error *error) {
grpc_chttp2_write_cb **list, int64_t *ctr,
grpc_error *error) {
bool sched_any = false;
grpc_chttp2_write_cb *cb = *list;
*list = NULL;
s->flow_controlled_bytes_written += send_bytes;
*ctr += send_bytes;
while (cb) {
grpc_chttp2_write_cb *next = cb->next;
if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
if (cb->call_at_byte <= *ctr) {
sched_any = true;
finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
} else {
add_to_write_list(list, cb);
@ -139,6 +142,7 @@ static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
cb = next;
}
GRPC_ERROR_UNREF(error);
return sched_any;
}
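update_list now advances a caller-supplied counter through *ctr and reports whether it scheduled any completion. That lets grpc_chttp2_begin_write track bytes flowed into the outgoing frame (s->flow_controlled_bytes_flowed) separately from bytes confirmed on the wire, which grpc_chttp2_end_write still tracks via s->flow_controlled_bytes_written, and the return value feeds result.early_results_scheduled. A reduced sketch of the list walk:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct cb {
  struct cb *next;
  int64_t call_at_byte;
} cb_t;

/* Fire every callback whose byte threshold has been reached; report
   whether anything fired. `ctr` is whichever progress counter the
   caller tracks (bytes flowed vs. bytes written). */
static bool update_list_sketch(cb_t **list, int64_t send_bytes,
                               int64_t *ctr) {
  bool sched_any = false;
  *ctr += send_bytes;
  cb_t **tail = list;
  while (*tail != NULL) {
    cb_t *c = *tail;
    if (c->call_at_byte <= *ctr) {
      *tail = c->next; /* unlink; the real code schedules c's closure
                          and recycles c onto the write_cb pool */
      sched_any = true;
    } else {
      tail = &c->next;
    }
  }
  return sched_any;
}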
static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
@ -164,6 +168,13 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
/* stats histogram counters: we increment these throughout this function,
and at the end publish to the central stats histograms */
int flow_control_writes = 0;
int initial_metadata_writes = 0;
int trailing_metadata_writes = 0;
int message_writes = 0;
GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx);
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
@ -177,6 +188,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
t->force_send_settings = 0;
t->dirtied_local_settings = 0;
t->sent_local_settings = 1;
GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
}
/* simple writes are queued to qbuf, and flushed here */
@ -196,13 +208,13 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
}
}
bool partial_write = false;
grpc_chttp2_begin_write_result result = {false, false, false};
/* for each grpc_chttp2_stream that's become writable, frame it's data
(according to available window sizes) and add to the output buffer */
while (true) {
if (t->outbuf.length > target_write_size(t)) {
partial_write = true;
result.partial = true;
break;
}
@ -246,7 +258,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
.stats = &s->stats.outgoing};
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
s->send_initial_metadata, &hopt, &t->outbuf);
now_writing = true;
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
if (!t->is_client) {
@ -254,6 +265,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
}
initial_metadata_writes++;
} else {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
@ -269,10 +281,15 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
[num_extra_headers_for_trailing_metadata++] =
&s->send_initial_metadata->idx.named.content_type->md;
}
trailing_metadata_writes++;
}
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
result.early_results_scheduled = true;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_NONE,
"send_initial_metadata_finished");
}
/* send any window updates */
uint32_t stream_announce = grpc_chttp2_flowctl_maybe_send_stream_update(
@ -288,6 +305,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
}
flow_control_writes++;
}
if (sent_initial_metadata) {
/* send any body bytes, if allowed by flow control */
@ -306,6 +324,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (max_outgoing > 0) {
bool is_last_data_frame = false;
bool is_last_frame = false;
size_t sending_bytes_before = s->sending_bytes;
if (s->stream_compression_send_enabled) {
while ((s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer->length > 0) &&
@ -372,7 +391,14 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_NONE);
}
result.early_results_scheduled |=
update_list(exec_ctx, t, s,
(int64_t)(s->sending_bytes - sending_bytes_before),
&s->on_flow_controlled_cbs,
&s->flow_controlled_bytes_flowed, GRPC_ERROR_NONE);
now_writing = true;
if (s->flow_controlled_buffer.length > 0 ||
(s->stream_compression_send_enabled &&
@ -380,6 +406,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
message_writes++;
} else if (t->flow_control.remote_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
@ -415,6 +442,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
num_extra_headers_for_trailing_metadata,
s->send_trailing_metadata, &hopt,
&t->outbuf);
trailing_metadata_writes++;
}
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
@ -423,11 +451,25 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing));
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_NONE);
now_writing = true;
result.early_results_scheduled = true;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_NONE, "send_trailing_metadata_finished");
}
}
if (now_writing) {
GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
exec_ctx, initial_metadata_writes);
GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes);
GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(
exec_ctx, trailing_metadata_writes);
GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx,
flow_control_writes);
if (!grpc_chttp2_list_add_writing_stream(t, s)) {
/* already in writing list: drop ref */
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
@ -465,9 +507,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
return t->outbuf.count > 0 ? (partial_write ? GRPC_CHTTP2_PARTIAL_WRITE
: GRPC_CHTTP2_FULL_WRITE)
: GRPC_CHTTP2_NOTHING_TO_WRITE;
result.writing = t->outbuf.count > 0;
return result;
}
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@ -476,23 +517,12 @@ void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
if (s->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_REF(error), "send_initial_metadata_finished");
}
if (s->sending_bytes != 0) {
update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
&s->on_write_finished_cbs, GRPC_ERROR_REF(error));
&s->on_write_finished_cbs, &s->flow_controlled_bytes_written,
GRPC_ERROR_REF(error));
s->sending_bytes = 0;
}
if (s->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_REF(error));
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
}
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &t->outbuf);

@ -37,7 +37,6 @@
if (GRPC_TRACER_ON(grpc_inproc_trace)) gpr_log(__VA_ARGS__); \
} while (0)
static const grpc_transport_vtable inproc_vtable;
static grpc_slice g_empty_slice;
static grpc_slice g_fake_path_key;
static grpc_slice g_fake_path_value;
@ -1166,6 +1165,55 @@ static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
unref_transport(exec_ctx, t);
}
/*******************************************************************************
* INTEGRATION GLUE
*/
static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_pollset *pollset) {
// Nothing to do here
}
static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_pollset_set *pollset_set) {
// Nothing to do here
}
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return NULL;
}
/*******************************************************************************
* GLOBAL INIT AND DESTROY
*/
static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
void grpc_inproc_transport_init(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, NULL,
grpc_schedule_on_exec_ctx);
g_empty_slice = grpc_slice_from_static_buffer(NULL, 0);
grpc_slice key_tmp = grpc_slice_from_static_string(":path");
g_fake_path_key = grpc_slice_intern(key_tmp);
grpc_slice_unref_internal(&exec_ctx, key_tmp);
g_fake_path_value = grpc_slice_from_static_string("/");
grpc_slice auth_tmp = grpc_slice_from_static_string(":authority");
g_fake_auth_key = grpc_slice_intern(auth_tmp);
grpc_slice_unref_internal(&exec_ctx, auth_tmp);
g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
grpc_exec_ctx_finish(&exec_ctx);
}
static const grpc_transport_vtable inproc_vtable = {
sizeof(inproc_stream), "inproc", init_stream,
set_pollset, set_pollset_set, perform_stream_op,
perform_transport_op, destroy_stream, destroy_transport,
get_endpoint};
/*******************************************************************************
* Main inproc transport functions
*/
@ -1178,7 +1226,7 @@ static void inproc_transports_create(grpc_exec_ctx *exec_ctx,
inproc_transport *st = (inproc_transport *)gpr_zalloc(sizeof(*st));
inproc_transport *ct = (inproc_transport *)gpr_zalloc(sizeof(*ct));
// Share one lock between both sides since both sides get affected
st->mu = ct->mu = gpr_malloc(sizeof(*st->mu));
st->mu = ct->mu = (shared_mu *)gpr_malloc(sizeof(*st->mu));
gpr_mu_init(&st->mu->mu);
gpr_ref_init(&st->mu->refs, 2);
st->base.vtable = &inproc_vtable;
@ -1240,55 +1288,6 @@ grpc_channel *grpc_inproc_channel_create(grpc_server *server,
return channel;
}
/*******************************************************************************
* INTEGRATION GLUE
*/
static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_pollset *pollset) {
// Nothing to do here
}
static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_pollset_set *pollset_set) {
// Nothing to do here
}
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return NULL;
}
static const grpc_transport_vtable inproc_vtable = {
sizeof(inproc_stream), "inproc", init_stream,
set_pollset, set_pollset_set, perform_stream_op,
perform_transport_op, destroy_stream, destroy_transport,
get_endpoint};
/*******************************************************************************
* GLOBAL INIT AND DESTROY
*/
static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
void grpc_inproc_transport_init(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, NULL,
grpc_schedule_on_exec_ctx);
g_empty_slice = grpc_slice_from_static_buffer(NULL, 0);
grpc_slice key_tmp = grpc_slice_from_static_string(":path");
g_fake_path_key = grpc_slice_intern(key_tmp);
grpc_slice_unref_internal(&exec_ctx, key_tmp);
g_fake_path_value = grpc_slice_from_static_string("/");
grpc_slice auth_tmp = grpc_slice_from_static_string(":authority");
g_fake_auth_key = grpc_slice_intern(auth_tmp);
grpc_slice_unref_internal(&exec_ctx, auth_tmp);
g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_inproc_transport_shutdown(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_slice_unref_internal(&exec_ctx, g_empty_slice);

@ -86,13 +86,14 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
}
}
// Create result.
grpc_channel_args *dst = gpr_malloc(sizeof(grpc_channel_args));
grpc_channel_args *dst =
(grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
dst->num_args = num_args_to_copy + num_to_add;
if (dst->num_args == 0) {
dst->args = NULL;
return dst;
}
dst->args = gpr_malloc(sizeof(grpc_arg) * dst->num_args);
dst->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * dst->num_args);
// Copy args from src that are not being removed.
size_t dst_idx = 0;
if (src != NULL) {
@ -117,7 +118,7 @@ grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src) {
grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a,
const grpc_channel_args *b) {
const size_t max_out = (a->num_args + b->num_args);
grpc_arg *uniques = gpr_malloc(sizeof(*uniques) * max_out);
grpc_arg *uniques = (grpc_arg *)gpr_malloc(sizeof(*uniques) * max_out);
for (size_t i = 0; i < a->num_args; ++i) uniques[i] = a->args[i];
size_t uniques_idx = a->num_args;
@ -160,24 +161,25 @@ static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
/* stabilizing comparison function: since channel_args ordering matters for
* keys with the same name, we need to preserve that ordering */
static int cmp_key_stable(const void *ap, const void *bp) {
const grpc_arg *const *a = ap;
const grpc_arg *const *b = bp;
const grpc_arg *const *a = (const grpc_arg *const *)ap;
const grpc_arg *const *b = (const grpc_arg *const *)bp;
int c = strcmp((*a)->key, (*b)->key);
if (c == 0) c = GPR_ICMP(*a, *b);
return c;
}
grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
grpc_arg **args = gpr_malloc(sizeof(grpc_arg *) * a->num_args);
grpc_arg **args = (grpc_arg **)gpr_malloc(sizeof(grpc_arg *) * a->num_args);
for (size_t i = 0; i < a->num_args; i++) {
args[i] = &a->args[i];
}
if (a->num_args > 1)
qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable);
grpc_channel_args *b = gpr_malloc(sizeof(grpc_channel_args));
grpc_channel_args *b =
(grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
b->num_args = a->num_args;
b->args = gpr_malloc(sizeof(grpc_arg) * b->num_args);
b->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * b->num_args);
for (size_t i = 0; i < a->num_args; i++) {
b->args[i] = copy_arg(args[i]);
}
@ -210,7 +212,7 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) {
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
const grpc_channel_args *a) {
size_t i;
if (a == NULL) return 0;
if (a == NULL) return GRPC_COMPRESS_NONE;
for (i = 0; i < a->num_args; ++i) {
if (a->args[i].type == GRPC_ARG_INTEGER &&
!strcmp(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, a->args[i].key)) {
@ -224,7 +226,7 @@ grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
grpc_stream_compression_algorithm
grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a) {
size_t i;
if (a == NULL) return 0;
if (a == NULL) return GRPC_STREAM_COMPRESS_NONE;
for (i = 0; i < a->num_args; ++i) {
if (a->args[i].type == GRPC_ARG_INTEGER &&
!strcmp(GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,

@ -51,7 +51,8 @@ struct grpc_channel_stack_builder_iterator {
};
grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
grpc_channel_stack_builder *b = gpr_zalloc(sizeof(*b));
grpc_channel_stack_builder *b =
(grpc_channel_stack_builder *)gpr_zalloc(sizeof(*b));
b->begin.filter = NULL;
b->end.filter = NULL;
@ -76,7 +77,8 @@ const char *grpc_channel_stack_builder_get_target(
static grpc_channel_stack_builder_iterator *create_iterator_at_filter_node(
grpc_channel_stack_builder *builder, filter_node *node) {
grpc_channel_stack_builder_iterator *it = gpr_malloc(sizeof(*it));
grpc_channel_stack_builder_iterator *it =
(grpc_channel_stack_builder_iterator *)gpr_malloc(sizeof(*it));
it->builder = builder;
it->node = node;
return it;
@ -212,13 +214,13 @@ bool grpc_channel_stack_builder_prepend_filter(
static void add_after(filter_node *before, const grpc_channel_filter *filter,
grpc_post_filter_create_init_func post_init_func,
void *user_data) {
filter_node *new = (filter_node *)gpr_malloc(sizeof(*new));
new->next = before->next;
new->prev = before;
new->next->prev = new->prev->next = new;
new->filter = filter;
new->init = post_init_func;
new->init_arg = user_data;
filter_node *new_node = (filter_node *)gpr_malloc(sizeof(*new_node));
new_node->next = before->next;
new_node->prev = before;
new_node->next->prev = new_node->prev->next = new_node;
new_node->filter = filter;
new_node->init = post_init_func;
new_node->init_arg = user_data;
}
bool grpc_channel_stack_builder_add_filter_before(
@ -266,7 +268,7 @@ grpc_error *grpc_channel_stack_builder_finish(
// create an array of filters
const grpc_channel_filter **filters =
gpr_malloc(sizeof(*filters) * num_filters);
(const grpc_channel_filter **)gpr_malloc(sizeof(*filters) * num_filters);
size_t i = 0;
for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
filters[i++] = p->filter;

@ -100,8 +100,8 @@ static callback_state *get_state_for_batch(
static void con_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
if (batch->recv_initial_metadata) {
callback_state *state = &calld->recv_initial_metadata_ready;
intercept_callback(
@ -136,7 +136,7 @@ static void con_start_transport_stream_op_batch(
static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_perform_op(exec_ctx, chand->transport, op);
}
@ -144,8 +144,8 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
@ -158,8 +158,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_set_pops(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
}
@ -168,8 +168,8 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
then_schedule_closure);
@ -218,7 +218,7 @@ static void bind_transport(grpc_channel_stack *channel_stack,
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_filter);
GPR_ASSERT(cd->transport == NULL);
cd->transport = t;
cd->transport = (grpc_transport *)t;
/* HACK(ctiller): increase call stack size for the channel to make space
for channel data. We need a cleaner (but performant) way to do this,
@ -226,7 +226,8 @@ static void bind_transport(grpc_channel_stack *channel_stack,
This is only "safe" because call stacks place no additional data after
the last call element, and the last call element MUST be the connected
channel. */
channel_stack->call_stack_size += grpc_transport_stream_size(t);
channel_stack->call_stack_size +=
grpc_transport_stream_size((grpc_transport *)t);
}
bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
@ -240,6 +241,6 @@ bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
}
grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}

@ -84,7 +84,8 @@ struct grpc_handshake_manager {
};
grpc_handshake_manager* grpc_handshake_manager_create() {
grpc_handshake_manager* mgr = gpr_zalloc(sizeof(grpc_handshake_manager));
grpc_handshake_manager* mgr =
(grpc_handshake_manager*)gpr_zalloc(sizeof(grpc_handshake_manager));
gpr_mu_init(&mgr->mu);
gpr_ref_init(&mgr->refs, 1);
return mgr;
@ -137,8 +138,8 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
realloc_count = mgr->count * 2;
}
if (realloc_count > 0) {
mgr->handshakers =
gpr_realloc(mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
mgr->handshakers = (grpc_handshaker**)gpr_realloc(
mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
}
mgr->handshakers[mgr->count++] = handshaker;
gpr_mu_unlock(&mgr->mu);
@ -205,7 +206,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
// handshakers together.
static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_handshake_manager* mgr = arg;
grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
gpr_mu_lock(&mgr->mu);
bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error));
gpr_mu_unlock(&mgr->mu);
@ -219,7 +220,7 @@ static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked when deadline is exceeded.
static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_handshake_manager* mgr = arg;
grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled.
grpc_handshake_manager_shutdown(
exec_ctx, mgr,
@ -241,7 +242,8 @@ void grpc_handshake_manager_do_handshake(
mgr->args.endpoint = endpoint;
mgr->args.args = grpc_channel_args_copy(channel_args);
mgr->args.user_data = user_data;
mgr->args.read_buffer = gpr_malloc(sizeof(*mgr->args.read_buffer));
mgr->args.read_buffer =
(grpc_slice_buffer*)gpr_malloc(sizeof(*mgr->args.read_buffer));
grpc_slice_buffer_init(mgr->args.read_buffer);
// Initialize state needed for calling handshakers.
mgr->acceptor = acceptor;

@ -34,7 +34,7 @@ typedef struct {
static void grpc_handshaker_factory_list_register(
grpc_handshaker_factory_list* list, bool at_start,
grpc_handshaker_factory* factory) {
list->list = gpr_realloc(
list->list = (grpc_handshaker_factory**)gpr_realloc(
list->list, (list->num_factories + 1) * sizeof(grpc_handshaker_factory*));
if (at_start) {
memmove(list->list + 1, list->list,

@ -159,7 +159,8 @@ bool grpc_stream_decompress(grpc_stream_compression_context *ctx,
grpc_stream_compression_context *grpc_stream_compression_context_create(
grpc_stream_compression_method method) {
grpc_stream_compression_context *ctx =
gpr_zalloc(sizeof(grpc_stream_compression_context));
(grpc_stream_compression_context *)gpr_zalloc(
sizeof(grpc_stream_compression_context));
int r;
if (ctx == NULL) {
return NULL;

@ -33,7 +33,7 @@ static size_t g_num_cores;
void grpc_stats_init(void) {
g_num_cores = GPR_MAX(1, gpr_cpu_num_cores());
grpc_stats_per_cpu_storage =
gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
(grpc_stats_data *)gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
}
void grpc_stats_shutdown(void) { gpr_free(grpc_stats_per_cpu_storage); }

@ -25,11 +25,17 @@
const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"client_calls_created",
"server_calls_created",
"cqs_created",
"client_channels_created",
"client_subchannels_created",
"server_channels_created",
"syscall_poll",
"syscall_wait",
"histogram_slow_lookups",
"syscall_write",
"syscall_read",
"tcp_backup_pollers_created",
"tcp_backup_poller_polls",
"http2_op_batches",
"http2_op_cancel",
"http2_op_send_initial_metadata",
@ -38,20 +44,30 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"http2_op_recv_initial_metadata",
"http2_op_recv_message",
"http2_op_recv_trailing_metadata",
"http2_settings_writes",
"http2_pings_sent",
"http2_writes_begun",
"http2_writes_offloaded",
"http2_writes_continued",
"http2_partial_writes",
"combiner_locks_initiated",
"combiner_locks_scheduled_items",
"combiner_locks_scheduled_final_items",
"combiner_locks_offloaded",
"executor_scheduled_items",
"executor_scheduled_short_items",
"executor_scheduled_long_items",
"executor_scheduled_to_self",
"executor_wakeup_initiated",
"executor_queue_drained",
"executor_push_retries",
"server_requested_calls",
"server_slowpath_requests_queued",
};
const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client side calls created by this process",
"Number of server side calls created by this process",
"Number of completion queues created", "Number of client channels created",
"Number of client subchannels created", "Number of server channels created",
"Number of polling syscalls (epoll_wait, poll, etc) made by this process",
"Number of sleeping syscalls made by this process",
"Number of times histogram increments went through the slow (binary "
@ -59,6 +75,8 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of write syscalls (or equivalent - eg sendmsg) made by this "
"process",
"Number of read syscalls (or equivalent - eg recvmsg) made by this process",
"Number of times a backup poller has been created (this can be expensive)",
"Number of polls performed on the backup poller",
"Number of batches received by HTTP2 transport",
"Number of cancelations received by HTTP2 transport",
"Number of batches containing send initial metadata",
@ -67,20 +85,43 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of batches containing receive initial metadata",
"Number of batches containing receive message",
"Number of batches containing receive trailing metadata",
"Number of HTTP2 pings sent by process", "Number of HTTP2 writes initiated",
"Number of settings frames sent", "Number of HTTP2 pings sent by process",
"Number of HTTP2 writes initiated",
"Number of HTTP2 writes offloaded to the executor from application threads",
"Number of HTTP2 writes that finished seeing more data needed to be "
"written",
"Number of HTTP2 writes that were made knowing there was still more data "
"to be written (we cap maximum write size to syscall_write)",
"Number of combiner lock entries by process (first items queued to a "
"combiner)",
"Number of items scheduled against combiner locks",
"Number of final items scheduled against combiner locks",
"Number of combiner locks offloaded to different threads",
"Number of closures scheduled against the executor (gRPC thread pool)",
"Number of finite runtime closures scheduled against the executor (gRPC "
"thread pool)",
"Number of potentially infinite runtime closures scheduled against the "
"executor (gRPC thread pool)",
"Number of closures scheduled by the executor to the executor",
"Number of thread wakeups initiated within the executor",
"Number of times an executor queue was drained",
"Number of times we raced and were forced to retry pushing a closure to "
"the executor",
"How many calls were requested (not necessarily received) by the server",
"How many times was the server slow path taken (indicates too few "
"outstanding requests)",
};
const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"tcp_write_size", "tcp_write_iov_size", "tcp_read_size",
"tcp_read_offer", "tcp_read_offer_iov_size", "http2_send_message_size",
"tcp_write_size",
"tcp_write_iov_size",
"tcp_read_size",
"tcp_read_offer",
"tcp_read_offer_iov_size",
"http2_send_message_size",
"http2_send_initial_metadata_per_write",
"http2_send_message_per_write",
"http2_send_trailing_metadata_per_write",
"http2_send_flowctl_per_write",
"server_cqs_checked",
};
const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of bytes offered to each syscall_write",
@ -89,6 +130,12 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of bytes offered to each syscall_read",
"Number of byte segments offered to each syscall_read",
"Size of messages received by HTTP2 transport",
"Number of streams initiated written per TCP write",
"Number of streams whose payload was written per TCP write",
"Number of streams terminated per TCP write",
"Number of flow control updates written per TCP write",
"How many completion queues were checked looking for a CQ that had "
"requested the incoming call",
};
const int grpc_stats_table_0[65] = {
0, 1, 2, 3, 4, 6, 8, 11,
@ -119,6 +166,8 @@ const uint8_t grpc_stats_table_3[102] = {
23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
const int grpc_stats_table_4[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
const uint8_t grpc_stats_table_5[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
@ -273,15 +322,161 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_0, 64));
}
const int grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64};
const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320};
const int *const grpc_stats_histo_bucket_boundaries[6] = {
void grpc_stats_inc_http2_send_initial_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_2,
64));
}
void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
}
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_2,
64));
}
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
}
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 64);
if (value < 3) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4625196817309499392ull) {
int bucket =
grpc_stats_table_5[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
_bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_4, 8));
}
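All of the generated grpc_stats_inc_* functions above share one trick: the sample is reinterpreted as an IEEE-754 double, and its top 16 bits (sign, 11 exponent bits, 4 mantissa bits) index a precomputed bucket table; 4623507967449235456ull, for instance, is exactly the bit pattern of 13.0. Because those top bits grow monotonically with the value, the table lookup lands in the right log-scale bucket without a binary search, with grpc_stats_histo_find_bucket_slow as the out-of-range fallback. A standalone demonstration (assumes the usual IEEE-754 double representation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reinterpret a double's bits; memcpy sidesteps aliasing questions. */
static uint64_t double_bits(double d) {
  uint64_t u;
  memcpy(&u, &d, sizeof(u));
  return u;
}

int main(void) {
  printf("bits(13.0) = %llu\n",
         (unsigned long long)double_bits(13.0)); /* 4623507967449235456 */
  for (int v = 13; v <= 104; v *= 2) {
    /* the top 16 bits increase monotonically with v */
    printf("value=%3d  bits>>48=0x%04x\n", v,
           (unsigned)(double_bits((double)v) >> 48));
  }
  return 0;
}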
const int grpc_stats_histo_buckets[11] = {64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 8};
const int grpc_stats_histo_start[11] = {0, 64, 128, 192, 256, 320,
384, 448, 512, 576, 640};
const int *const grpc_stats_histo_bucket_boundaries[11] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0};
void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_table_2, grpc_stats_table_2, grpc_stats_table_2,
grpc_stats_table_2, grpc_stats_table_4};
void (*const grpc_stats_inc_histogram[11])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_tcp_write_size,
grpc_stats_inc_tcp_write_iov_size,
grpc_stats_inc_tcp_read_size,
grpc_stats_inc_tcp_read_offer,
grpc_stats_inc_tcp_read_offer_iov_size,
grpc_stats_inc_http2_send_message_size};
grpc_stats_inc_http2_send_message_size,
grpc_stats_inc_http2_send_initial_metadata_per_write,
grpc_stats_inc_http2_send_message_per_write,
grpc_stats_inc_http2_send_trailing_metadata_per_write,
grpc_stats_inc_http2_send_flowctl_per_write,
grpc_stats_inc_server_cqs_checked};

@ -27,11 +27,17 @@
typedef enum {
GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED,
GRPC_STATS_COUNTER_SERVER_CALLS_CREATED,
GRPC_STATS_COUNTER_CQS_CREATED,
GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED,
GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED,
GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED,
GRPC_STATS_COUNTER_SYSCALL_POLL,
GRPC_STATS_COUNTER_SYSCALL_WAIT,
GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
GRPC_STATS_COUNTER_SYSCALL_WRITE,
GRPC_STATS_COUNTER_SYSCALL_READ,
GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED,
GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS,
GRPC_STATS_COUNTER_HTTP2_OP_BATCHES,
GRPC_STATS_COUNTER_HTTP2_OP_CANCEL,
GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA,
@ -40,16 +46,24 @@ typedef enum {
GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA,
GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES,
GRPC_STATS_COUNTER_HTTP2_PINGS_SENT,
GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN,
GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED,
GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED,
GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES,
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF,
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
GRPC_STATS_COUNTER_COUNT
} grpc_stats_counters;
extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
@ -61,6 +75,11 @@ typedef enum {
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
@ -78,12 +97,31 @@ typedef enum {
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 320,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_BUCKETS = 384
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 384,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 448,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 512,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 576,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 640,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
GRPC_STATS_HISTOGRAM_BUCKETS = 648
} grpc_stats_histogram_constants;
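The *_FIRST_SLOT constants are a running prefix sum of the per-histogram bucket counts, which is why adding five histograms (four with 64 buckets plus server_cqs_checked with 8) moves GRPC_STATS_HISTOGRAM_BUCKETS from 384 to 640 + 8 = 648. A quick check:

#include <stdio.h>

int main(void) {
  /* bucket counts in declaration order, matching the enum above */
  int buckets[11] = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 8};
  int slot = 0;
  for (int i = 0; i < 11; i++) {
    printf("histogram %2d: first slot %3d\n", i, slot); /* 0, 64, ... 640 */
    slot += buckets[i];
  }
  printf("GRPC_STATS_HISTOGRAM_BUCKETS = %d\n", slot); /* 648 */
  return 0;
}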
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED)
#define GRPC_STATS_INC_CQS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CQS_CREATED)
#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED)
#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED)
#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED)
#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL)
#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \
@ -94,6 +132,11 @@ typedef enum {
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \
@ -114,10 +157,18 @@ typedef enum {
#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
@ -130,9 +181,12 @@ typedef enum {
#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
@ -141,6 +195,13 @@ typedef enum {
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
@ -159,10 +220,30 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \
grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x);
extern const int grpc_stats_histo_buckets[6];
extern const int grpc_stats_histo_start[6];
extern const int *const grpc_stats_histo_bucket_boundaries[6];
extern void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx,
#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_initial_metadata_per_write(
grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
int x);
#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx), \
(int)(value))
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int x);
#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
extern const int grpc_stats_histo_buckets[11];
extern const int grpc_stats_histo_start[11];
extern const int *const grpc_stats_histo_bucket_boundaries[11];
extern void (*const grpc_stats_inc_histogram[11])(grpc_exec_ctx *exec_ctx,
int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */

@ -20,6 +20,14 @@
doc: Number of client side calls created by this process
- counter: server_calls_created
doc: Number of server side calls created by this process
- counter: cqs_created
doc: Number of completion queues created
- counter: client_channels_created
doc: Number of client channels created
- counter: client_subchannels_created
doc: Number of client subchannels created
- counter: server_channels_created
doc: Number of server channels created
# polling
- counter: syscall_poll
doc: Number of polling syscalls (epoll_wait, poll, etc) made by this process
@ -54,6 +62,10 @@
max: 1024
buckets: 64
doc: Number of byte segments offered to each syscall_read
- counter: tcp_backup_pollers_created
doc: Number of times a backup poller has been created (this can be expensive)
- counter: tcp_backup_poller_polls
doc: Number of polls performed on the backup poller
# chttp2
- counter: http2_op_batches
doc: Number of batches received by HTTP2 transport
@ -75,10 +87,36 @@
max: 16777216
buckets: 64
doc: Size of messages received by HTTP2 transport
- histogram: http2_send_initial_metadata_per_write
max: 1024
buckets: 64
doc: Number of streams whose initial metadata was written per TCP write
- histogram: http2_send_message_per_write
max: 1024
buckets: 64
doc: Number of streams whose payload was written per TCP write
- histogram: http2_send_trailing_metadata_per_write
max: 1024
buckets: 64
doc: Number of streams terminated per TCP write
- histogram: http2_send_flowctl_per_write
max: 1024
buckets: 64
doc: Number of flow control updates written per TCP write
- counter: http2_settings_writes
doc: Number of settings frames sent
- counter: http2_pings_sent
doc: Number of HTTP2 pings sent by process
- counter: http2_writes_begun
doc: Number of HTTP2 writes initiated
- counter: http2_writes_offloaded
doc: Number of HTTP2 writes offloaded to the executor from application threads
- counter: http2_writes_continued
doc: Number of HTTP2 writes that finished and found more data still needing
to be written
- counter: http2_partial_writes
doc: Number of HTTP2 writes issued knowing there was still more data to be
written (we cap the maximum write size passed to each syscall_write)
# combiner locks
- counter: combiner_locks_initiated
doc: Number of combiner lock entries by process
@ -90,11 +128,29 @@
- counter: combiner_locks_offloaded
doc: Number of combiner locks offloaded to different threads
# executor
- counter: executor_scheduled_items
doc: Number of closures scheduled against the executor (gRPC thread pool)
- counter: executor_scheduled_short_items
doc: Number of finite runtime closures scheduled against the executor
(gRPC thread pool)
- counter: executor_scheduled_long_items
doc: Number of potentially infinite runtime closures scheduled against the
executor (gRPC thread pool)
- counter: executor_scheduled_to_self
doc: Number of closures scheduled by the executor to the executor
- counter: executor_wakeup_initiated
doc: Number of thread wakeups initiated within the executor
- counter: executor_queue_drained
doc: Number of times an executor queue was drained
- counter: executor_push_retries
doc: Number of times we raced and were forced to retry pushing a closure to
the executor
# server
- counter: server_requested_calls
doc: How many calls were requested (not necessarily received) by the server
- histogram: server_cqs_checked
buckets: 8
max: 64
doc: How many completion queues were checked looking for a CQ that had
requested the incoming call
- counter: server_slowpath_requests_queued
doc: How many times the server slow path was taken (indicates too few
outstanding requests)
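
Each counter and histogram declared in this file corresponds to a GRPC_STATS_INC_* macro in the generated header shown earlier in this diff. A minimal, hypothetical call site for two of the new server stats (the helper and its parameters are illustrative, not part of the change):

#include "src/core/lib/debug/stats.h" /* wraps the generated stats_data.h */

/* Hypothetical helper: record that a call was requested and how many
   completion queues were scanned before a match was found. */
static void note_requested_call(grpc_exec_ctx *exec_ctx, size_t cqs_scanned) {
  GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx);
  GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, cqs_scanned); /* casts to int */
}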

@ -0,0 +1,35 @@
client_calls_created_per_iteration:FLOAT,
server_calls_created_per_iteration:FLOAT,
syscall_poll_per_iteration:FLOAT,
syscall_wait_per_iteration:FLOAT,
histogram_slow_lookups_per_iteration:FLOAT,
syscall_write_per_iteration:FLOAT,
syscall_read_per_iteration:FLOAT,
tcp_backup_pollers_created_per_iteration:FLOAT,
tcp_backup_poller_polls_per_iteration:FLOAT,
http2_op_batches_per_iteration:FLOAT,
http2_op_cancel_per_iteration:FLOAT,
http2_op_send_initial_metadata_per_iteration:FLOAT,
http2_op_send_message_per_iteration:FLOAT,
http2_op_send_trailing_metadata_per_iteration:FLOAT,
http2_op_recv_initial_metadata_per_iteration:FLOAT,
http2_op_recv_message_per_iteration:FLOAT,
http2_op_recv_trailing_metadata_per_iteration:FLOAT,
http2_settings_writes_per_iteration:FLOAT,
http2_pings_sent_per_iteration:FLOAT,
http2_writes_begun_per_iteration:FLOAT,
http2_writes_offloaded_per_iteration:FLOAT,
http2_writes_continued_per_iteration:FLOAT,
http2_partial_writes_per_iteration:FLOAT,
combiner_locks_initiated_per_iteration:FLOAT,
combiner_locks_scheduled_items_per_iteration:FLOAT,
combiner_locks_scheduled_final_items_per_iteration:FLOAT,
combiner_locks_offloaded_per_iteration:FLOAT,
executor_scheduled_short_items_per_iteration:FLOAT,
executor_scheduled_long_items_per_iteration:FLOAT,
executor_scheduled_to_self_per_iteration:FLOAT,
executor_wakeup_initiated_per_iteration:FLOAT,
executor_queue_drained_per_iteration:FLOAT,
executor_push_retries_per_iteration:FLOAT,
server_requested_calls_per_iteration:FLOAT,
server_slowpath_requests_queued_per_iteration:FLOAT

@ -98,7 +98,7 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
gpr_strvec_destroy(&out);
if (body_bytes) {
tmp = gpr_realloc(tmp, out_len + body_size);
tmp = (char *)gpr_realloc(tmp, out_len + body_size);
memcpy(tmp + out_len, body_bytes, body_size);
out_len += body_size;
}

@ -130,7 +130,7 @@ static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
internal_request *req = user_data;
internal_request *req = (internal_request *)user_data;
size_t i;
for (i = 0; i < req->incoming.count; i++) {
@ -159,7 +159,7 @@ static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
}
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
internal_request *req = arg;
internal_request *req = (internal_request *)arg;
if (error == GRPC_ERROR_NONE) {
on_written(exec_ctx, req);
} else {
@ -175,7 +175,7 @@ static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *ep) {
internal_request *req = arg;
internal_request *req = (internal_request *)arg;
if (!ep) {
next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@ -189,7 +189,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
internal_request *req = arg;
internal_request *req = (internal_request *)arg;
if (!req->ep) {
next_address(exec_ctx, req, GRPC_ERROR_REF(error));
@ -226,7 +226,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
internal_request *req = arg;
internal_request *req = (internal_request *)arg;
if (error != GRPC_ERROR_NONE) {
finish(exec_ctx, req, GRPC_ERROR_REF(error));
return;
@ -243,7 +243,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
const char *name, grpc_slice request_text) {
internal_request *req = gpr_malloc(sizeof(internal_request));
internal_request *req =
(internal_request *)gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
req->request_text = request_text;
grpc_http_parser_init(&req->parser, GRPC_HTTP_RESPONSE, response);
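
The change pattern in this hunk and the ones below is mechanical: C silently converts the void * returned by gpr_malloc/gpr_realloc, while C++ requires an explicit cast, so adding the casts keeps these files compilable under both. A minimal sketch of the rule (the buffer type is hypothetical; the allocator is the real gpr one):

#include <grpc/support/alloc.h>

typedef struct { int n; } buffer; /* hypothetical type for illustration */

static buffer *make_buffer(void) {
  /* buffer *b = gpr_malloc(sizeof(*b));    valid C, rejected by C++ */
  buffer *b = (buffer *)gpr_malloc(sizeof(*b)); /* valid in both */
  b->n = 0;
  return b;
}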

@ -28,7 +28,7 @@
grpc_tracer_flag grpc_http1_trace = GRPC_TRACER_INITIALIZER(false, "http1");
static char *buf2str(void *buffer, size_t length) {
char *out = gpr_malloc(length + 1);
char *out = (char *)gpr_malloc(length + 1);
memcpy(out, buffer, length);
out[length] = 0;
return out;
@ -197,7 +197,8 @@ static grpc_error *add_header(grpc_http_parser *parser) {
if (*hdr_count == parser->hdr_capacity) {
parser->hdr_capacity =
GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2);
*hdrs = gpr_realloc(*hdrs, parser->hdr_capacity * sizeof(**hdrs));
*hdrs = (grpc_http_header *)gpr_realloc(
*hdrs, parser->hdr_capacity * sizeof(**hdrs));
}
(*hdrs)[(*hdr_count)++] = hdr;
@ -255,7 +256,7 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
if (*body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
*body = gpr_realloc((void *)*body, parser->body_capacity);
*body = (char *)gpr_realloc((void *)*body, parser->body_capacity);
}
(*body)[*body_length] = (char)byte;
(*body_length)++;

@ -109,7 +109,7 @@ typedef struct {
static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
wrapped_closure *wc = arg;
wrapped_closure *wc = (wrapped_closure *)arg;
grpc_iomgr_cb_func cb = wc->cb;
void *cb_arg = wc->cb_arg;
gpr_free(wc);
@ -124,7 +124,7 @@ grpc_closure *grpc_closure_create(const char *file, int line,
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler) {
#endif
wrapped_closure *wc = gpr_malloc(sizeof(*wc));
wrapped_closure *wc = (wrapped_closure *)gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
#ifndef NDEBUG

@ -81,7 +81,8 @@ grpc_combiner *grpc_combiner_create(void) {
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler);
GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@ -355,7 +356,8 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error) {
combiner_finally_exec(exec_ctx, closure, GRPC_ERROR_REF(error));
combiner_finally_exec(exec_ctx, (grpc_closure *)closure,
GRPC_ERROR_REF(error));
}
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {

@ -211,7 +211,7 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
#ifndef NDEBUG
grpc_error *orig = *err;
#endif
*err = gpr_realloc(
*err = (grpc_error *)gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
@ -278,13 +278,13 @@ static void internal_set_time(grpc_error **err, grpc_error_times which,
memcpy((*err)->arena + slot, &value, sizeof(value));
}
static void internal_add_error(grpc_error **err, grpc_error *new) {
grpc_linked_error new_last = {new, UINT8_MAX};
static void internal_add_error(grpc_error **err, grpc_error *new_err) {
grpc_linked_error new_last = {new_err, UINT8_MAX};
uint8_t slot = get_placement(err, sizeof(grpc_linked_error));
if (slot == UINT8_MAX) {
gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err, new,
grpc_error_string(new));
GRPC_ERROR_UNREF(new);
gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err,
new_err, grpc_error_string(new_err));
GRPC_ERROR_UNREF(new_err);
return;
}
if ((*err)->first_err == UINT8_MAX) {
@ -321,8 +321,8 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
uint8_t initial_arena_capacity = (uint8_t)(
DEFAULT_ERROR_CAPACITY +
(uint8_t)(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY);
grpc_error *err =
gpr_malloc(sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
grpc_error *err = (grpc_error *)gpr_malloc(
sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL
return GRPC_ERROR_OOM;
}
@ -406,7 +406,8 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) {
new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
}
out = gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t));
out = (grpc_error *)gpr_malloc(sizeof(*in) +
new_arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
@ -431,10 +432,10 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
intptr_t value) {
GPR_TIMER_BEGIN("grpc_error_set_int", 0);
grpc_error *new = copy_error_and_unref(src);
internal_set_int(&new, which, value);
grpc_error *new_err = copy_error_and_unref(src);
internal_set_int(&new_err, which, value);
GPR_TIMER_END("grpc_error_set_int", 0);
return new;
return new_err;
}
typedef struct {
@ -476,10 +477,10 @@ bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
grpc_slice str) {
GPR_TIMER_BEGIN("grpc_error_set_str", 0);
grpc_error *new = copy_error_and_unref(src);
internal_set_str(&new, which, str);
grpc_error *new_err = copy_error_and_unref(src);
internal_set_str(&new_err, which, str);
GPR_TIMER_END("grpc_error_set_str", 0);
return new;
return new_err;
}
bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
@ -506,10 +507,10 @@ bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) {
GPR_TIMER_BEGIN("grpc_error_add_child", 0);
grpc_error *new = copy_error_and_unref(src);
internal_add_error(&new, child);
grpc_error *new_err = copy_error_and_unref(src);
internal_add_error(&new_err, child);
GPR_TIMER_END("grpc_error_add_child", 0);
return new;
return new_err;
}
static const char *no_error_string = "\"No Error\"";
@ -530,7 +531,7 @@ typedef struct {
static void append_chr(char c, char **s, size_t *sz, size_t *cap) {
if (*sz == *cap) {
*cap = GPR_MAX(8, 3 * *cap / 2);
*s = gpr_realloc(*s, *cap);
*s = (char *)gpr_realloc(*s, *cap);
}
(*s)[(*sz)++] = c;
}
@ -582,7 +583,8 @@ static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
static void append_kv(kv_pairs *kvs, char *key, char *value) {
if (kvs->num_kvs == kvs->cap_kvs) {
kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4);
kvs->kvs = gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
kvs->kvs =
(kv_pair *)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
}
kvs->kvs[kvs->num_kvs].key = key;
kvs->kvs[kvs->num_kvs].value = value;
@ -695,8 +697,8 @@ static char *errs_string(grpc_error *err) {
}
static int cmp_kvs(const void *a, const void *b) {
const kv_pair *ka = a;
const kv_pair *kb = b;
const kv_pair *ka = (const kv_pair *)a;
const kv_pair *kb = (const kv_pair *)b;
return strcmp(ka->key, kb->key);
}
@ -731,7 +733,7 @@ const char *grpc_error_string(grpc_error *err) {
void *p = (void *)gpr_atm_acq_load(&err->atomics.error_string);
if (p != NULL) {
GPR_TIMER_END("grpc_error_string", 0);
return p;
return (const char *)p;
}
kv_pairs kvs;

@ -130,9 +130,9 @@ static void fd_global_shutdown(void);
* Pollset Declarations
*/
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state_t;
static const char *kick_state_string(kick_state st) {
static const char *kick_state_string(kick_state_t st) {
switch (st) {
case UNKICKED:
return "UNKICKED";
@ -145,7 +145,7 @@ static const char *kick_state_string(kick_state st) {
}
struct grpc_pollset_worker {
kick_state kick_state;
kick_state_t kick_state;
int kick_state_mutator; // which line of code last changed kick state
bool initialized_cv;
grpc_pollset_worker *next;
@ -160,18 +160,18 @@ struct grpc_pollset_worker {
(worker)->kick_state_mutator = __LINE__; \
} while (false)
#define MAX_NEIGHBOURHOODS 1024
#define MAX_NEIGHBORHOODS 1024
typedef struct pollset_neighbourhood {
typedef struct pollset_neighborhood {
gpr_mu mu;
grpc_pollset *active_root;
char pad[GPR_CACHELINE_SIZE];
} pollset_neighbourhood;
} pollset_neighborhood;
struct grpc_pollset {
gpr_mu mu;
pollset_neighbourhood *neighbourhood;
bool reassigning_neighbourhood;
pollset_neighborhood *neighborhood;
bool reassigning_neighborhood;
grpc_pollset_worker *root_worker;
bool kicked_without_poller;
@ -260,7 +260,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
new_fd = gpr_malloc(sizeof(grpc_fd));
new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
new_fd->fd = fd;
@ -384,8 +384,8 @@ GPR_TLS_DECL(g_current_thread_worker);
/* The designated poller */
static gpr_atm g_active_poller;
static pollset_neighbourhood *g_neighbourhoods;
static size_t g_num_neighbourhoods;
static pollset_neighborhood *g_neighborhoods;
static size_t g_num_neighborhoods;
/* Return true if first in list */
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
@ -424,8 +424,8 @@ static worker_remove_result worker_remove(grpc_pollset *pollset,
}
}
static size_t choose_neighbourhood(void) {
return (size_t)gpr_cpu_current_cpu() % g_num_neighbourhoods;
static size_t choose_neighborhood(void) {
return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
}
static grpc_error *pollset_global_init(void) {
@ -441,11 +441,11 @@ static grpc_error *pollset_global_init(void) {
&ev) != 0) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
g_num_neighbourhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBOURHOODS);
g_neighbourhoods =
gpr_zalloc(sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
for (size_t i = 0; i < g_num_neighbourhoods; i++) {
gpr_mu_init(&g_neighbourhoods[i].mu);
g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
g_neighborhoods = (pollset_neighborhood *)gpr_zalloc(
sizeof(*g_neighborhoods) * g_num_neighborhoods);
for (size_t i = 0; i < g_num_neighborhoods; i++) {
gpr_mu_init(&g_neighborhoods[i].mu);
}
return GRPC_ERROR_NONE;
}
@ -454,17 +454,17 @@ static void pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_pollset);
gpr_tls_destroy(&g_current_thread_worker);
if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
for (size_t i = 0; i < g_num_neighbourhoods; i++) {
gpr_mu_destroy(&g_neighbourhoods[i].mu);
for (size_t i = 0; i < g_num_neighborhoods; i++) {
gpr_mu_destroy(&g_neighborhoods[i].mu);
}
gpr_free(g_neighbourhoods);
gpr_free(g_neighborhoods);
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
gpr_mu_init(&pollset->mu);
*mu = &pollset->mu;
pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
pollset->reassigning_neighbourhood = false;
pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
pollset->reassigning_neighborhood = false;
pollset->root_worker = NULL;
pollset->kicked_without_poller = false;
pollset->seen_inactive = true;
@ -477,26 +477,26 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
pollset_neighborhood *neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
retry_lock_neighbourhood:
gpr_mu_lock(&neighbourhood->mu);
retry_lock_neighborhood:
gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
if (pollset->neighbourhood != neighbourhood) {
gpr_mu_unlock(&neighbourhood->mu);
neighbourhood = pollset->neighbourhood;
if (pollset->neighborhood != neighborhood) {
gpr_mu_unlock(&neighborhood->mu);
neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
goto retry_lock_neighbourhood;
goto retry_lock_neighborhood;
}
pollset->prev->next = pollset->next;
pollset->next->prev = pollset->prev;
if (pollset == pollset->neighbourhood->active_root) {
pollset->neighbourhood->active_root =
if (pollset == pollset->neighborhood->active_root) {
pollset->neighborhood->active_root =
pollset->next == pollset ? NULL : pollset->next;
}
}
gpr_mu_unlock(&pollset->neighbourhood->mu);
gpr_mu_unlock(&pollset->neighborhood->mu);
}
gpr_mu_unlock(&pollset->mu);
gpr_mu_destroy(&pollset->mu);
@ -675,16 +675,16 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
// pollset has been observed to be inactive, we need to move back to the
// active list
bool is_reassigning = false;
if (!pollset->reassigning_neighbourhood) {
if (!pollset->reassigning_neighborhood) {
is_reassigning = true;
pollset->reassigning_neighbourhood = true;
pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
pollset->reassigning_neighborhood = true;
pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
}
pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
pollset_neighborhood *neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
// pollset unlocked: state may change (even worker->kick_state)
retry_lock_neighbourhood:
gpr_mu_lock(&neighbourhood->mu);
retry_lock_neighborhood:
gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
@ -692,17 +692,17 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
is_reassigning);
}
if (pollset->seen_inactive) {
if (neighbourhood != pollset->neighbourhood) {
gpr_mu_unlock(&neighbourhood->mu);
neighbourhood = pollset->neighbourhood;
if (neighborhood != pollset->neighborhood) {
gpr_mu_unlock(&neighborhood->mu);
neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
goto retry_lock_neighbourhood;
goto retry_lock_neighborhood;
}
/* In the brief time we released the pollset locks above, the worker MAY
have been kicked. In this case, the worker should get out of this
pollset ASAP and hence this should neither add the pollset to
neighbourhood nor mark the pollset as active.
neighborhood nor mark the pollset as active.
On a side note, the only way a worker's kick state could have changed
at this point is if it were "kicked specifically". Since the worker has
@ -710,25 +710,25 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
not visible in the "kick any" path yet */
if (worker->kick_state == UNKICKED) {
pollset->seen_inactive = false;
if (neighbourhood->active_root == NULL) {
neighbourhood->active_root = pollset->next = pollset->prev = pollset;
if (neighborhood->active_root == NULL) {
neighborhood->active_root = pollset->next = pollset->prev = pollset;
/* Make this the designated poller if there isn't one already */
if (worker->kick_state == UNKICKED &&
gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
SET_KICK_STATE(worker, DESIGNATED_POLLER);
}
} else {
pollset->next = neighbourhood->active_root;
pollset->next = neighborhood->active_root;
pollset->prev = pollset->next->prev;
pollset->next->prev = pollset->prev->next = pollset;
}
}
}
if (is_reassigning) {
GPR_ASSERT(pollset->reassigning_neighbourhood);
pollset->reassigning_neighbourhood = false;
GPR_ASSERT(pollset->reassigning_neighborhood);
pollset->reassigning_neighborhood = false;
}
gpr_mu_unlock(&neighbourhood->mu);
gpr_mu_unlock(&neighborhood->mu);
}
worker_insert(pollset, worker);
@ -763,7 +763,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
}
/* We release pollset lock in this function at a couple of places:
* 1. Briefly when assigning pollset to a neighbourhood
* 1. Briefly when assigning pollset to a neighborhood
* 2. When doing gpr_cv_wait()
* It is possible that 'kicked_without_poller' was set to true during (1) and
* 'shutting_down' is set to true during (1) or (2). If either of them is
@ -781,12 +781,12 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down;
}
static bool check_neighbourhood_for_available_poller(
pollset_neighbourhood *neighbourhood) {
GPR_TIMER_BEGIN("check_neighbourhood_for_available_poller", 0);
static bool check_neighborhood_for_available_poller(
pollset_neighborhood *neighborhood) {
GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
bool found_worker = false;
do {
grpc_pollset *inspect = neighbourhood->active_root;
grpc_pollset *inspect = neighborhood->active_root;
if (inspect == NULL) {
break;
}
@ -831,8 +831,8 @@ static bool check_neighbourhood_for_available_poller(
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
}
inspect->seen_inactive = true;
if (inspect == neighbourhood->active_root) {
neighbourhood->active_root =
if (inspect == neighborhood->active_root) {
neighborhood->active_root =
inspect->next == inspect ? NULL : inspect->next;
}
inspect->next->prev = inspect->prev;
@ -841,7 +841,7 @@ static bool check_neighbourhood_for_available_poller(
}
gpr_mu_unlock(&inspect->mu);
} while (!found_worker);
GPR_TIMER_END("check_neighbourhood_for_available_poller", 0);
GPR_TIMER_END("check_neighborhood_for_available_poller", 0);
return found_worker;
}
@ -873,32 +873,31 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
gpr_atm_no_barrier_store(&g_active_poller, 0);
size_t poller_neighbourhood_idx =
(size_t)(pollset->neighbourhood - g_neighbourhoods);
size_t poller_neighborhood_idx =
(size_t)(pollset->neighborhood - g_neighborhoods);
gpr_mu_unlock(&pollset->mu);
bool found_worker = false;
bool scan_state[MAX_NEIGHBOURHOODS];
for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
pollset_neighbourhood *neighbourhood =
&g_neighbourhoods[(poller_neighbourhood_idx + i) %
g_num_neighbourhoods];
if (gpr_mu_trylock(&neighbourhood->mu)) {
found_worker =
check_neighbourhood_for_available_poller(neighbourhood);
gpr_mu_unlock(&neighbourhood->mu);
bool scan_state[MAX_NEIGHBORHOODS];
for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
pollset_neighborhood *neighborhood =
&g_neighborhoods[(poller_neighborhood_idx + i) %
g_num_neighborhoods];
if (gpr_mu_trylock(&neighborhood->mu)) {
found_worker = check_neighborhood_for_available_poller(neighborhood);
gpr_mu_unlock(&neighborhood->mu);
scan_state[i] = true;
} else {
scan_state[i] = false;
}
}
for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
if (scan_state[i]) continue;
pollset_neighbourhood *neighbourhood =
&g_neighbourhoods[(poller_neighbourhood_idx + i) %
g_num_neighbourhoods];
gpr_mu_lock(&neighbourhood->mu);
found_worker = check_neighbourhood_for_available_poller(neighbourhood);
gpr_mu_unlock(&neighbourhood->mu);
pollset_neighborhood *neighborhood =
&g_neighborhoods[(poller_neighborhood_idx + i) %
g_num_neighborhoods];
gpr_mu_lock(&neighborhood->mu);
found_worker = check_neighborhood_for_available_poller(neighborhood);
gpr_mu_unlock(&neighborhood->mu);
}
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);

@ -97,12 +97,12 @@ static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg,
* pollable Declarations
*/
typedef struct pollable {
typedef struct pollable_t {
polling_obj po;
int epfd;
grpc_wakeup_fd wakeup;
grpc_pollset_worker *root_worker;
} pollable;
} pollable_t;
static const char *polling_obj_type_string(polling_obj_type t) {
switch (t) {
@ -122,7 +122,7 @@ static const char *polling_obj_type_string(polling_obj_type t) {
return "<invalid>";
}
static char *pollable_desc(pollable *p) {
static char *pollable_desc(pollable_t *p) {
char *out;
gpr_asprintf(&out, "type=%s group=%p epfd=%d wakeup=%d",
polling_obj_type_string(p->po.type), p->po.group, p->epfd,
@ -130,19 +130,19 @@ static char *pollable_desc(pollable *p) {
return out;
}
static pollable g_empty_pollable;
static pollable_t g_empty_pollable;
static void pollable_init(pollable *p, polling_obj_type type);
static void pollable_destroy(pollable *p);
static void pollable_init(pollable_t *p, polling_obj_type type);
static void pollable_destroy(pollable_t *p);
/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
static grpc_error *pollable_materialize(pollable *p);
static grpc_error *pollable_materialize(pollable_t *p);
/*******************************************************************************
* Fd Declarations
*/
struct grpc_fd {
pollable pollable;
pollable_t pollable;
int fd;
/* refst format:
bit 0 : 1=Active / 0=Orphaned
@ -193,15 +193,15 @@ struct grpc_pollset_worker {
pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
gpr_cv cv;
grpc_pollset *pollset;
pollable *pollable;
pollable_t *pollable;
};
#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5
struct grpc_pollset {
pollable pollable;
pollable *current_pollable;
pollable_t pollable;
pollable_t *current_pollable;
int kick_alls_pending;
bool kicked_without_poller;
grpc_closure *shutdown_closure;
@ -451,13 +451,13 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
* Pollable Definitions
*/
static void pollable_init(pollable *p, polling_obj_type type) {
static void pollable_init(pollable_t *p, polling_obj_type type) {
po_init(&p->po, type);
p->root_worker = NULL;
p->epfd = -1;
}
static void pollable_destroy(pollable *p) {
static void pollable_destroy(pollable_t *p) {
po_destroy(&p->po);
if (p->epfd != -1) {
close(p->epfd);
@ -466,7 +466,7 @@ static void pollable_destroy(pollable *p) {
}
/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
static grpc_error *pollable_materialize(pollable *p) {
static grpc_error *pollable_materialize(pollable_t *p) {
if (p->epfd == -1) {
int new_epfd = epoll_create1(EPOLL_CLOEXEC);
if (new_epfd < 0) {
@ -492,7 +492,7 @@ static grpc_error *pollable_materialize(pollable *p) {
}
/* pollable must be materialized */
static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
static grpc_error *pollable_add_fd(pollable_t *p, grpc_fd *fd) {
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "pollable_add_fd";
const int epfd = p->epfd;
@ -599,7 +599,7 @@ static void pollset_kick_all(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GRPC_ERROR_NONE);
}
static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p,
static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable_t *p,
grpc_pollset_worker *specific_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
@ -664,7 +664,7 @@ static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p,
/* p->po.mu must be held before calling this function */
static grpc_error *pollset_kick(grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
pollable *p = pollset->current_pollable;
pollable_t *p = pollset->current_pollable;
if (p != &pollset->pollable) {
gpr_mu_lock(&p->po.mu);
}
@ -744,7 +744,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) {
static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable_t *p) {
return p != &g_empty_pollable && p != &pollset->pollable;
}
@ -762,8 +762,9 @@ static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
append_error(&error, grpc_wakeup_fd_consume_wakeup(
(void *)((~(intptr_t)1) & (intptr_t)data_ptr)),
append_error(&error,
grpc_wakeup_fd_consume_wakeup(
(grpc_wakeup_fd *)((~(intptr_t)1) & (intptr_t)data_ptr)),
err_desc);
} else {
grpc_fd *fd = (grpc_fd *)data_ptr;
@ -800,7 +801,7 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
}
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollable *p, gpr_timespec now,
pollable_t *p, gpr_timespec now,
gpr_timespec deadline) {
int timeout = poll_deadline_to_millis_timeout(deadline, now);
@ -1028,7 +1029,7 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
"PS:%p add fd %p; transition pollable from empty to fd", pollset,
fd);
}
/* empty pollable --> single fd pollable */
/* empty pollable --> single fd pollable_t */
pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &fd->pollable;
if (!fd_locked) gpr_mu_lock(&fd->pollable.po.mu);

@ -989,6 +989,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
}
if (r < 0) {
if (errno != EINTR) {
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
@ -1009,6 +1013,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset);
}
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}
@ -1016,6 +1023,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}
fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
@ -1527,7 +1539,7 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
idx = FD_TO_IDX(fds[i].fd);
idx = GRPC_FD_TO_IDX(fds[i].fd);
fd_cvs[i].cv = &pollcv_cv;
fd_cvs[i].prev = NULL;
fd_cvs[i].next = g_cvfds.cvfds[idx].cvs;
@ -1590,8 +1602,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
remove_cvn(&g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs, &(fd_cvs[i]));
if (g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].is_set) {
remove_cvn(&g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].cvs, &(fd_cvs[i]));
if (g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].is_set) {
fds[i].revents = POLLIN;
if (res >= 0) res++;
}

@ -40,6 +40,7 @@ typedef struct {
grpc_closure_list elems;
size_t depth;
bool shutdown;
bool queued_long_job;
gpr_thd_id id;
} thread_state;
@ -50,6 +51,9 @@ static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
GPR_TLS_DECL(g_this_thread_state);
static grpc_tracer_flag executor_trace =
GRPC_TRACER_INITIALIZER(false, "executor");
static void executor_thread(void *arg);
static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
@ -59,6 +63,14 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
#else
gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
#endif
}
#ifndef NDEBUG
c->scheduled = false;
#endif
@ -66,6 +78,7 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
GRPC_ERROR_UNREF(error);
c = next;
n++;
grpc_exec_ctx_flush(exec_ctx);
}
return n;
@ -121,6 +134,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
}
void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
grpc_register_tracer(&executor_trace);
gpr_atm_no_barrier_store(&g_cur_threads, 0);
grpc_executor_set_threading(exec_ctx, true);
}
@ -138,12 +152,21 @@ static void executor_thread(void *arg) {
size_t subtract_depth = 0;
for (;;) {
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
(int)(ts - g_thread_state), subtract_depth);
}
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
ts->queued_long_job = false;
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
if (ts->shutdown) {
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
(int)(ts - g_thread_state));
}
gpr_mu_unlock(&ts->mu);
break;
}
@ -151,52 +174,128 @@ static void executor_thread(void *arg) {
grpc_closure_list exec = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
}
subtract_depth = run_closures(&exec_ctx, exec);
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
}
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx);
if (cur_thread_count == 0) {
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
return;
}
thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
if (ts == NULL) {
ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
grpc_error *error, bool is_short) {
bool retry_push;
if (is_short) {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
} else {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
}
gpr_mu_lock(&ts->mu);
if (grpc_closure_list_empty(ts->elems)) {
GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
gpr_cv_signal(&ts->cv);
GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
}
grpc_closure_list_append(&ts->elems, closure, error);
ts->depth++;
bool try_new_thread = ts->depth > MAX_DEPTH &&
cur_thread_count < g_max_threads && !ts->shutdown;
gpr_mu_unlock(&ts->mu);
if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
if (cur_thread_count < g_max_threads) {
gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
&g_thread_state[cur_thread_count], &opt);
do {
retry_push = false;
size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
if (cur_thread_count == 0) {
if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
closure, closure->file_created, closure->line_created);
#else
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
#endif
}
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
return;
}
gpr_spinlock_unlock(&g_adding_thread_lock);
}
thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
if (ts == NULL) {
ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
} else {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
}
thread_state *orig_ts = ts;
bool try_new_thread;
for (;;) {
if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
gpr_log(
GPR_DEBUG,
"EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
closure, is_short ? "short" : "long", closure->file_created,
closure->line_created, (int)(ts - g_thread_state));
#else
gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
closure, is_short ? "short" : "long",
(int)(ts - g_thread_state));
#endif
}
gpr_mu_lock(&ts->mu);
if (ts->queued_long_job) {
// if a long job is queued here, never queue anything else to this
// thread's queue (long jobs can take 'infinite' time and we must
// guarantee no starvation of the work behind them)
// ... so spin through the other queues and try again
gpr_mu_unlock(&ts->mu);
size_t idx = (size_t)(ts - g_thread_state);
ts = &g_thread_state[(idx + 1) % cur_thread_count];
if (ts == orig_ts) {
retry_push = true;
try_new_thread = true;
break;
}
continue;
}
if (grpc_closure_list_empty(ts->elems)) {
GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
gpr_cv_signal(&ts->cv);
}
grpc_closure_list_append(&ts->elems, closure, error);
ts->depth++;
try_new_thread = ts->depth > MAX_DEPTH &&
cur_thread_count < g_max_threads && !ts->shutdown;
if (!is_short) ts->queued_long_job = true;
gpr_mu_unlock(&ts->mu);
break;
}
if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
if (cur_thread_count < g_max_threads) {
gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
&g_thread_state[cur_thread_count], &opt);
}
gpr_spinlock_unlock(&g_adding_thread_lock);
}
if (retry_push) {
GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
}
} while (retry_push);
}
static const grpc_closure_scheduler_vtable executor_vtable = {
executor_push, executor_push, "executor"};
static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
executor_push(exec_ctx, closure, error, true);
}
static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
executor_push(exec_ctx, closure, error, false);
}
static const grpc_closure_scheduler_vtable executor_vtable_short = {
executor_push_short, executor_push_short, "executor"};
static grpc_closure_scheduler executor_scheduler_short = {
&executor_vtable_short};
static const grpc_closure_scheduler_vtable executor_vtable_long = {
executor_push_long, executor_push_long, "executor"};
static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};
grpc_closure_scheduler *grpc_executor_scheduler(
grpc_executor_job_length length) {
return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
: &executor_scheduler_long;
}

@ -21,6 +21,11 @@
#include "src/core/lib/iomgr/closure.h"
typedef enum {
GRPC_EXECUTOR_SHORT,
GRPC_EXECUTOR_LONG
} grpc_executor_job_length;
/** Initialize the global executor.
*
* This mechanism is meant to outsource work (grpc_closure instances) to a
@ -28,7 +33,7 @@
* non-blocking solution available. */
void grpc_executor_init(grpc_exec_ctx *exec_ctx);
extern grpc_closure_scheduler *grpc_executor_scheduler;
grpc_closure_scheduler *grpc_executor_scheduler(grpc_executor_job_length);
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx);
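
Tying the new API together: callers now request a scheduler by expected job length instead of using the old grpc_executor_scheduler global, as the resolver hunks below show. A hedged sketch (do_work and offload are hypothetical; the rule of thumb follows the stats docs earlier in this diff, SHORT for finite-runtime closures, LONG for potentially unbounded ones):

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/executor.h"

static void do_work(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);

/* Offload a finite-runtime closure to the executor's short-job scheduler;
   a long-running poller would pass GRPC_EXECUTOR_LONG instead. */
static void offload(grpc_exec_ctx *exec_ctx, grpc_closure *c, void *arg) {
  GRPC_CLOSURE_INIT(c, do_work, arg,
                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
  GRPC_CLOSURE_SCHED(exec_ctx, c, GRPC_ERROR_NONE);
}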

@ -25,7 +25,7 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set *pollset_set) {
grpc_polling_entity pollent;
pollent.pollent.pollset_set = pollset_set;
pollent.tag = POPS_POLLSET_SET;
pollent.tag = GRPC_POLLS_POLLSET_SET;
return pollent;
}
@ -33,12 +33,12 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset(
grpc_pollset *pollset) {
grpc_polling_entity pollent;
pollent.pollent.pollset = pollset;
pollent.tag = POPS_POLLSET;
pollent.tag = GRPC_POLLS_POLLSET;
return pollent;
}
grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
if (pollent->tag == POPS_POLLSET) {
if (pollent->tag == GRPC_POLLS_POLLSET) {
return pollent->pollent.pollset;
}
return NULL;
@ -46,23 +46,23 @@ grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
grpc_pollset_set *grpc_polling_entity_pollset_set(
grpc_polling_entity *pollent) {
if (pollent->tag == POPS_POLLSET_SET) {
if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
return pollent->pollent.pollset_set;
}
return NULL;
}
bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent) {
return pollent->tag == POPS_NONE;
return pollent->tag == GRPC_POLLS_NONE;
}
void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_pollset_set *pss_dst) {
if (pollent->tag == POPS_POLLSET) {
if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
} else if (pollent->tag == POPS_POLLSET_SET) {
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != NULL);
grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst,
pollent->pollent.pollset_set);
@ -75,10 +75,10 @@ void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_pollset_set *pss_dst) {
if (pollent->tag == POPS_POLLSET) {
if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
} else if (pollent->tag == POPS_POLLSET_SET) {
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != NULL);
grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst,
pollent->pollent.pollset_set);

@ -22,6 +22,12 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
typedef enum grpc_pollset_tag {
GRPC_POLLS_NONE,
GRPC_POLLS_POLLSET,
GRPC_POLLS_POLLSET_SET
} grpc_pollset_tag;
/* A grpc_polling_entity is a pollset-or-pollset_set container. It allows
* functions that accept a pollset XOR a pollset_set to do so through an
* abstract interface. No ownership is taken. */
@ -31,7 +37,7 @@ typedef struct grpc_polling_entity {
grpc_pollset *pollset;
grpc_pollset_set *pollset_set;
} pollent;
enum pops_tag { POPS_NONE, POPS_POLLSET, POPS_POLLSET_SET } tag;
grpc_pollset_tag tag;
} grpc_polling_entity;
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
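
A minimal sketch of the renamed tag enum in use, built only from functions shown in this diff (the checking helper itself is hypothetical):

#include "src/core/lib/iomgr/polling_entity.h"

static void check_entity_roundtrip(grpc_pollset *ps) {
  grpc_polling_entity pe = grpc_polling_entity_create_from_pollset(ps);
  GPR_ASSERT(pe.tag == GRPC_POLLS_POLLSET);
  GPR_ASSERT(grpc_polling_entity_pollset(&pe) == ps);
  GPR_ASSERT(grpc_polling_entity_pollset_set(&pe) == NULL); /* wrong tag */
}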

@ -177,7 +177,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_resolved_addresses **addrs) {
request *r = (request *)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler);
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;

@ -159,7 +159,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_resolved_addresses **addresses) {
request *r = gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler);
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;

@ -656,7 +656,7 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
return grpc_resource_quota_ref_internal(
channel_args->args[i].value.pointer.p);
(grpc_resource_quota *)channel_args->args[i].value.pointer.p);
} else {
gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
}
@ -666,12 +666,12 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
}
static void *rq_copy(void *rq) {
grpc_resource_quota_ref(rq);
grpc_resource_quota_ref((grpc_resource_quota *)rq);
return rq;
}
static void rq_destroy(grpc_exec_ctx *exec_ctx, void *rq) {
grpc_resource_quota_unref_internal(exec_ctx, rq);
grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota *)rq);
}
static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); }

@ -69,11 +69,11 @@ void grpc_socket_factory_unref(grpc_socket_factory *factory) {
}
static void *socket_factory_arg_copy(void *p) {
return grpc_socket_factory_ref(p);
return grpc_socket_factory_ref((grpc_socket_factory *)p);
}
static void socket_factory_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
grpc_socket_factory_unref(p);
grpc_socket_factory_unref((grpc_socket_factory *)p);
}
static int socket_factory_cmp(void *a, void *b) {

@ -60,11 +60,11 @@ void grpc_socket_mutator_unref(grpc_socket_mutator *mutator) {
}
static void *socket_mutator_arg_copy(void *p) {
return grpc_socket_mutator_ref(p);
return grpc_socket_mutator_ref((grpc_socket_mutator *)p);
}
static void socket_mutator_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
grpc_socket_mutator_unref(p);
grpc_socket_mutator_unref((grpc_socket_mutator *)p);
}
static int socket_mutator_cmp(void *a, void *b) {

@ -43,6 +43,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@ -90,8 +91,8 @@ typedef struct {
grpc_closure *release_fd_cb;
int *release_fd;
grpc_closure read_closure;
grpc_closure write_closure;
grpc_closure read_done_closure;
grpc_closure write_done_closure;
char *peer_string;
@ -99,6 +100,148 @@ typedef struct {
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
typedef struct backup_poller {
gpr_mu *pollset_mu;
grpc_closure run_poller;
} backup_poller;
#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset *)((b) + 1))
static gpr_atm g_uncovered_notifications_pending;
static gpr_atm g_backup_poller; /* backup_poller* */
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error);
static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error);
static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
void *arg /* grpc_tcp */,
grpc_error *error);
static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
grpc_error *error_ignored) {
backup_poller *p = (backup_poller *)bp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
}
grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p));
gpr_free(p);
}
static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
grpc_error *error_ignored) {
backup_poller *p = (backup_poller *)bp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
}
gpr_mu_lock(p->pollset_mu);
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec deadline =
gpr_time_add(now, gpr_time_from_seconds(10, GPR_TIMESPAN));
GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
GRPC_LOG_IF_ERROR("backup_poller:pollset_work",
grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL,
now, deadline));
gpr_mu_unlock(p->pollset_mu);
/* last "uncovered" notification is the ref that keeps us polling, if we get
* there try a cas to release it */
if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
gpr_mu_lock(p->pollset_mu);
bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
}
gpr_mu_unlock(p->pollset_mu);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
}
grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx));
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
}
GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE);
}
}
static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller);
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, (int)old_count,
(int)old_count - 1);
}
GPR_ASSERT(old_count != 1);
}
static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
backup_poller *p;
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", (int)old_count,
2 + (int)old_count);
}
if (old_count == 0) {
GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
p = (backup_poller *)gpr_malloc(sizeof(*p) + grpc_pollset_size());
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
}
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
GRPC_ERROR_NONE);
} else {
while ((p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller)) == NULL) {
// spin waiting for backup poller
}
}
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
}
grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd);
if (old_count != 0) {
drop_uncovered(exec_ctx, tcp);
}
}
static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
}
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
}
static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
}
cover_self(exec_ctx, tcp);
GRPC_CLOSURE_INIT(&tcp->write_done_closure,
tcp_drop_uncovered_then_handle_write, tcp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure);
}
static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
}
drop_uncovered(exec_ctx, (grpc_tcp *)arg);
tcp_handle_write(exec_ctx, arg, error);
}
static void add_to_estimate(grpc_tcp *tcp, size_t bytes) {
tcp->bytes_read_this_round += (double)bytes;
}
@ -214,6 +357,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
grpc_closure *cb = tcp->read_cb;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
@ -271,7 +415,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (errno == EAGAIN) {
finish_estimate(tcp);
/* We've consumed the edge, request a new one */
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
notify_on_read(exec_ctx, tcp);
} else {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
tcp->incoming_buffer);
@ -308,6 +452,10 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)tcpp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
@ -323,9 +471,15 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
}
grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
target_read_size, 1, tcp->incoming_buffer);
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
}
tcp_do_read(exec_ctx, tcp);
}
}
@ -334,6 +488,9 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
@ -357,9 +514,9 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = false;
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
notify_on_read(exec_ctx, tcp);
} else {
GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE);
}
}
@ -472,7 +629,7 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
notify_on_write(exec_ctx, tcp);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
@ -525,7 +682,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
notify_on_write(exec_ctx, tcp);
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
@ -602,7 +759,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
channel_args->args[i].value.pointer.p);
(grpc_resource_quota *)channel_args->args[i].value.pointer.p);
}
}
}
@ -631,10 +788,6 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
GRPC_CLOSURE_INIT(&tcp->read_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&tcp->write_closure, tcp_handle_write, tcp,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(

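The tcp_posix.c hunks above route all re-arming of the pollable fd through notify_on_read/notify_on_write helpers instead of calling grpc_fd_notify_on_read/grpc_fd_notify_on_write directly, and grpc_tcp_create no longer pre-initializes read_closure/write_closure once up front. The helper bodies are outside this diff; a minimal sketch of the read side, assuming the helper re-initializes the new read_done_closure and traces in the same style as the surrounding code:

static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
  /* Sketch only: the real helper body is not part of this diff. */
  if (GRPC_TRACER_ON(grpc_tcp_trace)) {
    gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
  }
  GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
}
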
@ -276,7 +276,7 @@ static void timer_thread(void *completed_thread_ptr) {
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
timer_main_loop(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
timer_thread_cleanup(completed_thread_ptr);
timer_thread_cleanup((completed_thread *)completed_thread_ptr);
}
static void start_threads(void) {

@ -118,7 +118,7 @@ static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) {
const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY);
if (arg) {
GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
return arg->value.pointer.p;
return (grpc_socket_factory *)arg->value.pointer.p;
}
}
return NULL;

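The casts added in this and many later hunks ((grpc_tcp *)arg, (grpc_socket_factory *)arg->value.pointer.p, (security_handshaker *)user_data, and so on) all close the same C/C++ dialect gap: C converts void * to any object-pointer type implicitly, while C++ rejects that conversion, so the explicit casts keep these .c files compilable under a C++ compiler. In miniature (the type name below is only illustrative):

typedef struct widget widget;

static widget *from_void(void *p) {
  /* return p;         -- accepted by C, a hard error under C++ */
  return (widget *)p;  /* accepted by both C and C++ */
}
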
@ -57,7 +57,7 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
g_cvfds.free_fds = g_cvfds.free_fds->next_free;
g_cvfds.cvfds[idx].cvs = NULL;
g_cvfds.cvfds[idx].is_set = 0;
fd_info->read_fd = IDX_TO_FD(idx);
fd_info->read_fd = GRPC_IDX_TO_FD(idx);
fd_info->write_fd = -1;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
@ -66,8 +66,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
cv_node* cvn;
gpr_mu_lock(&g_cvfds.mu);
g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 1;
cvn = g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs;
g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 1;
cvn = g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs;
while (cvn) {
gpr_cv_signal(cvn->cv);
cvn = cvn->next;
@ -78,7 +78,7 @@ static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
static grpc_error* cv_fd_consume(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 0;
g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 0;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
@ -89,9 +89,9 @@ static void cv_fd_destroy(grpc_wakeup_fd* fd_info) {
}
gpr_mu_lock(&g_cvfds.mu);
// Assert that there are no active pollers
GPR_ASSERT(!g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs);
g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
g_cvfds.free_fds = &g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)];
GPR_ASSERT(!g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs);
g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
g_cvfds.free_fds = &g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)];
gpr_mu_unlock(&g_cvfds.mu);
}

@ -37,8 +37,8 @@
#include "src/core/lib/iomgr/ev_posix.h"
#define FD_TO_IDX(fd) (-(fd)-1)
#define IDX_TO_FD(idx) (-(idx)-1)
#define GRPC_FD_TO_IDX(fd) (-(fd)-1)
#define GRPC_IDX_TO_FD(idx) (-(idx)-1)
typedef struct cv_node {
gpr_cv* cv;

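The renamed macros keep the existing encoding: a condition-variable wakeup fd is stored as a negative number, so it can never collide with a real kernel descriptor (which is always >= 0), and the mapping is its own inverse. A self-contained check:

#include <assert.h>

#define GRPC_FD_TO_IDX(fd) (-(fd)-1)
#define GRPC_IDX_TO_FD(idx) (-(idx)-1)

int main(void) {
  for (int idx = 0; idx < 16; idx++) {
    int fd = GRPC_IDX_TO_FD(idx);      /* idx 0 -> fd -1, idx 1 -> fd -2, ... */
    assert(fd < 0);                    /* never a valid kernel fd */
    assert(GRPC_FD_TO_IDX(fd) == idx); /* exact round-trip */
  }
  return 0;
}
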
@ -128,13 +128,11 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
}
static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
security_handshaker *h = arg;
gpr_mu_lock(&h->mu);
static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
security_handshaker *h, grpc_error *error) {
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
goto done;
return;
}
// Create zero-copy frame protector, if implemented.
tsi_zero_copy_grpc_protector *zero_copy_protector = NULL;
@ -146,7 +144,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
"Zero-copy frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
goto done;
return;
}
// Create frame protector if zero-copy frame protector is NULL.
tsi_frame_protector *protector = NULL;
@ -158,7 +156,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
"Frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
goto done;
return;
}
}
// Get unused bytes.
@ -192,7 +190,13 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
h->shutdown = true;
done:
}
static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
on_peer_checked_inner(exec_ctx, h, error);
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
}
@ -254,7 +258,7 @@ static grpc_error *on_handshake_next_done_locked(
static void on_handshake_next_done_grpc_wrapper(
tsi_result result, void *user_data, const unsigned char *bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) {
security_handshaker *h = user_data;
security_handshaker *h = (security_handshaker *)user_data;
// This callback will be invoked by TSI in a non-grpc thread, so it's
// safe to create our own exec_ctx here.
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@ -296,7 +300,7 @@ static grpc_error *do_handshaker_next_locked(
static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
security_handshaker *h = arg;
security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@ -313,7 +317,8 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
bytes_received_size += GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]);
}
if (bytes_received_size > h->handshake_buffer_size) {
h->handshake_buffer = gpr_realloc(h->handshake_buffer, bytes_received_size);
h->handshake_buffer =
(uint8_t *)gpr_realloc(h->handshake_buffer, bytes_received_size);
h->handshake_buffer_size = bytes_received_size;
}
size_t offset = 0;
@ -338,7 +343,7 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
security_handshaker *h = arg;
security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@ -415,14 +420,15 @@ static const grpc_handshaker_vtable security_handshaker_vtable = {
static grpc_handshaker *security_handshaker_create(
grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
grpc_security_connector *connector) {
security_handshaker *h = gpr_zalloc(sizeof(security_handshaker));
security_handshaker *h =
(security_handshaker *)gpr_zalloc(sizeof(security_handshaker));
grpc_handshaker_init(&security_handshaker_vtable, &h->base);
h->handshaker = handshaker;
h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
gpr_mu_init(&h->mu);
gpr_ref_init(&h->refs, 1);
h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
h->handshake_buffer = (uint8_t *)gpr_malloc(h->handshake_buffer_size);
GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
on_handshake_data_sent_to_peer, h,
grpc_schedule_on_exec_ctx);
@ -465,7 +471,7 @@ static const grpc_handshaker_vtable fail_handshaker_vtable = {
fail_handshaker_do_handshake};
static grpc_handshaker *fail_handshaker_create() {
grpc_handshaker *h = gpr_malloc(sizeof(*h));
grpc_handshaker *h = (grpc_handshaker *)gpr_malloc(sizeof(*h));
grpc_handshaker_init(&fail_handshaker_vtable, h);
return h;
}

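Besides the casts, the security_handshaker hunks restructure on_peer_checked: the mutex/unref bookkeeping stays in a thin wrapper while the body moves to on_peer_checked_inner, replacing every goto done with a plain return. The shape of that refactor, reduced to essentials (state is a stand-in type, not from this diff):

#include <grpc/support/sync.h>
#include "src/core/lib/iomgr/error.h"

typedef struct {
  gpr_mu mu;
} state;

static void work_locked(state *s, grpc_error *error) {
  if (error != GRPC_ERROR_NONE) return; /* was: goto done */
  /* ... body runs under s->mu ... */
}

static void on_event(void *arg, grpc_error *error) {
  state *s = (state *)arg;
  gpr_mu_lock(&s->mu);
  work_locked(s, error);
  gpr_mu_unlock(&s->mu); /* cleanup now appears exactly once, in one place */
}
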
@ -174,8 +174,8 @@ static const grpc_slice_refcount_vtable new_with_len_vtable = {
grpc_slice grpc_slice_new_with_len(void *p, size_t len,
void (*destroy)(void *, size_t)) {
grpc_slice slice;
new_with_len_slice_refcount *rc =
gpr_malloc(sizeof(new_with_len_slice_refcount));
new_with_len_slice_refcount *rc = (new_with_len_slice_refcount *)gpr_malloc(
sizeof(new_with_len_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.vtable = &new_with_len_vtable;
rc->rc.sub_refcount = &rc->rc;

@ -135,7 +135,7 @@ typedef struct batch_control {
typedef struct {
gpr_mu child_list_mu;
grpc_call *first_child;
} parent_call;
} parent_call_t;
typedef struct {
grpc_call *parent;
@ -144,7 +144,7 @@ typedef struct {
parent->mu */
grpc_call *sibling_next;
grpc_call *sibling_prev;
} child_call;
} child_call_t;
#define RECV_NONE ((gpr_atm)0)
#define RECV_INITIAL_METADATA_FIRST ((gpr_atm)1)
@ -157,8 +157,8 @@ struct grpc_call {
grpc_polling_entity pollent;
grpc_channel *channel;
gpr_timespec start_time;
/* parent_call* */ gpr_atm parent_call_atm;
child_call *child_call;
/* parent_call_t* */ gpr_atm parent_call_atm;
child_call_t *child_call;
/* client or server call */
bool is_client;
@ -293,32 +293,32 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl);
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
grpc_error *error, bool has_cancelled);
static void add_init_error(grpc_error **composite, grpc_error *new) {
if (new == GRPC_ERROR_NONE) return;
static void add_init_error(grpc_error **composite, grpc_error *new_err) {
if (new_err == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE)
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Call creation failed");
*composite = grpc_error_add_child(*composite, new);
*composite = grpc_error_add_child(*composite, new_err);
}
void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
return gpr_arena_alloc(call->arena, size);
}
static parent_call *get_or_create_parent_call(grpc_call *call) {
parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
static parent_call_t *get_or_create_parent_call(grpc_call *call) {
parent_call_t *p = (parent_call_t *)gpr_atm_acq_load(&call->parent_call_atm);
if (p == NULL) {
p = (parent_call *)gpr_arena_alloc(call->arena, sizeof(*p));
p = (parent_call_t *)gpr_arena_alloc(call->arena, sizeof(*p));
gpr_mu_init(&p->child_list_mu);
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) {
gpr_mu_destroy(&p->child_list_mu);
p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
p = (parent_call_t *)gpr_atm_acq_load(&call->parent_call_atm);
}
}
return p;
}
static parent_call *get_parent_call(grpc_call *call) {
return (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
static parent_call_t *get_parent_call(grpc_call *call) {
return (parent_call_t *)gpr_atm_acq_load(&call->parent_call_atm);
}
grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
@ -378,15 +378,15 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
bool immediately_cancel = false;
if (args->parent_call != NULL) {
child_call *cc = call->child_call =
gpr_arena_alloc(arena, sizeof(child_call));
child_call_t *cc = call->child_call =
(child_call_t *)gpr_arena_alloc(arena, sizeof(child_call_t));
call->child_call->parent = args->parent_call;
GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
GPR_ASSERT(call->is_client);
GPR_ASSERT(!args->parent_call->is_client);
parent_call *pc = get_or_create_parent_call(args->parent_call);
parent_call_t *pc = get_or_create_parent_call(args->parent_call);
gpr_mu_lock(&pc->child_list_mu);
@ -533,7 +533,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
if (c->receiving_stream != NULL) {
grpc_byte_stream_destroy(exec_ctx, c->receiving_stream);
}
parent_call *pc = get_parent_call(c);
parent_call_t *pc = get_parent_call(c);
if (pc != NULL) {
gpr_mu_destroy(&pc->child_list_mu);
}
@ -549,7 +549,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind");
}
get_final_status(call, set_status_value_directly, &c->final_info.final_status,
get_final_status(c, set_status_value_directly, &c->final_info.final_status,
NULL);
c->final_info.stats.latency =
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
@ -570,14 +570,14 @@ void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); }
void grpc_call_unref(grpc_call *c) {
if (!gpr_unref(&c->ext_ref)) return;
child_call *cc = c->child_call;
child_call_t *cc = c->child_call;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_unref", 0);
GRPC_API_TRACE("grpc_call_unref(c=%p)", 1, (c));
if (cc) {
parent_call *pc = get_parent_call(cc->parent);
parent_call_t *pc = get_parent_call(cc->parent);
gpr_mu_lock(&pc->child_list_mu);
if (c == pc->first_child) {
pc->first_child = cc->sibling_next;
@ -1309,7 +1309,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
/* propagate cancellation to any interested children */
gpr_atm_rel_store(&call->received_final_op_atm, 1);
parent_call *pc = get_parent_call(call);
parent_call_t *pc = get_parent_call(call);
if (pc != NULL) {
grpc_call *child;
gpr_mu_lock(&pc->child_list_mu);
@ -1345,7 +1345,8 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */
bctl->call = NULL;
GRPC_CLOSURE_RUN(exec_ctx, bctl->completion_data.notify_tag.tag, error);
GRPC_CLOSURE_RUN(
exec_ctx, (grpc_closure *)bctl->completion_data.notify_tag.tag, error);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@ -1474,7 +1475,7 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
* acq_load is in receiving_initial_metadata_ready() */
if (error != GRPC_ERROR_NONE || call->receiving_stream == NULL ||
!gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) {
process_data_after_md(exec_ctx, bctlp);
process_data_after_md(exec_ctx, bctl);
}
}
@ -1679,11 +1680,12 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (nops == 0) {
if (!is_notify_tag_closure) {
GPR_ASSERT(grpc_cq_begin_op(call->cq, notify_tag));
grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
free_no_op_completion, NULL,
gpr_malloc(sizeof(grpc_cq_completion)));
grpc_cq_end_op(
exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
free_no_op_completion, NULL,
(grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
} else {
GRPC_CLOSURE_SCHED(exec_ctx, notify_tag, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)notify_tag, GRPC_ERROR_NONE);
}
error = GRPC_CALL_OK;
goto done;

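The parent_call_t code above also shows why its atomics are paired the way they are. An annotated reading of the lazy-init idiom in get_or_create_parent_call:

parent_call_t *p = (parent_call_t *)gpr_atm_acq_load(&call->parent_call_atm);
if (p == NULL) {
  /* Speculatively build the object; losing the race below is cheap because
     the memory comes from the call's arena and is reclaimed with the call. */
  p = (parent_call_t *)gpr_arena_alloc(call->arena, sizeof(*p));
  gpr_mu_init(&p->child_list_mu);
  /* Release CAS: publishes the fully initialized object, so any thread that
     acquire-loads the pointer afterwards also sees the mutex init. */
  if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) {
    /* Another thread won; tear down our copy and take the winner's pointer. */
    gpr_mu_destroy(&p->child_list_mu);
    p = (parent_call_t *)gpr_atm_acq_load(&call->parent_call_atm);
  }
}
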
@ -27,6 +27,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
@ -77,6 +78,11 @@ grpc_channel *grpc_channel_create_with_builder(
grpc_channel_args *args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
grpc_channel *channel;
if (channel_stack_type == GRPC_SERVER_CHANNEL) {
GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx);
} else {
GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx);
}
grpc_error *error = grpc_channel_stack_builder_finish(
exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL,
(void **)&channel);

@ -26,6 +26,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
@ -420,6 +421,10 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
const cq_poller_vtable *poller_vtable =
&g_poller_vtable_by_poller_type[polling_type];
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_STATS_INC_CQS_CREATED(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
cq = (grpc_completion_queue *)gpr_zalloc(sizeof(grpc_completion_queue) +
vtable->data_size +
poller_vtable->size());
@ -575,12 +580,12 @@ static bool atm_inc_if_nonzero(gpr_atm *counter) {
}
static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag) {
cq_next_data *cqd = DATA_FROM_CQ(cq);
cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag) {
cq_pluck_data *cqd = DATA_FROM_CQ(cq);
cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
@ -625,7 +630,7 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
}
}
cq_next_data *cqd = DATA_FROM_CQ(cq);
cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
storage->tag = tag;
@ -686,7 +691,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
void *done_arg,
grpc_cq_completion *storage),
void *done_arg, grpc_cq_completion *storage) {
cq_pluck_data *cqd = DATA_FROM_CQ(cq);
cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
GPR_TIMER_BEGIN("cq_end_op_for_pluck", 0);
@ -769,7 +774,7 @@ typedef struct {
static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
cq_next_data *cqd = DATA_FROM_CQ(cq);
cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@ -820,7 +825,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
void *reserved) {
grpc_event ret;
gpr_timespec now;
cq_next_data *cqd = DATA_FROM_CQ(cq);
cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@ -953,7 +958,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
this function */
static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
cq_next_data *cqd = DATA_FROM_CQ(cq);
cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0);
@ -964,7 +969,7 @@ static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
cq_next_data *cqd = DATA_FROM_CQ(cq);
cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
* We call cq_finish_shutdown_next() below, that would call pollset shutdown.
@ -994,7 +999,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
static int add_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
cq_pluck_data *cqd = DATA_FROM_CQ(cq);
cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
return 0;
}
@ -1006,7 +1011,7 @@ static int add_plucker(grpc_completion_queue *cq, void *tag,
static void del_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
cq_pluck_data *cqd = DATA_FROM_CQ(cq);
cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
for (int i = 0; i < cqd->num_pluckers; i++) {
if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) {
cqd->num_pluckers--;
@ -1020,7 +1025,7 @@ static void del_plucker(grpc_completion_queue *cq, void *tag,
static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
cq_pluck_data *cqd = DATA_FROM_CQ(cq);
cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@ -1057,7 +1062,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
cq_pluck_data *cqd = DATA_FROM_CQ(cq);
cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@ -1181,7 +1186,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
cq_pluck_data *cqd = DATA_FROM_CQ(cq);
cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown));
@ -1195,7 +1200,7 @@ static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
* merging them is a bit tricky and probably not worth it */
static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
cq_pluck_data *cqd = DATA_FROM_CQ(cq);
cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
* We call cq_finish_shutdown_pluck() below, that would call pollset shutdown.

@ -36,6 +36,7 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/iomgr/timer_manager.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/alarm_internal.h"
@ -179,14 +180,16 @@ void grpc_shutdown(void) {
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
grpc_iomgr_shutdown(&exec_ctx);
gpr_timers_global_destroy();
grpc_tracer_shutdown();
grpc_executor_shutdown(&exec_ctx);
grpc_timer_manager_set_threading(false); // shutdown timer_manager thread
for (i = g_number_of_plugins; i >= 0; i--) {
if (g_all_of_the_plugins[i].destroy != NULL) {
g_all_of_the_plugins[i].destroy();
}
}
grpc_iomgr_shutdown(&exec_ctx);
gpr_timers_global_destroy();
grpc_tracer_shutdown();
grpc_mdctx_global_shutdown(&exec_ctx);
grpc_handshaker_factory_registry_shutdown(&exec_ctx);
grpc_slice_intern_shutdown();

@ -29,6 +29,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
@ -75,7 +76,7 @@ typedef struct requested_call {
grpc_call_details *details;
} batch;
struct {
registered_method *registered_method;
registered_method *method;
gpr_timespec *deadline;
grpc_byte_buffer **optional_payload;
} registered;
@ -144,7 +145,7 @@ struct call_data {
uint32_t recv_initial_metadata_flags;
grpc_metadata_array initial_metadata;
request_matcher *request_matcher;
request_matcher *matcher;
grpc_byte_buffer *payload;
grpc_closure got_initial_metadata;
@ -170,7 +171,7 @@ struct registered_method {
grpc_server_register_method_payload_handling payload_handling;
uint32_t flags;
/* one request matcher per method */
request_matcher request_matcher;
request_matcher matcher;
registered_method *next;
};
@ -333,7 +334,7 @@ static void request_matcher_destroy(request_matcher *rm) {
static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem,
grpc_error *error) {
grpc_call_unref(grpc_call_from_top_element(elem));
grpc_call_unref(grpc_call_from_top_element((grpc_call_element *)elem));
}
static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
@ -386,7 +387,7 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
if (server->started) {
request_matcher_destroy(&rm->request_matcher);
request_matcher_destroy(&rm->matcher);
}
gpr_free(rm->method);
gpr_free(rm->host);
@ -518,7 +519,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element *call_elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)call_elem->call_data;
channel_data *chand = (channel_data *)call_elem->channel_data;
request_matcher *rm = calld->request_matcher;
request_matcher *rm = calld->matcher;
grpc_server *server = rm->server;
if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
@ -540,6 +541,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
if (request_id == -1) {
continue;
} else {
GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
gpr_mu_lock(&calld->mu_state);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
@ -550,6 +552,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
}
/* no cq to take the request found: queue it on the slow list */
GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
gpr_mu_lock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
calld->state = PENDING;
@ -580,7 +583,7 @@ static void finish_start_new_rpc(
return;
}
calld->request_matcher = rm;
calld->matcher = rm;
switch (payload_handling) {
case GRPC_SRM_PAYLOAD_NONE:
@ -626,7 +629,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
&rm->server_registered_method->request_matcher,
&rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@ -644,7 +647,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
&rm->server_registered_method->request_matcher,
&rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@ -665,7 +668,7 @@ static int num_listeners(grpc_server *server) {
static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server,
grpc_cq_completion *completion) {
server_unref(exec_ctx, server);
server_unref(exec_ctx, (grpc_server *)server);
}
static int num_channels(grpc_server *server) {
@ -688,9 +691,9 @@ static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &server->unregistered_request_matcher);
for (registered_method *rm = server->registered_methods; rm;
rm = rm->next) {
request_matcher_kill_requests(exec_ctx, server, &rm->request_matcher,
request_matcher_kill_requests(exec_ctx, server, &rm->matcher,
GRPC_ERROR_REF(error));
request_matcher_zombify_all_pending_calls(exec_ctx, &rm->request_matcher);
request_matcher_zombify_all_pending_calls(exec_ctx, &rm->matcher);
}
}
GRPC_ERROR_UNREF(error);
@ -1111,15 +1114,17 @@ void grpc_server_start(grpc_server *server) {
request_matcher_init(&server->unregistered_request_matcher,
(size_t)server->max_requested_calls_per_cq, server);
for (registered_method *rm = server->registered_methods; rm; rm = rm->next) {
request_matcher_init(&rm->request_matcher,
request_matcher_init(&rm->matcher,
(size_t)server->max_requested_calls_per_cq, server);
}
server_ref(server);
server->starting = true;
GRPC_CLOSURE_SCHED(&exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server,
grpc_executor_scheduler),
GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(
&exec_ctx,
GRPC_CLOSURE_CREATE(start_listeners, server,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)),
GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
@ -1262,8 +1267,9 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* stay locked, and gather up some stuff to do */
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
if (server->shutdown_published) {
grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
NULL, gpr_malloc(sizeof(grpc_cq_completion)));
grpc_cq_end_op(
&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL,
(grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
gpr_mu_unlock(&server->mu_global);
goto done;
}
@ -1385,7 +1391,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
rm = &server->unregistered_request_matcher;
break;
case REGISTERED_CALL:
rm = &rc->data.registered.registered_method->request_matcher;
rm = &rc->data.registered.method->matcher;
break;
}
server->requested_calls_per_cq[cq_idx][request_id] = *rc;
@ -1430,6 +1436,7 @@ grpc_call_error grpc_server_request_call(
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
@ -1476,6 +1483,7 @@ grpc_call_error grpc_server_request_registered_call(
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
registered_method *rm = (registered_method *)rmp;
GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_registered_call("
"server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
@ -1512,7 +1520,7 @@ grpc_call_error grpc_server_request_registered_call(
rc->tag = tag;
rc->cq_bound_to_call = cq_bound_to_call;
rc->call = call;
rc->data.registered.registered_method = rm;
rc->data.registered.method = rm;
rc->data.registered.deadline = deadline;
rc->initial_metadata = initial_metadata;
rc->data.registered.optional_payload = optional_payload;

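The server.c renames (request_matcher request_matcher → matcher, registered_method *registered_method → method) remove members whose name shadows their own type. That shadowing is valid C, but when such a file is compiled as C++ the member hides the type name within the enclosing scope and breaks later uses of the type, which appears to be the motivation here:

typedef struct request_matcher { int pending; } request_matcher;

struct call_data_old {
  request_matcher *request_matcher; /* fine in C; in C++ this member hides
                                       the type name within the struct */
};

struct call_data_new {
  request_matcher *matcher; /* the renamed, C++-safe form */
};
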
@ -233,32 +233,32 @@ void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx,
void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
grpc_linked_mdelem *storage,
grpc_slice value) {
grpc_mdelem old = storage->md;
grpc_mdelem new = grpc_mdelem_from_slices(
exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old)), value);
storage->md = new;
GRPC_MDELEM_UNREF(exec_ctx, old);
grpc_mdelem old_mdelem = storage->md;
grpc_mdelem new_mdelem = grpc_mdelem_from_slices(
exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value);
storage->md = new_mdelem;
GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
}
grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx,
grpc_metadata_batch *batch,
grpc_linked_mdelem *storage,
grpc_mdelem new) {
grpc_mdelem new_mdelem) {
assert_valid_callouts(exec_ctx, batch);
grpc_error *error = GRPC_ERROR_NONE;
grpc_mdelem old = storage->md;
if (!grpc_slice_eq(GRPC_MDKEY(new), GRPC_MDKEY(old))) {
grpc_mdelem old_mdelem = storage->md;
if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) {
maybe_unlink_callout(batch, storage);
storage->md = new;
storage->md = new_mdelem;
error = maybe_link_callout(batch, storage);
if (error != GRPC_ERROR_NONE) {
unlink_storage(&batch->list, storage);
GRPC_MDELEM_UNREF(exec_ctx, storage->md);
}
} else {
storage->md = new;
storage->md = new_mdelem;
}
GRPC_MDELEM_UNREF(exec_ctx, old);
GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
assert_valid_callouts(exec_ctx, batch);
return error;
}
@ -302,12 +302,12 @@ grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx,
grpc_error *error = GRPC_ERROR_NONE;
while (l) {
grpc_linked_mdelem *next = l->next;
grpc_filtered_mdelem new = func(exec_ctx, user_data, l->md);
add_error(&error, new.error, composite_error_string);
if (GRPC_MDISNULL(new.md)) {
grpc_filtered_mdelem new_mdelem = func(exec_ctx, user_data, l->md);
add_error(&error, new_mdelem.error, composite_error_string);
if (GRPC_MDISNULL(new_mdelem.md)) {
grpc_metadata_batch_remove(exec_ctx, batch, l);
} else if (new.md.payload != l->md.payload) {
grpc_metadata_batch_substitute(exec_ctx, batch, l, new.md);
} else if (new_mdelem.md.payload != l->md.payload) {
grpc_metadata_batch_substitute(exec_ctx, batch, l, new_mdelem.md);
}
l = next;
}

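The old/new → old_mdelem/new_mdelem renames here (like new → new_err in call.c earlier) are another C++-compatibility fix: new is a keyword in C++, so a C parameter or local with that name cannot pass through a C++ compiler. Minimal demonstration:

/* Valid C, rejected by any C++ compiler because 'new' is a keyword: */
static int bump(int new) { return new + 1; }

/* Compiles as both C and C++: */
static int bump_fixed(int new_val) { return new_val + 1; }
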
@ -72,7 +72,8 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
cope with.
Throw this over to the executor (on a core-owned thread) and process it
there. */
refcount->destroy.scheduler = grpc_executor_scheduler;
refcount->destroy.scheduler =
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
}
@ -101,8 +102,9 @@ static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) {
grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount *refcount,
void *buffer, size_t length) {
slice_stream_ref(&refcount->slice_refcount);
return (grpc_slice){.refcount = &refcount->slice_refcount,
.data.refcounted = {.bytes = buffer, .length = length}};
return (grpc_slice){
.refcount = &refcount->slice_refcount,
.data.refcounted = {.bytes = (uint8_t *)buffer, .length = length}};
}
static const grpc_slice_refcount_vtable stream_ref_slice_vtable = {

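grpc_executor_scheduler changes from a scheduler constant to a function parameterized by the expected job length; only GRPC_EXECUTOR_SHORT appears in this diff, so the set of other length hints is not shown here. Usage after the change, as in the transport.c hunk above and the server.c start_listeners hunk earlier:

/* Destroy on a core-owned executor thread, hinting a short-running job. */
refcount->destroy.scheduler = grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
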
@ -86,6 +86,10 @@ void ChannelArguments::SetCompressionAlgorithm(
SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, algorithm);
}
void ChannelArguments::SetGrpclbFallbackTimeout(int fallback_timeout) {
SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, fallback_timeout);
}
void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
if (!mutator) {
return;

@ -26,6 +26,8 @@
"unit._credentials_test.CredentialsTest",
"unit._cython._cancel_many_calls_test.CancelManyCallsTest",
"unit._cython._channel_test.ChannelTest",
"unit._cython._no_messages_server_completion_queue_per_call_test.Test",
"unit._cython._no_messages_single_server_completion_queue_test.Test",
"unit._cython._read_some_but_not_all_responses_test.ReadSomeButNotAllResponsesTest",
"unit._cython.cygrpc_test.InsecureServerInsecureClient",
"unit._cython.cygrpc_test.SecureServerSecureClient",

@ -0,0 +1,118 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for tests of the Cython layer of gRPC Python."""
import collections
import threading
from grpc._cython import cygrpc
RPC_COUNT = 4000
INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
EMPTY_FLAGS = 0
INVOCATION_METADATA = cygrpc.Metadata(
(cygrpc.Metadatum(b'client-md-key', b'client-md-key'),
cygrpc.Metadatum(b'client-md-key-bin', b'\x00\x01' * 3000),))
INITIAL_METADATA = cygrpc.Metadata(
(cygrpc.Metadatum(b'server-initial-md-key', b'server-initial-md-value'),
cygrpc.Metadatum(b'server-initial-md-key-bin', b'\x00\x02' * 3000),))
TRAILING_METADATA = cygrpc.Metadata(
(cygrpc.Metadatum(b'server-trailing-md-key', b'server-trailing-md-value'),
cygrpc.Metadatum(b'server-trailing-md-key-bin', b'\x00\x03' * 3000),))
class QueueDriver(object):
def __init__(self, condition, completion_queue):
self._condition = condition
self._completion_queue = completion_queue
self._due = collections.defaultdict(int)
self._events = collections.defaultdict(list)
def add_due(self, tags):
if not self._due:
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events[event.tag].append(event)
self._due[event.tag] -= 1
self._condition.notify_all()
if self._due[event.tag] <= 0:
self._due.pop(event.tag)
if not self._due:
return
thread = threading.Thread(target=in_thread)
thread.start()
for tag in tags:
self._due[tag] += 1
def event_with_tag(self, tag):
with self._condition:
while True:
if self._events[tag]:
return self._events[tag].pop(0)
else:
self._condition.wait()
def execute_many_times(behavior):
return tuple(behavior() for _ in range(RPC_COUNT))
class OperationResult(
collections.namedtuple('OperationResult', (
'start_batch_result', 'completion_type', 'success',))):
pass
SUCCESSFUL_OPERATION_RESULT = OperationResult(
cygrpc.CallError.ok, cygrpc.CompletionType.operation_complete, True)
class RpcTest(object):
def setUp(self):
self.server_completion_queue = cygrpc.CompletionQueue()
self.server = cygrpc.Server(cygrpc.ChannelArgs([]))
self.server.register_completion_queue(self.server_completion_queue)
port = self.server.add_http2_port(b'[::]:0')
self.server.start()
self.channel = cygrpc.Channel('localhost:{}'.format(port).encode(),
cygrpc.ChannelArgs([]))
self._server_shutdown_tag = 'server_shutdown_tag'
self.server_condition = threading.Condition()
self.server_driver = QueueDriver(self.server_condition,
self.server_completion_queue)
with self.server_condition:
self.server_driver.add_due({
self._server_shutdown_tag,
})
self.client_condition = threading.Condition()
self.client_completion_queue = cygrpc.CompletionQueue()
self.client_driver = QueueDriver(self.client_condition,
self.client_completion_queue)
def tearDown(self):
self.server.shutdown(self.server_completion_queue,
self._server_shutdown_tag)
self.server.cancel_all_calls()

@ -0,0 +1,131 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test a corner-case at the level of the Cython API."""
import threading
import unittest
from grpc._cython import cygrpc
from tests.unit._cython import _common
class Test(_common.RpcTest, unittest.TestCase):
def _do_rpcs(self):
server_call_condition = threading.Condition()
server_call_completion_queue = cygrpc.CompletionQueue()
server_call_driver = _common.QueueDriver(server_call_condition,
server_call_completion_queue)
server_request_call_tag = 'server_request_call_tag'
server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
server_complete_rpc_tag = 'server_complete_rpc_tag'
with self.server_condition:
server_request_call_start_batch_result = self.server.request_call(
server_call_completion_queue, self.server_completion_queue,
server_request_call_tag)
self.server_driver.add_due({
server_request_call_tag,
})
client_call = self.channel.create_call(
None, _common.EMPTY_FLAGS, self.client_completion_queue,
b'/twinkies', None, _common.INFINITE_FUTURE)
client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
client_complete_rpc_tag = 'client_complete_rpc_tag'
with self.client_condition:
client_receive_initial_metadata_start_batch_result = (
client_call.start_client_batch(
cygrpc.Operations([
cygrpc.operation_receive_initial_metadata(
_common.EMPTY_FLAGS),
]), client_receive_initial_metadata_tag))
client_complete_rpc_start_batch_result = client_call.start_client_batch(
cygrpc.Operations([
cygrpc.operation_send_initial_metadata(
_common.INVOCATION_METADATA, _common.EMPTY_FLAGS),
cygrpc.operation_send_close_from_client(
_common.EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(
_common.EMPTY_FLAGS),
]), client_complete_rpc_tag)
self.client_driver.add_due({
client_receive_initial_metadata_tag,
client_complete_rpc_tag,
})
server_request_call_event = self.server_driver.event_with_tag(
server_request_call_tag)
with server_call_condition:
server_send_initial_metadata_start_batch_result = (
server_request_call_event.operation_call.start_server_batch([
cygrpc.operation_send_initial_metadata(
_common.INITIAL_METADATA, _common.EMPTY_FLAGS),
], server_send_initial_metadata_tag))
server_call_driver.add_due({
server_send_initial_metadata_tag,
})
server_send_initial_metadata_event = server_call_driver.event_with_tag(
server_send_initial_metadata_tag)
with server_call_condition:
server_complete_rpc_start_batch_result = (
server_request_call_event.operation_call.start_server_batch([
cygrpc.operation_receive_close_on_server(
_common.EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.TRAILING_METADATA, cygrpc.StatusCode.ok,
b'test details', _common.EMPTY_FLAGS),
], server_complete_rpc_tag))
server_call_driver.add_due({
server_complete_rpc_tag,
})
server_complete_rpc_event = server_call_driver.event_with_tag(
server_complete_rpc_tag)
client_receive_initial_metadata_event = self.client_driver.event_with_tag(
client_receive_initial_metadata_tag)
client_complete_rpc_event = self.client_driver.event_with_tag(
client_complete_rpc_tag)
return (_common.OperationResult(server_request_call_start_batch_result,
server_request_call_event.type,
server_request_call_event.success),
_common.OperationResult(
client_receive_initial_metadata_start_batch_result,
client_receive_initial_metadata_event.type,
client_receive_initial_metadata_event.success),
_common.OperationResult(client_complete_rpc_start_batch_result,
client_complete_rpc_event.type,
client_complete_rpc_event.success),
_common.OperationResult(
server_send_initial_metadata_start_batch_result,
server_send_initial_metadata_event.type,
server_send_initial_metadata_event.success),
_common.OperationResult(server_complete_rpc_start_batch_result,
server_complete_rpc_event.type,
server_complete_rpc_event.success),)
def test_rpcs(self):
expecteds = [(_common.SUCCESSFUL_OPERATION_RESULT,) *
5] * _common.RPC_COUNT
actuallys = _common.execute_many_times(self._do_rpcs)
self.assertSequenceEqual(expecteds, actuallys)
if __name__ == '__main__':
unittest.main(verbosity=2)

@ -0,0 +1,126 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test a corner-case at the level of the Cython API."""
import threading
import unittest
from grpc._cython import cygrpc
from tests.unit._cython import _common
class Test(_common.RpcTest, unittest.TestCase):
def _do_rpcs(self):
server_request_call_tag = 'server_request_call_tag'
server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
server_complete_rpc_tag = 'server_complete_rpc_tag'
with self.server_condition:
server_request_call_start_batch_result = self.server.request_call(
self.server_completion_queue, self.server_completion_queue,
server_request_call_tag)
self.server_driver.add_due({
server_request_call_tag,
})
client_call = self.channel.create_call(
None, _common.EMPTY_FLAGS, self.client_completion_queue,
b'/twinkies', None, _common.INFINITE_FUTURE)
client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
client_complete_rpc_tag = 'client_complete_rpc_tag'
with self.client_condition:
client_receive_initial_metadata_start_batch_result = (
client_call.start_client_batch(
cygrpc.Operations([
cygrpc.operation_receive_initial_metadata(
_common.EMPTY_FLAGS),
]), client_receive_initial_metadata_tag))
client_complete_rpc_start_batch_result = client_call.start_client_batch(
cygrpc.Operations([
cygrpc.operation_send_initial_metadata(
_common.INVOCATION_METADATA, _common.EMPTY_FLAGS),
cygrpc.operation_send_close_from_client(
_common.EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(
_common.EMPTY_FLAGS),
]), client_complete_rpc_tag)
self.client_driver.add_due({
client_receive_initial_metadata_tag,
client_complete_rpc_tag,
})
server_request_call_event = self.server_driver.event_with_tag(
server_request_call_tag)
with self.server_condition:
server_send_initial_metadata_start_batch_result = (
server_request_call_event.operation_call.start_server_batch([
cygrpc.operation_send_initial_metadata(
_common.INITIAL_METADATA, _common.EMPTY_FLAGS),
], server_send_initial_metadata_tag))
self.server_driver.add_due({
server_send_initial_metadata_tag,
})
server_send_initial_metadata_event = self.server_driver.event_with_tag(
server_send_initial_metadata_tag)
with self.server_condition:
server_complete_rpc_start_batch_result = (
server_request_call_event.operation_call.start_server_batch([
cygrpc.operation_receive_close_on_server(
_common.EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.TRAILING_METADATA, cygrpc.StatusCode.ok,
b'test details', _common.EMPTY_FLAGS),
], server_complete_rpc_tag))
self.server_driver.add_due({
server_complete_rpc_tag,
})
server_complete_rpc_event = self.server_driver.event_with_tag(
server_complete_rpc_tag)
client_receive_initial_metadata_event = self.client_driver.event_with_tag(
client_receive_initial_metadata_tag)
client_complete_rpc_event = self.client_driver.event_with_tag(
client_complete_rpc_tag)
return (_common.OperationResult(server_request_call_start_batch_result,
server_request_call_event.type,
server_request_call_event.success),
_common.OperationResult(
client_receive_initial_metadata_start_batch_result,
client_receive_initial_metadata_event.type,
client_receive_initial_metadata_event.success),
_common.OperationResult(client_complete_rpc_start_batch_result,
client_complete_rpc_event.type,
client_complete_rpc_event.success),
_common.OperationResult(
server_send_initial_metadata_start_batch_result,
server_send_initial_metadata_event.type,
server_send_initial_metadata_event.success),
_common.OperationResult(server_complete_rpc_start_batch_result,
server_complete_rpc_event.type,
server_complete_rpc_event.success),)
def test_rpcs(self):
expecteds = [(_common.SUCCESSFUL_OPERATION_RESULT,) *
5] * _common.RPC_COUNT
actuallys = _common.execute_many_times(self._do_rpcs)
self.assertSequenceEqual(expecteds, actuallys)
if __name__ == '__main__':
unittest.main(verbosity=2)

@ -168,10 +168,8 @@
set(gRPC_INSTALL FALSE)
endif()
elseif("<%text>${gRPC_ZLIB_PROVIDER}</%text>" STREQUAL "package")
find_package(ZLIB)
if(TARGET ZLIB::ZLIB)
set(_gRPC_ZLIB_LIBRARIES ZLIB::ZLIB)
endif()
find_package(ZLIB REQUIRED)
set(_gRPC_ZLIB_LIBRARIES <%text>${ZLIB_LIBRARIES}</%text>)
set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n find_package(ZLIB)\nendif()")
endif()
@ -179,7 +177,8 @@
if(NOT CARES_ROOT_DIR)
set(CARES_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/cares/cares)
endif()
set(CARES_STATIC ON)
set(CARES_SHARED OFF CACHE BOOL "disable shared library")
set(CARES_STATIC ON CACHE BOOL "link cares statically")
set(CARES_INCLUDE_DIR "<%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/cares/cares")
add_subdirectory(third_party/cares/cares)
if(TARGET c-ares)
@ -190,7 +189,7 @@
set(gRPC_INSTALL FALSE)
endif()
elseif("<%text>${gRPC_CARES_PROVIDER}</%text>" STREQUAL "package")
find_package(c-ares CONFIG)
find_package(c-ares REQUIRED CONFIG)
if(TARGET c-ares::cares)
set(_gRPC_CARES_LIBRARIES c-ares::cares)
endif()
@ -224,6 +223,7 @@
endif()
if(TARGET protoc)
set(_gRPC_PROTOBUF_PROTOC protoc)
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
endif()
else()
message(WARNING "gRPC_PROTOBUF_PROVIDER is \"module\" but PROTOBUF_ROOT_DIR is wrong")
@ -233,7 +233,7 @@
set(gRPC_INSTALL FALSE)
endif()
elseif("<%text>${gRPC_PROTOBUF_PROVIDER}</%text>" STREQUAL "package")
find_package(Protobuf <%text>${gRPC_PROTOBUF_PACKAGE_TYPE}</%text>)
find_package(Protobuf REQUIRED <%text>${gRPC_PROTOBUF_PACKAGE_TYPE}</%text>)
if(Protobuf_FOUND OR PROTOBUF_FOUND)
if(TARGET protobuf::<%text>${_gRPC_PROTOBUF_LIBRARY_NAME}</%text>)
set(_gRPC_PROTOBUF_LIBRARIES protobuf::<%text>${_gRPC_PROTOBUF_LIBRARY_NAME}</%text>)
@ -247,8 +247,10 @@
endif()
if(TARGET protobuf::protoc)
set(_gRPC_PROTOBUF_PROTOC protobuf::protoc)
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protobuf::protoc>)
else()
set(_gRPC_PROTOBUF_PROTOC <%text>${PROTOBUF_PROTOC_EXECUTABLE}</%text>)
set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE <%text>${PROTOBUF_PROTOC_EXECUTABLE}</%text>)
endif()
set(_gRPC_FIND_PROTOBUF "if(NOT Protobuf_FOUND AND NOT PROTOBUF_FOUND)\n find_package(Protobuf <%text>${gRPC_PROTOBUF_PACKAGE_TYPE}</%text>)\nendif()")
endif()
@ -276,11 +278,9 @@
set(gRPC_INSTALL FALSE)
endif()
elseif("<%text>${gRPC_SSL_PROVIDER}</%text>" STREQUAL "package")
find_package(OpenSSL)
if(TARGET OpenSSL::SSL)
set(_gRPC_SSL_LIBRARIES OpenSSL::SSL)
endif()
set(_gRPC_FIND_SSL "if(NOT OpenSSL_FOUND)\n find_package(OpenSSL)\nendif()")
find_package(OpenSSL REQUIRED)
set(_gRPC_SSL_LIBRARIES <%text>${OPENSSL_LIBRARIES}</%text>)
set(_gRPC_FIND_SSL "if(NOT OPENSSL_FOUND)\n find_package(OpenSSL)\nendif()")
endif()
if("<%text>${gRPC_GFLAGS_PROVIDER}</%text>" STREQUAL "module")
@ -373,7 +373,7 @@
<%text>"${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}_mock.grpc.pb.h"</%text>
<%text>"${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.pb.cc"</%text>
<%text>"${_gRPC_PROTO_GENS_DIR}/${RELFIL_WE}.pb.h"</%text>
COMMAND <%text>$<TARGET_FILE:${_gRPC_PROTOBUF_PROTOC}></%text>
COMMAND <%text>${_gRPC_PROTOBUF_PROTOC_EXECUTABLE}</%text>
ARGS --grpc_out=<%text>generate_mock_code=true:${_gRPC_PROTO_GENS_DIR}</%text>
--cpp_out=<%text>${_gRPC_PROTO_GENS_DIR}</%text>
--plugin=protoc-gen-grpc=$<TARGET_FILE:grpc_cpp_plugin>
@ -512,7 +512,7 @@
% endfor
target_include_directories(${lib.name}
PUBLIC <%text>$<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include></%text>
PUBLIC <%text>$<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include></%text>
PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>
PRIVATE <%text>${BORINGSSL_ROOT_DIR}</%text>/include
PRIVATE <%text>${PROTOBUF_ROOT_DIR}</%text>/src
@ -631,7 +631,7 @@
endif()
foreach(_config gRPCConfig gRPCConfigVersion)
configure_file(tools/cmake/<%text>${_config}</%text>.cmake.in
configure_file(cmake/<%text>${_config}</%text>.cmake.in
<%text>${_config}</%text>.cmake @ONLY)
install(FILES <%text>${CMAKE_CURRENT_BINARY_DIR}/${_config}</%text>.cmake
DESTINATION <%text>${gRPC_INSTALL_CMAKEDIR}</%text>

@ -45,18 +45,18 @@ typedef struct {
} thd_args;
static void thd_func(void *arg) {
thd_args *a = arg;
thd_args *a = (thd_args *)arg;
a->validator(a->server, a->cq, a->registered_method);
gpr_event_set(&a->done_thd, (void *)1);
}
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
thd_args *a = arg;
thd_args *a = (thd_args *)arg;
gpr_event_set(&a->done_write, (void *)1);
}
static void server_setup_transport(void *ts, grpc_transport *transport) {
thd_args *a = ts;
thd_args *a = (thd_args *)ts;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_server_setup_transport(&exec_ctx, a->server, transport, NULL,
grpc_server_get_channel_args(a->server));
@ -64,7 +64,7 @@ static void server_setup_transport(void *ts, grpc_transport *transport) {
}
static void read_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_event *read_done = arg;
gpr_event *read_done = (gpr_event *)arg;
gpr_event_set(read_done, (void *)1);
}

@ -136,7 +136,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_pollset *accepting_pollset,
grpc_tcp_server_acceptor *acceptor) {
gpr_free(acceptor);
test_tcp_server *server = arg;
test_tcp_server *server = (test_tcp_server *)arg;
GRPC_CLOSURE_INIT(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&on_write, done_write, NULL, grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&state.temp_incoming_buffer);
@ -237,7 +237,7 @@ typedef struct {
} poll_args;
static void actually_poll_server(void *arg) {
poll_args *pa = arg;
poll_args *pa = (poll_args *)arg;
gpr_timespec deadline = n_sec_deadline(10);
while (true) {
bool done = gpr_atm_acq_load(&state.done_atm) != 0;
@ -259,7 +259,7 @@ static void poll_server_until_read_done(test_tcp_server *server,
gpr_atm_rel_store(&state.done_atm, 0);
state.write_done = 0;
gpr_thd_id id;
poll_args *pa = gpr_malloc(sizeof(*pa));
poll_args *pa = (poll_args *)gpr_malloc(sizeof(*pa));
pa->server = server;
pa->signal_when_done = signal_when_done;
gpr_thd_new(&id, actually_poll_server, pa, NULL);

@ -80,7 +80,7 @@ grpc_end2end_proxy *grpc_end2end_proxy_create(const grpc_end2end_proxy_def *def,
int proxy_port = grpc_pick_unused_port_or_die();
int server_port = grpc_pick_unused_port_or_die();
grpc_end2end_proxy *proxy = gpr_malloc(sizeof(*proxy));
grpc_end2end_proxy *proxy = (grpc_end2end_proxy *)gpr_malloc(sizeof(*proxy));
memset(proxy, 0, sizeof(*proxy));
gpr_join_host_port(&proxy->proxy_port, "localhost", proxy_port);
@ -106,14 +106,14 @@ grpc_end2end_proxy *grpc_end2end_proxy_create(const grpc_end2end_proxy_def *def,
}
static closure *new_closure(void (*func)(void *arg, int success), void *arg) {
closure *cl = gpr_malloc(sizeof(*cl));
closure *cl = (closure *)gpr_malloc(sizeof(*cl));
cl->func = func;
cl->arg = arg;
return cl;
}
static void shutdown_complete(void *arg, int success) {
grpc_end2end_proxy *proxy = arg;
grpc_end2end_proxy *proxy = (grpc_end2end_proxy *)arg;
proxy->shutdown = 1;
grpc_completion_queue_shutdown(proxy->cq);
}
@ -146,12 +146,12 @@ static void unrefpc(proxy_call *pc, const char *reason) {
static void refpc(proxy_call *pc, const char *reason) { gpr_ref(&pc->refs); }
static void on_c2p_sent_initial_metadata(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
unrefpc(pc, "on_c2p_sent_initial_metadata");
}
static void on_p2s_recv_initial_metadata(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
grpc_op op;
grpc_call_error err;
@ -172,14 +172,14 @@ static void on_p2s_recv_initial_metadata(void *arg, int success) {
}
static void on_p2s_sent_initial_metadata(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
unrefpc(pc, "on_p2s_sent_initial_metadata");
}
static void on_c2p_recv_msg(void *arg, int success);
static void on_p2s_sent_message(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
grpc_op op;
grpc_call_error err;
@ -199,12 +199,12 @@ static void on_p2s_sent_message(void *arg, int success) {
}
static void on_p2s_sent_close(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
unrefpc(pc, "on_p2s_sent_close");
}
static void on_c2p_recv_msg(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
grpc_op op;
grpc_call_error err;
@ -235,7 +235,7 @@ static void on_c2p_recv_msg(void *arg, int success) {
static void on_p2s_recv_msg(void *arg, int success);
static void on_c2p_sent_message(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
grpc_op op;
grpc_call_error err;
@ -255,7 +255,7 @@ static void on_c2p_sent_message(void *arg, int success) {
}
static void on_p2s_recv_msg(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
grpc_op op;
grpc_call_error err;
@ -275,12 +275,12 @@ static void on_p2s_recv_msg(void *arg, int success) {
}
static void on_c2p_sent_status(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
unrefpc(pc, "on_c2p_sent_status");
}
static void on_p2s_status(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
grpc_op op;
grpc_call_error err;
@ -305,18 +305,18 @@ static void on_p2s_status(void *arg, int success) {
}
static void on_c2p_closed(void *arg, int success) {
proxy_call *pc = arg;
proxy_call *pc = (proxy_call *)arg;
unrefpc(pc, "on_c2p_closed");
}
static void on_new_call(void *arg, int success) {
grpc_end2end_proxy *proxy = arg;
grpc_end2end_proxy *proxy = (grpc_end2end_proxy *)arg;
grpc_call_error err;
if (success) {
grpc_op op;
memset(&op, 0, sizeof(op));
proxy_call *pc = gpr_malloc(sizeof(*pc));
proxy_call *pc = (proxy_call *)gpr_malloc(sizeof(*pc));
memset(pc, 0, sizeof(*pc));
pc->proxy = proxy;
GPR_SWAP(grpc_metadata_array, pc->c2p_initial_metadata,
@ -404,7 +404,7 @@ static void request_call(grpc_end2end_proxy *proxy) {
}
static void thread_main(void *arg) {
grpc_end2end_proxy *proxy = arg;
grpc_end2end_proxy *proxy = (grpc_end2end_proxy *)arg;
closure *cl;
for (;;) {
grpc_event ev = grpc_completion_queue_next(
@ -416,7 +416,7 @@ static void thread_main(void *arg) {
case GRPC_QUEUE_SHUTDOWN:
return;
case GRPC_OP_COMPLETE:
cl = ev.tag;
cl = (closure *)ev.tag;
cl->func(cl->arg, ev.success);
gpr_free(cl);
break;
