Merge github.com:grpc/grpc into cpp_bazelness

pull/9780/head
Craig Tiller 8 years ago
commit bf515b6f7f
  1. .pylintrc (3 changes)
  2. BUILD (30 changes)
  3. CMakeLists.txt (270 changes)
  4. Makefile (405 changes)
  5. Rakefile (6 changes)
  6. WORKSPACE (18 changes)
  7. binding.gyp (4 changes)
  8. build.yaml (124 changes)
  9. doc/service_config.md (162 changes)
  10. examples/ruby/grpc-demo.gemspec (2 changes)
  11. examples/ruby/without_protobuf/README.md (6 changes)
  12. examples/ruby/without_protobuf/echo_client.rb (49 changes)
  13. examples/ruby/without_protobuf/echo_server.rb (59 changes)
  14. examples/ruby/without_protobuf/echo_services_noprotobuf.rb (49 changes)
  15. grpc.gemspec (4 changes)
  16. include/grpc++/ext/health_check_service_server_builder_option.h (62 changes)
  17. include/grpc++/health_check_service_interface.h (67 changes)
  18. include/grpc++/impl/codegen/server_context.h (3 changes)
  19. include/grpc++/server.h (12 changes)
  20. include/grpc/impl/codegen/atm_gcc_atomic.h (35 changes)
  21. include/grpc/impl/codegen/port_platform.h (4 changes)
  22. setup.py (3 changes)
  23. src/core/ext/census/grpc_filter.c (4 changes)
  24. src/core/ext/client_channel/client_channel.c (81 changes)
  25. src/core/ext/client_channel/lb_policy.c (89 changes)
  26. src/core/ext/client_channel/lb_policy.h (85 changes)
  27. src/core/ext/client_channel/lb_policy_factory.h (1 change)
  28. src/core/ext/client_channel/resolver.c (25 changes)
  29. src/core/ext/client_channel/resolver.h (35 changes)
  30. src/core/ext/client_channel/resolver_factory.h (1 change)
  31. src/core/ext/client_channel/resolver_registry.c (4 changes)
  32. src/core/ext/client_channel/resolver_registry.h (3 changes)
  33. src/core/ext/client_channel/subchannel.c (2 changes)
  34. src/core/ext/lb_policy/grpclb/grpclb.c (170 changes)
  35. src/core/ext/lb_policy/pick_first/pick_first.c (181 changes)
  36. src/core/ext/lb_policy/round_robin/round_robin.c (100 changes)
  37. src/core/ext/load_reporting/load_reporting_filter.c (2 changes)
  38. src/core/ext/resolver/dns/native/dns_resolver.c (59 changes)
  39. src/core/ext/resolver/sockaddr/sockaddr_resolver.c (44 changes)
  40. src/core/ext/transport/chttp2/server/chttp2_server.c (57 changes)
  41. src/core/ext/transport/chttp2/transport/chttp2_transport.c (7 changes)
  42. src/core/lib/channel/channel_stack.c (15 changes)
  43. src/core/lib/channel/channel_stack.h (2 changes)
  44. src/core/lib/channel/compress_filter.c (2 changes)
  45. src/core/lib/channel/connected_channel.c (2 changes)
  46. src/core/lib/channel/deadline_filter.c (104 changes)
  47. src/core/lib/channel/deadline_filter.h (12 changes)
  48. src/core/lib/channel/handshaker.c (37 changes)
  49. src/core/lib/channel/handshaker.h (16 changes)
  50. src/core/lib/channel/http_client_filter.c (2 changes)
  51. src/core/lib/channel/http_server_filter.c (10 changes)
  52. src/core/lib/channel/message_size_filter.c (2 changes)
  53. src/core/lib/iomgr/error.h (4 changes)
  54. src/core/lib/iomgr/ev_epoll_linux.c (338 changes)
  55. src/core/lib/iomgr/resolve_address_uv.c (5 changes)
  56. src/core/lib/iomgr/socket_utils_windows.c (4 changes)
  57. src/core/lib/iomgr/tcp_client_uv.c (14 changes)
  58. src/core/lib/iomgr/timer_generic.c (18 changes)
  59. src/core/lib/iomgr/timer_generic.h (2 changes)
  60. src/core/lib/iomgr/timer_uv.c (12 changes)
  61. src/core/lib/iomgr/timer_uv.h (2 changes)
  62. src/core/lib/security/transport/client_auth_filter.c (2 changes)
  63. src/core/lib/security/transport/server_auth_filter.c (2 changes)
  64. src/core/lib/support/sync_posix.c (10 changes)
  65. src/core/lib/surface/call.c (145 changes)
  66. src/core/lib/surface/lame_client.c (2 changes)
  67. src/core/lib/surface/server.c (6 changes)
  68. src/core/plugin_registry/grpc_cronet_plugin_registry.c (4 changes)
  69. src/cpp/common/channel_filter.h (4 changes)
  70. src/cpp/server/health/default_health_check_service.cc (166 changes)
  71. src/cpp/server/health/default_health_check_service.h (78 changes)
  72. src/cpp/server/health/health.pb.c (24 changes)
  73. src/cpp/server/health/health.pb.h (72 changes)
  74. src/cpp/server/health/health_check_service.cc (33 changes)
  75. src/cpp/server/health/health_check_service_server_builder_option.cc (50 changes)
  76. src/cpp/server/server_cc.cc (37 changes)
  77. src/cpp/server/server_context.cc (13 changes)
  78. src/csharp/README.md (29 changes)
  79. src/node/ext/completion_queue_threadpool.cc (44 changes)
  80. src/node/src/client.js (9 changes)
  81. src/node/src/server.js (32 changes)
  82. src/node/test/surface_test.js (107 changes)
  83. src/objective-c/tests/CronetUnitTests/CronetUnitTests.m (82 changes)
  84. src/proto/grpc/health/v1/health.options (1 change)
  85. src/proto/grpc/reflection/v1alpha/BUILD (39 changes)
  86. src/python/grpcio/grpc/_channel.py (9 changes)
  87. src/python/grpcio/grpc/_common.py (4 changes)
  88. src/python/grpcio/grpc/_server.py (21 changes)
  89. src/python/grpcio_tests/tests/unit/_metadata_test.py (6 changes)
  90. src/ruby/ext/grpc/extconf.rb (1 change)
  91. src/ruby/lib/grpc/generic/client_stub.rb (2 changes)
  92. templates/Makefile.template (4 changes)
  93. templates/binding.gyp.template (4 changes)
  94. templates/grpc.gemspec.template (4 changes)
  95. templates/tools/run_tests/generated/tests.json.template (30 changes)
  96. test/core/channel/channel_stack_test.c (2 changes)
  97. test/core/client_channel/resolvers/dns_resolver_connectivity_test.c (39 changes)
  98. test/core/client_channel/resolvers/dns_resolver_test.c (12 changes)
  99. test/core/client_channel/resolvers/sockaddr_resolver_test.c (17 changes)
  100. test/core/end2end/fake_resolver.c (2 changes)
  Some files were not shown because too many files have changed in this diff.

.pylintrc (3 changes)

@ -30,6 +30,5 @@
#TODO: Enable too-many-nested-blocks
#TODO: Enable super-init-not-called
#TODO: Enable no-self-use
#TODO: Enable no-member
disable=missing-docstring,too-few-public-methods,too-many-arguments,no-init,duplicate-code,invalid-name,suppressed-message,locally-disabled,protected-access,no-name-in-module,unused-argument,fixme,wrong-import-order,no-value-for-parameter,cyclic-import,unused-variable,redefined-outer-name,unused-import,too-many-instance-attributes,broad-except,too-many-locals,too-many-lines,redefined-variable-type,next-method-called,import-error,useless-else-on-loop,too-many-return-statements,too-many-nested-blocks,super-init-not-called,no-self-use,no-member
disable=missing-docstring,too-few-public-methods,too-many-arguments,no-init,duplicate-code,invalid-name,suppressed-message,locally-disabled,protected-access,no-name-in-module,unused-argument,fixme,wrong-import-order,no-value-for-parameter,cyclic-import,unused-variable,redefined-outer-name,unused-import,too-many-instance-attributes,broad-except,too-many-locals,too-many-lines,redefined-variable-type,next-method-called,import-error,useless-else-on-loop,too-many-return-statements,too-many-nested-blocks,super-init-not-called,no-self-use

BUILD (30 changes)

@ -1076,8 +1076,8 @@ grpc_cc_library(
"src/core/ext/transport/cronet/transport/cronet_transport.c",
],
hdrs = [
"third_party/objective_c/Cronet/bidirectional_stream_c.h",
"src/core/ext/transport/cronet/transport/cronet_transport.h",
"third_party/objective_c/Cronet/bidirectional_stream_c.h",
],
language = "c",
public_hdrs = [
@ -1128,11 +1128,16 @@ grpc_cc_library(
"src/cpp/common/channel_filter.cc",
"src/cpp/common/completion_queue_cc.cc",
"src/cpp/common/core_codegen.cc",
"src/cpp/common/resource_quota_cc.cc",
"src/cpp/common/rpc_method.cc",
"src/cpp/common/version_cc.cc",
"src/cpp/server/async_generic_service.cc",
"src/cpp/server/create_default_thread_pool.cc",
"src/cpp/server/dynamic_thread_pool.cc",
"src/cpp/server/health/default_health_check_service.cc",
"src/cpp/server/health/health.pb.c",
"src/cpp/server/health/health_check_service.cc",
"src/cpp/server/health/health_check_service_server_builder_option.cc",
"src/cpp/server/server_builder.cc",
"src/cpp/server/server_cc.cc",
"src/cpp/server/server_context.cc",
@ -1149,6 +1154,8 @@ grpc_cc_library(
"src/cpp/client/create_channel_internal.h",
"src/cpp/common/channel_filter.h",
"src/cpp/server/dynamic_thread_pool.h",
"src/cpp/server/health/default_health_check_service.h",
"src/cpp/server/health/health.pb.h",
"src/cpp/server/thread_pool_interface.h",
"src/cpp/thread_manager/thread_manager.h",
],
@ -1160,9 +1167,11 @@ grpc_cc_library(
"include/grpc++/completion_queue.h",
"include/grpc++/create_channel.h",
"include/grpc++/create_channel_posix.h",
"include/grpc++/ext/health_check_service_server_builder_option.h",
"include/grpc++/generic/async_generic_service.h",
"include/grpc++/generic/generic_stub.h",
"include/grpc++/grpc++.h",
"include/grpc++/health_check_service_interface.h",
"include/grpc++/impl/call.h",
"include/grpc++/impl/client_unary_call.h",
"include/grpc++/impl/codegen/core_codegen.h",
@ -1290,3 +1299,22 @@ grpc_cc_library(
"grpc++_codegen_base",
],
)
grpc_cc_library(
name = "grpc++_reflection",
srcs = [
"src/cpp/ext/proto_server_reflection.cc",
"src/cpp/ext/proto_server_reflection_plugin.cc",
],
hdrs = [
"src/cpp/ext/proto_server_reflection.h",
],
language = "c++",
public_hdrs = [
"include/grpc++/ext/proto_server_reflection_plugin.h",
],
deps = [
":grpc++",
"//src/proto/grpc/reflection/v1alpha:reflection_proto",
],
)
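
The grpc_cc_library rule above packages the proto server reflection plugin as its own Bazel target. As a rough illustration of what linking against grpc++_reflection buys, here is a minimal C++ sketch (not part of this diff; it assumes the public plugin header and the grpc::reflection::InitProtoReflectionServerBuilderPlugin() entry point, with the listen address chosen arbitrarily):

#include <memory>

#include <grpc++/ext/proto_server_reflection_plugin.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

int main() {
  // Register the reflection plugin before the server is assembled so the
  // reflection service is added alongside the application services.
  grpc::reflection::InitProtoReflectionServerBuilderPlugin();

  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();
  return 0;
}

With the plugin registered before BuildAndStart(), tools such as grpc_cli (also built in this tree) can list and describe the server's services.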

CMakeLists.txt (270 changes)

@ -420,9 +420,6 @@ if(_gRPC_PLATFORM_LINUX)
add_dependencies(buildtests_c httpscli_test)
endif()
add_dependencies(buildtests_c init_test)
add_dependencies(buildtests_c internal_api_canary_iomgr_test)
add_dependencies(buildtests_c internal_api_canary_support_test)
add_dependencies(buildtests_c internal_api_canary_transport_test)
add_dependencies(buildtests_c invalid_call_argument_test)
add_dependencies(buildtests_c json_rewrite)
add_dependencies(buildtests_c json_rewrite_test)
@ -570,9 +567,18 @@ add_dependencies(buildtests_cxx alarm_cpp_test)
add_dependencies(buildtests_cxx async_end2end_test)
add_dependencies(buildtests_cxx auth_property_iterator_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx bm_call_create)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx bm_closure)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx bm_cq)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx bm_error)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx bm_fullstack)
endif()
add_dependencies(buildtests_cxx channel_arguments_test)
@ -597,6 +603,7 @@ add_dependencies(buildtests_cxx grpc_cli)
add_dependencies(buildtests_cxx grpc_tool_test)
add_dependencies(buildtests_cxx grpclb_api_test)
add_dependencies(buildtests_cxx grpclb_test)
add_dependencies(buildtests_cxx health_service_end2end_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx http2_client)
endif()
@ -1327,6 +1334,8 @@ add_library(grpc_cronet
src/core/lib/tsi/ssl_transport_security.c
src/core/lib/tsi/transport_security.c
src/core/ext/transport/chttp2/client/chttp2_connector.c
src/core/ext/load_reporting/load_reporting.c
src/core/ext/load_reporting/load_reporting_filter.c
src/core/plugin_registry/grpc_cronet_plugin_registry.c
)
@ -2061,6 +2070,10 @@ add_library(grpc++
src/cpp/server/async_generic_service.cc
src/cpp/server/create_default_thread_pool.cc
src/cpp/server/dynamic_thread_pool.cc
src/cpp/server/health/default_health_check_service.cc
src/cpp/server/health/health.pb.c
src/cpp/server/health/health_check_service.cc
src/cpp/server/health/health_check_service_server_builder_option.cc
src/cpp/server/server_builder.cc
src/cpp/server/server_cc.cc
src/cpp/server/server_context.cc
@ -2114,9 +2127,11 @@ foreach(_hdr
include/grpc++/completion_queue.h
include/grpc++/create_channel.h
include/grpc++/create_channel_posix.h
include/grpc++/ext/health_check_service_server_builder_option.h
include/grpc++/generic/async_generic_service.h
include/grpc++/generic/generic_stub.h
include/grpc++/grpc++.h
include/grpc++/health_check_service_interface.h
include/grpc++/impl/call.h
include/grpc++/impl/client_unary_call.h
include/grpc++/impl/codegen/core_codegen.h
@ -2241,6 +2256,10 @@ add_library(grpc++_cronet
src/cpp/server/async_generic_service.cc
src/cpp/server/create_default_thread_pool.cc
src/cpp/server/dynamic_thread_pool.cc
src/cpp/server/health/default_health_check_service.cc
src/cpp/server/health/health.pb.c
src/cpp/server/health/health_check_service.cc
src/cpp/server/health/health_check_service_server_builder_option.cc
src/cpp/server/server_builder.cc
src/cpp/server/server_cc.cc
src/cpp/server/server_context.cc
@ -2478,9 +2497,11 @@ foreach(_hdr
include/grpc++/completion_queue.h
include/grpc++/create_channel.h
include/grpc++/create_channel_posix.h
include/grpc++/ext/health_check_service_server_builder_option.h
include/grpc++/generic/async_generic_service.h
include/grpc++/generic/generic_stub.h
include/grpc++/grpc++.h
include/grpc++/health_check_service_interface.h
include/grpc++/impl/call.h
include/grpc++/impl/client_unary_call.h
include/grpc++/impl/codegen/core_codegen.h
@ -2751,6 +2772,10 @@ endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_library(grpc++_test_util
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/health/v1/health.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/health/v1/health.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/health/v1/health.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/health/v1/health.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo_messages.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo_messages.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo_messages.pb.h
@ -2783,6 +2808,9 @@ if(WIN32 AND MSVC)
endif()
endif()
protobuf_generate_grpc_cpp(
src/proto/grpc/health/v1/health.proto
)
protobuf_generate_grpc_cpp(
src/proto/grpc/testing/echo_messages.proto
)
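
protobuf_generate_grpc_cpp now also covers src/proto/grpc/health/v1/health.proto, so the test utilities get a generated grpc.health.v1 stub. Below is a hedged sketch of the kind of client-side probe the new health_service_end2end_test target performs over the wire; the generated header path and namespace follow the usual protoc layout for this proto, and the target address and service name are placeholders:

#include <iostream>
#include <memory>

#include <grpc++/client_context.h>
#include <grpc++/create_channel.h>
#include <grpc++/security/credentials.h>

#include "src/proto/grpc/health/v1/health.grpc.pb.h"

int main() {
  using grpc::health::v1::Health;
  using grpc::health::v1::HealthCheckRequest;
  using grpc::health::v1::HealthCheckResponse;

  auto channel = grpc::CreateChannel("localhost:50051",
                                     grpc::InsecureChannelCredentials());
  std::unique_ptr<Health::Stub> stub = Health::NewStub(channel);

  HealthCheckRequest request;
  request.set_service("helloworld.Greeter");  // empty string = overall server

  HealthCheckResponse response;
  grpc::ClientContext context;
  grpc::Status status = stub->Check(&context, request, &response);

  if (status.ok() && response.status() == HealthCheckResponse::SERVING) {
    std::cout << "serving" << std::endl;
  } else {
    std::cout << "not serving: " << status.error_message() << std::endl;
  }
  return 0;
}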
@ -2899,6 +2927,10 @@ add_library(grpc++_unsecure
src/cpp/server/async_generic_service.cc
src/cpp/server/create_default_thread_pool.cc
src/cpp/server/dynamic_thread_pool.cc
src/cpp/server/health/default_health_check_service.cc
src/cpp/server/health/health.pb.c
src/cpp/server/health/health_check_service.cc
src/cpp/server/health/health_check_service_server_builder_option.cc
src/cpp/server/server_builder.cc
src/cpp/server/server_cc.cc
src/cpp/server/server_context.cc
@ -2952,9 +2984,11 @@ foreach(_hdr
include/grpc++/completion_queue.h
include/grpc++/create_channel.h
include/grpc++/create_channel_posix.h
include/grpc++/ext/health_check_service_server_builder_option.h
include/grpc++/generic/async_generic_service.h
include/grpc++/generic/generic_stub.h
include/grpc++/grpc++.h
include/grpc++/health_check_service_interface.h
include/grpc++/impl/call.h
include/grpc++/impl/client_unary_call.h
include/grpc++/impl/codegen/core_codegen.h
@ -5915,87 +5949,6 @@ target_link_libraries(init_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(internal_api_canary_iomgr_test
test/core/internal_api_canaries/iomgr.c
)
target_include_directories(internal_api_canary_iomgr_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
)
target_link_libraries(internal_api_canary_iomgr_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(internal_api_canary_support_test
test/core/internal_api_canaries/iomgr.c
)
target_include_directories(internal_api_canary_support_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
)
target_link_libraries(internal_api_canary_support_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(internal_api_canary_transport_test
test/core/internal_api_canaries/iomgr.c
)
target_include_directories(internal_api_canary_transport_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
)
target_link_libraries(internal_api_canary_transport_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr_test_util
gpr
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(invalid_call_argument_test
test/core/end2end/invalid_call_argument_test.c
)
@ -7415,6 +7368,44 @@ endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(bm_call_create
test/cpp/microbenchmarks/bm_call_create.cc
third_party/googletest/src/gtest-all.cc
)
target_include_directories(bm_call_create
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/include
PRIVATE third_party/googletest
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(bm_call_create
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
benchmark
grpc++_test_util
grpc_test_util
grpc++
grpc
gpr_test_util
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif()
endif (gRPC_BUILD_TESTS)
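
bm_call_create is the first of several microbenchmark binaries (bm_closure, bm_cq, bm_error, bm_fullstack) wired in below; they all link against the bundled benchmark library. The snippet here is not the contents of test/cpp/microbenchmarks/bm_call_create.cc, only a minimal sketch of the Google Benchmark pattern those binaries follow: a timed loop driven by benchmark::State, registration via BENCHMARK, and an entry point from BENCHMARK_MAIN.

#include <string>

#include <benchmark/benchmark.h>

// Hypothetical micro-benchmark: measures the cost of copying a 1 KiB string.
static void BM_StringCopy(benchmark::State& state) {
  std::string src(1024, 'x');
  while (state.KeepRunning()) {
    std::string copy(src);
    benchmark::DoNotOptimize(copy);  // keep the copy from being elided
  }
}
BENCHMARK(BM_StringCopy);

BENCHMARK_MAIN();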
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(bm_closure
test/cpp/microbenchmarks/bm_closure.cc
third_party/googletest/src/gtest-all.cc
@ -7451,6 +7442,82 @@ endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(bm_cq
test/cpp/microbenchmarks/bm_cq.cc
third_party/googletest/src/gtest-all.cc
)
target_include_directories(bm_cq
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/include
PRIVATE third_party/googletest
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(bm_cq
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
benchmark
grpc++_test_util
grpc_test_util
grpc++
grpc
gpr_test_util
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(bm_error
test/cpp/microbenchmarks/bm_error.cc
third_party/googletest/src/gtest-all.cc
)
target_include_directories(bm_error
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/include
PRIVATE third_party/googletest
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(bm_error
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
benchmark
grpc++_test_util
grpc_test_util
grpc++
grpc
gpr_test_util
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(bm_fullstack
test/cpp/microbenchmarks/bm_fullstack.cc
third_party/googletest/src/gtest-all.cc
@ -8504,6 +8571,41 @@ target_link_libraries(grpclb_test
${_gRPC_GFLAGS_LIBRARIES}
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(health_service_end2end_test
test/cpp/end2end/health_service_end2end_test.cc
third_party/googletest/src/gtest-all.cc
)
target_include_directories(health_service_end2end_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${BORINGSSL_ROOT_DIR}/include
PRIVATE ${PROTOBUF_ROOT_DIR}/src
PRIVATE ${BENCHMARK_ROOT_DIR}/include
PRIVATE ${ZLIB_ROOT_DIR}
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
PRIVATE third_party/googletest/include
PRIVATE third_party/googletest
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(health_service_end2end_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_util
grpc_test_util
grpc++
grpc
gpr_test_util
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif (gRPC_BUILD_TESTS)
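
health_service_end2end_test exercises the default health checking service this change adds under src/cpp/server/health/. A minimal server-side sketch, assuming the grpc::EnableDefaultHealthCheckService() and Server::GetHealthCheckService() entry points declared by the new public headers (the port and the "helloworld.Greeter" service name are placeholders):

#include <memory>

#include <grpc++/health_check_service_interface.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

int main() {
  // Opt in before the server is built; the builder then wires in the
  // default implementation backed by the nanopb health.pb.c messages.
  grpc::EnableDefaultHealthCheckService(true);

  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();

  // Mark one service, then the whole server, as serving.
  grpc::HealthCheckServiceInterface* health = server->GetHealthCheckService();
  if (health != nullptr) {
    health->SetServingStatus("helloworld.Greeter", true);
    health->SetServingStatus(true);
  }

  server->Wait();
  return 0;
}

Calling EnableDefaultHealthCheckService() before BuildAndStart() appears to be the intended order, since the builder decides at construction time whether to instantiate the default service.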
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)

Makefile (405 changes)

@ -217,7 +217,7 @@ CC_counters = $(DEFAULT_CC)
CXX_counters = $(DEFAULT_CXX)
LD_counters = $(DEFAULT_CC)
LDXX_counters = $(DEFAULT_CXX)
CPPFLAGS_counters = -O2 -DGPR_MU_COUNTERS
CPPFLAGS_counters = -O2 -DGPR_LOW_LEVEL_COUNTERS
DEFINES_counters = NDEBUG
@ -980,9 +980,6 @@ httpcli_format_request_test: $(BINDIR)/$(CONFIG)/httpcli_format_request_test
httpcli_test: $(BINDIR)/$(CONFIG)/httpcli_test
httpscli_test: $(BINDIR)/$(CONFIG)/httpscli_test
init_test: $(BINDIR)/$(CONFIG)/init_test
internal_api_canary_iomgr_test: $(BINDIR)/$(CONFIG)/internal_api_canary_iomgr_test
internal_api_canary_support_test: $(BINDIR)/$(CONFIG)/internal_api_canary_support_test
internal_api_canary_transport_test: $(BINDIR)/$(CONFIG)/internal_api_canary_transport_test
invalid_call_argument_test: $(BINDIR)/$(CONFIG)/invalid_call_argument_test
json_fuzzer_test: $(BINDIR)/$(CONFIG)/json_fuzzer_test
json_rewrite: $(BINDIR)/$(CONFIG)/json_rewrite
@ -1043,7 +1040,10 @@ wakeup_fd_cv_test: $(BINDIR)/$(CONFIG)/wakeup_fd_cv_test
alarm_cpp_test: $(BINDIR)/$(CONFIG)/alarm_cpp_test
async_end2end_test: $(BINDIR)/$(CONFIG)/async_end2end_test
auth_property_iterator_test: $(BINDIR)/$(CONFIG)/auth_property_iterator_test
bm_call_create: $(BINDIR)/$(CONFIG)/bm_call_create
bm_closure: $(BINDIR)/$(CONFIG)/bm_closure
bm_cq: $(BINDIR)/$(CONFIG)/bm_cq
bm_error: $(BINDIR)/$(CONFIG)/bm_error
bm_fullstack: $(BINDIR)/$(CONFIG)/bm_fullstack
channel_arguments_test: $(BINDIR)/$(CONFIG)/channel_arguments_test
channel_filter_test: $(BINDIR)/$(CONFIG)/channel_filter_test
@ -1072,6 +1072,7 @@ grpc_ruby_plugin: $(BINDIR)/$(CONFIG)/grpc_ruby_plugin
grpc_tool_test: $(BINDIR)/$(CONFIG)/grpc_tool_test
grpclb_api_test: $(BINDIR)/$(CONFIG)/grpclb_api_test
grpclb_test: $(BINDIR)/$(CONFIG)/grpclb_test
health_service_end2end_test: $(BINDIR)/$(CONFIG)/health_service_end2end_test
http2_client: $(BINDIR)/$(CONFIG)/http2_client
hybrid_end2end_test: $(BINDIR)/$(CONFIG)/hybrid_end2end_test
interop_client: $(BINDIR)/$(CONFIG)/interop_client
@ -1335,9 +1336,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/httpcli_test \
$(BINDIR)/$(CONFIG)/httpscli_test \
$(BINDIR)/$(CONFIG)/init_test \
$(BINDIR)/$(CONFIG)/internal_api_canary_iomgr_test \
$(BINDIR)/$(CONFIG)/internal_api_canary_support_test \
$(BINDIR)/$(CONFIG)/internal_api_canary_transport_test \
$(BINDIR)/$(CONFIG)/invalid_call_argument_test \
$(BINDIR)/$(CONFIG)/json_rewrite \
$(BINDIR)/$(CONFIG)/json_rewrite_test \
@ -1450,7 +1448,10 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/alarm_cpp_test \
$(BINDIR)/$(CONFIG)/async_end2end_test \
$(BINDIR)/$(CONFIG)/auth_property_iterator_test \
$(BINDIR)/$(CONFIG)/bm_call_create \
$(BINDIR)/$(CONFIG)/bm_closure \
$(BINDIR)/$(CONFIG)/bm_cq \
$(BINDIR)/$(CONFIG)/bm_error \
$(BINDIR)/$(CONFIG)/bm_fullstack \
$(BINDIR)/$(CONFIG)/channel_arguments_test \
$(BINDIR)/$(CONFIG)/channel_filter_test \
@ -1472,6 +1473,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/grpc_tool_test \
$(BINDIR)/$(CONFIG)/grpclb_api_test \
$(BINDIR)/$(CONFIG)/grpclb_test \
$(BINDIR)/$(CONFIG)/health_service_end2end_test \
$(BINDIR)/$(CONFIG)/http2_client \
$(BINDIR)/$(CONFIG)/hybrid_end2end_test \
$(BINDIR)/$(CONFIG)/interop_client \
@ -1555,7 +1557,10 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/alarm_cpp_test \
$(BINDIR)/$(CONFIG)/async_end2end_test \
$(BINDIR)/$(CONFIG)/auth_property_iterator_test \
$(BINDIR)/$(CONFIG)/bm_call_create \
$(BINDIR)/$(CONFIG)/bm_closure \
$(BINDIR)/$(CONFIG)/bm_cq \
$(BINDIR)/$(CONFIG)/bm_error \
$(BINDIR)/$(CONFIG)/bm_fullstack \
$(BINDIR)/$(CONFIG)/channel_arguments_test \
$(BINDIR)/$(CONFIG)/channel_filter_test \
@ -1577,6 +1582,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/grpc_tool_test \
$(BINDIR)/$(CONFIG)/grpclb_api_test \
$(BINDIR)/$(CONFIG)/grpclb_test \
$(BINDIR)/$(CONFIG)/health_service_end2end_test \
$(BINDIR)/$(CONFIG)/http2_client \
$(BINDIR)/$(CONFIG)/hybrid_end2end_test \
$(BINDIR)/$(CONFIG)/interop_client \
@ -1860,8 +1866,6 @@ test_c: buildtests_c
flaky_test_c: buildtests_c
$(E) "[RUN] Testing lb_policies_test"
$(Q) $(BINDIR)/$(CONFIG)/lb_policies_test || ( echo test lb_policies_test failed ; exit 1 )
$(E) "[RUN] Testing mlog_test"
$(Q) $(BINDIR)/$(CONFIG)/mlog_test || ( echo test mlog_test failed ; exit 1 )
@ -1873,8 +1877,14 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/async_end2end_test || ( echo test async_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing auth_property_iterator_test"
$(Q) $(BINDIR)/$(CONFIG)/auth_property_iterator_test || ( echo test auth_property_iterator_test failed ; exit 1 )
$(E) "[RUN] Testing bm_call_create"
$(Q) $(BINDIR)/$(CONFIG)/bm_call_create || ( echo test bm_call_create failed ; exit 1 )
$(E) "[RUN] Testing bm_closure"
$(Q) $(BINDIR)/$(CONFIG)/bm_closure || ( echo test bm_closure failed ; exit 1 )
$(E) "[RUN] Testing bm_cq"
$(Q) $(BINDIR)/$(CONFIG)/bm_cq || ( echo test bm_cq failed ; exit 1 )
$(E) "[RUN] Testing bm_error"
$(Q) $(BINDIR)/$(CONFIG)/bm_error || ( echo test bm_error failed ; exit 1 )
$(E) "[RUN] Testing bm_fullstack"
$(Q) $(BINDIR)/$(CONFIG)/bm_fullstack || ( echo test bm_fullstack failed ; exit 1 )
$(E) "[RUN] Testing channel_arguments_test"
@ -1913,6 +1923,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/grpclb_api_test || ( echo test grpclb_api_test failed ; exit 1 )
$(E) "[RUN] Testing grpclb_test"
$(Q) $(BINDIR)/$(CONFIG)/grpclb_test || ( echo test grpclb_test failed ; exit 1 )
$(E) "[RUN] Testing health_service_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/health_service_end2end_test || ( echo test health_service_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing interop_test"
$(Q) $(BINDIR)/$(CONFIG)/interop_test || ( echo test interop_test failed ; exit 1 )
$(E) "[RUN] Testing mock_test"
@ -2061,6 +2073,21 @@ $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc:
$(Q) mkdir -p $(@D)
$(Q) echo "$(GRPCXX_UNSECURE_PC_FILE)" | tr , '\n' >$@
ifeq ($(NO_PROTOC),true)
$(GENDIR)/src/proto/grpc/health/v1/health.pb.cc: protoc_dep_error
$(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc: protoc_dep_error
else
$(GENDIR)/src/proto/grpc/health/v1/health.pb.cc: src/proto/grpc/health/v1/health.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS)
$(E) "[PROTOC] Generating protobuf CC file from $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --cpp_out=$(GENDIR) $<
$(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc: src/proto/grpc/health/v1/health.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS)
$(E) "[GRPC] Generating gRPC's protobuf service CC file from $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $<
endif
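
Besides the generated health.proto messages hooked up here for the tests, the diff also publishes include/grpc++/ext/health_check_service_server_builder_option.h, which lets a server install its own health implementation instead of the default (or pass nullptr to opt out). A rough sketch, assuming the option's constructor takes ownership of a HealthCheckServiceInterface via std::unique_ptr and that the interface exposes the two SetServingStatus overloads:

#include <map>
#include <memory>
#include <utility>

#include <grpc++/ext/health_check_service_server_builder_option.h>
#include <grpc++/health_check_service_interface.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

// Toy implementation that simply records per-service statuses in a map.
class InMemoryHealthCheck : public grpc::HealthCheckServiceInterface {
 public:
  void SetServingStatus(const grpc::string& service_name,
                        bool serving) override {
    statuses_[service_name] = serving;
  }
  void SetServingStatus(bool serving) override {
    for (auto& entry : statuses_) entry.second = serving;
  }

 private:
  std::map<grpc::string, bool> statuses_;
};

int main() {
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  // Hand the custom implementation to the builder through the new option.
  builder.SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
      new grpc::HealthCheckServiceServerBuilderOption(
          std::unique_ptr<grpc::HealthCheckServiceInterface>(
              new InMemoryHealthCheck))));
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();
  return 0;
}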
ifeq ($(NO_PROTOC),true)
$(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc: protoc_dep_error
$(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc: protoc_dep_error
@ -2609,15 +2636,15 @@ ifeq ($(SYSTEM),MINGW32)
$(LIBDIR)/$(CONFIG)/gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGPR_OBJS) $(ZLIB_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/gpr$(SHARED_VERSION_CORE).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE)-dll.a -o $(LIBDIR)/$(CONFIG)/gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/gpr$(SHARED_VERSION_CORE).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE)-dll.a -o $(LIBDIR)/$(CONFIG)/gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGPR_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGPR_OBJS) $(ZLIB_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGPR_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.3 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.3 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGPR_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBS)
$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).so.3
$(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).so
endif
@ -2942,15 +2969,15 @@ ifeq ($(SYSTEM),MINGW32)
$(LIBDIR)/$(CONFIG)/grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGRPC_OBJS) $(ZLIB_DEP) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc$(SHARED_VERSION_CORE).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc$(SHARED_VERSION_CORE).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGRPC_OBJS) $(ZLIB_DEP) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBS)
$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).so.3
$(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).so
endif
@ -3160,6 +3187,8 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/tsi/ssl_transport_security.c \
src/core/lib/tsi/transport_security.c \
src/core/ext/transport/chttp2/client/chttp2_connector.c \
src/core/ext/load_reporting/load_reporting.c \
src/core/ext/load_reporting/load_reporting_filter.c \
src/core/plugin_registry/grpc_cronet_plugin_registry.c \
PUBLIC_HEADERS_C += \
@ -3224,15 +3253,15 @@ ifeq ($(SYSTEM),MINGW32)
$(LIBDIR)/$(CONFIG)/grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGRPC_CRONET_OBJS) $(ZLIB_DEP) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc_cronet$(SHARED_VERSION_CORE).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc_cronet$(SHARED_VERSION_CORE).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_CRONET_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGRPC_CRONET_OBJS) $(ZLIB_DEP) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_CRONET_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_CRONET_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBS)
$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).so.3
$(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).so
endif
@ -3743,15 +3772,15 @@ ifeq ($(SYSTEM),MINGW32)
$(LIBDIR)/$(CONFIG)/grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGRPC_UNSECURE_OBJS) $(ZLIB_DEP) $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc_unsecure$(SHARED_VERSION_CORE).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc_unsecure$(SHARED_VERSION_CORE).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGRPC_UNSECURE_OBJS) $(ZLIB_DEP) $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) $(LDLIBS)
$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).so.3
$(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).so
endif
@ -3866,6 +3895,10 @@ LIBGRPC++_SRC = \
src/cpp/server/async_generic_service.cc \
src/cpp/server/create_default_thread_pool.cc \
src/cpp/server/dynamic_thread_pool.cc \
src/cpp/server/health/default_health_check_service.cc \
src/cpp/server/health/health.pb.c \
src/cpp/server/health/health_check_service.cc \
src/cpp/server/health/health_check_service_server_builder_option.cc \
src/cpp/server/server_builder.cc \
src/cpp/server/server_cc.cc \
src/cpp/server/server_context.cc \
@ -3886,9 +3919,11 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/completion_queue.h \
include/grpc++/create_channel.h \
include/grpc++/create_channel_posix.h \
include/grpc++/ext/health_check_service_server_builder_option.h \
include/grpc++/generic/async_generic_service.h \
include/grpc++/generic/generic_stub.h \
include/grpc++/grpc++.h \
include/grpc++/health_check_service_interface.h \
include/grpc++/impl/call.h \
include/grpc++/impl/client_unary_call.h \
include/grpc++/impl/codegen/core_codegen.h \
@ -4012,15 +4047,15 @@ ifeq ($(SYSTEM),MINGW32)
$(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP): $(LIBGRPC++_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION_CPP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc$(SHARED_VERSION_CORE)-dll
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION_CPP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgrpc$(SHARED_VERSION_CORE)-dll
else
$(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP): $(LIBGRPC++_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgrpc.$(SHARED_EXT_CORE) $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgrpc
else
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgrpc
$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP).so
endif
@ -4059,6 +4094,10 @@ LIBGRPC++_CRONET_SRC = \
src/cpp/server/async_generic_service.cc \
src/cpp/server/create_default_thread_pool.cc \
src/cpp/server/dynamic_thread_pool.cc \
src/cpp/server/health/default_health_check_service.cc \
src/cpp/server/health/health.pb.c \
src/cpp/server/health/health_check_service.cc \
src/cpp/server/health/health_check_service_server_builder_option.cc \
src/cpp/server/server_builder.cc \
src/cpp/server/server_cc.cc \
src/cpp/server/server_context.cc \
@ -4262,9 +4301,11 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/completion_queue.h \
include/grpc++/create_channel.h \
include/grpc++/create_channel_posix.h \
include/grpc++/ext/health_check_service_server_builder_option.h \
include/grpc++/generic/async_generic_service.h \
include/grpc++/generic/generic_stub.h \
include/grpc++/grpc++.h \
include/grpc++/health_check_service_interface.h \
include/grpc++/impl/call.h \
include/grpc++/impl/client_unary_call.h \
include/grpc++/impl/codegen/core_codegen.h \
@ -4397,15 +4438,15 @@ ifeq ($(SYSTEM),MINGW32)
$(LIBDIR)/$(CONFIG)/grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP): $(LIBGRPC++_CRONET_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_cronet$(SHARED_VERSION_CPP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_CRONET_OBJS) $(LDLIBS) $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr$(SHARED_VERSION_CORE)-dll -lgrpc_cronet$(SHARED_VERSION_CORE)-dll
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_cronet$(SHARED_VERSION_CPP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_CRONET_OBJS) $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgpr$(SHARED_VERSION_CORE)-dll -lgrpc_cronet$(SHARED_VERSION_CORE)-dll
else
$(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP): $(LIBGRPC++_CRONET_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgpr.$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_cronet.$(SHARED_EXT_CORE) $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_CRONET_OBJS) $(LDLIBS) $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_cronet
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_CRONET_OBJS) $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgpr -lgrpc_cronet
else
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_cronet.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_CRONET_OBJS) $(LDLIBS) $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_cronet
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_cronet.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_CRONET_OBJS) $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgpr -lgrpc_cronet
$(Q) ln -sf $(SHARED_PREFIX)grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP).so
endif
@ -4520,15 +4561,15 @@ ifeq ($(SYSTEM),MINGW32)
$(LIBDIR)/$(CONFIG)/grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP): $(LIBGRPC++_REFLECTION_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_reflection$(SHARED_VERSION_CPP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++$(SHARED_VERSION_CPP)-dll
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_reflection$(SHARED_VERSION_CPP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_REFLECTION_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgrpc++$(SHARED_VERSION_CPP)-dll
else
$(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP): $(LIBGRPC++_REFLECTION_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgrpc++.$(SHARED_EXT_CPP) $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_REFLECTION_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgrpc++
else
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_REFLECTION_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgrpc++
$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP).so
endif
@ -4597,6 +4638,7 @@ endif
LIBGRPC++_TEST_UTIL_SRC = \
$(GENDIR)/src/proto/grpc/health/v1/health.pb.cc $(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc \
$(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc \
@ -4705,13 +4747,13 @@ ifneq ($(NO_DEPS),true)
-include $(LIBGRPC++_TEST_UTIL_OBJS:.o=.dep)
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/byte_buffer_proto_helper.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/create_test_channel.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/string_ref_helper.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/subprocess.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/test_credentials_provider.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(GENDIR)/src/proto/grpc/health/v1/health.pb.cc $(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/byte_buffer_proto_helper.o: $(GENDIR)/src/proto/grpc/health/v1/health.pb.cc $(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/create_test_channel.o: $(GENDIR)/src/proto/grpc/health/v1/health.pb.cc $(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/string_ref_helper.o: $(GENDIR)/src/proto/grpc/health/v1/health.pb.cc $(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/subprocess.o: $(GENDIR)/src/proto/grpc/health/v1/health.pb.cc $(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/test/cpp/util/test_credentials_provider.o: $(GENDIR)/src/proto/grpc/health/v1/health.pb.cc $(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o: $(GENDIR)/src/proto/grpc/health/v1/health.pb.cc $(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
LIBGRPC++_UNSECURE_SRC = \
@ -4735,6 +4777,10 @@ LIBGRPC++_UNSECURE_SRC = \
src/cpp/server/async_generic_service.cc \
src/cpp/server/create_default_thread_pool.cc \
src/cpp/server/dynamic_thread_pool.cc \
src/cpp/server/health/default_health_check_service.cc \
src/cpp/server/health/health.pb.c \
src/cpp/server/health/health_check_service.cc \
src/cpp/server/health/health_check_service_server_builder_option.cc \
src/cpp/server/server_builder.cc \
src/cpp/server/server_cc.cc \
src/cpp/server/server_context.cc \
@ -4755,9 +4801,11 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/completion_queue.h \
include/grpc++/create_channel.h \
include/grpc++/create_channel_posix.h \
include/grpc++/ext/health_check_service_server_builder_option.h \
include/grpc++/generic/async_generic_service.h \
include/grpc++/generic/generic_stub.h \
include/grpc++/grpc++.h \
include/grpc++/health_check_service_interface.h \
include/grpc++/impl/call.h \
include/grpc++/impl/client_unary_call.h \
include/grpc++/impl/codegen/core_codegen.h \
@ -4869,15 +4917,15 @@ ifeq ($(SYSTEM),MINGW32)
$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION_CPP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr$(SHARED_VERSION_CORE)-dll -lgrpc_unsecure$(SHARED_VERSION_CORE)-dll
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION_CPP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgpr$(SHARED_VERSION_CORE)-dll -lgrpc_unsecure$(SHARED_VERSION_CORE)-dll
else
$(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP): $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBDIR)/$(CONFIG)/libgpr.$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.$(SHARED_EXT_CORE)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgpr -lgrpc_unsecure
else
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_unsecure.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure
$(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_unsecure.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBGRPC++_UNSECURE_OBJS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) -lgpr -lgrpc_unsecure
$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP).so
endif
@ -5410,15 +5458,15 @@ ifeq ($(SYSTEM),MINGW32)
$(LIBDIR)/$(CONFIG)/grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP): $(LIBGRPC_CSHARP_EXT_OBJS) $(ZLIB_DEP) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc_csharp_ext$(SHARED_VERSION_CSHARP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,--output-def=$(LIBDIR)/$(CONFIG)/grpc_csharp_ext$(SHARED_VERSION_CSHARP).def -Wl,--out-implib=$(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP)-dll.a -o $(LIBDIR)/$(CONFIG)/grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(LIBGRPC_CSHARP_EXT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP): $(LIBGRPC_CSHARP_EXT_OBJS) $(ZLIB_DEP) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_DEP)
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
ifeq ($(SYSTEM),Darwin)
$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(LIBGRPC_CSHARP_EXT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) $(LDLIBS)
else
$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS)
$(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.3 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(LIBGRPC_CSHARP_EXT_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) $(LDLIBS)
$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP).so.1
$(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP).so
endif
@ -10303,102 +10351,6 @@ endif
endif
INTERNAL_API_CANARY_IOMGR_TEST_SRC = \
test/core/internal_api_canaries/iomgr.c \
INTERNAL_API_CANARY_IOMGR_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(INTERNAL_API_CANARY_IOMGR_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/internal_api_canary_iomgr_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/internal_api_canary_iomgr_test: $(INTERNAL_API_CANARY_IOMGR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(INTERNAL_API_CANARY_IOMGR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/internal_api_canary_iomgr_test
endif
$(OBJDIR)/$(CONFIG)/test/core/internal_api_canaries/iomgr.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_internal_api_canary_iomgr_test: $(INTERNAL_API_CANARY_IOMGR_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(INTERNAL_API_CANARY_IOMGR_TEST_OBJS:.o=.dep)
endif
endif
INTERNAL_API_CANARY_SUPPORT_TEST_SRC = \
test/core/internal_api_canaries/iomgr.c \
INTERNAL_API_CANARY_SUPPORT_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(INTERNAL_API_CANARY_SUPPORT_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/internal_api_canary_support_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/internal_api_canary_support_test: $(INTERNAL_API_CANARY_SUPPORT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(INTERNAL_API_CANARY_SUPPORT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/internal_api_canary_support_test
endif
$(OBJDIR)/$(CONFIG)/test/core/internal_api_canaries/iomgr.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_internal_api_canary_support_test: $(INTERNAL_API_CANARY_SUPPORT_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(INTERNAL_API_CANARY_SUPPORT_TEST_OBJS:.o=.dep)
endif
endif
INTERNAL_API_CANARY_TRANSPORT_TEST_SRC = \
test/core/internal_api_canaries/iomgr.c \
INTERNAL_API_CANARY_TRANSPORT_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(INTERNAL_API_CANARY_TRANSPORT_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/internal_api_canary_transport_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/internal_api_canary_transport_test: $(INTERNAL_API_CANARY_TRANSPORT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(INTERNAL_API_CANARY_TRANSPORT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/internal_api_canary_transport_test
endif
$(OBJDIR)/$(CONFIG)/test/core/internal_api_canaries/iomgr.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_internal_api_canary_transport_test: $(INTERNAL_API_CANARY_TRANSPORT_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(INTERNAL_API_CANARY_TRANSPORT_TEST_OBJS:.o=.dep)
endif
endif
INVALID_CALL_ARGUMENT_TEST_SRC = \
test/core/end2end/invalid_call_argument_test.c \
@ -12352,6 +12304,50 @@ endif
endif
BM_CALL_CREATE_SRC = \
test/cpp/microbenchmarks/bm_call_create.cc \
BM_CALL_CREATE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(BM_CALL_CREATE_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/bm_call_create: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/bm_call_create: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_call_create: $(PROTOBUF_DEP) $(BM_CALL_CREATE_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_CALL_CREATE_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_call_create
endif
endif
$(BM_CALL_CREATE_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_call_create.o: $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_bm_call_create: $(BM_CALL_CREATE_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(BM_CALL_CREATE_OBJS:.o=.dep)
endif
endif
BM_CLOSURE_SRC = \
test/cpp/microbenchmarks/bm_closure.cc \
@ -12396,6 +12392,94 @@ endif
endif
BM_CQ_SRC = \
test/cpp/microbenchmarks/bm_cq.cc \
BM_CQ_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(BM_CQ_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/bm_cq: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/bm_cq: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_cq: $(PROTOBUF_DEP) $(BM_CQ_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_CQ_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_cq
endif
endif
$(BM_CQ_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_cq.o: $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_bm_cq: $(BM_CQ_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(BM_CQ_OBJS:.o=.dep)
endif
endif
BM_ERROR_SRC = \
test/cpp/microbenchmarks/bm_error.cc \
BM_ERROR_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(BM_ERROR_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/bm_error: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/bm_error: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/bm_error: $(PROTOBUF_DEP) $(BM_ERROR_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(BM_ERROR_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_error
endif
endif
$(BM_ERROR_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_error.o: $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_bm_error: $(BM_ERROR_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(BM_ERROR_OBJS:.o=.dep)
endif
endif
BM_FULLSTACK_SRC = \
test/cpp/microbenchmarks/bm_fullstack.cc \
@ -13572,6 +13656,49 @@ endif
$(OBJDIR)/$(CONFIG)/test/cpp/grpclb/grpclb_test.o: $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc
HEALTH_SERVICE_END2END_TEST_SRC = \
test/cpp/end2end/health_service_end2end_test.cc \
HEALTH_SERVICE_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(HEALTH_SERVICE_END2END_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/health_service_end2end_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/health_service_end2end_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/health_service_end2end_test: $(PROTOBUF_DEP) $(HEALTH_SERVICE_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(HEALTH_SERVICE_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/health_service_end2end_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/health_service_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_health_service_end2end_test: $(HEALTH_SERVICE_END2END_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(HEALTH_SERVICE_END2END_TEST_OBJS:.o=.dep)
endif
endif
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.

@ -93,7 +93,7 @@ task 'dlls' do
[ w64, w32 ].each do |opt|
env_comp = "CC=#{opt[:cross]}-gcc "
env_comp += "LD=#{opt[:cross]}-gcc "
docker_for_windows "#{env} #{env_comp} make -j #{out} && #{opt[:cross]}-strip -x -S #{out} && cp #{out} #{opt[:out]}"
docker_for_windows "gem update --system && #{env} #{env_comp} make -j #{out} && #{opt[:cross]}-strip -x -S #{out} && cp #{out} #{opt[:out]}"
end
end
@ -107,10 +107,10 @@ task 'gem:native' do
if RUBY_PLATFORM =~ /darwin/
FileUtils.touch 'grpc_c.32.ruby'
FileUtils.touch 'grpc_c.64.ruby'
system "rake cross native gem RUBY_CC_VERSION=2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
system "rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
else
Rake::Task['dlls'].execute
docker_for_windows "bundle && rake cross native gem RUBY_CC_VERSION=2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
docker_for_windows "gem update --system && bundle && rake cross native gem RUBY_CC_VERSION=2.4.0:2.3.0:2.2.2:2.1.5:2.0.0 V=#{verbose} GRPC_CONFIG=#{grpc_config}"
end
end

@ -34,32 +34,42 @@ bind(
)
bind(
name = "benchmark",
actual = "@submodule_benchmark//:benchmark",
)

bind(
name = "gflags",
actual = "@com_github_gflags_gflags//:gflags",
)
new_local_repository(
name = "submodule_boringssl",
path = "third_party/boringssl-with-bazel",
build_file = "third_party/boringssl-with-bazel/BUILD",
path = "third_party/boringssl-with-bazel",
)
new_local_repository(
name = "submodule_zlib",
path = "third_party/zlib",
build_file = "third_party/zlib.BUILD",
path = "third_party/zlib",
)
new_local_repository(
name = "submodule_protobuf",
path = "third_party/protobuf",
build_file = "third_party/protobuf/BUILD",
path = "third_party/protobuf",
)
new_local_repository(
name = "submodule_gtest",
path = "third_party/googletest",
build_file = "third_party/gtest.BUILD",
path = "third_party/googletest",
)
local_repository(
name = "com_github_gflags_gflags",
path = "third_party/gflags",
)
new_local_repository(

@ -51,7 +51,9 @@
'conditions': [
['runtime=="node"', {
'defines': [
'GRPC_UV'
# Disabling this while bugs are ironed out. Uncomment this to
# re-enable libuv integration in C core.
# 'GRPC_UV'
]
}],
['OS!="win" and runtime=="electron"', {

@ -769,9 +769,11 @@ filegroups:
- include/grpc++/completion_queue.h
- include/grpc++/create_channel.h
- include/grpc++/create_channel_posix.h
- include/grpc++/ext/health_check_service_server_builder_option.h
- include/grpc++/generic/async_generic_service.h
- include/grpc++/generic/generic_stub.h
- include/grpc++/grpc++.h
- include/grpc++/health_check_service_interface.h
- include/grpc++/impl/call.h
- include/grpc++/impl/client_unary_call.h
- include/grpc++/impl/codegen/core_codegen.h
@ -809,6 +811,8 @@ filegroups:
- src/cpp/client/create_channel_internal.h
- src/cpp/common/channel_filter.h
- src/cpp/server/dynamic_thread_pool.h
- src/cpp/server/health/default_health_check_service.h
- src/cpp/server/health/health.pb.h
- src/cpp/server/thread_pool_interface.h
- src/cpp/thread_manager/thread_manager.h
src:
@ -829,6 +833,10 @@ filegroups:
- src/cpp/server/async_generic_service.cc
- src/cpp/server/create_default_thread_pool.cc
- src/cpp/server/dynamic_thread_pool.cc
- src/cpp/server/health/default_health_check_service.cc
- src/cpp/server/health/health.pb.c
- src/cpp/server/health/health_check_service.cc
- src/cpp/server/health/health_check_service_server_builder_option.cc
- src/cpp/server/server_builder.cc
- src/cpp/server/server_cc.cc
- src/cpp/server/server_context.cc
@ -970,6 +978,7 @@ libs:
- grpc_base
- grpc_transport_cronet_client_secure
- grpc_transport_chttp2_client_secure
- grpc_load_reporting
generate_plugin_registry: true
platforms:
- linux
@ -1169,6 +1178,7 @@ libs:
- test/cpp/util/subprocess.h
- test/cpp/util/test_credentials_provider.h
src:
- src/proto/grpc/health/v1/health.proto
- src/proto/grpc/testing/echo_messages.proto
- src/proto/grpc/testing/echo.proto
- src/proto/grpc/testing/duplicate/echo_duplicate.proto
@ -2240,39 +2250,6 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: internal_api_canary_iomgr_test
build: test
run: false
language: c
src:
- test/core/internal_api_canaries/iomgr.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: internal_api_canary_support_test
build: test
run: false
language: c
src:
- test/core/internal_api_canaries/iomgr.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: internal_api_canary_transport_test
build: test
run: false
language: c
src:
- test/core/internal_api_canaries/iomgr.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: invalid_call_argument_test
cpu_cost: 0.1
build: test
@ -2348,8 +2325,8 @@ targets:
- gpr
- name: lb_policies_test
cpu_cost: 0.1
flaky: true
build: test
run: false
language: c
src:
- test/core/client_channel/lb_policies_test.c
@ -2985,6 +2962,26 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: bm_call_create
build: test
language: c++
src:
- test/cpp/microbenchmarks/bm_call_create.cc
deps:
- benchmark
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
args:
- --benchmark_min_time=0
defaults: benchmark
platforms:
- mac
- linux
- posix
- name: bm_closure
build: test
language: c++
@ -3003,6 +3000,46 @@ targets:
- mac
- linux
- posix
- name: bm_cq
build: test
language: c++
src:
- test/cpp/microbenchmarks/bm_cq.cc
deps:
- benchmark
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
args:
- --benchmark_min_time=0
defaults: benchmark
platforms:
- mac
- linux
- posix
- name: bm_error
build: test
language: c++
src:
- test/cpp/microbenchmarks/bm_error.cc
deps:
- benchmark
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
args:
- --benchmark_min_time=0
defaults: benchmark
platforms:
- mac
- linux
- posix
- name: bm_fullstack
build: test
language: c++
@ -3019,10 +3056,14 @@ targets:
args:
- --benchmark_min_time=0
defaults: benchmark
excluded_poll_engines:
- poll
- poll-cv
platforms:
- mac
- linux
- posix
timeout_seconds: 1200
- name: channel_arguments_test
gtest: true
build: test
@ -3351,6 +3392,19 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: health_service_end2end_test
gtest: true
build: test
language: c++
src:
- test/cpp/end2end/health_service_end2end_test.cc
deps:
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
- name: http2_client
build: test
run: false
@ -3900,7 +3954,7 @@ configs:
CPPFLAGS: -O2 -DGRPC_BASIC_PROFILER -DGRPC_TIMERS_RDTSC
DEFINES: NDEBUG
counters:
CPPFLAGS: -O2 -DGPR_MU_COUNTERS
CPPFLAGS: -O2 -DGPR_LOW_LEVEL_COUNTERS
DEFINES: NDEBUG
dbg:
CPPFLAGS: -O0

@ -12,105 +12,105 @@ The service config is a JSON string of the following form:
```
{
# Load balancing policy name.
# Supported values are 'round_robin' and 'grpclb'.
# Optional; if unset, the default behavior is pick the first available
# backend.
# Note that if the resolver returns only balancer addresses and no
# backend addresses, gRPC will always use the 'grpclb' policy,
# regardless of what this field is set to.
// Load balancing policy name.
// Supported values are 'round_robin' and 'grpclb'.
// Optional; if unset, the default behavior is pick the first available
// backend.
// Note that if the resolver returns only balancer addresses and no
// backend addresses, gRPC will always use the 'grpclb' policy,
// regardless of what this field is set to.
'loadBalancingPolicy': string,
# Per-method configuration. Optional.
// Per-method configuration. Optional.
'methodConfig': [
{
# The names of the methods to which this method config applies. There
# must be at least one name. Each name entry must be unique across the
# entire service config. If the 'method' field is empty, then this
# method config specifies the defaults for all methods for the specified
# service.
#
# For example, let's say that the service config contains the following
# method config entries:
#
# 'methodConfig': [
# { 'name': [ { 'service': 'MyService' } ] ... },
# { 'name': [ { 'service': 'MyService', 'method': 'Foo' } ] ... }
# ]
#
# For a request for MyService/Foo, we will use the second entry, because
# it exactly matches the service and method name.
# For a request for MyService/Bar, we will use the first entry, because
# it provides the default for all methods of MyService.
// The names of the methods to which this method config applies. There
// must be at least one name. Each name entry must be unique across the
// entire service config. If the 'method' field is empty, then this
// method config specifies the defaults for all methods for the specified
// service.
//
// For example, let's say that the service config contains the following
// method config entries:
//
// 'methodConfig': [
// { 'name': [ { 'service': 'MyService' } ] ... },
// { 'name': [ { 'service': 'MyService', 'method': 'Foo' } ] ... }
// ]
//
// For a request for MyService/Foo, we will use the second entry, because
// it exactly matches the service and method name.
// For a request for MyService/Bar, we will use the first entry, because
// it provides the default for all methods of MyService.
'name': [
{
# RPC service name. Required.
# If using gRPC with protobuf as the IDL, then this will be of
# the form "pkg.service_name", where "pkg" is the package name
# defined in the proto file.
// RPC service name. Required.
// If using gRPC with protobuf as the IDL, then this will be of
// the form "pkg.service_name", where "pkg" is the package name
// defined in the proto file.
'service': string,
# RPC method name. Optional (see above).
// RPC method name. Optional (see above).
'method': string,
}
],
# Whether RPCs sent to this method should wait until the connection is
# ready by default. If false, the RPC will abort immediately if there
# is a transient failure connecting to the server. Otherwise, gRPC will
# attempt to connect until the deadline is exceeded.
#
# The value specified via the gRPC client API will override the value
# set here. However, note that setting the value in the client API will
# also affect transient errors encountered during name resolution,
# which cannot be caught by the value here, since the service config
# is obtained by the gRPC client via name resolution.
// Whether RPCs sent to this method should wait until the connection is
// ready by default. If false, the RPC will abort immediately if there
// is a transient failure connecting to the server. Otherwise, gRPC will
// attempt to connect until the deadline is exceeded.
//
// The value specified via the gRPC client API will override the value
// set here. However, note that setting the value in the client API will
// also affect transient errors encountered during name resolution,
// which cannot be caught by the value here, since the service config
// is obtained by the gRPC client via name resolution.
'waitForReady': bool,
# The default timeout in seconds for RPCs sent to this method. This can
# be overridden in code. If no reply is received in the specified amount
# of time, the request is aborted and a deadline-exceeded error status
# is returned to the caller.
#
# The actual deadline used will be the minimum of the value specified
# here and the value set by the application via the gRPC client API.
# If either one is not set, then the other will be used.
# If neither is set, then the request has no deadline.
#
# The format of the value is that of the 'Duration' type defined here:
# https://developers.google.com/protocol-buffers/docs/proto3#json
// The default timeout in seconds for RPCs sent to this method. This can
// be overridden in code. If no reply is received in the specified amount
// of time, the request is aborted and a deadline-exceeded error status
// is returned to the caller.
//
// The actual deadline used will be the minimum of the value specified
// here and the value set by the application via the gRPC client API.
// If either one is not set, then the other will be used.
// If neither is set, then the request has no deadline.
//
// The format of the value is that of the 'Duration' type defined here:
// https://developers.google.com/protocol-buffers/docs/proto3#json
'timeout': string,
# The maximum allowed payload size for an individual request or object
# in a stream (client->server) in bytes. The size which is measured is
# the serialized, uncompressed payload in bytes. This applies both
# to streaming and non-streaming requests.
#
# The actual value used is the minimum of the value specified here and
# the value set by the application via the gRPC client API.
# If either one is not set, then the other will be used.
# If neither is set, then the built-in default is used.
#
# If a client attempts to send an object larger than this value, it
# will not be sent and the client will see an error.
# Note that 0 is a valid value, meaning that the request message must
# be empty.
// The maximum allowed payload size for an individual request or object
// in a stream (client->server) in bytes. The size which is measured is
// the serialized, uncompressed payload in bytes. This applies both
// to streaming and non-streaming requests.
//
// The actual value used is the minimum of the value specified here and
// the value set by the application via the gRPC client API.
// If either one is not set, then the other will be used.
// If neither is set, then the built-in default is used.
//
// If a client attempts to send an object larger than this value, it
// will not be sent and the client will see an error.
// Note that 0 is a valid value, meaning that the request message must
// be empty.
'maxRequestMessageBytes': number,
# The maximum allowed payload size for an individual response or object
# in a stream (server->client) in bytes. The size which is measured is
# the serialized, uncompressed payload in bytes. This applies both
# to streaming and non-streaming requests.
#
# The actual value used is the minimum of the value specified here and
# the value set by the application via the gRPC client API.
# If either one is not set, then the other will be used.
# If neither is set, then the built-in default is used.
#
# If a server attempts to send an object larger than this value, it
# will not be sent, and the client will see an error.
# Note that 0 is a valid value, meaning that the response message must
# be empty.
// The maximum allowed payload size for an individual response or object
// in a stream (server->client) in bytes. The size which is measured is
// the serialized, uncompressed payload in bytes. This applies both
// to streaming and non-streaming requests.
//
// The actual value used is the minimum of the value specified here and
// the value set by the application via the gRPC client API.
// If either one is not set, then the other will be used.
// If neither is set, then the built-in default is used.
//
// If a server attempts to send an object larger than this value, it
// will not be sent, and the client will see an error.
// Note that 0 is a valid value, meaning that the response message must
// be empty.
'maxResponseMessageBytes': number
}
]
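// As a concrete illustration (the service name and values below are
// hypothetical, not taken from this document), a config that selects
// round_robin and gives every method of 'example.EchoService' a 1.5
// second default timeout could look like:
//
//   {
//     'loadBalancingPolicy': 'round_robin',
//     'methodConfig': [
//       {
//         'name': [ { 'service': 'example.EchoService' } ],
//         'waitForReady': true,
//         'timeout': '1.5s'
//       }
//     ]
//   }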

@ -17,7 +17,7 @@ Gem::Specification.new do |s|
s.require_paths = ['lib']
s.platform = Gem::Platform::RUBY
s.add_dependency 'grpc', '~> 1.0.0'
s.add_dependency 'grpc', '~> 1.0'
s.add_development_dependency 'bundler', '~> 1.7'
end

@ -0,0 +1,6 @@
gRPC (Ruby) without protobuf
============================
This directory contains a simple example of using gRPC without protobuf.
This is mainly intended to show basic usage of the GRPC::GenericService module.

@ -0,0 +1,49 @@
#!/usr/bin/env ruby
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Sample app that connects to an 'EchoWithoutProtobuf' service.
#
# Usage: $ path/to/echo_client.rb
this_dir = File.expand_path(File.dirname(__FILE__))
$LOAD_PATH.unshift(this_dir) unless $LOAD_PATH.include?(this_dir)
require 'grpc'
require 'echo_services_noprotobuf'
def main
stub = EchoWithoutProtobuf::Stub.new('localhost:50051', :this_channel_is_insecure)
user = ARGV.size > 0 ? ARGV[0] : 'world'
message = stub.echo("hello #{user}")
p "Response: #{message}"
end
main

@ -0,0 +1,59 @@
#!/usr/bin/env ruby
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Sample gRPC server that implements the EchoWithoutProtobuf service.
#
# Usage: $ path/to/echo_server.rb
this_dir = File.expand_path(File.dirname(__FILE__))
$LOAD_PATH.unshift(this_dir) unless $LOAD_PATH.include?(this_dir)
require 'grpc'
require 'echo_services_noprotobuf'
# EchoServer is a simple server that implements the EchoWithoutProtobuf service.
class EchoServer < EchoWithoutProtobuf::Service
# echo implements the EchoWithoutProtobuf 'Echo' rpc method.
def echo(echo_req, _unused_call)
echo_req
end
end
# main starts an RpcServer that receives requests to EchoWithoutProtobuf at the sample
# server port.
def main
s = GRPC::RpcServer.new
s.add_http2_port('0.0.0.0:50051', :this_port_is_insecure)
s.handle(EchoServer)
s.run_till_terminated
end
main

@ -0,0 +1,49 @@
# Original file comments:
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
require 'grpc'
module EchoWithoutProtobuf
# The 'echo without protobuf' service definition.
class Service
include GRPC::GenericService
self.marshal_class_method = :try_convert
self.unmarshal_class_method = :try_convert
self.service_name = 'EchoWithoutProtobuf'
# Request and response are plain strings
rpc :Echo, String, String
end
Stub = Service.rpc_stub_class
end

@ -27,7 +27,7 @@ Gem::Specification.new do |s|
s.require_paths = %w( src/ruby/bin src/ruby/lib src/ruby/pb )
s.platform = Gem::Platform::RUBY
s.add_dependency 'google-protobuf', '~> 3.1.0'
s.add_dependency 'google-protobuf', '~> 3.1'
s.add_dependency 'googleauth', '~> 0.5.1'
s.add_development_dependency 'bundler', '~> 1.9'
@ -35,7 +35,7 @@ Gem::Specification.new do |s|
s.add_development_dependency 'logging', '~> 2.0'
s.add_development_dependency 'simplecov', '~> 0.9'
s.add_development_dependency 'rake', '~> 10.4'
s.add_development_dependency 'rake-compiler', '~> 0.9'
s.add_development_dependency 'rake-compiler', '~> 1.0'
s.add_development_dependency 'rake-compiler-dock', '~> 0.5.1'
s.add_development_dependency 'rspec', '~> 3.2'
s.add_development_dependency 'rubocop', '~> 0.30.0'

@ -0,0 +1,62 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_EXT_HEALTH_CHECK_SERVICE_SERVER_BUILDER_OPTION_H
#define GRPCXX_EXT_HEALTH_CHECK_SERVICE_SERVER_BUILDER_OPTION_H
#include <memory>
#include <grpc++/health_check_service_interface.h>
#include <grpc++/impl/server_builder_option.h>
#include <grpc++/support/config.h>
namespace grpc {
class HealthCheckServiceServerBuilderOption : public ServerBuilderOption {
public:
// The ownership of hc will be taken and transferred to the gRPC server.
// To explicitly disable the default service, pass in a nullptr.
explicit HealthCheckServiceServerBuilderOption(
std::unique_ptr<HealthCheckServiceInterface> hc);
~HealthCheckServiceServerBuilderOption() override {}
void UpdateArguments(ChannelArguments* args) override;
void UpdatePlugins(
std::vector<std::unique_ptr<ServerBuilderPlugin>>* plugins) override;
private:
std::unique_ptr<HealthCheckServiceInterface> hc_;
};
} // namespace grpc
#endif // GRPCXX_EXT_HEALTH_CHECK_SERVICE_SERVER_BUILDER_OPTION_H
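As a usage sketch (not part of this change), the option above would typically be handed to a ServerBuilder via SetOption. MyHealthCheckService below is a hypothetical implementation of HealthCheckServiceInterface, and the listening address is illustrative; the builder-option and interface types are the ones declared in this diff.
```cpp
// Hedged sketch: wiring a custom health check service into a server through
// HealthCheckServiceServerBuilderOption. MyHealthCheckService and the address
// are assumptions for illustration only.
#include <memory>

#include <grpc++/ext/health_check_service_server_builder_option.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

class MyHealthCheckService : public grpc::HealthCheckServiceInterface {
 public:
  void SetServingStatus(const grpc::string& service_name,
                        bool serving) override {
    // Record the per-service status somewhere the RPC handler can read it.
  }
  void SetServingStatus(bool serving) override {
    // Apply the status to every registered service name.
  }
};

int main() {
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051",
                           grpc::InsecureServerCredentials());
  // Ownership of the service is transferred to the server; passing a nullptr
  // here would explicitly disable the default health checking service.
  builder.SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
      new grpc::HealthCheckServiceServerBuilderOption(
          std::unique_ptr<grpc::HealthCheckServiceInterface>(
              new MyHealthCheckService))));
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();
  return 0;
}
```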

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,55 +31,38 @@
*
*/
#include <set>
#ifndef GRPCXX_HEALTH_CHECK_SERVICE_INTERFACE_H
#define GRPCXX_HEALTH_CHECK_SERVICE_INTERFACE_H
#include <grpc/support/log.h>
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/report.h"
#include "test/cpp/util/benchmark_config.h"
extern "C" {
#include "src/core/lib/iomgr/pollset_posix.h"
}
#include <grpc++/support/config.h>
namespace grpc {
namespace testing {
static const int WARMUP = 5;
static const int BENCHMARK = 5;
static void RunQPS() {
gpr_log(GPR_INFO, "Running QPS test");
const char kHealthCheckServiceInterfaceArg[] =
"grpc.health_check_service_interface";
ClientConfig client_config;
client_config.set_client_type(ASYNC_CLIENT);
client_config.set_outstanding_rpcs_per_channel(1000);
client_config.set_client_channels(8);
client_config.set_async_client_threads(8);
client_config.set_rpc_type(UNARY);
client_config.mutable_load_params()->mutable_closed_loop();
// The gRPC server uses this interface to expose the health checking service
// without depending on protobuf.
class HealthCheckServiceInterface {
public:
virtual ~HealthCheckServiceInterface() {}
ServerConfig server_config;
server_config.set_server_type(ASYNC_SERVER);
server_config.set_async_server_threads(4);
// Set or change the serving status of the given service_name.
virtual void SetServingStatus(const grpc::string& service_name,
bool serving) = 0;
// Apply to all registered service names.
virtual void SetServingStatus(bool serving) = 0;
};
const auto result =
RunScenario(client_config, 1, server_config, 1, WARMUP, BENCHMARK, -2);
// Enable/disable the default health checking service. This applies to all C++
// servers created afterwards. For each server, the user can override the default
// with a HealthCheckServiceServerBuilderOption.
// NOT thread safe.
void EnableDefaultHealthCheckService(bool enable);
GetReporter()->ReportQPSPerCore(*result);
GetReporter()->ReportLatency(*result);
}
// NOT thread safe.
bool DefaultHealthCheckServiceEnabled();
} // namespace testing
} // namespace grpc
int main(int argc, char** argv) {
grpc::testing::InitBenchmark(&argc, &argv, true);
grpc_platform_become_multipoller = grpc_poll_become_multipoller;
grpc::testing::RunQPS();
return 0;
}
#endif // GRPCXX_HEALTH_CHECK_SERVICE_INTERFACE_H
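A complementary sketch of the default path described above: enable the default health checking service before building a server, then toggle serving status through Server::GetHealthCheckService(). The listening address and service name are hypothetical; the functions used are the ones declared in this change.
```cpp
// Hedged sketch of the default health checking service flow; the address and
// "example.EchoService" name are illustrative assumptions.
#include <memory>

#include <grpc++/health_check_service_interface.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

int main() {
  // Must be called before the server is built; NOT thread safe.
  grpc::EnableDefaultHealthCheckService(true);

  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051",
                           grpc::InsecureServerCredentials());
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();

  // Mark a service as serving; pass false later to report it as not serving.
  grpc::HealthCheckServiceInterface* health = server->GetHealthCheckService();
  if (health != nullptr) {
    health->SetServingStatus("example.EchoService", true);
  }
  server->Wait();
  return 0;
}
```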

@ -213,8 +213,7 @@ class ServerContext {
void BeginCompletionOp(Call* call);
ServerContext(gpr_timespec deadline, grpc_metadata* metadata,
size_t metadata_count);
ServerContext(gpr_timespec deadline, grpc_metadata_array* arr);
void set_call(grpc_call* call) { call_ = call; }

@ -55,12 +55,10 @@ struct grpc_server;
namespace grpc {
class GenericServerContext;
class AsyncGenericService;
class ServerAsyncStreamingInterface;
class HealthCheckServiceInterface;
class ServerContext;
class ServerInitializer;
class ThreadPoolInterface;
/// Models a gRPC server.
///
@ -99,6 +97,11 @@ class Server final : public ServerInterface, private GrpcLibraryCodegen {
// Returns a \em raw pointer to the underlying grpc_server instance.
grpc_server* c_server();
/// Returns the health check service.
HealthCheckServiceInterface* GetHealthCheckService() const {
return health_check_service_.get();
}
private:
friend class AsyncGenericService;
friend class ServerBuilder;
@ -216,6 +219,9 @@ class Server final : public ServerInterface, private GrpcLibraryCodegen {
grpc_server* server_;
std::unique_ptr<ServerInitializer> server_initializer_;
std::unique_ptr<HealthCheckServiceInterface> health_check_service_;
bool health_check_service_disabled_;
};
} // namespace grpc

@ -40,6 +40,20 @@
typedef intptr_t gpr_atm;
#ifdef GPR_LOW_LEVEL_COUNTERS
extern gpr_atm gpr_counter_atm_cas;
extern gpr_atm gpr_counter_atm_add;
#define GPR_ATM_INC_COUNTER(counter) \
__atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED)
#define GPR_ATM_INC_CAS_THEN(blah) \
(GPR_ATM_INC_COUNTER(gpr_counter_atm_cas), blah)
#define GPR_ATM_INC_ADD_THEN(blah) \
(GPR_ATM_INC_COUNTER(gpr_counter_atm_add), blah)
#else
#define GPR_ATM_INC_CAS_THEN(blah) blah
#define GPR_ATM_INC_ADD_THEN(blah) blah
#endif
#define gpr_atm_full_barrier() (__atomic_thread_fence(__ATOMIC_SEQ_CST))
#define gpr_atm_acq_load(p) (__atomic_load_n((p), __ATOMIC_ACQUIRE))
@ -50,25 +64,28 @@ typedef intptr_t gpr_atm;
(__atomic_store_n((p), (intptr_t)(value), __ATOMIC_RELAXED))
#define gpr_atm_no_barrier_fetch_add(p, delta) \
(__atomic_fetch_add((p), (intptr_t)(delta), __ATOMIC_RELAXED))
GPR_ATM_INC_ADD_THEN( \
__atomic_fetch_add((p), (intptr_t)(delta), __ATOMIC_RELAXED))
#define gpr_atm_full_fetch_add(p, delta) \
(__atomic_fetch_add((p), (intptr_t)(delta), __ATOMIC_ACQ_REL))
GPR_ATM_INC_ADD_THEN( \
__atomic_fetch_add((p), (intptr_t)(delta), __ATOMIC_ACQ_REL))
static __inline int gpr_atm_no_barrier_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
return __atomic_compare_exchange_n(p, &o, n, 0, __ATOMIC_RELAXED,
__ATOMIC_RELAXED);
return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n(
p, &o, n, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}
static __inline int gpr_atm_acq_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
return __atomic_compare_exchange_n(p, &o, n, 0, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED);
return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n(
p, &o, n, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}
static __inline int gpr_atm_rel_cas(gpr_atm *p, gpr_atm o, gpr_atm n) {
return __atomic_compare_exchange_n(p, &o, n, 0, __ATOMIC_RELEASE,
__ATOMIC_RELAXED);
return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n(
p, &o, n, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED));
}
#define gpr_atm_full_xchg(p, n) __atomic_exchange_n((p), (n), __ATOMIC_ACQ_REL)
#define gpr_atm_full_xchg(p, n) \
GPR_ATM_INC_CAS_THEN(__atomic_exchange_n((p), (n), __ATOMIC_ACQ_REL))
#endif /* GRPC_IMPL_CODEGEN_ATM_GCC_ATOMIC_H */
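For readers unfamiliar with the counter macros above, here is a stand-alone sketch of the same wrapping idea using the GCC/Clang __atomic builtins; the global counter and macro names are illustrative assumptions, not the real gpr_* identifiers.
```cpp
// Illustrative stand-alone version of the GPR_LOW_LEVEL_COUNTERS pattern:
// every counted atomic op also bumps a relaxed global counter.
#include <cstdint>
#include <cstdio>

static intptr_t g_cas_count = 0;  // stands in for gpr_counter_atm_cas

#define INC_COUNTER(c) __atomic_fetch_add(&(c), 1, __ATOMIC_RELAXED)
// Comma operator: bump the counter, then yield the result of the CAS itself.
#define COUNTED_CAS(p, o, n)                                       \
  (INC_COUNTER(g_cas_count),                                       \
   __atomic_compare_exchange_n((p), (o), (n), 0, __ATOMIC_ACQ_REL, \
                               __ATOMIC_RELAXED))

int main() {
  intptr_t value = 0;
  intptr_t expected = 0;
  COUNTED_CAS(&value, &expected, 42);  // succeeds: value becomes 42
  std::printf("value=%ld cas_count=%ld\n", (long)value, (long)g_cas_count);
  return 0;
}
```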

@ -297,6 +297,10 @@
#endif
#ifdef _MSC_VER
#ifdef _PYTHON_MSVC
// The Python 3.5 Windows runtime is missing InetNtop
#define GPR_WIN_INET_NTOP
#endif // _PYTHON_MSVC
#if _MSC_VER < 1700
typedef __int8 int8_t;
typedef __int16 int16_t;

@ -105,8 +105,11 @@ if EXTRA_ENV_COMPILE_ARGS is None:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
else:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
elif 'win32' in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -D_PYTHON_MSVC'
elif "linux" in sys.platform or "darwin" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -fvisibility=hidden -fno-wrapv'
if EXTRA_ENV_LINK_ARGS is None:
EXTRA_ENV_LINK_ARGS = ''
if "linux" in sys.platform or "darwin" in sys.platform:

@ -127,7 +127,7 @@ static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
@ -146,7 +146,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));

@ -183,7 +183,7 @@ typedef struct client_channel_channel_data {
grpc_pollset_set *interested_parties;
/* the following properties are guarded by a mutex since API's require them
to be instantaniously available */
to be instantaneously available */
gpr_mu info_mu;
char *info_lb_policy_name;
/** service config in JSON form */
@ -200,9 +200,9 @@ typedef struct {
grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state);
static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state);
static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand,
@ -213,7 +213,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
state == GRPC_CHANNEL_SHUTDOWN) &&
chand->lb_policy != NULL) {
/* cancel picks with wait_for_ready=false */
grpc_lb_policy_cancel_picks(
grpc_lb_policy_cancel_picks_locked(
exec_ctx, chand->lb_policy,
/* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
/* check= */ 0, GRPC_ERROR_REF(error));
@ -230,14 +230,14 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
if (w->lb_policy == w->chand->lb_policy) {
if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver);
GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
w->chand->lb_policy = NULL;
}
set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
GRPC_ERROR_REF(error), "lb_changed");
if (w->state != GRPC_CHANNEL_SHUTDOWN) {
watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
watch_lb_policy_locked(exec_ctx, w->chand, w->lb_policy, w->state);
}
}
@ -245,9 +245,9 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
gpr_free(w);
}
static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
@ -256,8 +256,8 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_combiner_scheduler(chand->combiner, false));
w->state = current_state;
w->lb_policy = lb_policy;
grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
&w->on_changed);
grpc_lb_policy_notify_on_state_change_locked(exec_ctx, lb_policy, &w->state,
&w->on_changed);
}
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
@ -313,13 +313,14 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_args lb_policy_args;
lb_policy_args.args = chand->resolver_result;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
lb_policy_args.combiner = chand->combiner;
lb_policy =
grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
if (lb_policy != NULL) {
GRPC_LB_POLICY_REF(lb_policy, "config_change");
GRPC_ERROR_UNREF(state_error);
state =
grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
state = grpc_lb_policy_check_connectivity_locked(exec_ctx, lb_policy,
&state_error);
}
// Find service config.
channel_arg =
@ -383,14 +384,15 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
set_channel_connectivity_state_locked(
exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
if (lb_policy != NULL) {
watch_lb_policy(exec_ctx, chand, lb_policy, state);
watch_lb_policy_locked(exec_ctx, chand, lb_policy, state);
}
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
grpc_resolver_next_locked(exec_ctx, chand->resolver,
&chand->resolver_result,
&chand->on_resolver_result_changed);
} else {
if (chand->resolver != NULL) {
grpc_resolver_shutdown(exec_ctx, chand->resolver);
grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
}
@ -403,7 +405,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
}
if (exit_idle) {
grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
grpc_lb_policy_exit_idle_locked(exec_ctx, lb_policy);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
}
@ -440,7 +442,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_closure_sched(exec_ctx, op->send_ping,
GRPC_ERROR_CREATE("Ping with no load balancing"));
} else {
grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
}
op->send_ping = NULL;
@ -451,7 +453,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
set_channel_connectivity_state_locked(
exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
grpc_resolver_shutdown(exec_ctx, chand->resolver);
grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
if (!chand->started_resolving) {
@ -550,7 +552,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->resolver = grpc_resolver_create(
exec_ctx, proxy_name != NULL ? proxy_name : arg->value.string,
new_args != NULL ? new_args : args->channel_args,
chand->interested_parties);
chand->interested_parties, chand->combiner);
if (proxy_name != NULL) gpr_free(proxy_name);
if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
if (chand->resolver == NULL) {
@ -559,13 +561,23 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_resolver *resolver = arg;
grpc_resolver_shutdown_locked(exec_ctx, resolver);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
}
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
if (chand->resolver != NULL) {
grpc_resolver_shutdown(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
grpc_closure_sched(
exec_ctx,
grpc_closure_create(shutdown_resolver_locked, chand->resolver,
grpc_combiner_scheduler(chand->combiner, false)),
GRPC_ERROR_NONE);
}
if (chand->client_channel_factory != NULL) {
grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory);
@ -797,8 +809,9 @@ static bool pick_subchannel_locked(
if (initial_metadata == NULL) {
if (chand->lb_policy != NULL) {
grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
connected_subchannel, GRPC_ERROR_REF(error));
grpc_lb_policy_cancel_pick_locked(exec_ctx, chand->lb_policy,
connected_subchannel,
GRPC_ERROR_REF(error));
}
for (closure = chand->waiting_for_config_closures.head; closure != NULL;
closure = closure->next_data.next) {
@ -837,7 +850,7 @@ static bool pick_subchannel_locked(
const grpc_lb_policy_pick_args inputs = {
initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
gpr_inf_future(GPR_CLOCK_MONOTONIC)};
const bool result = grpc_lb_policy_pick(
const bool result = grpc_lb_policy_pick_locked(
exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
GPR_TIMER_END("pick_subchannel", 0);
@ -846,8 +859,9 @@ static bool pick_subchannel_locked(
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
grpc_resolver_next_locked(exec_ctx, chand->resolver,
&chand->resolver_result,
&chand->on_resolver_result_changed);
}
if (chand->resolver != NULL) {
cpa = gpr_malloc(sizeof(*cpa));
@ -1123,7 +1137,7 @@ static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
// Initialize data members.
@ -1204,14 +1218,15 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
channel_data *chand = arg;
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
chand->exit_idle_when_lb_policy_arrives = true;
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
chand->started_resolving = true;
grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
grpc_resolver_next_locked(exec_ctx, chand->resolver,
&chand->resolver_result,
&chand->on_resolver_result_changed);
}
}
GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect");
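
The client channel now touches its resolver and LB policy only from inside chand->combiner; code running outside it, such as the channel destructor above, hops in by scheduling a closure against that combiner instead of calling the *_locked function directly. A minimal sketch of that pattern, assuming only the closure and combiner APIs that already appear in this diff; identifiers prefixed example_ are illustrative, not part of the tree.

#include "src/core/ext/client_channel/resolver.h"
#include "src/core/lib/iomgr/combiner.h"

typedef struct example_channel_state {
  grpc_combiner *combiner;
  grpc_resolver *resolver;
} example_channel_state;

/* Runs inside the combiner, so it may call *_locked APIs directly. */
static void example_shutdown_resolver_locked(grpc_exec_ctx *exec_ctx,
                                             void *arg, grpc_error *error) {
  example_channel_state *st = arg;
  grpc_resolver_shutdown_locked(exec_ctx, st->resolver);
  GRPC_RESOLVER_UNREF(exec_ctx, st->resolver, "example");
}

/* May run on any thread: defer the locked work into the combiner. */
static void example_shutdown(grpc_exec_ctx *exec_ctx,
                             example_channel_state *st) {
  grpc_closure_sched(
      exec_ctx,
      grpc_closure_create(example_shutdown_resolver_locked, st,
                          grpc_combiner_scheduler(st->combiner, false)),
      GRPC_ERROR_NONE);
}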

@ -32,14 +32,17 @@
*/
#include "src/core/ext/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/combiner.h"
#define WEAK_REF_BITS 16
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable) {
const grpc_lb_policy_vtable *vtable,
grpc_combiner *combiner) {
policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
policy->interested_parties = grpc_pollset_set_create();
policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
}
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
@ -71,6 +74,13 @@ void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_lb_policy *policy = arg;
policy->vtable->shutdown_locked(exec_ctx, policy);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
}
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
@ -79,10 +89,15 @@ void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
policy->vtable->shutdown(exec_ctx, policy);
grpc_closure_sched(
exec_ctx,
grpc_closure_create(shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner, false)),
GRPC_ERROR_NONE);
} else {
grpc_lb_policy_weak_unref(exec_ctx,
policy REF_FUNC_PASS_ARGS("strong-unref"));
}
grpc_lb_policy_weak_unref(exec_ctx,
policy REF_FUNC_PASS_ARGS("strong-unref"));
}
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
@ -95,52 +110,58 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(exec_ctx, policy->interested_parties);
grpc_combiner *combiner = policy->combiner;
policy->vtable->destroy(exec_ctx, policy);
GRPC_COMBINER_UNREF(exec_ctx, combiner, "lb_policy");
}
}
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete) {
return policy->vtable->pick(exec_ctx, policy, pick_args, target, user_data,
on_complete);
int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
void **user_data, grpc_closure *on_complete) {
return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
user_data, on_complete);
}
void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connected_subchannel **target,
grpc_error *error) {
policy->vtable->cancel_pick(exec_ctx, policy, target, error);
void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connected_subchannel **target,
grpc_error *error) {
policy->vtable->cancel_pick_locked(exec_ctx, policy, target, error);
}
void grpc_lb_policy_cancel_picks(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
policy->vtable->cancel_picks(exec_ctx, policy, initial_metadata_flags_mask,
initial_metadata_flags_eq, error);
void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
policy->vtable->cancel_picks_locked(exec_ctx, policy,
initial_metadata_flags_mask,
initial_metadata_flags_eq, error);
}
void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
policy->vtable->exit_idle(exec_ctx, policy);
void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy) {
policy->vtable->exit_idle_locked(exec_ctx, policy);
}
void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure) {
policy->vtable->ping_one(exec_ctx, policy, closure);
void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_closure *closure) {
policy->vtable->ping_one_locked(exec_ctx, policy, closure);
}
void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connectivity_state *state,
grpc_closure *closure) {
policy->vtable->notify_on_state_change(exec_ctx, policy, state, closure);
void grpc_lb_policy_notify_on_state_change_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connectivity_state *state, grpc_closure *closure) {
policy->vtable->notify_on_state_change_locked(exec_ctx, policy, state,
closure);
}
grpc_connectivity_state grpc_lb_policy_check_connectivity(
grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error) {
return policy->vtable->check_connectivity(exec_ctx, policy,
connectivity_error);
return policy->vtable->check_connectivity_locked(exec_ctx, policy,
connectivity_error);
}
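
The ref pair above packs the strong count in the bits at and above WEAK_REF_BITS and the weak count below it, so the test in grpc_lb_policy_unref fires exactly when the value read before the decrement carried a single strong reference. A small worked example of that arithmetic with WEAK_REF_BITS = 16, as defined above; intptr_t stands in for gpr_atm so the sketch is self-contained.

#include <stdbool.h>
#include <stdint.h>

static bool last_strong_ref(intptr_t old_val) {
  const intptr_t mask = ~(intptr_t)((1 << 16) - 1); /* strong-count bits      */
  const intptr_t check = (intptr_t)1 << 16;         /* exactly one strong ref */
  return (old_val & mask) == check;
}

/* last_strong_ref((1 << 16) + 2) is true: one strong and two weak refs, so
   the unref above would schedule shutdown_locked under the combiner.
   last_strong_ref(2 << 16) is false: another strong ref is still held. */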

@ -51,6 +51,8 @@ struct grpc_lb_policy {
gpr_atm ref_pair;
/* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set *interested_parties;
/* combiner under which lb_policy actions take place */
grpc_combiner *combiner;
};
/** Extra arguments for an LB pick */
@ -69,42 +71,44 @@ typedef struct grpc_lb_policy_pick_args {
struct grpc_lb_policy_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** \see grpc_lb_policy_pick */
int (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete);
int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete);
/** \see grpc_lb_policy_cancel_pick */
void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connected_subchannel **target, grpc_error *error);
void (*cancel_pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connected_subchannel **target,
grpc_error *error);
/** \see grpc_lb_policy_cancel_picks */
void (*cancel_picks)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq, grpc_error *error);
void (*cancel_picks_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error);
/** \see grpc_lb_policy_ping_one */
void (*ping_one)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure);
void (*ping_one_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure);
/** Try to enter a READY connectivity state */
void (*exit_idle)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void (*exit_idle_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** check the current connectivity of the lb_policy */
grpc_connectivity_state (*check_connectivity)(
grpc_connectivity_state (*check_connectivity_locked)(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the policy. Calling with a NULL \a
state cancels the subscription. */
void (*notify_on_state_change)(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connectivity_state *state,
grpc_closure *closure);
void (*notify_on_state_change_locked)(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connectivity_state *state,
grpc_closure *closure);
};
/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
@ -144,7 +148,8 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** called by concrete implementations to initialize the base struct */
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable);
const grpc_lb_policy_vtable *vtable,
grpc_combiner *combiner);
/** Finds an appropriate subchannel for a call, based on \a pick_args.
@ -159,43 +164,45 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete);
int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target,
void **user_data, grpc_closure *on_complete);
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
against one of the connected subchannels managed by \a policy. */
void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure);
void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_closure *closure);
/** Cancel picks for \a target.
The \a on_complete callback of the pending picks will be invoked with \a
*target set to NULL. */
void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connected_subchannel **target,
grpc_error *error);
void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connected_subchannel **target,
grpc_error *error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given
in the call to \a grpc_lb_policy_pick) matches \a initial_metadata_flags_eq
when AND'd with \a initial_metadata_flags_mask */
void grpc_lb_policy_cancel_picks(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error);
void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error);
/** Try to enter a READY connectivity state */
void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy);
/* Call notify when the connectivity state of a channel changes from \a *state.
* Updates \a *state with the new state of the policy */
void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connectivity_state *state,
grpc_closure *closure);
void grpc_lb_policy_notify_on_state_change_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connectivity_state *state, grpc_closure *closure);
grpc_connectivity_state grpc_lb_policy_check_connectivity(
grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
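
Every vtable entry renamed with a _locked suffix is reached under policy->combiner by the callers in this diff, which is what lets the concrete policies later in the change drop their per-policy gpr_mu. A minimal sketch of an implementation written against the new contract; identifiers prefixed example_ are illustrative.

#include <stdbool.h>

#include "src/core/ext/client_channel/lb_policy.h"

typedef struct example_lb_policy {
  grpc_lb_policy base;  /* must be first, as in the policies below */
  bool started_picking; /* guarded by base.combiner; no gpr_mu needed */
} example_lb_policy;

static void example_exit_idle_locked(grpc_exec_ctx *exec_ctx,
                                     grpc_lb_policy *pol) {
  example_lb_policy *p = (example_lb_policy *)pol;
  p->started_picking = true; /* plain write: we are inside base.combiner */
}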

@ -107,6 +107,7 @@ grpc_arg grpc_lb_addresses_create_channel_arg(
typedef struct grpc_lb_policy_args {
grpc_client_channel_factory *client_channel_factory;
grpc_channel_args *args;
grpc_combiner *combiner;
} grpc_lb_policy_args;
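
Creators now hand the combiner to the policy through this struct, as the client channel and the grpclb round-robin handover do elsewhere in the diff. A hedged sketch of that call, assuming grpc_lb_policy_create from lb_policy_registry.h; the "round_robin" policy name and the example_ identifiers are illustrative.

#include <string.h>

#include "src/core/ext/client_channel/lb_policy_registry.h"

static grpc_lb_policy *example_create_child_policy(
    grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *ccf,
    grpc_channel_args *channel_args, grpc_combiner *combiner) {
  grpc_lb_policy_args args;
  memset(&args, 0, sizeof(args));
  args.client_channel_factory = ccf;
  args.args = channel_args;
  args.combiner = combiner; /* new field: the child runs under this combiner */
  return grpc_lb_policy_create(exec_ctx, "round_robin", &args);
}

Sharing the parent's combiner is what allows grpclb, below, to call the wrapped round-robin policy's *_locked entry points directly.
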
struct grpc_lb_policy_factory_vtable {

@ -32,10 +32,13 @@
*/
#include "src/core/ext/client_channel/resolver.h"
#include "src/core/lib/iomgr/combiner.h"
void grpc_resolver_init(grpc_resolver *resolver,
const grpc_resolver_vtable *vtable) {
const grpc_resolver_vtable *vtable,
grpc_combiner *combiner) {
resolver->vtable = vtable;
resolver->combiner = GRPC_COMBINER_REF(combiner, "resolver");
gpr_ref_init(&resolver->refs, 1);
}
@ -62,20 +65,24 @@ void grpc_resolver_unref(grpc_resolver *resolver,
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
#endif
if (gpr_unref(&resolver->refs)) {
grpc_combiner *combiner = resolver->combiner;
resolver->vtable->destroy(exec_ctx, resolver);
GRPC_COMBINER_UNREF(exec_ctx, combiner, "resolver");
}
}
void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
resolver->vtable->shutdown(exec_ctx, resolver);
void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
resolver->vtable->shutdown_locked(exec_ctx, resolver);
}
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
resolver->vtable->channel_saw_error(exec_ctx, resolver);
void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
resolver->vtable->channel_saw_error_locked(exec_ctx, resolver);
}
void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result, grpc_closure *on_complete) {
resolver->vtable->next(exec_ctx, resolver, result, on_complete);
void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result,
grpc_closure *on_complete) {
resolver->vtable->next_locked(exec_ctx, resolver, result, on_complete);
}

@ -44,14 +44,16 @@ typedef struct grpc_resolver_vtable grpc_resolver_vtable;
struct grpc_resolver {
const grpc_resolver_vtable *vtable;
gpr_refcount refs;
grpc_combiner *combiner;
};
struct grpc_resolver_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result, grpc_closure *on_complete);
void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*channel_saw_error_locked)(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver);
void (*next_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result, grpc_closure *on_complete);
};
#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
@ -70,21 +72,30 @@ void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
#endif
void grpc_resolver_init(grpc_resolver *resolver,
const grpc_resolver_vtable *vtable);
const grpc_resolver_vtable *vtable,
grpc_combiner *combiner);
void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver);
/** Notification that the channel has seen an error on some address.
Can be used as a hint that re-resolution is desirable soon. */
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver);
Can be used as a hint that re-resolution is desirable soon.
Must be called from the combiner passed as a resolver_arg at construction
time.*/
void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver);
/** Get the next result from the resolver. Expected to set \a *result with
new channel args and then schedule \a on_complete for execution.
If resolution is fatally broken, set \a *result to NULL and
schedule \a on_complete. */
void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result, grpc_closure *on_complete);
schedule \a on_complete.
Must be called from the combiner passed as a resolver_arg at construction
time.*/
void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result,
grpc_closure *on_complete);
#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_H */
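
Because next_locked, shutdown_locked and channel_saw_error_locked are only reached from the combiner supplied at construction, a resolver can keep its pending state in plain fields. A minimal sketch of a next_locked under that contract; example_ identifiers are illustrative, and the completion semantics follow the comment above (a NULL result reports a fatal failure).

#include "src/core/ext/client_channel/resolver.h"

typedef struct example_resolver {
  grpc_resolver base;         /* refs, vtable and the combiner */
  grpc_channel_args *results; /* guarded by base.combiner */
} example_resolver;

static void example_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *res,
                                grpc_channel_args **target_result,
                                grpc_closure *on_complete) {
  example_resolver *r = (example_resolver *)res;
  *target_result = r->results; /* NULL here signals a fatal failure */
  r->results = NULL;
  grpc_closure_sched(exec_ctx, on_complete, GRPC_ERROR_NONE);
}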

@ -50,6 +50,7 @@ typedef struct grpc_resolver_args {
grpc_uri *uri;
const grpc_channel_args *args;
grpc_pollset_set *pollset_set;
grpc_combiner *combiner;
} grpc_resolver_args;
struct grpc_resolver_factory_vtable {

@ -133,7 +133,8 @@ static grpc_resolver_factory *resolve_factory(const char *target,
grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_args *args,
grpc_pollset_set *pollset_set) {
grpc_pollset_set *pollset_set,
grpc_combiner *combiner) {
grpc_uri *uri = NULL;
char *canonical_target = NULL;
grpc_resolver_factory *factory =
@ -144,6 +145,7 @@ grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
resolver_args.uri = uri;
resolver_args.args = args;
resolver_args.pollset_set = pollset_set;
resolver_args.combiner = combiner;
resolver =
grpc_resolver_factory_create_resolver(exec_ctx, factory, &resolver_args);
grpc_uri_destroy(uri);

@ -65,7 +65,8 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory);
should not be NULL. */
grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_args *args,
grpc_pollset_set *pollset_set);
grpc_pollset_set *pollset_set,
grpc_combiner *combiner);
/** Find a resolver factory given a name and return an (owned-by-the-caller)
* reference to it */
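
A short usage sketch of the updated creation call, mirroring cc_init_channel_elem earlier in this diff; the target string and the example_ name are illustrative.

#include "src/core/ext/client_channel/resolver_registry.h"

static grpc_resolver *example_make_resolver(grpc_exec_ctx *exec_ctx,
                                            const grpc_channel_args *args,
                                            grpc_pollset_set *pollset_set,
                                            grpc_combiner *combiner) {
  /* The combiner passed here is the one under which the resolver's
     *_locked entry points must later be invoked. */
  return grpc_resolver_create(exec_ctx, "dns:///example.com:443", args,
                              pollset_set, combiner);
}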

@ -438,7 +438,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&w->subchannel->mu);
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher");
gpr_free(w);
follow_up->cb(exec_ctx, follow_up->cb_arg, error);
grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
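
The watcher above now finishes through grpc_closure_run instead of invoking follow_up->cb directly, and the added GRPC_ERROR_REF suggests the closure receives its own error reference rather than a borrowed one. A small sketch of that call shape, assuming the iomgr closure and error headers; example_finish_watch is illustrative.

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"

static void example_finish_watch(grpc_exec_ctx *exec_ctx,
                                 grpc_closure *follow_up, grpc_error *error) {
  /* Let the closure's own scheduler decide how it runs, and hand it an
     owned error reference, matching the change above. */
  grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}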

@ -115,6 +115,7 @@
#include "src/core/ext/lb_policy/grpclb/grpclb_channel.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
@ -285,9 +286,6 @@ typedef struct glb_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
/** mutex protecting remaining members */
gpr_mu mu;
/** who the client is trying to communicate with */
const char *server_name;
grpc_client_channel_factory *cc_factory;
@ -557,9 +555,9 @@ static bool pick_from_internal_rr_locked(
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
GPR_ASSERT(rr_policy != NULL);
const bool pick_done =
grpc_lb_policy_pick(exec_ctx, rr_policy, pick_args, target,
(void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
const bool pick_done = grpc_lb_policy_pick_locked(
exec_ctx, rr_policy, pick_args, target, (void **)&wc_arg->lb_token,
&wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
if (grpc_lb_glb_trace) {
@ -590,6 +588,7 @@ static grpc_lb_policy *create_rr_locked(
grpc_lb_policy_args args;
memset(&args, 0, sizeof(args));
args.client_channel_factory = glb_policy->cc_factory;
args.combiner = glb_policy->base.combiner;
grpc_lb_addresses *addresses =
process_serverlist_locked(exec_ctx, serverlist);
@ -608,8 +607,8 @@ static grpc_lb_policy *create_rr_locked(
return rr;
}
static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
@ -633,8 +632,8 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
grpc_error *new_rr_state_error = NULL;
const grpc_connectivity_state new_rr_state =
grpc_lb_policy_check_connectivity(exec_ctx, new_rr_policy,
&new_rr_state_error);
grpc_lb_policy_check_connectivity_locked(exec_ctx, new_rr_policy,
&new_rr_state_error);
/* Connectivity state is a function of the new RR policy just created */
const bool replace_old_rr = update_lb_connectivity_status_locked(
exec_ctx, glb_policy, new_rr_state, new_rr_state_error);
@ -677,17 +676,18 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
rr_connectivity_data *rr_connectivity =
gpr_malloc(sizeof(rr_connectivity_data));
memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
rr_connectivity, grpc_schedule_on_exec_ctx);
grpc_closure_init(&rr_connectivity->on_change,
glb_rr_connectivity_changed_locked, rr_connectivity,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
rr_connectivity->glb_policy = glb_policy;
rr_connectivity->state = new_rr_state;
/* Subscribe to changes to the connectivity of the new RR */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
&rr_connectivity->state,
&rr_connectivity->on_change);
grpc_lb_policy_exit_idle(exec_ctx, glb_policy->rr_policy);
grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
&rr_connectivity->state,
&rr_connectivity->on_change);
grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
/* Update picks and pings in wait */
pending_pick *pp;
@ -713,17 +713,16 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy,
&pping->wrapped_notify_arg.wrapper_closure);
grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
&pping->wrapped_notify_arg.wrapper_closure);
}
}
static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
rr_connectivity_data *rr_connectivity = arg;
glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
gpr_mu_lock(&glb_policy->mu);
const bool shutting_down = glb_policy->shutting_down;
bool unref_needed = false;
GRPC_ERROR_REF(error);
@ -740,11 +739,10 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
update_lb_connectivity_status_locked(exec_ctx, glb_policy,
rr_connectivity->state, error);
/* Resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
&rr_connectivity->state,
&rr_connectivity->on_change);
grpc_lb_policy_notify_on_state_change_locked(
exec_ctx, glb_policy->rr_policy, &rr_connectivity->state,
&rr_connectivity->on_change);
}
gpr_mu_unlock(&glb_policy->mu);
if (unref_needed) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"rr_connectivity_cb");
@ -899,8 +897,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
gpr_free(glb_policy);
return NULL;
}
grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable);
gpr_mu_init(&glb_policy->mu);
grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
"grpclb");
return &glb_policy->base;
@ -918,13 +915,11 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
gpr_mu_destroy(&glb_policy->mu);
gpr_free(glb_policy);
}
static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
glb_policy->shutting_down = true;
pending_pick *pp = glb_policy->pending_picks;
@ -941,7 +936,6 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
* while holding glb_policy->mu: lb_on_server_status_received, invoked due to
* the cancel, needs to acquire that same lock */
grpc_call *lb_call = glb_policy->lb_call;
gpr_mu_unlock(&glb_policy->mu);
/* glb_policy->lb_call and this local lb_call must be consistent at this point
* because glb_policy->lb_call is only assigned in lb_call_init_locked as part
@ -967,11 +961,10 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
}
static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
@ -987,16 +980,15 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
gpr_mu_unlock(&glb_policy->mu);
GRPC_ERROR_UNREF(error);
}
static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
@ -1012,7 +1004,6 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
gpr_mu_unlock(&glb_policy->mu);
GRPC_ERROR_UNREF(error);
}
@ -1025,19 +1016,17 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
query_for_backends_locked(exec_ctx, glb_policy);
}
static void glb_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
gpr_mu_unlock(&glb_policy->mu);
}
static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete) {
static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
grpc_closure_sched(
@ -1048,7 +1037,6 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
glb_policy->deadline = pick_args->deadline;
bool pick_done;
@ -1087,53 +1075,43 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pick_done = false;
}
gpr_mu_unlock(&glb_policy->mu);
return pick_done;
}
static grpc_connectivity_state glb_check_connectivity(
static grpc_connectivity_state glb_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_error **connectivity_error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
grpc_connectivity_state st;
gpr_mu_lock(&glb_policy->mu);
st = grpc_connectivity_state_get(&glb_policy->state_tracker,
connectivity_error);
gpr_mu_unlock(&glb_policy->mu);
return st;
return grpc_connectivity_state_get(&glb_policy->state_tracker,
connectivity_error);
}
static void glb_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
if (glb_policy->rr_policy) {
grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy, closure);
grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
} else {
add_pending_ping(&glb_policy->pending_pings, closure);
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
}
gpr_mu_unlock(&glb_policy->mu);
}
static void glb_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_connectivity_state *current,
grpc_closure *notify) {
static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_connectivity_state *current,
grpc_closure *notify) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &glb_policy->state_tracker, current, notify);
gpr_mu_unlock(&glb_policy->mu);
}
static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
@ -1162,11 +1140,11 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_grpclb_request_destroy(request);
grpc_closure_init(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received, glb_policy,
grpc_schedule_on_exec_ctx);
lb_on_server_status_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_closure_init(&glb_policy->lb_on_response_received,
lb_on_response_received, glb_policy,
grpc_schedule_on_exec_ctx);
lb_on_response_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner, false));
gpr_backoff_init(&glb_policy->lb_call_backoff_state,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
@ -1261,14 +1239,13 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
gpr_mu_lock(&glb_policy->mu);
if (glb_policy->lb_response_payload != NULL) {
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
@ -1342,20 +1319,17 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
gpr_mu_unlock(&glb_policy->mu);
} else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received" weak ref taken in
* query_for_backends_locked() and reused in every reception loop */
gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_response_received_empty_payload");
}
}
static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->shutting_down) {
if (grpc_lb_glb_trace) {
@ -1365,15 +1339,13 @@ static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
}
gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_on_retry_timer");
}
static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = arg;
gpr_mu_lock(&glb_policy->mu);
GPR_ASSERT(glb_policy->lb_call != NULL);
@ -1408,21 +1380,27 @@ static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
grpc_closure_init(&glb_policy->lb_on_call_retry, lb_call_on_retry_timer,
glb_policy, grpc_schedule_on_exec_ctx);
grpc_closure_init(
&glb_policy->lb_on_call_retry, lb_call_on_retry_timer_locked,
glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry, now);
}
gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received");
}
/* Code wiring the policy with the rest of the core */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
glb_destroy, glb_shutdown, glb_pick,
glb_cancel_pick, glb_cancel_picks, glb_ping_one,
glb_exit_idle, glb_check_connectivity, glb_notify_on_state_change};
glb_destroy,
glb_shutdown_locked,
glb_pick_locked,
glb_cancel_pick_locked,
glb_cancel_picks_locked,
glb_ping_one_locked,
glb_exit_idle_locked,
glb_check_connectivity_locked,
glb_notify_on_state_change_locked};
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
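
grpclb's call and timer callbacks are now created with grpc_combiner_scheduler, so they run as *_locked functions without the removed glb_policy->mu. A minimal sketch of arming a retry timer whose callback runs under the policy's combiner, using only the closure, combiner and timer calls visible in this diff; example_ identifiers are illustrative.

#include "src/core/ext/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/timer.h"

typedef struct example_policy {
  grpc_lb_policy base; /* base.combiner guards everything below */
  grpc_timer retry_timer;
  grpc_closure on_retry;
} example_policy;

static void example_on_retry_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                    grpc_error *error) {
  example_policy *p = arg; /* already inside base.combiner: no lock needed */
  (void)exec_ctx;
  (void)p;
  (void)error;
}

static void example_arm_retry(grpc_exec_ctx *exec_ctx, example_policy *p,
                              gpr_timespec deadline, gpr_timespec now) {
  grpc_closure_init(&p->on_retry, example_on_retry_locked, p,
                    grpc_combiner_scheduler(p->base.combiner, false));
  grpc_timer_init(exec_ctx, &p->retry_timer, deadline, &p->on_retry, now);
}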

@ -38,6 +38,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
@ -57,11 +58,11 @@ typedef struct {
grpc_closure connectivity_changed;
/** the selected channel (a grpc_connected_subchannel) */
gpr_atm selected;
/** remaining members are protected by the combiner */
/** the selected channel */
grpc_connected_subchannel *selected;
/** mutex protecting remaining members */
gpr_mu mu;
/** have we started picking? */
int started_picking;
/** are we shut down? */
@ -77,32 +78,24 @@ typedef struct {
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
#define GET_SELECTED(p) \
((grpc_connected_subchannel *)gpr_atm_acq_load(&(p)->selected))
static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
grpc_connected_subchannel *selected = GET_SELECTED(p);
size_t i;
GPR_ASSERT(p->pending_picks == NULL);
for (i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
if (selected != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, selected, "picked_first");
if (p->selected != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
gpr_mu_destroy(&p->mu);
gpr_free(p);
}
static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
grpc_connected_subchannel *selected;
gpr_mu_lock(&p->mu);
selected = GET_SELECTED(p);
p->shutdown = 1;
pp = p->pending_picks;
p->pending_picks = NULL;
@ -110,15 +103,14 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE("Channel shutdown"), "shutdown");
/* cancel subscription */
if (selected != NULL) {
if (p->selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, selected, NULL, NULL, &p->connectivity_changed);
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
} else if (p->num_subchannels > 0) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
}
gpr_mu_unlock(&p->mu);
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
@ -128,12 +120,11 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
}
static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@ -150,17 +141,15 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@ -177,7 +166,6 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
@ -192,63 +180,48 @@ static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
&p->connectivity_changed);
}
static void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
gpr_mu_lock(&p->mu);
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
gpr_mu_unlock(&p->mu);
}
static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete) {
static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
/* Check atomically for a selected channel */
grpc_connected_subchannel *selected = GET_SELECTED(p);
if (selected != NULL) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
if (p->selected != NULL) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
return 1;
}
/* No subchannel selected yet, so acquire lock and then attempt again */
gpr_mu_lock(&p->mu);
selected = GET_SELECTED(p);
if (selected) {
gpr_mu_unlock(&p->mu);
*target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
return 1;
} else {
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->on_complete = on_complete;
p->pending_picks = pp;
gpr_mu_unlock(&p->mu);
return 0;
/* No subchannel selected yet, so try again */
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->on_complete = on_complete;
p->pending_picks = pp;
return 0;
}
static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
pick_first_lb_policy *p = arg;
static void destroy_subchannels_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
size_t i;
size_t num_subchannels = p->num_subchannels;
grpc_subchannel **subchannels;
gpr_mu_lock(&p->mu);
subchannels = p->subchannels;
p->num_subchannels = 0;
p->subchannels = NULL;
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
for (i = 0; i < num_subchannels; i++) {
@ -258,25 +231,19 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(subchannels);
}
static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
pick_first_lb_policy *p = arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
grpc_connected_subchannel *selected;
GRPC_ERROR_REF(error);
gpr_mu_lock(&p->mu);
selected = GET_SELECTED(p);
if (p->shutdown) {
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_ERROR_UNREF(error);
return;
} else if (selected != NULL) {
} else if (p->selected != NULL) {
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */
p->checking_connectivity = GRPC_CHANNEL_SHUTDOWN;
@ -286,7 +253,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_SHUTDOWN) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, selected, p->base.interested_parties,
exec_ctx, p->selected, p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
} else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
@ -301,26 +268,21 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
"connecting_ready");
selected_subchannel = p->subchannels[p->checking_subchannel];
selected =
grpc_subchannel_get_connected_subchannel(selected_subchannel);
GPR_ASSERT(selected != NULL);
GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked_first");
p->selected = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected_subchannel),
"picked_first");
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
grpc_closure_sched(exec_ctx,
grpc_closure_create(destroy_subchannels, p,
grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
destroy_subchannels_locked(exec_ctx, p);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, selected, p->base.interested_parties,
exec_ctx, p->selected, p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
@ -387,48 +349,44 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
}
gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_error **error) {
static grpc_connectivity_state pf_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
grpc_connectivity_state st;
gpr_mu_lock(&p->mu);
st = grpc_connectivity_state_get(&p->state_tracker, error);
gpr_mu_unlock(&p->mu);
return st;
return grpc_connectivity_state_get(&p->state_tracker, error);
}
static void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_connectivity_state *current,
grpc_closure *notify) {
static void pf_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_connectivity_state *current,
grpc_closure *notify) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
gpr_mu_lock(&p->mu);
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
gpr_mu_unlock(&p->mu);
}
static void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
grpc_connected_subchannel *selected = GET_SELECTED(p);
if (selected) {
grpc_connected_subchannel_ping(exec_ctx, selected, closure);
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
} else {
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"));
}
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_destroy, pf_shutdown, pf_pick,
pf_cancel_pick, pf_cancel_picks, pf_ping_one,
pf_exit_idle, pf_check_connectivity, pf_notify_on_state_change};
pf_destroy,
pf_shutdown_locked,
pf_pick_locked,
pf_cancel_pick_locked,
pf_cancel_picks_locked,
pf_ping_one_locked,
pf_exit_idle_locked,
pf_check_connectivity_locked,
pf_notify_on_state_change_locked};
static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
@ -489,10 +447,9 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
}
p->num_subchannels = subchannel_idx;
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p,
grpc_schedule_on_exec_ctx);
gpr_mu_init(&p->mu);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed_locked, p,
grpc_combiner_scheduler(args->combiner, false));
return &p->base;
}
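
With every entry point now running under base.combiner, pick_first's selected subchannel is a plain pointer again: no acquire loads, no release store, and no double-checked fast path in pick. A condensed restatement of that fast path, relying only on fields shown above; example_pick_locked is not a function in the tree.

static int example_pick_locked(grpc_exec_ctx *exec_ctx,
                               pick_first_lb_policy *p,
                               grpc_connected_subchannel **target) {
  (void)exec_ctx;
  if (p->selected != NULL) { /* plain read: we already hold the combiner */
    *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
    return 1;                /* pick completed synchronously */
  }
  return 0;                  /* caller queues a pending pick instead */
}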

@ -67,6 +67,7 @@
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/static_metadata.h"
@ -134,7 +135,6 @@ typedef struct {
struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
gpr_mu mu;
/** total number of addresses received at creation time */
size_t num_addresses;
@ -293,7 +293,6 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
gpr_mu_destroy(&p->mu);
elem = p->ready_list.next;
while (elem != NULL && elem != &p->ready_list) {
@ -309,12 +308,11 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
}
static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
size_t i;
gpr_mu_lock(&p->mu);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
}
@ -335,15 +333,13 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}
gpr_mu_unlock(&p->mu);
}
static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@ -360,17 +356,15 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@ -388,11 +382,11 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
@ -411,23 +405,20 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
}
}
static void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
static void rr_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
gpr_mu_lock(&p->mu);
if (!p->started_picking) {
start_picking(exec_ctx, p);
start_picking_locked(exec_ctx, p);
}
gpr_mu_unlock(&p->mu);
}
static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete) {
static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
ready_list *selected;
gpr_mu_lock(&p->mu);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
@ -449,12 +440,11 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
gpr_mu_unlock(&p->mu);
return 1;
} else {
/* no pick currently available. Save for later in list of pending picks */
if (!p->started_picking) {
start_picking(exec_ctx, p);
start_picking_locked(exec_ctx, p);
}
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
@ -463,7 +453,6 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->user_data = user_data;
p->pending_picks = pp;
gpr_mu_unlock(&p->mu);
return 0;
}
}
@ -538,17 +527,15 @@ static grpc_connectivity_state update_lb_connectivity_status(
return sd->curr_connectivity_state;
}
static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
GRPC_ERROR_REF(error);
gpr_mu_lock(&p->mu);
if (p->shutdown) {
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
GRPC_ERROR_UNREF(error);
return;
@ -645,56 +632,51 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
break;
}
gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_error **error) {
static grpc_connectivity_state rr_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
grpc_connectivity_state st;
gpr_mu_lock(&p->mu);
st = grpc_connectivity_state_get(&p->state_tracker, error);
gpr_mu_unlock(&p->mu);
return st;
return grpc_connectivity_state_get(&p->state_tracker, error);
}
static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_connectivity_state *current,
grpc_closure *notify) {
static void rr_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol,
grpc_connectivity_state *current,
grpc_closure *notify) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
gpr_mu_lock(&p->mu);
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
gpr_mu_unlock(&p->mu);
}
static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *selected;
grpc_connected_subchannel *target;
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
gpr_mu_unlock(&p->mu);
target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"rr_picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
} else {
gpr_mu_unlock(&p->mu);
grpc_closure_sched(exec_ctx, closure,
GRPC_ERROR_CREATE("Round Robin not connected"));
}
}
static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_destroy, rr_shutdown, rr_pick,
rr_cancel_pick, rr_cancel_picks, rr_ping_one,
rr_exit_idle, rr_check_connectivity, rr_notify_on_state_change};
rr_destroy,
rr_shutdown_locked,
rr_pick_locked,
rr_cancel_pick_locked,
rr_cancel_picks_locked,
rr_ping_one_locked,
rr_exit_idle_locked,
rr_check_connectivity_locked,
rr_notify_on_state_change_locked};
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
@ -762,7 +744,8 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
}
++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed, sd, grpc_schedule_on_exec_ctx);
rr_connectivity_changed_locked, sd,
grpc_combiner_scheduler(args->combiner, false));
}
}
if (subchannel_idx == 0) {
@ -779,7 +762,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
p->ready_list.next = NULL;
p->ready_list_last_pick = &p->ready_list;
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
@ -787,7 +770,6 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "Created RR policy at %p with %lu subchannels",
(void *)p, (unsigned long)p->num_subchannels);
}
gpr_mu_init(&p->mu);
return &p->base;
}

@ -100,7 +100,7 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(call_data));

@ -40,6 +40,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/backoff.h"
@ -63,8 +64,6 @@ typedef struct {
/** pollset_set to drive the name resolution process */
grpc_pollset_set *interested_parties;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** are we currently resolving? */
bool resolving;
/** which version of the result have we published? */
@ -95,18 +94,20 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r);
static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_channel_args **target_result,
grpc_closure *on_complete);
static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *r);
static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_channel_args **target_result,
grpc_closure *on_complete);
static const grpc_resolver_vtable dns_resolver_vtable = {
dns_destroy, dns_shutdown, dns_channel_saw_error, dns_next};
dns_destroy, dns_shutdown_locked, dns_channel_saw_error_locked,
dns_next_locked};
static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->have_retry_timer) {
grpc_timer_cancel(exec_ctx, &r->retry_timer);
}
@ -116,25 +117,21 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
GRPC_ERROR_CREATE("Resolver Shutdown"));
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
}
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (!r->resolving) {
gpr_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
}
gpr_mu_unlock(&r->mu);
}
static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **target_result,
grpc_closure *on_complete) {
static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **target_result,
grpc_closure *on_complete) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
@ -144,30 +141,26 @@ static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
} else {
dns_maybe_finish_next_locked(exec_ctx, r);
}
gpr_mu_unlock(&r->mu);
}
static void dns_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
dns_resolver *r = arg;
gpr_mu_lock(&r->mu);
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
dns_start_resolving_locked(exec_ctx, r);
}
}
gpr_mu_unlock(&r->mu);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
}
static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
dns_resolver *r = arg;
grpc_channel_args *result = NULL;
gpr_mu_lock(&r->mu);
GPR_ASSERT(r->resolving);
r->resolving = false;
if (r->addresses != NULL) {
@ -198,8 +191,8 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
grpc_closure_init(&r->on_retry, dns_on_retry_timer, r,
grpc_schedule_on_exec_ctx);
grpc_closure_init(&r->on_retry, dns_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner, false));
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
}
if (r->resolved_result != NULL) {
@ -208,7 +201,6 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
r->resolved_result = result;
r->resolved_version++;
dns_maybe_finish_next_locked(exec_ctx, r);
gpr_mu_unlock(&r->mu);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
@ -221,7 +213,8 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
r->addresses = NULL;
grpc_resolve_address(
exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
grpc_closure_create(dns_on_resolved, r, grpc_schedule_on_exec_ctx),
grpc_closure_create(dns_on_resolved_locked, r,
grpc_combiner_scheduler(r->base.combiner, false)),
&r->addresses);
}
@ -240,7 +233,6 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
dns_resolver *r = (dns_resolver *)gr;
gpr_mu_destroy(&r->mu);
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
}
@ -264,8 +256,7 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
// Create resolver.
dns_resolver *r = gpr_malloc(sizeof(dns_resolver));
memset(r, 0, sizeof(*r));
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &dns_resolver_vtable);
grpc_resolver_init(&r->base, &dns_resolver_vtable, args->combiner);
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
r->channel_args = grpc_channel_args_copy(args->args);

@ -45,6 +45,7 @@
#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/slice/slice_internal.h"
@ -58,8 +59,6 @@ typedef struct {
grpc_lb_addresses *addresses;
/** channel args */
grpc_channel_args *channel_args;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** have we published? */
bool published;
/** pending next completion, or NULL */
@ -73,48 +72,43 @@ static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
sockaddr_resolver *r);
static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *r);
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_channel_args **target_result,
grpc_closure *on_complete);
static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *r);
static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_channel_args **target_result,
grpc_closure *on_complete);
static const grpc_resolver_vtable sockaddr_resolver_vtable = {
sockaddr_destroy, sockaddr_shutdown, sockaddr_channel_saw_error,
sockaddr_next};
sockaddr_destroy, sockaddr_shutdown_locked,
sockaddr_channel_saw_error_locked, sockaddr_next_locked};
static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_result = NULL;
grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
}
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
r->published = false;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
gpr_mu_unlock(&r->mu);
}
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **target_result,
grpc_closure *on_complete) {
static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
grpc_channel_args **target_result,
grpc_closure *on_complete) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
gpr_mu_unlock(&r->mu);
}
static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
@ -131,7 +125,6 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
sockaddr_resolver *r = (sockaddr_resolver *)gr;
gpr_mu_destroy(&r->mu);
grpc_lb_addresses_destroy(exec_ctx, r->addresses);
grpc_channel_args_destroy(exec_ctx, r->channel_args);
gpr_free(r);
@ -201,8 +194,7 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
memset(r, 0, sizeof(*r));
r->addresses = addresses;
r->channel_args = grpc_channel_args_copy(args->args);
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);
return &r->base;
}
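For readers following the *_locked renaming above: the round_robin policy and both resolvers now rely on the channel's combiner for serialization instead of a per-object gpr_mu. A minimal sketch of the pattern, using only the closure/scheduler calls that already appear in this patch; the callback and helper names below are hypothetical, and the closure.h include path is assumed.

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"

/* Hypothetical callback: scheduled through the combiner's scheduler, it never
   runs concurrently with other *_locked callbacks sharing that combiner, so
   the state it touches needs no mutex. */
static void my_work_locked(grpc_exec_ctx *exec_ctx, void *arg,
                           grpc_error *error) {
  /* ... mutate resolver / LB-policy state freely here ... */
}

/* Hypothetical helper: queue my_work_locked onto a combiner, mirroring the
   grpc_closure_create(..., grpc_combiner_scheduler(combiner, false)) calls
   made by dns_resolver and round_robin above. */
static void schedule_under_combiner(grpc_exec_ctx *exec_ctx,
                                    grpc_combiner *combiner, void *arg) {
  grpc_closure_sched(exec_ctx,
                     grpc_closure_create(my_work_locked, arg,
                                         grpc_combiner_scheduler(combiner,
                                                                 false)),
                     GRPC_ERROR_NONE);
}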

@ -55,11 +55,6 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/server.h"
typedef struct pending_handshake_manager_node {
grpc_handshake_manager *handshake_mgr;
struct pending_handshake_manager_node *next;
} pending_handshake_manager_node;
typedef struct {
grpc_server *server;
grpc_tcp_server *tcp_server;
@ -68,7 +63,7 @@ typedef struct {
bool shutdown;
grpc_closure tcp_server_shutdown_complete;
grpc_closure *server_destroy_listener_done;
pending_handshake_manager_node *pending_handshake_mgrs;
grpc_handshake_manager *pending_handshake_mgrs;
} server_state;
typedef struct {
@ -78,44 +73,6 @@ typedef struct {
grpc_handshake_manager *handshake_mgr;
} server_connection_state;
static void pending_handshake_manager_add_locked(
server_state *state, grpc_handshake_manager *handshake_mgr) {
pending_handshake_manager_node *node = gpr_malloc(sizeof(*node));
node->handshake_mgr = handshake_mgr;
node->next = state->pending_handshake_mgrs;
state->pending_handshake_mgrs = node;
}
static void pending_handshake_manager_remove_locked(
server_state *state, grpc_handshake_manager *handshake_mgr) {
pending_handshake_manager_node **prev_node = &state->pending_handshake_mgrs;
for (pending_handshake_manager_node *node = state->pending_handshake_mgrs;
node != NULL; node = node->next) {
if (node->handshake_mgr == handshake_mgr) {
*prev_node = node->next;
gpr_free(node);
break;
}
prev_node = &node->next;
}
}
static void pending_handshake_manager_shutdown_locked(grpc_exec_ctx *exec_ctx,
server_state *state,
grpc_error *why) {
pending_handshake_manager_node *prev_node = NULL;
for (pending_handshake_manager_node *node = state->pending_handshake_mgrs;
node != NULL; node = node->next) {
grpc_handshake_manager_shutdown(exec_ctx, node->handshake_mgr,
GRPC_ERROR_REF(why));
gpr_free(prev_node);
prev_node = node;
}
gpr_free(prev_node);
state->pending_handshake_mgrs = NULL;
GRPC_ERROR_UNREF(why);
}
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_handshaker_args *args = arg;
@ -153,8 +110,9 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args_destroy(exec_ctx, args->args);
}
}
pending_handshake_manager_remove_locked(connection_state->server_state,
connection_state->handshake_mgr);
grpc_handshake_manager_pending_list_remove(
&connection_state->server_state->pending_handshake_mgrs,
connection_state->handshake_mgr);
gpr_mu_unlock(&connection_state->server_state->mu);
grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp_server);
@ -174,7 +132,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
return;
}
grpc_handshake_manager *handshake_mgr = grpc_handshake_manager_create();
pending_handshake_manager_add_locked(state, handshake_mgr);
grpc_handshake_manager_pending_list_add(&state->pending_handshake_mgrs,
handshake_mgr);
gpr_mu_unlock(&state->mu);
grpc_tcp_server_ref(state->tcp_server);
server_connection_state *connection_state =
@ -213,8 +172,8 @@ static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&state->mu);
grpc_closure *destroy_done = state->server_destroy_listener_done;
GPR_ASSERT(state->shutdown);
pending_handshake_manager_shutdown_locked(exec_ctx, state,
GRPC_ERROR_REF(error));
grpc_handshake_manager_pending_list_shutdown_all(
exec_ctx, state->pending_handshake_mgrs, GRPC_ERROR_REF(error));
gpr_mu_unlock(&state->mu);
// Flush queued work before destroying handshaker factory, since that
// may do a synchronous unref.

@ -1114,8 +1114,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_chttp2_list_add_waiting_for_concurrency(t, s);
maybe_start_some_streams(exec_ctx, t);
} else {
grpc_chttp2_cancel_stream(exec_ctx, t, s,
GRPC_ERROR_CREATE("Transport closed"));
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
grpc_error_set_int(GRPC_ERROR_CREATE("Transport closed"),
GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAVAILABLE));
}
} else {
GPR_ASSERT(s->id != 0);

@ -173,7 +173,6 @@ grpc_error *grpc_call_stack_init(
grpc_slice path, gpr_timespec start_time, gpr_timespec deadline,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
size_t count = channel_stack->count;
grpc_call_element *call_elems;
char *user_data;
@ -188,13 +187,15 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
args.start_time = start_time;
const grpc_call_element_args args = {
.start_time = start_time,
.call_stack = call_stack,
.server_transport_data = transport_server_data,
.context = context,
.path = path,
.deadline = deadline,
};
for (i = 0; i < count; i++) {
args.call_stack = call_stack;
args.server_transport_data = transport_server_data;
args.context = context;
args.path = path;
args.deadline = deadline;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;

@ -131,7 +131,7 @@ typedef struct {
argument. */
grpc_error *(*init_call_elem)(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args);
const grpc_call_element_args *args);
void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent);

@ -274,7 +274,7 @@ static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;

@ -83,7 +83,7 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
int r = grpc_transport_init_stream(

@ -52,9 +52,6 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = arg;
grpc_deadline_state* deadline_state = elem->call_data;
gpr_mu_lock(&deadline_state->timer_mu);
deadline_state->timer_pending = false;
gpr_mu_unlock(&deadline_state->timer_mu);
if (error != GRPC_ERROR_CANCELLED) {
grpc_call_element_signal_error(
exec_ctx, elem,
@ -66,53 +63,64 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
}
// Starts the deadline timer.
static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
// Note: We do not start the timer if there is already a timer
// pending. This should be okay, because this is only called from two
// functions exported by this module: grpc_deadline_state_start(), which
// starts the initial timer, and grpc_deadline_state_reset(), which
// cancels any pre-existing timer before starting a new one. In
// particular, we want to ensure that if grpc_deadline_state_start()
// winds up trying to start the timer after grpc_deadline_state_reset()
// has already done so, we ignore the value from the former.
if (!deadline_state->timer_pending &&
gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// Take a reference to the call stack, to be owned by the timer.
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
deadline_state->timer_pending = true;
grpc_closure_init(&deadline_state->timer_callback, timer_callback, elem,
grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline,
&deadline_state->timer_callback,
gpr_now(GPR_CLOCK_MONOTONIC));
}
}
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
return;
}
grpc_deadline_state* deadline_state = elem->call_data;
gpr_mu_lock(&deadline_state->timer_mu);
start_timer_if_needed_locked(exec_ctx, elem, deadline);
gpr_mu_unlock(&deadline_state->timer_mu);
grpc_deadline_timer_state cur_state;
grpc_closure* closure = NULL;
retry:
cur_state =
(grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
switch (cur_state) {
case GRPC_DEADLINE_STATE_PENDING:
// Note: We do not start the timer if there is already a timer pending.
return;
case GRPC_DEADLINE_STATE_FINISHED:
if (gpr_atm_rel_cas(&deadline_state->timer_state,
GRPC_DEADLINE_STATE_FINISHED,
GRPC_DEADLINE_STATE_PENDING)) {
// If we've already created and destroyed a timer, we always create a
// new closure: we have no other guarantee that the inlined closure is
// not in use (it may hold a pending call to timer_callback)
closure = grpc_closure_create(timer_callback, elem,
grpc_schedule_on_exec_ctx);
} else {
goto retry;
}
break;
case GRPC_DEADLINE_STATE_INITIAL:
if (gpr_atm_rel_cas(&deadline_state->timer_state,
GRPC_DEADLINE_STATE_INITIAL,
GRPC_DEADLINE_STATE_PENDING)) {
closure =
grpc_closure_init(&deadline_state->timer_callback, timer_callback,
elem, grpc_schedule_on_exec_ctx);
} else {
goto retry;
}
break;
}
GPR_ASSERT(closure);
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
gpr_now(GPR_CLOCK_MONOTONIC));
}
// Cancels the deadline timer.
static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
if (deadline_state->timer_pending) {
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
deadline_state->timer_pending = false;
}
}
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
gpr_mu_lock(&deadline_state->timer_mu);
cancel_timer_if_needed_locked(exec_ctx, deadline_state);
gpr_mu_unlock(&deadline_state->timer_mu);
if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
GRPC_DEADLINE_STATE_FINISHED)) {
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
} else {
// timer was either in STATE_INITIAL (nothing to cancel)
// OR in STATE_FINISHED (again, nothing to cancel)
}
}
// Callback run when the call is complete.
@ -120,8 +128,8 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_deadline_state* deadline_state = arg;
cancel_timer_if_needed(exec_ctx, deadline_state);
// Invoke the next callback.
deadline_state->next_on_complete->cb(
exec_ctx, deadline_state->next_on_complete->cb_arg, error);
grpc_closure_run(exec_ctx, deadline_state->next_on_complete,
GRPC_ERROR_REF(error));
}
// Inject our own on_complete callback into op.
@ -138,14 +146,12 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_deadline_state* deadline_state = elem->call_data;
memset(deadline_state, 0, sizeof(*deadline_state));
deadline_state->call_stack = call_stack;
gpr_mu_init(&deadline_state->timer_mu);
}
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
grpc_deadline_state* deadline_state = elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
gpr_mu_destroy(&deadline_state->timer_mu);
}
// Callback and associated state for starting the timer after call stack
@ -187,10 +193,8 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
gpr_mu_lock(&deadline_state->timer_mu);
cancel_timer_if_needed_locked(exec_ctx, deadline_state);
start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
gpr_mu_unlock(&deadline_state->timer_mu);
cancel_timer_if_needed(exec_ctx, deadline_state);
start_timer_if_needed(exec_ctx, elem, new_deadline);
}
void grpc_deadline_state_client_start_transport_stream_op(
@ -244,7 +248,7 @@ typedef struct server_call_data {
// Constructor for call_data. Used for both client and server filters.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_call_element_args* args) {
const grpc_call_element_args* args) {
// Note: size of call data is different between client and server.
memset(elem->call_data, 0, elem->filter->sizeof_call_data);
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);

@ -35,16 +35,18 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/timer.h"
typedef enum grpc_deadline_timer_state {
GRPC_DEADLINE_STATE_INITIAL,
GRPC_DEADLINE_STATE_PENDING,
GRPC_DEADLINE_STATE_FINISHED
} grpc_deadline_timer_state;
// State used for filters that enforce call deadlines.
// Must be the first field in the filter's call_data.
typedef struct grpc_deadline_state {
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
// Guards access to timer_pending and timer.
gpr_mu timer_mu;
// True if the timer callback is currently pending.
bool timer_pending;
// The deadline timer.
gpr_atm timer_state;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when the call is complete.
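The mutex-guarded timer_pending flag is gone; arming and cancelling the deadline timer is now a CAS-driven state machine over timer_state. A minimal sketch of the two transitions that matter, assuming only the gpr_atm helpers already used in this patch; the function names here are hypothetical.

#include <stdbool.h>
#include <grpc/support/atm.h>
#include "src/core/lib/channel/deadline_filter.h"

/* INITIAL -> PENDING: whoever wins this CAS is the one that arms the timer.
   (start_timer_if_needed also re-arms a FINISHED timer the same way, but with
   a freshly created closure.) */
static bool try_arm_deadline_timer(gpr_atm *timer_state) {
  return gpr_atm_rel_cas(timer_state, GRPC_DEADLINE_STATE_INITIAL,
                         GRPC_DEADLINE_STATE_PENDING);
}

/* PENDING -> FINISHED: only the winner of this CAS calls grpc_timer_cancel;
   in INITIAL or FINISHED there is nothing to cancel. */
static bool try_retire_deadline_timer(gpr_atm *timer_state) {
  return gpr_atm_rel_cas(timer_state, GRPC_DEADLINE_STATE_PENDING,
                         GRPC_DEADLINE_STATE_FINISHED);
}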

@ -92,6 +92,10 @@ struct grpc_handshake_manager {
void* user_data;
// Handshaker args.
grpc_handshaker_args args;
// Links to the previous and next managers in a list of all pending handshakes.
// Used on the server side only.
grpc_handshake_manager* prev;
grpc_handshake_manager* next;
};
grpc_handshake_manager* grpc_handshake_manager_create() {
@ -102,6 +106,39 @@ grpc_handshake_manager* grpc_handshake_manager_create() {
return mgr;
}
void grpc_handshake_manager_pending_list_add(grpc_handshake_manager** head,
grpc_handshake_manager* mgr) {
GPR_ASSERT(mgr->prev == NULL);
GPR_ASSERT(mgr->next == NULL);
mgr->next = *head;
if (*head) {
(*head)->prev = mgr;
}
*head = mgr;
}
void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head,
grpc_handshake_manager* mgr) {
if (mgr->next != NULL) {
mgr->next->prev = mgr->prev;
}
if (mgr->prev != NULL) {
mgr->prev->next = mgr->next;
} else {
GPR_ASSERT(*head == mgr);
*head = mgr->next;
}
}
void grpc_handshake_manager_pending_list_shutdown_all(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why) {
while (head != NULL) {
grpc_handshake_manager_shutdown(exec_ctx, head, GRPC_ERROR_REF(why));
head = head->next;
}
GRPC_ERROR_UNREF(why);
}
static bool is_power_of_2(size_t n) { return (n & (n - 1)) == 0; }
void grpc_handshake_manager_add(grpc_handshake_manager* mgr,

@ -163,4 +163,20 @@ void grpc_handshake_manager_do_handshake(
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data);
/// Add \a mgr to the server-side list of all pending handshake managers; the
/// list starts with \a *head.
// Not thread-safe. Caller needs to synchronize.
void grpc_handshake_manager_pending_list_add(grpc_handshake_manager** head,
grpc_handshake_manager* mgr);
/// Remove \a mgr from the server-side list of all pending handshake managers.
// Not thread-safe. Caller needs to synchronize.
void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head,
grpc_handshake_manager* mgr);
/// Shutdown all pending handshake managers on the server side.
// Not thread-safe. Caller needs to synchronize.
void grpc_handshake_manager_pending_list_shutdown_all(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why);
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */
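A minimal usage sketch for the pending-list helpers declared above (illustrative only: listener_state and its mutex are hypothetical stand-ins for the caller-provided synchronization the comments require).

#include <grpc/support/sync.h>
#include "src/core/lib/channel/handshaker.h"

typedef struct {
  gpr_mu mu;                                       /* caller-owned lock */
  grpc_handshake_manager *pending_handshake_mgrs;  /* intrusive list head */
} listener_state;

/* Track a freshly created handshake until its completion callback removes it. */
static void track_handshake(listener_state *state,
                            grpc_handshake_manager *mgr) {
  gpr_mu_lock(&state->mu);
  grpc_handshake_manager_pending_list_add(&state->pending_handshake_mgrs, mgr);
  gpr_mu_unlock(&state->mu);
}

/* On listener shutdown, abort every handshake still in flight.
   pending_list_shutdown_all takes ownership of the 'why' reference. */
static void abort_pending_handshakes(grpc_exec_ctx *exec_ctx,
                                     listener_state *state, grpc_error *why) {
  gpr_mu_lock(&state->mu);
  grpc_handshake_manager_pending_list_shutdown_all(
      exec_ctx, state->pending_handshake_mgrs, why);
  gpr_mu_unlock(&state->mu);
}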

@ -386,7 +386,7 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
calld->on_done_recv_initial_metadata = NULL;
calld->on_done_recv_trailing_metadata = NULL;

@ -252,12 +252,11 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
err);
grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true;
}
calld->on_complete->cb(exec_ctx, calld->on_complete->cb_arg, err);
grpc_closure_run(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err));
}
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
@ -268,8 +267,7 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
/* do nothing. This is probably a GET request, and payload will be returned
in hs_on_complete callback. */
} else {
calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
err);
grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
}
}
@ -343,7 +341,7 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */

@ -166,7 +166,7 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
// Constructor for call_data.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_call_element_args* args) {
const grpc_call_element_args* args) {
channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;

@ -137,8 +137,8 @@ typedef enum {
} grpc_error_times;
/// The following "special" errors can be propagated without allocating memory.
/// They are always even so that other code (particularly combiner locks) can
/// safely use the lower bit for themselves.
/// They are always even so that other code (particularly combiner locks,
/// polling engines) can safely use the lower bit for themselves.
#define GRPC_ERROR_NONE ((grpc_error *)NULL)
#define GRPC_ERROR_OOM ((grpc_error *)2)

@ -68,7 +68,7 @@ static int grpc_polling_trace = 0; /* Disabled by default */
gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
}
/* Uncomment the following enable extra checks on poll_object operations */
/* Uncomment the following to enable extra checks on poll_object operations */
/* #define PO_DEBUG */
static int grpc_wakeup_signal = -1;
@ -140,24 +140,61 @@ struct grpc_fd {
Ref/Unref by two to avoid altering the orphaned bit */
gpr_atm refst;
/* Indicates that the fd is shutdown and that any pending read/write closures
should fail */
bool shutdown;
grpc_error *shutdown_error; /* reason for shutdown: set iff shutdown==true */
/* Internally stores data of type (grpc_error *). If the FD is shutdown, this
contains the reason for shutdown (i.e. a pointer to grpc_error) ORed with
FD_SHUTDOWN_BIT. Since address allocations are word-aligned, the lower bit
of (grpc_error *) addresses is guaranteed to be zero. Even if the
(grpc_error *) is of a special type like GRPC_ERROR_NONE or GRPC_ERROR_OOM,
the lower bit is guaranteed to be zero.
/* The fd is either closed or we relinquished control of it. In either cases,
this indicates that the 'fd' on this structure is no longer valid */
Once an fd is shutdown, any pending or future read/write closures on the
fd should fail */
gpr_atm shutdown_error;
/* The fd is either closed or we relinquished control of it. In either
case, this indicates that the 'fd' on this structure is no longer
valid */
bool orphaned;
/* TODO: sreek - Move this to a lockfree implementation */
grpc_closure *read_closure;
grpc_closure *write_closure;
/* Closures to call when the fd is readable or writable respectively. These
fields contain one of the following values:
CLOSURE_READY : The fd has an I/O event of interest but there is no
closure yet to execute
CLOSURE_NOT_READY : The fd has no I/O event of interest
closure ptr : The closure to be executed when the fd has an I/O
event of interest
shutdown_error | FD_SHUTDOWN_BIT :
'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
This indicates that the fd is shutdown. Since all
memory allocations are word-aligned, the lower two
bits of the shutdown_error pointer are always 0. So
it is safe to OR these with FD_SHUTDOWN_BIT
Valid state transitions:
<closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
  |  |                         ^   |    ^                        |  |
  |  |                         |   |    |                        |  |
  |  +-------------4-----------+   6    +----------2-------------+  |
  |                                |                                |
  |                                v                                |
  +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7--------+
For 1, 4 : See set_ready() function
For 2, 3 : See notify_on() function
For 5,6,7: See set_shutdown() function */
gpr_atm read_closure;
gpr_atm write_closure;
struct grpc_fd *freelist_next;
grpc_closure *on_done_closure;
/* The pollset that last noticed that the fd is readable */
grpc_pollset *read_notifier_pollset;
/* The pollset that last noticed that the fd is readable. The actual type
* stored in this is (grpc_pollset *) */
gpr_atm read_notifier_pollset;
grpc_iomgr_object iomgr_object;
};
@ -180,8 +217,10 @@ static void fd_unref(grpc_fd *fd);
static void fd_global_init(void);
static void fd_global_shutdown(void);
#define CLOSURE_NOT_READY ((grpc_closure *)0)
#define CLOSURE_READY ((grpc_closure *)1)
#define CLOSURE_NOT_READY ((gpr_atm)0)
#define CLOSURE_READY ((gpr_atm)2)
#define FD_SHUTDOWN_BIT 1
/*******************************************************************************
* Polling island Declarations
@ -908,7 +947,11 @@ static void unref_by(grpc_fd *fd, int n) {
fd->freelist_next = fd_freelist;
fd_freelist = fd;
grpc_iomgr_unregister_object(&fd->iomgr_object);
if (fd->shutdown) GRPC_ERROR_UNREF(fd->shutdown_error);
grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
/* Clear the least significant bit if it is set (in case the fd was shutdown) */
err = (grpc_error *)((intptr_t)err & ~FD_SHUTDOWN_BIT);
GRPC_ERROR_UNREF(err);
gpr_mu_unlock(&fd_freelist_mu);
} else {
@ -972,13 +1015,14 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
new_fd->shutdown = false;
gpr_atm_no_barrier_store(&new_fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE);
new_fd->orphaned = false;
new_fd->read_closure = CLOSURE_NOT_READY;
new_fd->write_closure = CLOSURE_NOT_READY;
gpr_atm_no_barrier_store(&new_fd->read_closure, CLOSURE_NOT_READY);
gpr_atm_no_barrier_store(&new_fd->write_closure, CLOSURE_NOT_READY);
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
new_fd->on_done_closure = NULL;
new_fd->read_notifier_pollset = NULL;
gpr_mu_unlock(&new_fd->po.mu);
@ -1060,101 +1104,206 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
GRPC_ERROR_UNREF(error);
}
static grpc_error *fd_shutdown_error(grpc_fd *fd) {
if (!fd->shutdown) {
return GRPC_ERROR_NONE;
} else {
return GRPC_ERROR_CREATE_REFERENCING("FD shutdown", &fd->shutdown_error, 1);
static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
grpc_closure *closure) {
while (true) {
/* Fast-path: CLOSURE_NOT_READY -> <closure>.
The 'release' cas here matches the 'acquire' load in set_ready and
set_shutdown ensuring that the closure (scheduled by set_ready or
set_shutdown) happens-after the I/O event on the fd */
if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
return; /* Fast-path successful. Return */
}
/* Slowpath. The 'acquire' load matches the 'release' cas in set_ready and
set_shutdown */
gpr_atm curr = gpr_atm_acq_load(state);
switch (curr) {
case CLOSURE_NOT_READY: {
break; /* retry */
}
case CLOSURE_READY: {
/* Change the state to CLOSURE_NOT_READY. Schedule the closure if
successful. If not, the state most likely transitioned to shutdown.
We should retry.
This can be a no-barrier cas since the state is being transitioned to
CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
closure when transitioning out of CLOSURE_NOT_READY state (i.e. there
is no other code that needs to 'happen-after' this) */
if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
return; /* Slow-path successful. Return */
}
break; /* retry */
}
default: {
/* 'curr' is either a closure or the fd is shutdown (in which case 'curr'
contains a pointer to the shutdown-error). If the fd is shutdown,
schedule the closure with the shutdown error */
if ((curr & FD_SHUTDOWN_BIT) > 0) {
grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
grpc_closure_sched(
exec_ctx, closure,
GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
return;
}
/* There is already a closure! This indicates a bug in the code */
gpr_log(GPR_ERROR,
"notify_on called with a previous callback still pending");
abort();
}
}
}
GPR_UNREACHABLE_CODE(return );
}
static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st, grpc_closure *closure) {
if (fd->shutdown) {
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
/* not ready ==> switch to a waiting state by setting the closure */
*st = closure;
} else if (*st == CLOSURE_READY) {
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd));
} else {
/* upcallptr was set to a different closure. This is an error! */
gpr_log(GPR_ERROR,
"User called a notify_on function with a previous callback still "
"pending");
abort();
static void set_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
grpc_error *shutdown_err) {
/* Try the fast-path first (i.e. expect the current value to be
CLOSURE_NOT_READY) */
gpr_atm curr = CLOSURE_NOT_READY;
gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
while (true) {
/* The 'release' cas here matches the 'acquire' load in notify_on to ensure
that the closure it schedules 'happens-after' the set_shutdown is called
on the fd */
if (gpr_atm_rel_cas(state, curr, new_state)) {
return; /* Fast-path successful. Return */
}
/* Fallback to slowpath. This 'acquire' load matches the 'release' cas in
notify_on and set_ready */
curr = gpr_atm_acq_load(state);
switch (curr) {
case CLOSURE_READY: {
break; /* retry */
}
case CLOSURE_NOT_READY: {
break; /* retry */
}
default: {
/* 'curr' is either a closure or the fd is already shutdown */
/* If fd is already shutdown, we are done */
if ((curr & FD_SHUTDOWN_BIT) > 0) {
return;
}
/* Fd is not shutdown. Schedule the closure and move the state to
shutdown state. The 'release' cas here matches the 'acquire' load in
notify_on to ensure that the closure it schedules 'happens-after'
the set_shutdown is called on the fd */
if (gpr_atm_rel_cas(state, curr, new_state)) {
grpc_closure_sched(
exec_ctx, (grpc_closure *)curr,
GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
return;
}
/* 'curr' was a closure but now changed to a different state. We will
have to retry */
break;
}
}
}
GPR_UNREACHABLE_CODE(return );
}
/* returns 1 if state becomes not ready */
static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st) {
if (*st == CLOSURE_READY) {
/* duplicate ready ==> ignore */
return 0;
} else if (*st == CLOSURE_NOT_READY) {
/* not ready, and not waiting ==> flag ready */
*st = CLOSURE_READY;
return 0;
} else {
/* waiting ==> queue closure */
grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd));
*st = CLOSURE_NOT_READY;
return 1;
static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state) {
/* Try an optimistic case first (i.e. assume current state is
CLOSURE_NOT_READY).
This 'release' cas matches the 'acquire' load in notify_on ensuring that
any closure (scheduled by notify_on) 'happens-after' the return from
epoll_pwait */
if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
return; /* early out */
}
/* The 'acquire' load here matches the 'release' cas in notify_on and
set_shutdown */
gpr_atm curr = gpr_atm_acq_load(state);
switch (curr) {
case CLOSURE_READY: {
/* Already ready. We are done here */
break;
}
case CLOSURE_NOT_READY: {
/* The state was not CLOSURE_NOT_READY when we checked initially at the
beginning of this function but now it is CLOSURE_NOT_READY again.
This is only possible if the state transitioned out of
CLOSURE_NOT_READY to either CLOSURE_READY or <some closure> and then
back to CLOSURE_NOT_READY again (i.e after we entered this function,
the fd became "ready" and the necessary actions were already done).
So there is no need to make the state CLOSURE_READY now */
break;
}
default: {
/* 'curr' is either a closure or the fd is shutdown */
if ((curr & FD_SHUTDOWN_BIT) > 0) {
/* The fd is shutdown. Do nothing */
} else if (gpr_atm_no_barrier_cas(state, curr, CLOSURE_NOT_READY)) {
/* The cas above was no-barrier since the state is being transitioned to
CLOSURE_NOT_READY; notify_on and set_shutdown do not schedule any
closures when transitioning out of CLOSURE_NOT_READY state (i.e. there
is no other code that needs to 'happen-after' this) */
grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
}
/* else the state changed again (only possible by a racing set_ready or
set_shutdown). In both cases the closure would have been scheduled for
execution, so we are done here */
break;
}
}
}
static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
grpc_fd *fd) {
grpc_pollset *notifier = NULL;
gpr_mu_lock(&fd->po.mu);
notifier = fd->read_notifier_pollset;
gpr_mu_unlock(&fd->po.mu);
return notifier;
gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
return (grpc_pollset *)notifier;
}
static bool fd_is_shutdown(grpc_fd *fd) {
gpr_mu_lock(&fd->po.mu);
const bool r = fd->shutdown;
gpr_mu_unlock(&fd->po.mu);
return r;
grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
return (((intptr_t)err & FD_SHUTDOWN_BIT) > 0);
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
gpr_mu_lock(&fd->po.mu);
/* Do the actual shutdown only once */
if (!fd->shutdown) {
fd->shutdown = true;
fd->shutdown_error = why;
/* Store the shutdown error ORed with FD_SHUTDOWN_BIT in fd->shutdown_error */
if (gpr_atm_rel_cas(&fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE,
(gpr_atm)why | FD_SHUTDOWN_BIT)) {
shutdown(fd->fd, SHUT_RDWR);
/* Flush any pending read and write closures. Since fd->shutdown is 'true'
at this point, the closures would be called with 'success = false' */
set_ready_locked(exec_ctx, fd, &fd->read_closure);
set_ready_locked(exec_ctx, fd, &fd->write_closure);
set_shutdown(exec_ctx, fd, &fd->read_closure, why);
set_shutdown(exec_ctx, fd, &fd->write_closure, why);
} else {
/* Shutdown already called */
GRPC_ERROR_UNREF(why);
}
gpr_mu_unlock(&fd->po.mu);
}
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
gpr_mu_lock(&fd->po.mu);
notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
gpr_mu_unlock(&fd->po.mu);
notify_on(exec_ctx, fd, &fd->read_closure, closure);
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
gpr_mu_lock(&fd->po.mu);
notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
gpr_mu_unlock(&fd->po.mu);
notify_on(exec_ctx, fd, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
@ -1343,18 +1492,19 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
/* Need the fd->po.mu since we might be racing with fd_notify_on_read */
gpr_mu_lock(&fd->po.mu);
set_ready_locked(exec_ctx, fd, &fd->read_closure);
fd->read_notifier_pollset = notifier;
gpr_mu_unlock(&fd->po.mu);
set_ready(exec_ctx, fd, &fd->read_closure);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
sets (This can happen briefly during polling island merges). In such cases
it does not really matter which notifier is set as the read_notifier_pollset
(They would both point to the same polling island anyway) */
/* Use release store to match with acquire load in fd_get_read_notifier */
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
/* Need the fd->po.mu since we might be racing with fd_notify_on_write */
gpr_mu_lock(&fd->po.mu);
set_ready_locked(exec_ctx, fd, &fd->write_closure);
gpr_mu_unlock(&fd->po.mu);
set_ready(exec_ctx, fd, &fd->write_closure);
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
@ -1737,7 +1887,7 @@ retry:
(void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
(void *)bag);
/* No need to lock 'pi_new' here since this is a new polling island
* and no one has a reference to it yet */
and no one has a reference to it yet */
polling_island_remove_all_fds_locked(pi_new, true, &error);
/* Ref and unref so that the polling island gets deleted during unref
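The fd changes above all lean on one idiom: a single gpr_atm word that holds either a small sentinel, a closure pointer, or an error pointer tagged in its low bit. A tiny self-contained sketch of that idiom; the names below are illustrative and not from this file.

#include <stdbool.h>
#include <grpc/support/atm.h>

/* Word-aligned allocations (and the even-valued special errors such as
   GRPC_ERROR_OOM) guarantee the low bit of a stored pointer is zero, so it is
   free to use as a shutdown flag. */
#define EXAMPLE_NOT_READY ((gpr_atm)0)
#define EXAMPLE_READY ((gpr_atm)2)
#define EXAMPLE_SHUTDOWN_BIT ((gpr_atm)1)

/* Producer side: a release CAS, so a consumer that later observes READY also
   observes everything that happened before the event was signalled. */
static bool example_mark_ready(gpr_atm *state) {
  return gpr_atm_rel_cas(state, EXAMPLE_NOT_READY, EXAMPLE_READY);
}

/* Consumer side: an acquire load pairs with the release operations above. */
static bool example_is_shutdown(gpr_atm *state) {
  return (gpr_atm_acq_load(state) & EXAMPLE_SHUTDOWN_BIT) != 0;
}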

@ -113,14 +113,15 @@ static grpc_error *try_split_host_port(const char *name,
/* parse name, splitting it into host and port parts */
grpc_error *error;
gpr_split_host_port(name, host, port);
if (host == NULL) {
if (*host == NULL) {
char *msg;
gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
error = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return error;
}
if (port == NULL) {
if (*port == NULL) {
// TODO(murgatroid99): add tests for this case
if (default_port == NULL) {
char *msg;
gpr_asprintf(&msg, "no port in name '%s'", name);

@ -41,8 +41,12 @@
#include <grpc/support/log.h>
const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
#ifdef GPR_WIN_INET_NTOP
return inet_ntop(af, src, dst, size);
#else
/* Windows InetNtopA wants a mutable ip pointer */
return InetNtopA(af, (void *)src, dst, size);
#endif /* GPR_WIN_INET_NTOP */
}
#endif /* GRPC_WINDOWS_SOCKETUTILS */

@ -46,6 +46,8 @@
#include "src/core/lib/iomgr/tcp_uv.h"
#include "src/core/lib/iomgr/timer.h"
extern int grpc_tcp_trace;
typedef struct grpc_uv_tcp_connect {
uv_connect_t connect_req;
grpc_timer alarm;
@ -70,6 +72,12 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
grpc_error *error) {
int done;
grpc_uv_tcp_connect *connect = acp;
if (grpc_tcp_trace) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name, str);
grpc_error_free_string(str);
}
if (error == GRPC_ERROR_NONE) {
/* error == NONE implies that the timer ran out, and wasn't cancelled. If
it was cancelled, then the handler that cancelled it also should close
@ -145,6 +153,12 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect;
if (grpc_tcp_trace) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
connect->addr_name);
}
// TODO(murgatroid99): figure out what the return value here means
uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
(const struct sockaddr *)resolved_addr->addr,

@ -180,25 +180,25 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
GPR_ASSERT(now.clock_type == g_clock_type);
timer->closure = closure;
timer->deadline = deadline;
timer->triggered = 0;
if (!g_initialized) {
timer->triggered = 1;
timer->pending = false;
grpc_closure_sched(
exec_ctx, timer->closure,
GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
return;
}
gpr_mu_lock(&shard->mu);
timer->pending = true;
if (gpr_time_cmp(deadline, now) <= 0) {
timer->triggered = 1;
timer->pending = false;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
gpr_mu_unlock(&shard->mu);
/* early out */
return;
}
/* TODO(ctiller): check deadline expired */
gpr_mu_lock(&shard->mu);
grpc_time_averaged_stats_add_sample(&shard->stats,
ts_to_dbl(gpr_time_sub(deadline, now)));
if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
@ -243,9 +243,9 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
gpr_mu_lock(&shard->mu);
if (!timer->triggered) {
if (timer->pending) {
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
timer->triggered = 1;
timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
} else {
@ -296,7 +296,7 @@ static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
}
timer = grpc_timer_heap_top(&shard->heap);
if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
timer->triggered = 1;
timer->pending = false;
grpc_timer_heap_pop(&shard->heap);
return timer;
}

@ -40,7 +40,7 @@
struct grpc_timer {
gpr_timespec deadline;
uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
int triggered;
bool pending;
struct grpc_timer *next;
struct grpc_timer *prev;
grpc_closure *closure;

@ -53,8 +53,8 @@ static void stop_uv_timer(uv_timer_t *handle) {
void run_expired_timer(uv_timer_t *handle) {
grpc_timer *timer = (grpc_timer *)handle->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(!timer->triggered);
timer->triggered = 1;
GPR_ASSERT(timer->pending);
timer->pending = 0;
grpc_closure_sched(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
stop_uv_timer(handle);
grpc_exec_ctx_finish(&exec_ctx);
@ -67,11 +67,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
uv_timer_t *uv_timer;
timer->closure = closure;
if (gpr_time_cmp(deadline, now) <= 0) {
timer->triggered = 1;
timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
return;
}
timer->triggered = 0;
timer->pending = 1;
timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
uv_timer = gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), uv_timer);
@ -81,8 +81,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
}
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
if (!timer->triggered) {
timer->triggered = 1;
if (timer->pending) {
timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
stop_uv_timer((uv_timer_t *)timer->uv_timer);
}

@ -41,7 +41,7 @@ struct grpc_timer {
/* This is actually a uv_timer_t*, but we want to keep platform-specific
types out of headers */
void *uv_timer;
int triggered;
int pending;
};
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */

@ -302,7 +302,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(*calld));
return GRPC_ERROR_NONE;

@ -197,7 +197,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;

@ -42,8 +42,10 @@
#include <time.h>
#include "src/core/lib/profiling/timers.h"
#ifdef GPR_MU_COUNTERS
gpr_atm grpc_mu_locks = 0;
#ifdef GPR_LOW_LEVEL_COUNTERS
gpr_atm gpr_mu_locks = 0;
gpr_atm gpr_counter_atm_cas = 0;
gpr_atm gpr_counter_atm_add = 0;
#endif
void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
@ -51,8 +53,8 @@ void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
void gpr_mu_destroy(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
void gpr_mu_lock(gpr_mu* mu) {
#ifdef GPR_MU_COUNTERS
gpr_atm_no_barrier_fetch_add(&grpc_mu_locks, 1);
#ifdef GPR_LOW_LEVEL_COUNTERS
GPR_ATM_INC_COUNTER(gpr_mu_locks);
#endif
GPR_TIMER_BEGIN("gpr_mu_lock", 0);
GPR_ASSERT(pthread_mutex_lock(mu) == 0);

@ -101,6 +101,17 @@ typedef struct {
grpc_error *error;
} received_status;
static gpr_atm pack_received_status(received_status r) {
return r.is_set ? (1 | (gpr_atm)r.error) : 0;
}
static received_status unpack_received_status(gpr_atm atm) {
return (atm & 1) == 0
? (received_status){.is_set = false, .error = GRPC_ERROR_NONE}
: (received_status){.is_set = true,
.error = (grpc_error *)(atm & ~(gpr_atm)1)};
}
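For clarity, a round-trip of the packed representation above (illustrative only; it relies on grpc_error pointers, including the special even-valued errors, having a zero low bit, and uses GPR_ASSERT from <grpc/support/log.h>).

/* Illustrative sketch, not part of the patch. */
static void example_packed_status_roundtrip(void) {
  received_status in = {.is_set = true, .error = GRPC_ERROR_CANCELLED};
  received_status out = unpack_received_status(pack_received_status(in));
  GPR_ASSERT(out.is_set && out.error == GRPC_ERROR_CANCELLED);
  /* the "unset" value packs to 0, which is what the status array starts as */
  GPR_ASSERT(pack_received_status((received_status){
                 .is_set = false, .error = GRPC_ERROR_NONE}) == 0);
}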
#define MAX_ERRORS_PER_BATCH 3
typedef struct batch_control {
@ -142,8 +153,6 @@ struct grpc_call {
bool destroy_called;
/** flag indicating that cancellation is inherited */
bool cancellation_is_inherited;
/** bitmask of live batches */
uint8_t used_batches;
/** which ops are in-flight */
bool sent_initial_metadata;
bool sending_message;
@ -165,8 +174,8 @@ struct grpc_call {
Element 0 is initial metadata, element 1 is trailing metadata. */
grpc_metadata_array *buffered_metadata[2];
/* Received call statuses from various sources */
received_status status[STATUS_SOURCE_COUNT];
/* Packed received call statuses from various sources */
gpr_atm status[STATUS_SOURCE_COUNT];
/* Call data useful for reporting. Only valid after the call has
* completed */
@ -446,7 +455,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
GRPC_ERROR_UNREF(c->status[i].error);
GRPC_ERROR_UNREF(
unpack_received_status(gpr_atm_no_barrier_load(&c->status[i])).error);
}
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c);
@ -614,13 +624,12 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
*/
static bool get_final_status_from(
grpc_call *call, status_source from_source, bool allow_ok_status,
grpc_call *call, grpc_error *error, bool allow_ok_status,
void (*set_value)(grpc_status_code code, void *user_data),
void *set_value_user_data, grpc_slice *details) {
grpc_status_code code;
const char *msg = NULL;
grpc_error_get_status(call->status[from_source].error, call->send_deadline,
&code, &msg, NULL);
grpc_error_get_status(error, call->send_deadline, &code, &msg, NULL);
if (code == GRPC_STATUS_OK && !allow_ok_status) {
return false;
}
@ -638,12 +647,15 @@ static void get_final_status(grpc_call *call,
void *user_data),
void *set_value_user_data, grpc_slice *details) {
int i;
received_status status[STATUS_SOURCE_COUNT];
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i]));
}
if (grpc_call_error_trace) {
gpr_log(GPR_DEBUG, "get_final_status %s", call->is_client ? "CLI" : "SVR");
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (call->status[i].is_set) {
gpr_log(GPR_DEBUG, " %d: %s", i,
grpc_error_string(call->status[i].error));
if (status[i].is_set) {
gpr_log(GPR_DEBUG, " %d: %s", i, grpc_error_string(status[i].error));
}
}
}
@ -653,9 +665,9 @@ static void get_final_status(grpc_call *call,
/* search for the best status we can present: ideally the error we use has a
clearly defined grpc-status, and we'll prefer that. */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (call->status[i].is_set &&
grpc_error_has_clear_grpc_status(call->status[i].error)) {
if (get_final_status_from(call, (status_source)i, allow_ok_status != 0,
if (status[i].is_set &&
grpc_error_has_clear_grpc_status(status[i].error)) {
if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details)) {
return;
}
@ -663,8 +675,8 @@ static void get_final_status(grpc_call *call,
}
/* If no clearly defined status exists, search for 'anything' */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (call->status[i].is_set) {
if (get_final_status_from(call, (status_source)i, allow_ok_status != 0,
if (status[i].is_set) {
if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details)) {
return;
}
@ -681,12 +693,13 @@ static void get_final_status(grpc_call *call,
static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
status_source source, grpc_error *error) {
if (call->status[source].is_set) {
if (!gpr_atm_rel_cas(&call->status[source],
pack_received_status((received_status){
.is_set = false, .error = GRPC_ERROR_NONE}),
pack_received_status((received_status){
.is_set = true, .error = error}))) {
GRPC_ERROR_UNREF(error);
return;
}
call->status[source].is_set = true;
call->status[source].error = error;
}
/*******************************************************************************
@ -997,25 +1010,48 @@ static bool are_initial_metadata_flags_valid(uint32_t flags, bool is_client) {
return !(flags & invalid_positions);
}
static batch_control *allocate_batch_control(grpc_call *call) {
size_t i;
for (i = 0; i < MAX_CONCURRENT_BATCHES; i++) {
if ((call->used_batches & (1 << i)) == 0) {
call->used_batches = (uint8_t)(call->used_batches | (uint8_t)(1 << i));
return &call->active_batches[i];
}
static int batch_slot_for_op(grpc_op_type type) {
switch (type) {
case GRPC_OP_SEND_INITIAL_METADATA:
return 0;
case GRPC_OP_SEND_MESSAGE:
return 1;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
case GRPC_OP_SEND_STATUS_FROM_SERVER:
return 2;
case GRPC_OP_RECV_INITIAL_METADATA:
return 3;
case GRPC_OP_RECV_MESSAGE:
return 4;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
case GRPC_OP_RECV_STATUS_ON_CLIENT:
return 5;
}
GPR_UNREACHABLE_CODE(return 123456789);
}
static batch_control *allocate_batch_control(grpc_call *call,
const grpc_op *ops,
size_t num_ops) {
int slot = batch_slot_for_op(ops[0].op);
for (size_t i = 1; i < num_ops; i++) {
int op_slot = batch_slot_for_op(ops[i].op);
slot = GPR_MIN(slot, op_slot);
}
batch_control *bctl = &call->active_batches[slot];
if (bctl->call != NULL) {
return NULL;
}
return NULL;
memset(bctl, 0, sizeof(*bctl));
bctl->call = call;
return bctl;
}
static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_cq_completion *storage) {
batch_control *bctl = user_data;
grpc_call *call = bctl->call;
gpr_mu_lock(&call->mu);
call->used_batches = (uint8_t)(
call->used_batches & ~(uint8_t)(1 << (bctl - call->active_batches)));
gpr_mu_unlock(&call->mu);
bctl->call = NULL;
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
}
@ -1098,12 +1134,8 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
bctl->call = NULL;
grpc_closure_run(exec_ctx, bctl->notify_tag, error);
gpr_mu_lock(&call->mu);
bctl->call->used_batches =
(uint8_t)(bctl->call->used_batches &
~(uint8_t)(1 << (bctl - bctl->call->active_batches)));
gpr_mu_unlock(&call->mu);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@ -1315,6 +1347,11 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
finish_batch_step(exec_ctx, bctl);
}
static void free_no_op_completion(grpc_exec_ctx *exec_ctx, void *p,
grpc_cq_completion *completion) {
gpr_free(completion);
}
static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_call *call, const grpc_op *ops,
size_t nops, void *notify_tag,
@ -1329,32 +1366,34 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_metadata compression_md;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
/* TODO(ctiller): this feels like it could be made lock-free */
gpr_mu_lock(&call->mu);
bctl = allocate_batch_control(call);
memset(bctl, 0, sizeof(*bctl));
bctl->call = call;
bctl->notify_tag = notify_tag;
bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
grpc_transport_stream_op *stream_op = &bctl->op;
memset(stream_op, 0, sizeof(*stream_op));
stream_op->covered_by_poller = true;
if (nops == 0) {
GRPC_CALL_INTERNAL_REF(call, "completion");
if (!is_notify_tag_closure) {
grpc_cq_begin_op(call->cq, notify_tag);
grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
free_no_op_completion, NULL,
gpr_malloc(sizeof(grpc_cq_completion)));
} else {
grpc_closure_sched(exec_ctx, notify_tag, GRPC_ERROR_NONE);
}
gpr_mu_unlock(&call->mu);
post_batch_completion(exec_ctx, bctl);
error = GRPC_CALL_OK;
goto done;
}
/* TODO(ctiller): this feels like it could be made lock-free */
bctl = allocate_batch_control(call, ops, nops);
if (bctl == NULL) {
return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
}
bctl->notify_tag = notify_tag;
bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
gpr_mu_lock(&call->mu);
grpc_transport_stream_op *stream_op = &bctl->op;
memset(stream_op, 0, sizeof(*stream_op));
stream_op->covered_by_poller = true;
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
op = &ops[i];

@ -122,7 +122,7 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
gpr_atm_no_barrier_store(&calld->filled_metadata, 0);
return GRPC_ERROR_NONE;

@ -879,7 +879,7 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
memset(calld, 0, sizeof(call_data));
@ -1198,7 +1198,9 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
crm->server_registered_method = rm;
crm->flags = rm->flags;
crm->has_host = has_host;
crm->host = host;
if (has_host) {
crm->host = host;
}
crm->method = method;
}
GPR_ASSERT(slots <= UINT32_MAX);

@ -37,10 +37,14 @@ extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
extern void grpc_client_channel_init(void);
extern void grpc_client_channel_shutdown(void);
extern void grpc_load_reporting_plugin_init(void);
extern void grpc_load_reporting_plugin_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_chttp2_plugin_init,
grpc_chttp2_plugin_shutdown);
grpc_register_plugin(grpc_client_channel_init,
grpc_client_channel_shutdown);
grpc_register_plugin(grpc_load_reporting_plugin_init,
grpc_load_reporting_plugin_shutdown);
}

@ -244,7 +244,7 @@ class CallData {
/// Initializes the call data.
virtual grpc_error *Init(grpc_exec_ctx *exec_ctx, ChannelData *channel_data,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
return GRPC_ERROR_NONE;
}
@ -308,7 +308,7 @@ class ChannelFilter final {
static grpc_error *InitCallElement(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
ChannelDataType *channel_data = (ChannelDataType *)elem->channel_data;
// Construct the object in the already-allocated memory.
CallDataType *call_data = new (elem->call_data) CallDataType();

@ -0,0 +1,166 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <memory>
#include <mutex>
#include <grpc++/impl/codegen/method_handler_impl.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/cpp/server/health/default_health_check_service.h"
#include "src/cpp/server/health/health.pb.h"
#include "third_party/nanopb/pb_decode.h"
#include "third_party/nanopb/pb_encode.h"
namespace grpc {
namespace {
const char kHealthCheckMethodName[] = "/grpc.health.v1.Health/Check";
} // namespace
DefaultHealthCheckService::HealthCheckServiceImpl::HealthCheckServiceImpl(
DefaultHealthCheckService* service)
: service_(service), method_(nullptr) {
MethodHandler* handler =
new RpcMethodHandler<HealthCheckServiceImpl, ByteBuffer, ByteBuffer>(
std::mem_fn(&HealthCheckServiceImpl::Check), this);
method_ = new RpcServiceMethod(kHealthCheckMethodName, RpcMethod::NORMAL_RPC,
handler);
AddMethod(method_);
}
Status DefaultHealthCheckService::HealthCheckServiceImpl::Check(
ServerContext* context, const ByteBuffer* request, ByteBuffer* response) {
// Decode request.
std::vector<Slice> slices;
request->Dump(&slices);
uint8_t* request_bytes = nullptr;
bool request_bytes_owned = false;
size_t request_size = 0;
grpc_health_v1_HealthCheckRequest request_struct;
if (slices.empty()) {
request_struct.has_service = false;
} else if (slices.size() == 1) {
request_bytes = const_cast<uint8_t*>(slices[0].begin());
request_size = slices[0].size();
} else {
request_bytes_owned = true;
request_bytes = static_cast<uint8_t*>(gpr_malloc(request->Length()));
uint8_t* copy_to = request_bytes;
for (size_t i = 0; i < slices.size(); i++) {
memcpy(copy_to, slices[i].begin(), slices[i].size());
copy_to += slices[i].size();
}
}
if (request_bytes != nullptr) {
pb_istream_t istream = pb_istream_from_buffer(request_bytes, request_size);
bool decode_status = pb_decode(
&istream, grpc_health_v1_HealthCheckRequest_fields, &request_struct);
if (request_bytes_owned) {
gpr_free(request_bytes);
}
if (!decode_status) {
return Status(StatusCode::INVALID_ARGUMENT, "");
}
}
// Check status from the associated default health checking service.
DefaultHealthCheckService::ServingStatus serving_status =
service_->GetServingStatus(
request_struct.has_service ? request_struct.service : "");
if (serving_status == DefaultHealthCheckService::NOT_FOUND) {
return Status(StatusCode::NOT_FOUND, "");
}
// Encode response
grpc_health_v1_HealthCheckResponse response_struct;
response_struct.has_status = true;
response_struct.status =
serving_status == DefaultHealthCheckService::SERVING
? grpc_health_v1_HealthCheckResponse_ServingStatus_SERVING
: grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING;
pb_ostream_t ostream;
memset(&ostream, 0, sizeof(ostream));
pb_encode(&ostream, grpc_health_v1_HealthCheckResponse_fields,
&response_struct);
grpc_slice response_slice = grpc_slice_malloc(ostream.bytes_written);
ostream = pb_ostream_from_buffer(GRPC_SLICE_START_PTR(response_slice),
GRPC_SLICE_LENGTH(response_slice));
bool encode_status = pb_encode(
&ostream, grpc_health_v1_HealthCheckResponse_fields, &response_struct);
if (!encode_status) {
return Status(StatusCode::INTERNAL, "Failed to encode response.");
}
Slice encoded_response(response_slice, Slice::STEAL_REF);
ByteBuffer response_buffer(&encoded_response, 1);
response->Swap(&response_buffer);
return Status::OK;
}
DefaultHealthCheckService::DefaultHealthCheckService() {
services_map_.emplace("", true);
}
void DefaultHealthCheckService::SetServingStatus(
const grpc::string& service_name, bool serving) {
std::lock_guard<std::mutex> lock(mu_);
services_map_[service_name] = serving;
}
void DefaultHealthCheckService::SetServingStatus(bool serving) {
std::lock_guard<std::mutex> lock(mu_);
for (auto iter = services_map_.begin(); iter != services_map_.end(); ++iter) {
iter->second = serving;
}
}
DefaultHealthCheckService::ServingStatus
DefaultHealthCheckService::GetServingStatus(
const grpc::string& service_name) const {
std::lock_guard<std::mutex> lock(mu_);
const auto& iter = services_map_.find(service_name);
if (iter == services_map_.end()) {
return NOT_FOUND;
}
return iter->second ? SERVING : NOT_SERVING;
}
DefaultHealthCheckService::HealthCheckServiceImpl*
DefaultHealthCheckService::GetHealthCheckService() {
GPR_ASSERT(impl_ == nullptr);
impl_.reset(new HealthCheckServiceImpl(this));
return impl_.get();
}
} // namespace grpc
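Check() above flattens the request ByteBuffer into one contiguous buffer and runs it through nanopb, and the response goes back out the same way. A minimal round-trip over the same generated messages, assuming the health.pb.h added in this change and the nanopb headers are on the include path (the service name is arbitrary):
```cpp
// Encode a HealthCheckRequest with nanopb, then decode it back, mirroring the
// pb_encode/pb_decode calls in HealthCheckServiceImpl::Check() above.
#include <cassert>
#include <cstdint>
#include <cstring>

#include "src/cpp/server/health/health.pb.h"
#include "third_party/nanopb/pb_decode.h"
#include "third_party/nanopb/pb_encode.h"

int main() {
  grpc_health_v1_HealthCheckRequest req =
      grpc_health_v1_HealthCheckRequest_init_zero;
  req.has_service = true;
  std::strncpy(req.service, "grpc.test.EchoService", sizeof(req.service));

  uint8_t buf[grpc_health_v1_HealthCheckRequest_size];
  pb_ostream_t out = pb_ostream_from_buffer(buf, sizeof(buf));
  bool ok = pb_encode(&out, grpc_health_v1_HealthCheckRequest_fields, &req);
  assert(ok);

  grpc_health_v1_HealthCheckRequest decoded =
      grpc_health_v1_HealthCheckRequest_init_zero;
  pb_istream_t in = pb_istream_from_buffer(buf, out.bytes_written);
  ok = pb_decode(&in, grpc_health_v1_HealthCheckRequest_fields, &decoded);
  assert(ok && decoded.has_service);
  assert(std::strcmp(decoded.service, "grpc.test.EchoService") == 0);
  return 0;
}
```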

@ -0,0 +1,78 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CPP_SERVER_DEFAULT_HEALTH_CHECK_SERVICE_H
#define GRPC_INTERNAL_CPP_SERVER_DEFAULT_HEALTH_CHECK_SERVICE_H
#include <mutex>
#include <grpc++/health_check_service_interface.h>
#include <grpc++/impl/codegen/service_type.h>
#include <grpc++/support/byte_buffer.h>
namespace grpc {
// Default implementation of HealthCheckServiceInterface. Server will create and
// own it.
class DefaultHealthCheckService final : public HealthCheckServiceInterface {
public:
// The service impl to register with the server.
class HealthCheckServiceImpl : public Service {
public:
explicit HealthCheckServiceImpl(DefaultHealthCheckService* service);
Status Check(ServerContext* context, const ByteBuffer* request,
ByteBuffer* response);
private:
const DefaultHealthCheckService* const service_;
RpcServiceMethod* method_;
};
DefaultHealthCheckService();
void SetServingStatus(const grpc::string& service_name,
bool serving) override;
void SetServingStatus(bool serving) override;
enum ServingStatus { NOT_FOUND, SERVING, NOT_SERVING };
ServingStatus GetServingStatus(const grpc::string& service_name) const;
HealthCheckServiceImpl* GetHealthCheckService();
private:
mutable std::mutex mu_;
std::map<grpc::string, bool> services_map_;
std::unique_ptr<HealthCheckServiceImpl> impl_;
};
} // namespace grpc
#endif // GRPC_INTERNAL_CPP_SERVER_DEFAULT_HEALTH_CHECK_SERVICE_H

@ -0,0 +1,24 @@
/* Automatically generated nanopb constant definitions */
/* Generated by nanopb-0.3.7-dev */
#include "src/cpp/server/health/health.pb.h"
/* @@protoc_insertion_point(includes) */
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.
#endif
const pb_field_t grpc_health_v1_HealthCheckRequest_fields[2] = {
PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, grpc_health_v1_HealthCheckRequest, service, service, 0),
PB_LAST_FIELD
};
const pb_field_t grpc_health_v1_HealthCheckResponse_fields[2] = {
PB_FIELD( 1, UENUM , OPTIONAL, STATIC , FIRST, grpc_health_v1_HealthCheckResponse, status, status, 0),
PB_LAST_FIELD
};
/* @@protoc_insertion_point(eof) */

@ -0,0 +1,72 @@
/* Automatically generated nanopb header */
/* Generated by nanopb-0.3.7-dev */
#ifndef PB_GRPC_HEALTH_V1_HEALTH_PB_H_INCLUDED
#define PB_GRPC_HEALTH_V1_HEALTH_PB_H_INCLUDED
#include "third_party/nanopb/pb.h"
/* @@protoc_insertion_point(includes) */
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Enum definitions */
typedef enum _grpc_health_v1_HealthCheckResponse_ServingStatus {
grpc_health_v1_HealthCheckResponse_ServingStatus_UNKNOWN = 0,
grpc_health_v1_HealthCheckResponse_ServingStatus_SERVING = 1,
grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING = 2
} grpc_health_v1_HealthCheckResponse_ServingStatus;
#define _grpc_health_v1_HealthCheckResponse_ServingStatus_MIN grpc_health_v1_HealthCheckResponse_ServingStatus_UNKNOWN
#define _grpc_health_v1_HealthCheckResponse_ServingStatus_MAX grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING
#define _grpc_health_v1_HealthCheckResponse_ServingStatus_ARRAYSIZE ((grpc_health_v1_HealthCheckResponse_ServingStatus)(grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING+1))
/* Struct definitions */
typedef struct _grpc_health_v1_HealthCheckRequest {
bool has_service;
char service[200];
/* @@protoc_insertion_point(struct:grpc_health_v1_HealthCheckRequest) */
} grpc_health_v1_HealthCheckRequest;
typedef struct _grpc_health_v1_HealthCheckResponse {
bool has_status;
grpc_health_v1_HealthCheckResponse_ServingStatus status;
/* @@protoc_insertion_point(struct:grpc_health_v1_HealthCheckResponse) */
} grpc_health_v1_HealthCheckResponse;
/* Default values for struct fields */
/* Initializer values for message structs */
#define grpc_health_v1_HealthCheckRequest_init_default {false, ""}
#define grpc_health_v1_HealthCheckResponse_init_default {false, (grpc_health_v1_HealthCheckResponse_ServingStatus)0}
#define grpc_health_v1_HealthCheckRequest_init_zero {false, ""}
#define grpc_health_v1_HealthCheckResponse_init_zero {false, (grpc_health_v1_HealthCheckResponse_ServingStatus)0}
/* Field tags (for use in manual encoding/decoding) */
#define grpc_health_v1_HealthCheckRequest_service_tag 1
#define grpc_health_v1_HealthCheckResponse_status_tag 1
/* Struct field encoding specification for nanopb */
extern const pb_field_t grpc_health_v1_HealthCheckRequest_fields[2];
extern const pb_field_t grpc_health_v1_HealthCheckResponse_fields[2];
/* Maximum encoded size of messages (where known) */
#define grpc_health_v1_HealthCheckRequest_size 203
#define grpc_health_v1_HealthCheckResponse_size 2
/* Message IDs (where set with "msgid" option) */
#ifdef PB_MSGID
#define HEALTH_MESSAGES \
#endif
#ifdef __cplusplus
} /* extern "C" */
#endif
/* @@protoc_insertion_point(eof) */
#endif

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,28 +31,19 @@
*
*/
/*******************************************************************************
* NOTE: If this test fails to compile, then the api changes are likely to cause
* merge failures downstream. Please pay special attention to reviewing
* these changes, and solicit help as appropriate when merging downstream.
*
* This test is NOT expected to be run directly.
******************************************************************************/
#include <grpc++/health_check_service_interface.h>
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/tmpfile.h"
namespace grpc {
namespace {
bool g_grpc_default_health_check_service_enabled = false;
} // namespace
static void test_code(void) {
/* env.h */
gpr_set_env("abc", gpr_getenv("xyz"));
/* load_file.h */
grpc_load_file("abc", 1, NULL);
/* tmpfile.h */
fclose(gpr_tmpfile("foo", NULL));
bool DefaultHealthCheckServiceEnabled() {
return g_grpc_default_health_check_service_enabled;
}
int main(void) {
if (false) test_code();
return 0;
void EnableDefaultHealthCheckService(bool enable) {
g_grpc_default_health_check_service_enabled = enable;
}
} // namespace grpc

@ -0,0 +1,50 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc++/ext/health_check_service_server_builder_option.h>
namespace grpc {
HealthCheckServiceServerBuilderOption::HealthCheckServiceServerBuilderOption(
std::unique_ptr<HealthCheckServiceInterface> hc)
: hc_(std::move(hc)) {}
// Hand over hc_ to the server.
void HealthCheckServiceServerBuilderOption::UpdateArguments(
ChannelArguments* args) {
args->SetPointer(kHealthCheckServiceInterfaceArg, hc_.release());
}
void HealthCheckServiceServerBuilderOption::UpdatePlugins(
std::vector<std::unique_ptr<ServerBuilderPlugin>>* plugins) {}
} // namespace grpc
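HealthCheckServiceServerBuilderOption is how an application substitutes its own health checking implementation: the option stashes the instance in the kHealthCheckServiceInterfaceArg channel arg, the Server constructor (in the server.cc hunk that follows) takes ownership of it, and handing the option a null pointer instead disables health checking for that server. A sketch of wiring in a custom implementation (NoopHealthCheckService is a placeholder and assumes the two SetServingStatus overloads are the interface's only required overrides):
```cpp
// Hand the server a user-owned HealthCheckServiceInterface via a builder option.
#include <memory>
#include <utility>

#include <grpc++/ext/health_check_service_server_builder_option.h>
#include <grpc++/health_check_service_interface.h>
#include <grpc++/impl/server_builder_option.h>
#include <grpc++/server_builder.h>

class NoopHealthCheckService : public grpc::HealthCheckServiceInterface {
 public:
  void SetServingStatus(const grpc::string& service_name,
                        bool serving) override {}
  void SetServingStatus(bool serving) override {}
};

void AddCustomHealthCheck(grpc::ServerBuilder* builder) {
  std::unique_ptr<grpc::HealthCheckServiceInterface> hc(
      new NoopHealthCheckService);
  // UpdateArguments() releases hc into kHealthCheckServiceInterfaceArg; the
  // server that is eventually built takes ownership of the raw pointer.
  builder->SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
      new grpc::HealthCheckServiceServerBuilderOption(std::move(hc))));
}
```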

@ -37,6 +37,7 @@
#include <grpc++/completion_queue.h>
#include <grpc++/generic/async_generic_service.h>
#include <grpc++/impl/codegen/async_unary_call.h>
#include <grpc++/impl/codegen/completion_queue_tag.h>
#include <grpc++/impl/grpc_library.h>
#include <grpc++/impl/method_handler_impl.h>
@ -51,6 +52,7 @@
#include <grpc/support/log.h>
#include "src/core/lib/profiling/timers.h"
#include "src/cpp/server/health/default_health_check_service.h"
#include "src/cpp/thread_manager/thread_manager.h"
namespace grpc {
@ -187,8 +189,7 @@ class Server::SyncRequest final : public CompletionQueueTag {
explicit CallData(Server* server, SyncRequest* mrd)
: cq_(mrd->cq_),
call_(mrd->call_, server, &cq_, server->max_receive_message_size()),
ctx_(mrd->deadline_, mrd->request_metadata_.metadata,
mrd->request_metadata_.count),
ctx_(mrd->deadline_, &mrd->request_metadata_),
has_request_payload_(mrd->has_request_payload_),
request_payload_(mrd->request_payload_),
method_(mrd->method_) {
@ -342,6 +343,7 @@ class Server::SyncRequestThreadManager : public ThreadManager {
int cq_timeout_msec_;
std::vector<std::unique_ptr<SyncRequest>> sync_requests_;
std::unique_ptr<RpcServiceMethod> unknown_method_;
std::unique_ptr<RpcServiceMethod> health_check_;
std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
};
@ -358,7 +360,8 @@ Server::Server(
shutdown_notified_(false),
has_generic_service_(false),
server_(nullptr),
server_initializer_(new ServerInitializer(this)) {
server_initializer_(new ServerInitializer(this)),
health_check_service_disabled_(false) {
g_gli_initializer.summon();
gpr_once_init(&g_once_init_callbacks, InitGlobalCallbacks);
global_callbacks_ = g_callbacks;
@ -374,6 +377,19 @@ Server::Server(
grpc_channel_args channel_args;
args->SetChannelArgs(&channel_args);
for (size_t i = 0; i < channel_args.num_args; i++) {
if (0 ==
strcmp(channel_args.args[i].key, kHealthCheckServiceInterfaceArg)) {
if (channel_args.args[i].value.pointer.p == nullptr) {
health_check_service_disabled_ = true;
} else {
health_check_service_.reset(static_cast<HealthCheckServiceInterface*>(
channel_args.args[i].value.pointer.p));
}
break;
}
}
server_ = grpc_server_create(&channel_args, nullptr);
}
@ -480,6 +496,21 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
GPR_ASSERT(!started_);
global_callbacks_->PreServerStart(this);
started_ = true;
// Only create default health check service when user did not provide an
// explicit one.
if (health_check_service_ == nullptr && !health_check_service_disabled_ &&
DefaultHealthCheckServiceEnabled()) {
if (sync_server_cqs_->empty()) {
gpr_log(GPR_ERROR,
"Default health check service disabled at async-only server.");
} else {
auto* default_hc_service = new DefaultHealthCheckService;
health_check_service_.reset(default_hc_service);
RegisterService(nullptr, default_hc_service->GetHealthCheckService());
}
}
grpc_server_start(server_);
if (!has_generic_service_) {

@ -33,7 +33,9 @@
#include <grpc++/server_context.h>
#include <algorithm>
#include <mutex>
#include <utility>
#include <grpc++/completion_queue.h>
#include <grpc++/impl/call.h>
@ -133,8 +135,7 @@ ServerContext::ServerContext()
sent_initial_metadata_(false),
compression_level_set_(false) {}
ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata* metadata,
size_t metadata_count)
ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
: completion_op_(nullptr),
has_notify_when_done_tag_(false),
async_notify_when_done_tag_(nullptr),
@ -143,12 +144,8 @@ ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata* metadata,
cq_(nullptr),
sent_initial_metadata_(false),
compression_level_set_(false) {
for (size_t i = 0; i < metadata_count; i++) {
client_metadata_.map()->insert(
std::pair<grpc::string_ref, grpc::string_ref>(
StringRefFromSlice(&metadata[i].key),
StringRefFromSlice(&metadata[i].value)));
}
std::swap(*client_metadata_.arr(), *arr);
client_metadata_.FillMap();
}
ServerContext::~ServerContext() {

@ -7,19 +7,19 @@ A C# implementation of gRPC.
SUPPORTED PLATFORMS
------------------
- [.NET Core](https://dotnet.github.io/) on Linux, Windows and Mac OS X
- .NET Framework 4.5+ (Windows)
- [.NET Core](https://dotnet.github.io/) on Linux, Windows and Mac OS X (starting from version 1.0.1)
- Mono 4+ on Linux, Windows and Mac OS X
PREREQUISITES
--------------
When using gRPC C# under .NET Core you only need to [install .NET Core](https://www.microsoft.com/net/core).
- Windows: .NET Framework 4.5+, Visual Studio 2013 or 2015
- Linux: Mono 4+, MonoDevelop 5.9+ (with NuGet add-in installed)
- Mac OS X: Xamarin Studio 5.9+
HOW TO USE
--------------
@ -27,7 +27,7 @@ HOW TO USE
- Open Visual Studio / MonoDevelop / Xamarin Studio and start a new project/solution.
- Add the [Grpc](https://www.nuget.org/packages/Grpc/) NuGet package as a dependency (Project options -> Manage NuGet Packages).
- Add the [Grpc](https://www.nuget.org/packages/Grpc/) NuGet package as a dependency (Project options -> Manage NuGet Packages).
- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add the [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) NuGet package that contains Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin.
@ -71,23 +71,10 @@ DOCUMENTATION
- [Helloworld Example][]
- [RouteGuide Tutorial][]
CONTENTS
--------
- ext:
The extension library that wraps C API to be more digestible by C#.
- Grpc.Auth:
gRPC OAuth2/JWT support.
- Grpc.Core:
The main gRPC C# library.
- Grpc.Examples:
API examples for math.proto
- Grpc.Examples.MathClient:
An example client that sends requests to math server.
- Grpc.Examples.MathServer:
An example server that implements a simple math service.
- Grpc.IntegrationTesting:
Cross-language gRPC implementation testing (interop testing).
PERFORMANCE
-----------
For best gRPC C# performance, use [.NET Core](https://dotnet.github.io/) and the Server GC mode `"System.GC.Server": true` for your applications.
THE NATIVE DEPENDENCY
---------------

@ -78,6 +78,8 @@ class CompletionQueueAsyncWorker : public Nan::AsyncWorker {
void HandleErrorCallback();
private:
static void TryAddWorker();
grpc_event result;
static grpc_completion_queue *queue;
@ -118,20 +120,21 @@ void CompletionQueueAsyncWorker::Execute() {
grpc_completion_queue *CompletionQueueAsyncWorker::GetQueue() { return queue; }
void CompletionQueueAsyncWorker::Next() {
#ifndef GRPC_UV
Nan::HandleScope scope;
if (current_threads < max_queue_threads) {
void CompletionQueueAsyncWorker::TryAddWorker() {
if (current_threads < max_queue_threads && waiting_next_calls > 0) {
current_threads += 1;
waiting_next_calls -= 1;
CompletionQueueAsyncWorker *worker = new CompletionQueueAsyncWorker();
Nan::AsyncQueueWorker(worker);
} else {
waiting_next_calls += 1;
}
GPR_ASSERT(current_threads <= max_queue_threads);
GPR_ASSERT((current_threads == max_queue_threads) ||
(waiting_next_calls == 0));
#endif
}
void CompletionQueueAsyncWorker::Next() {
waiting_next_calls += 1;
TryAddWorker();
}
void CompletionQueueAsyncWorker::Init(Local<Object> exports) {
@ -143,17 +146,8 @@ void CompletionQueueAsyncWorker::Init(Local<Object> exports) {
void CompletionQueueAsyncWorker::HandleOKCallback() {
Nan::HandleScope scope;
if (waiting_next_calls > 0) {
waiting_next_calls -= 1;
// Old worker removed, new worker added. current_threads += 0
CompletionQueueAsyncWorker *worker = new CompletionQueueAsyncWorker();
Nan::AsyncQueueWorker(worker);
} else {
current_threads -= 1;
}
GPR_ASSERT(current_threads <= max_queue_threads);
GPR_ASSERT((current_threads == max_queue_threads) ||
(waiting_next_calls == 0));
current_threads -= 1;
TryAddWorker();
Nan::Callback *callback = GetTagCallback(result.tag);
Local<Value> argv[] = {Nan::Null(), GetTagNodeValue(result.tag)};
callback->Call(2, argv);
@ -162,18 +156,9 @@ void CompletionQueueAsyncWorker::HandleOKCallback() {
}
void CompletionQueueAsyncWorker::HandleErrorCallback() {
if (waiting_next_calls > 0) {
waiting_next_calls -= 1;
// Old worker removed, new worker added. current_threads += 0
CompletionQueueAsyncWorker *worker = new CompletionQueueAsyncWorker();
Nan::AsyncQueueWorker(worker);
} else {
current_threads -= 1;
}
GPR_ASSERT(current_threads <= max_queue_threads);
GPR_ASSERT((current_threads == max_queue_threads) ||
(waiting_next_calls == 0));
Nan::HandleScope scope;
current_threads -= 1;
TryAddWorker();
Nan::Callback *callback = GetTagCallback(result.tag);
Local<Value> argv[] = {Nan::Error(ErrorMessage())};
@ -189,6 +174,7 @@ grpc_completion_queue *GetCompletionQueue() {
}
void CompletionQueueNext() {
gpr_log(GPR_DEBUG, "Called CompletionQueueNext");
CompletionQueueAsyncWorker::Next();
}

@ -108,7 +108,7 @@ function _write(chunk, encoding, callback) {
but passing an object that causes a serialization failure is a misuse
of the API anyway, so that's OK. The primary purpose here is to give the
programmer a useful error and to stop the stream properly */
this.call.cancelWithStatus(grpc.status.INTERNAL, "Serialization failure");
this.call.cancelWithStatus(grpc.status.INTERNAL, 'Serialization failure');
callback(e);
}
if (_.isFinite(encoding)) {
@ -831,13 +831,12 @@ exports.waitForClientReady = function(client, deadline, callback) {
*/
exports.makeProtobufClientConstructor = function(service, options) {
var method_attrs = common.getProtobufServiceAttrs(service, options);
var deprecatedArgumentOrder = false;
if (options) {
deprecatedArgumentOrder = options.deprecatedArgumentOrder;
if (!options) {
options = {deprecatedArgumentOrder: false};
}
var Client = exports.makeClientConstructor(
method_attrs, common.fullyQualifiedName(service),
deprecatedArgumentOrder);
options);
Client.service = service;
Client.service.grpc_options = options;
return Client;

@ -121,20 +121,20 @@ function sendUnaryResponse(call, value, serialize, metadata, flags) {
if (metadata) {
statusMetadata = metadata;
}
status.metadata = statusMetadata._getCoreRepresentation();
if (!call.metadataSent) {
end_batch[grpc.opType.SEND_INITIAL_METADATA] =
(new Metadata())._getCoreRepresentation();
call.metadataSent = true;
}
var message;
try {
message = serialize(value);
} catch (e) {
e.code = grpc.status.INTERNAL;
handleError(e);
handleError(call, e);
return;
}
status.metadata = statusMetadata._getCoreRepresentation();
if (!call.metadataSent) {
end_batch[grpc.opType.SEND_INITIAL_METADATA] =
(new Metadata())._getCoreRepresentation();
call.metadataSent = true;
}
message.grpcWriteFlags = flags;
end_batch[grpc.opType.SEND_MESSAGE] = message;
end_batch[grpc.opType.SEND_STATUS_FROM_SERVER] = status;
@ -280,11 +280,6 @@ function _write(chunk, encoding, callback) {
/* jshint validthis: true */
var batch = {};
var self = this;
if (!this.call.metadataSent) {
batch[grpc.opType.SEND_INITIAL_METADATA] =
(new Metadata())._getCoreRepresentation();
this.call.metadataSent = true;
}
var message;
try {
message = this.serialize(chunk);
@ -293,6 +288,11 @@ function _write(chunk, encoding, callback) {
callback(e);
return;
}
if (!this.call.metadataSent) {
batch[grpc.opType.SEND_INITIAL_METADATA] =
(new Metadata())._getCoreRepresentation();
this.call.metadataSent = true;
}
if (_.isFinite(encoding)) {
/* Attach the encoding if it is a finite number. This is the closest we
* can get to checking that it is valid flags */
@ -728,11 +728,17 @@ var defaultHandler = {
* method implementation for the provided service.
*/
Server.prototype.addService = function(service, implementation) {
if (!_.isObjectLike(service) || !_.isObjectLike(implementation)) {
throw new Error('addService requires two objects as arguments');
}
if (_.keys(service).length === 0) {
throw new Error('Cannot add an empty service to a server');
}
if (this.started) {
throw new Error('Can\'t add a service to a started server.');
}
var self = this;
_.each(service, function(attrs, name) {
_.forOwn(service, function(attrs, name) {
var method_type;
if (attrs.requestStream) {
if (attrs.responseStream) {

@ -607,6 +607,113 @@ describe('Client malformed response handling', function() {
call.end();
});
});
describe('Server serialization failure handling', function() {
function serializeFail(obj) {
throw new Error('Serialization failed');
}
var client;
var server;
before(function() {
var test_proto = ProtoBuf.loadProtoFile(__dirname + '/test_service.proto');
var test_service = test_proto.lookup('TestService');
var malformed_test_service = {
unary: {
path: '/TestService/Unary',
requestStream: false,
responseStream: false,
requestDeserialize: _.identity,
responseSerialize: serializeFail
},
clientStream: {
path: '/TestService/ClientStream',
requestStream: true,
responseStream: false,
requestDeserialize: _.identity,
responseSerialize: serializeFail
},
serverStream: {
path: '/TestService/ServerStream',
requestStream: false,
responseStream: true,
requestDeserialize: _.identity,
responseSerialize: serializeFail
},
bidiStream: {
path: '/TestService/BidiStream',
requestStream: true,
responseStream: true,
requestDeserialize: _.identity,
responseSerialize: serializeFail
}
};
server = new grpc.Server();
server.addService(malformed_test_service, {
unary: function(call, cb) {
cb(null, {});
},
clientStream: function(stream, cb) {
stream.on('data', function() {/* Ignore requests */});
stream.on('end', function() {
cb(null, {});
});
},
serverStream: function(stream) {
stream.write({});
stream.end();
},
bidiStream: function(stream) {
stream.on('data', function() {
// Ignore requests
stream.write({});
});
stream.on('end', function() {
stream.end();
});
}
});
var port = server.bind('localhost:0', server_insecure_creds);
var Client = surface_client.makeProtobufClientConstructor(test_service);
client = new Client('localhost:' + port, grpc.credentials.createInsecure());
server.start();
});
after(function() {
server.forceShutdown();
});
it('should get an INTERNAL status with a unary call', function(done) {
client.unary({}, function(err, data) {
assert(err);
assert.strictEqual(err.code, grpc.status.INTERNAL);
done();
});
});
it('should get an INTERNAL status with a client stream call', function(done) {
var call = client.clientStream(function(err, data) {
assert(err);
assert.strictEqual(err.code, grpc.status.INTERNAL);
done();
});
call.write({});
call.end();
});
it('should get an INTERNAL status with a server stream call', function(done) {
var call = client.serverStream({});
call.on('data', function(){});
call.on('error', function(err) {
assert.strictEqual(err.code, grpc.status.INTERNAL);
done();
});
});
it('should get an INTERNAL status with a bidi stream call', function(done) {
var call = client.bidiStream();
call.on('data', function(){});
call.on('error', function(err) {
assert.strictEqual(err.code, grpc.status.INTERNAL);
done();
});
call.write({});
call.end();
});
});
describe('Other conditions', function() {
var test_service;
var Client;

@ -321,44 +321,8 @@ unsigned int parse_h2_length(const char *field) {
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_c;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &response_payload_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), (void *)1, NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
__weak XCTestExpectation *expectation = [self expectationWithDescription:@"Coalescing"];
__weak XCTestExpectation *expectation =
[self expectationWithDescription:@"Coalescing"];
dispatch_async(
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
@ -425,6 +389,46 @@ unsigned int parse_h2_length(const char *field) {
[expectation fulfill];
});
// Guarantees that server is listening to the port before client connects.
sleep(1);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 2;
op->data.send_initial_metadata.metadata = meta_c;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &response_payload_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), (void *)1, NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, (void *)1, 1);
cq_verify(cqv);
@ -445,7 +449,7 @@ unsigned int parse_h2_length(const char *field) {
grpc_completion_queue_shutdown(cq);
drain_cq(cq);
grpc_completion_queue_destroy(cq);
[self waitForExpectationsWithTimeout:4 handler:nil];
}

@ -0,0 +1 @@
grpc.health.v1.HealthCheckRequest.service max_size:200

@ -0,0 +1,39 @@
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
licenses(["notice"]) # 3-clause BSD
package(default_visibility = ["//visibility:public"])
load("//bazel:grpc_build_system.bzl", "grpc_proto_library")
grpc_proto_library(
name = "reflection_proto",
srcs = ["reflection.proto"],
)

@ -39,11 +39,10 @@ from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)
_EMPTY_FLAGS = 0
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
_EMPTY_METADATA = cygrpc.Metadata(())
_UNARY_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
@ -138,8 +137,8 @@ def _abort(state, code, details):
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = _EMPTY_METADATA
state.trailing_metadata = _EMPTY_METADATA
state.initial_metadata = _common.EMPTY_METADATA
state.trailing_metadata = _common.EMPTY_METADATA
def _handle_event(event, state, response_deserializer):
@ -435,7 +434,7 @@ def _start_unary_request(request, timeout, request_serializer):
deadline, deadline_timespec = _deadline(timeout)
serialized_request = _common.serialize(request, request_serializer)
if serialized_request is None:
state = _RPCState((), _EMPTY_METADATA, _EMPTY_METADATA,
state = _RPCState((), _common.EMPTY_METADATA, _common.EMPTY_METADATA,
grpc.StatusCode.INTERNAL,
'Exception serializing request!')
rendezvous = _Rendezvous(state, None, None, deadline)

@ -37,7 +37,7 @@ import six
import grpc
from grpc._cython import cygrpc
_EMPTY_METADATA = cygrpc.Metadata(())
EMPTY_METADATA = cygrpc.Metadata(())
CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
cygrpc.ConnectivityState.idle:
@ -107,7 +107,7 @@ def channel_args(options):
def cygrpc_metadata(application_metadata):
return _EMPTY_METADATA if application_metadata is None else cygrpc.Metadata(
return EMPTY_METADATA if application_metadata is None else cygrpc.Metadata(
cygrpc.Metadatum(encode(key), encode(value))
for key, value in application_metadata)

@ -57,7 +57,6 @@ _CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_EMPTY_METADATA = cygrpc.Metadata(())
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
@ -143,7 +142,7 @@ def _abort(state, call, code, details):
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
_common.EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata),
effective_code, effective_details, _EMPTY_FLAGS),)
@ -416,7 +415,7 @@ def _send_response(rpc_event, state, serialized_response):
else:
if state.initial_metadata_allowed:
operations = (cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
_common.EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_message(serialized_response,
_EMPTY_FLAGS),)
state.initial_metadata_allowed = False
@ -446,8 +445,8 @@ def _status(rpc_event, state, serialized_response):
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
_EMPTY_FLAGS))
cygrpc.operation_send_initial_metadata(
_common.EMPTY_METADATA, _EMPTY_FLAGS))
if serialized_response is not None:
operations.append(
cygrpc.operation_send_message(serialized_response,
@ -549,12 +548,12 @@ def _find_method_handler(rpc_event, generic_handlers):
def _handle_unrecognized_method(rpc_event):
operations = (
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
b'Method not found!', _EMPTY_FLAGS),)
operations = (cygrpc.operation_send_initial_metadata(_common.EMPTY_METADATA,
_EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
b'Method not found!', _EMPTY_FLAGS),)
rpc_state = _RPCState()
rpc_event.operation_call.start_server_batch(
operations, lambda ignored_event: (rpc_state, (),))

@ -32,7 +32,7 @@ import unittest
import weakref
import grpc
from grpc import _grpcio_metadata
from grpc import _channel
from grpc.framework.foundation import logging_pool
from tests.unit import test_common
@ -49,8 +49,6 @@ _UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
_CLIENT_METADATA = (('client-md-key', 'client-md-key'),
('client-md-key-bin', b'\x00\x01'))
@ -76,7 +74,7 @@ def validate_client_metadata(test, servicer_context):
_CLIENT_METADATA, servicer_context.invocation_metadata()))
test.assertTrue(
user_agent(servicer_context.invocation_metadata())
.startswith('primary-agent ' + _USER_AGENT))
.startswith('primary-agent ' + _channel._USER_AGENT))
test.assertTrue(
user_agent(servicer_context.invocation_metadata())
.endswith('secondary-agent'))

@ -102,7 +102,6 @@ $CFLAGS << ' -std=c99 '
$CFLAGS << ' -Wall '
$CFLAGS << ' -Wextra '
$CFLAGS << ' -pedantic '
$CFLAGS << ' -Werror '
$CFLAGS << ' -Wno-format '
output = File.join('grpc', 'grpc_c')

@ -84,7 +84,7 @@ module GRPC
# channel:
#
# - :channel_override
# when present, this must be a pre-created GRPC::Channel. If it's
# when present, this must be a pre-created GRPC::Core::Channel. If it's
# present, the host and arbitrary keyword args are ignored, and the RPC
# connection uses this channel.
#

@ -1492,7 +1492,7 @@
out_mingbase = '$(LIBDIR)/$(CONFIG)/' + lib.name + '$(SHARED_VERSION_' + lang_to_var[lib.language] + ')'
out_libbase = '$(LIBDIR)/$(CONFIG)/lib' + lib.name + '$(SHARED_VERSION_' + lang_to_var[lib.language] + ')'
common = '$(LIB' + lib.name.upper() + '_OBJS) $(LDLIBS)'
common = '$(LIB' + lib.name.upper() + '_OBJS)'
link_libs = ''
lib_deps = ' $(ZLIB_DEP)'
@ -1542,6 +1542,8 @@
ldflags = '$(LDFLAGS)'
if lib.get('LDFLAGS', None):
ldflags += ' ' + lib['LDFLAGS']
common = common + ' $(LDLIBS)'
%>
% if lib.build == "all":

@ -53,7 +53,9 @@
'conditions': [
['runtime=="node"', {
'defines': [
'GRPC_UV'
# Disabling this while bugs are ironed out. Uncomment this to
# re-enable libuv integration in C core.
# 'GRPC_UV'
]
}],
['OS!="win" and runtime=="electron"', {

@ -29,7 +29,7 @@
s.require_paths = %w( src/ruby/bin src/ruby/lib src/ruby/pb )
s.platform = Gem::Platform::RUBY
s.add_dependency 'google-protobuf', '~> 3.1.0'
s.add_dependency 'google-protobuf', '~> 3.1'
s.add_dependency 'googleauth', '~> 0.5.1'
s.add_development_dependency 'bundler', '~> 1.9'
@ -37,7 +37,7 @@
s.add_development_dependency 'logging', '~> 2.0'
s.add_development_dependency 'simplecov', '~> 0.9'
s.add_development_dependency 'rake', '~> 10.4'
s.add_development_dependency 'rake-compiler', '~> 0.9'
s.add_development_dependency 'rake-compiler', '~> 1.0'
s.add_development_dependency 'rake-compiler-dock', '~> 0.5.1'
s.add_development_dependency 'rspec', '~> 3.2'
s.add_development_dependency 'rubocop', '~> 0.30.0'

@ -2,18 +2,28 @@
--- |
<%!
import json
def gen_one_target(tgt):
out = {"name": tgt.name,
"language": tgt.language,
"platforms": tgt.platforms,
"ci_platforms": tgt.ci_platforms,
"gtest": tgt.gtest,
"exclude_configs": tgt.get("exclude_configs", []),
"exclude_iomgrs": tgt.get("exclude_iomgrs", []),
"args": tgt.get("args", []),
"flaky": tgt.flaky,
"cpu_cost": tgt.get("cpu_cost", 1.0)}
timeout_seconds = tgt.get("timeout_seconds", None)
if timeout_seconds:
out['timeout_seconds'] = timeout_seconds
excluded_poll_engines = tgt.get("excluded_poll_engines", None)
if excluded_poll_engines:
out['excluded_poll_engines'] = excluded_poll_engines
return out
%>
${json.dumps([{"name": tgt.name,
"language": tgt.language,
"platforms": tgt.platforms,
"ci_platforms": tgt.ci_platforms,
"gtest": tgt.gtest,
"exclude_configs": tgt.get("exclude_configs", []),
"exclude_iomgrs": tgt.get("exclude_iomgrs", []),
"args": tgt.get("args", []),
"flaky": tgt.flaky,
"cpu_cost": tgt.get("cpu_cost", 1.0)}
${json.dumps([gen_one_target(tgt)
for tgt in targets
if tgt.get('run', True) and tgt.build == 'test'] +
tests,

@ -57,7 +57,7 @@ static grpc_error *channel_init_func(grpc_exec_ctx *exec_ctx,
static grpc_error *call_init_func(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
const grpc_call_element_args *args) {
++*(int *)(elem->channel_data);
*(int *)(elem->call_data) = 0;
return GRPC_ERROR_NONE;

@ -36,14 +36,17 @@
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include "src/core/ext/client_channel/resolver.h"
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "test/core/util/test_config.h"
static gpr_mu g_mu;
static bool g_fail_resolution = true;
static grpc_combiner *g_combiner;
static grpc_error *my_resolve_address(const char *name, const char *addr,
grpc_resolved_addresses **addrs) {
@ -71,6 +74,7 @@ static grpc_resolver *create_resolver(grpc_exec_ctx *exec_ctx,
grpc_resolver_args args;
memset(&args, 0, sizeof(args));
args.uri = uri;
args.combiner = g_combiner;
grpc_resolver *resolver =
grpc_resolver_factory_create_resolver(exec_ctx, factory, &args);
grpc_resolver_factory_unref(factory);
@ -96,11 +100,41 @@ static bool wait_loop(int deadline_seconds, gpr_event *ev) {
return false;
}
typedef struct next_args {
grpc_resolver *resolver;
grpc_channel_args **result;
grpc_closure *on_complete;
} next_args;
static void call_resolver_next_now_lock_taken(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error_unused) {
next_args *a = arg;
grpc_resolver_next_locked(exec_ctx, a->resolver, a->result, a->on_complete);
gpr_free(a);
}
static void call_resolver_next_after_locking(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
grpc_channel_args **result,
grpc_closure *on_complete) {
next_args *a = gpr_malloc(sizeof(*a));
a->resolver = resolver;
a->result = result;
a->on_complete = on_complete;
grpc_closure_sched(
exec_ctx,
grpc_closure_create(call_resolver_next_now_lock_taken, a,
grpc_combiner_scheduler(resolver->combiner, false)),
GRPC_ERROR_NONE);
}
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
gpr_mu_init(&g_mu);
g_combiner = grpc_combiner_create(NULL);
grpc_blocking_resolve_address = my_resolve_address;
grpc_channel_args *result = (grpc_channel_args *)1;
@ -108,7 +142,7 @@ int main(int argc, char **argv) {
grpc_resolver *resolver = create_resolver(&exec_ctx, "dns:test");
gpr_event ev1;
gpr_event_init(&ev1);
grpc_resolver_next(
call_resolver_next_after_locking(
&exec_ctx, resolver, &result,
grpc_closure_create(on_done, &ev1, grpc_schedule_on_exec_ctx));
grpc_exec_ctx_flush(&exec_ctx);
@ -117,7 +151,7 @@ int main(int argc, char **argv) {
gpr_event ev2;
gpr_event_init(&ev2);
grpc_resolver_next(
call_resolver_next_after_locking(
&exec_ctx, resolver, &result,
grpc_closure_create(on_done, &ev2, grpc_schedule_on_exec_ctx));
grpc_exec_ctx_flush(&exec_ctx);
@ -126,6 +160,7 @@ int main(int argc, char **argv) {
grpc_channel_args_destroy(&exec_ctx, result);
GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test");
GRPC_COMBINER_UNREF(&exec_ctx, g_combiner, "test");
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();

@ -36,8 +36,11 @@
#include <grpc/support/log.h>
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/lib/iomgr/combiner.h"
#include "test/core/util/test_config.h"
static grpc_combiner *g_combiner;
static void test_succeeds(grpc_resolver_factory *factory, const char *string) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_uri *uri = grpc_uri_parse(string, 0);
@ -48,6 +51,7 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) {
GPR_ASSERT(uri);
memset(&args, 0, sizeof(args));
args.uri = uri;
args.combiner = g_combiner;
resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args);
GPR_ASSERT(resolver != NULL);
GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_succeeds");
@ -65,6 +69,7 @@ static void test_fails(grpc_resolver_factory *factory, const char *string) {
GPR_ASSERT(uri);
memset(&args, 0, sizeof(args));
args.uri = uri;
args.combiner = g_combiner;
resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args);
GPR_ASSERT(resolver == NULL);
grpc_uri_destroy(uri);
@ -76,6 +81,8 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
g_combiner = grpc_combiner_create(NULL);
dns = grpc_resolver_factory_lookup("dns");
test_succeeds(dns, "dns:10.2.1.1");
@ -84,6 +91,11 @@ int main(int argc, char **argv) {
test_fails(dns, "ipv4://8.8.8.8/8.8.8.8:8888");
grpc_resolver_factory_unref(dns);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_COMBINER_UNREF(&exec_ctx, g_combiner, "test");
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_shutdown();
return 0;

@ -39,9 +39,12 @@
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "test/core/util/test_config.h"
static grpc_combiner *g_combiner;
typedef struct on_resolution_arg {
char *expected_server_name;
grpc_channel_args *resolver_result;
@ -62,6 +65,7 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) {
GPR_ASSERT(uri);
memset(&args, 0, sizeof(args));
args.uri = uri;
args.combiner = g_combiner;
resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args);
GPR_ASSERT(resolver != NULL);
@ -71,8 +75,8 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) {
grpc_closure *on_resolution = grpc_closure_create(
on_resolution_cb, &on_res_arg, grpc_schedule_on_exec_ctx);
grpc_resolver_next(&exec_ctx, resolver, &on_res_arg.resolver_result,
on_resolution);
grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result,
on_resolution);
GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_succeeds");
grpc_exec_ctx_finish(&exec_ctx);
grpc_uri_destroy(uri);
@ -88,6 +92,7 @@ static void test_fails(grpc_resolver_factory *factory, const char *string) {
GPR_ASSERT(uri);
memset(&args, 0, sizeof(args));
args.uri = uri;
args.combiner = g_combiner;
resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args);
GPR_ASSERT(resolver == NULL);
grpc_uri_destroy(uri);
@ -99,6 +104,8 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
g_combiner = grpc_combiner_create(NULL);
ipv4 = grpc_resolver_factory_lookup("ipv4");
ipv6 = grpc_resolver_factory_lookup("ipv6");
@ -118,6 +125,12 @@ int main(int argc, char **argv) {
grpc_resolver_factory_unref(ipv4);
grpc_resolver_factory_unref(ipv6);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_COMBINER_UNREF(&exec_ctx, g_combiner, "test");
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_shutdown();
return 0;

@ -213,7 +213,7 @@ static grpc_resolver* fake_resolver_create(grpc_exec_ctx* exec_ctx,
r->channel_args = grpc_channel_args_copy(args->args);
r->addresses = addresses;
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &fake_resolver_vtable);
grpc_resolver_init(&r->base, &fake_resolver_vtable, args->combiner);
return &r->base;
}
