Merge remote-tracking branch 'upstream/master' into route_response

reviewable/pr22280/r4
Donna Dionne 5 years ago
commit 0306829596
  1. 2
      .github/ISSUE_TEMPLATE/bug_report.md
  2. 2
      .github/ISSUE_TEMPLATE/cleanup_request.md
  3. 2
      .github/ISSUE_TEMPLATE/feature_request.md
  4. 2
      .github/pull_request_template.md
  5. 18
      BUILD
  6. 2
      BUILD.gn
  7. 56
      CMakeLists.txt
  8. 152
      Makefile
  9. 6
      README.md
  10. 5
      bazel/cython_library.bzl
  11. 25
      build_autogenerated.yaml
  12. 1
      config.m4
  13. 1
      config.w32
  14. 2
      examples/cpp/compression/greeter_client.cc
  15. 8
      examples/cpp/helloworld/CMakeLists.txt
  16. 68
      examples/python/xds/README.md
  17. 134
      examples/python/xds/helloworld_pb2.py
  18. 46
      examples/python/xds/helloworld_pb2_grpc.py
  19. 5
      examples/python/xds/requirements.txt
  20. 94
      examples/python/xds/server.py
  21. 2
      gRPC-C++.podspec
  22. 3
      gRPC-Core.podspec
  23. 2
      grpc.gemspec
  24. 2
      grpc.gyp
  25. 8
      include/grpc/grpc_security.h
  26. 18
      include/grpcpp/impl/codegen/client_callback_impl.h
  27. 11
      include/grpcpp/impl/codegen/method_handler_impl.h
  28. 2
      include/grpcpp/impl/codegen/sync_stream_impl.h
  29. 2
      package.xml
  30. 76
      src/compiler/cpp_generator.cc
  31. 29
      src/core/ext/filters/client_channel/client_channel.cc
  32. 51
      src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
  33. 17
      src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h
  34. 45
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
  35. 89
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc
  36. 40
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h
  37. 5
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
  38. 7
      src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
  39. 143
      src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
  40. 23
      src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
  41. 43
      src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
  42. 3
      src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
  43. 6
      src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
  44. 15
      src/core/ext/filters/client_channel/resolver_result_parsing.cc
  45. 8
      src/core/ext/filters/client_channel/resolver_result_parsing.h
  46. 15
      src/core/ext/filters/client_channel/server_address.cc
  47. 13
      src/core/ext/filters/client_channel/server_address.h
  48. 160
      src/core/ext/filters/client_channel/service_config.cc
  49. 26
      src/core/ext/filters/client_channel/service_config.h
  50. 25
      src/core/ext/filters/client_channel/xds/xds_api.cc
  51. 31
      src/core/ext/filters/client_channel/xds/xds_api.h
  52. 8
      src/core/ext/filters/client_channel/xds/xds_channel.cc
  53. 192
      src/core/ext/filters/client_channel/xds/xds_client.cc
  54. 18
      src/core/ext/filters/client_channel/xds/xds_client.h
  55. 25
      src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h
  56. 7
      src/core/lib/security/security_connector/ssl/ssl_security_connector.cc
  57. 14
      src/core/lib/security/security_connector/tls/tls_security_connector.cc
  58. 14
      src/core/lib/slice/slice_internal.h
  59. 8
      src/core/tsi/ssl_transport_security.cc
  60. 49
      src/core/tsi/test_creds/README
  61. 40
      src/core/tsi/test_creds/badclient.key
  62. 35
      src/core/tsi/test_creds/badclient.pem
  63. 40
      src/core/tsi/test_creds/badserver.key
  64. 35
      src/core/tsi/test_creds/badserver.pem
  65. 40
      src/core/tsi/test_creds/ca.key
  66. 31
      src/core/tsi/test_creds/ca.pem
  67. 40
      src/core/tsi/test_creds/client.key
  68. 30
      src/core/tsi/test_creds/client.pem
  69. 40
      src/core/tsi/test_creds/server0.key
  70. 30
      src/core/tsi/test_creds/server0.pem
  71. 40
      src/core/tsi/test_creds/server1.key
  72. 34
      src/core/tsi/test_creds/server1.pem
  73. 6
      src/cpp/README.md
  74. 10
      src/cpp/common/tls_credentials_options.cc
  75. 9
      src/cpp/server/server_cc.cc
  76. 31
      src/csharp/Grpc.IntegrationTesting/data/ca.pem
  77. 40
      src/csharp/Grpc.IntegrationTesting/data/server1.key
  78. 34
      src/csharp/Grpc.IntegrationTesting/data/server1.pem
  79. 2
      src/csharp/Grpc.Tools/build/_protobuf/Google.Protobuf.Tools.targets
  80. 7
      src/objective-c/README-CFSTREAM.md
  81. 83
      src/objective-c/tests/InteropTests/InteropTests.m
  82. 31
      src/objective-c/tests/TestCertificates.bundle/test-certificates.pem
  83. 2
      src/php/ext/grpc/config.m4
  84. 31
      src/php/tests/data/ca.pem
  85. 40
      src/php/tests/data/server1.key
  86. 34
      src/php/tests/data/server1.pem
  87. 10
      src/python/grpcio/grpc/__init__.py
  88. 37
      src/python/grpcio/grpc/_auth.py
  89. 8
      src/python/grpcio/grpc/_cython/BUILD.bazel
  90. 20
      src/python/grpcio/grpc/_cython/_cygrpc/aio/call.pyx.pxi
  91. 1
      src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pxd.pxi
  92. 9
      src/python/grpcio/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi
  93. 2
      src/python/grpcio/grpc/_cython/_cygrpc/aio/channel.pxd.pxi
  94. 15
      src/python/grpcio/grpc/_cython/_cygrpc/aio/channel.pyx.pxi
  95. 13
      src/python/grpcio/grpc/_cython/_cygrpc/aio/common.pyx.pxi
  96. 43
      src/python/grpcio/grpc/_cython/_cygrpc/aio/completion_queue.pxd.pxi
  97. 93
      src/python/grpcio/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi
  98. 23
      src/python/grpcio/grpc/_cython/_cygrpc/aio/grpc_aio.pxd.pxi
  99. 195
      src/python/grpcio/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi
  100. 11
      src/python/grpcio/grpc/_cython/_cygrpc/aio/iomgr/iomgr.pyx.pxi
  Some files were not shown because too many files have changed in this diff.

@ -2,7 +2,7 @@
name: Report a bug
about: Create a report to help us improve
labels: kind/bug, priority/P2
assignees: karthikravis
assignees: nicolasnoble
---

@ -2,7 +2,7 @@
name: Request a cleanup
about: Suggest a cleanup in our repository
labels: kind/internal cleanup, priority/P2
assignees: karthikravis
assignees: nicolasnoble
---

@ -2,7 +2,7 @@
name: Request a feature
about: Suggest an idea for this project
labels: kind/enhancement, priority/P2
assignees: karthikravis
assignees: nicolasnoble
---

@ -8,4 +8,4 @@ If you know who should review your pull request, please remove the mentioning be
-->
@karthikravis
@nicolasnoble

18
BUILD

@ -1236,6 +1236,21 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc_grpclb_balancer_addresses",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h",
],
language = "c++",
deps = [
"grpc_base",
"grpc_client_channel",
],
)
grpc_cc_library(
name = "grpc_lb_policy_grpclb",
srcs = [
@ -1256,6 +1271,7 @@ grpc_cc_library(
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_grpclb_balancer_addresses",
"grpc_lb_upb",
"grpc_resolver_fake",
"grpc_transport_chttp2_client_insecure",
@ -1282,6 +1298,7 @@ grpc_cc_library(
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_grpclb_balancer_addresses",
"grpc_lb_upb",
"grpc_resolver_fake",
"grpc_secure",
@ -1619,6 +1636,7 @@ grpc_cc_library(
deps = [
"grpc_base",
"grpc_client_channel",
"grpc_grpclb_balancer_addresses",
"grpc_resolver_dns_selection",
],
)

@ -229,6 +229,8 @@ config("grpc_config") {
"src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc",
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc",

@ -161,7 +161,7 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
if(MSVC)
include(cmake/msvc_static_runtime.cmake)
@ -447,7 +447,7 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_c check_gcp_environment_linux_test)
add_dependencies(buildtests_c check_gcp_environment_windows_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c client_ssl)
add_dependencies(buildtests_c client_ssl_test)
endif()
add_dependencies(buildtests_c cmdline_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
@ -481,10 +481,10 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_c fd_posix_test)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c fling)
add_dependencies(buildtests_c fling_stream_test)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c fling_stream)
add_dependencies(buildtests_c fling_test)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c fork_test)
@ -497,10 +497,10 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_c grpc_completion_queue_test)
add_dependencies(buildtests_c grpc_ipv6_loopback_available_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c handshake_server_with_readahead_handshaker)
add_dependencies(buildtests_c handshake_server_with_readahead_handshaker_test)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c handshake_verify_peer_options)
add_dependencies(buildtests_c handshake_verify_peer_options_test)
endif()
add_dependencies(buildtests_c histogram_test)
add_dependencies(buildtests_c host_port_test)
@ -555,7 +555,7 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_c sequential_connectivity_test)
add_dependencies(buildtests_c server_chttp2_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c server_ssl)
add_dependencies(buildtests_c server_ssl_test)
endif()
add_dependencies(buildtests_c server_test)
add_dependencies(buildtests_c slice_buffer_test)
@ -1319,6 +1319,7 @@ add_library(grpc
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@ -1972,6 +1973,7 @@ add_library(grpc_unsecure
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@ -4619,11 +4621,11 @@ endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(client_ssl
add_executable(client_ssl_test
test/core/handshake/client_ssl.cc
)
target_include_directories(client_ssl
target_include_directories(client_ssl_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
@ -4635,7 +4637,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(client_ssl
target_link_libraries(client_ssl_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
@ -5279,15 +5281,15 @@ endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(fling
add_executable(fling_stream_test
test/core/end2end/data/client_certs.cc
test/core/end2end/data/server1_cert.cc
test/core/end2end/data/server1_key.cc
test/core/end2end/data/test_root_cert.cc
test/core/fling/fling_test.cc
test/core/fling/fling_stream_test.cc
)
target_include_directories(fling
target_include_directories(fling_stream_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
@ -5299,7 +5301,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(fling
target_link_libraries(fling_stream_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
@ -5314,15 +5316,15 @@ endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(fling_stream
add_executable(fling_test
test/core/end2end/data/client_certs.cc
test/core/end2end/data/server1_cert.cc
test/core/end2end/data/server1_key.cc
test/core/end2end/data/test_root_cert.cc
test/core/fling/fling_stream_test.cc
test/core/fling/fling_test.cc
)
target_include_directories(fling_stream
target_include_directories(fling_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
@ -5334,7 +5336,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(fling_stream
target_link_libraries(fling_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
@ -5589,12 +5591,12 @@ endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(handshake_server_with_readahead_handshaker
add_executable(handshake_server_with_readahead_handshaker_test
test/core/handshake/readahead_handshaker_server_ssl.cc
test/core/handshake/server_ssl_common.cc
)
target_include_directories(handshake_server_with_readahead_handshaker
target_include_directories(handshake_server_with_readahead_handshaker_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
@ -5606,7 +5608,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(handshake_server_with_readahead_handshaker
target_link_libraries(handshake_server_with_readahead_handshaker_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
@ -5621,11 +5623,11 @@ endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(handshake_verify_peer_options
add_executable(handshake_verify_peer_options_test
test/core/handshake/verify_peer_options.cc
)
target_include_directories(handshake_verify_peer_options
target_include_directories(handshake_verify_peer_options_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
@ -5637,7 +5639,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(handshake_verify_peer_options
target_link_libraries(handshake_verify_peer_options_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
@ -6850,12 +6852,12 @@ endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(server_ssl
add_executable(server_ssl_test
test/core/handshake/server_ssl.cc
test/core/handshake/server_ssl_common.cc
)
target_include_directories(server_ssl
target_include_directories(server_ssl_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
@ -6867,7 +6869,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
${_gRPC_ZLIB_INCLUDE_DIR}
)
target_link_libraries(server_ssl
target_link_libraries(server_ssl_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc

@ -1040,7 +1040,7 @@ channel_stack_builder_test: $(BINDIR)/$(CONFIG)/channel_stack_builder_test
channel_stack_test: $(BINDIR)/$(CONFIG)/channel_stack_test
check_gcp_environment_linux_test: $(BINDIR)/$(CONFIG)/check_gcp_environment_linux_test
check_gcp_environment_windows_test: $(BINDIR)/$(CONFIG)/check_gcp_environment_windows_test
client_ssl: $(BINDIR)/$(CONFIG)/client_ssl
client_ssl_test: $(BINDIR)/$(CONFIG)/client_ssl_test
cmdline_test: $(BINDIR)/$(CONFIG)/cmdline_test
combiner_test: $(BINDIR)/$(CONFIG)/combiner_test
completion_queue_threading_test: $(BINDIR)/$(CONFIG)/completion_queue_threading_test
@ -1062,8 +1062,8 @@ fake_resolver_test: $(BINDIR)/$(CONFIG)/fake_resolver_test
fake_transport_security_test: $(BINDIR)/$(CONFIG)/fake_transport_security_test
fd_conservation_posix_test: $(BINDIR)/$(CONFIG)/fd_conservation_posix_test
fd_posix_test: $(BINDIR)/$(CONFIG)/fd_posix_test
fling: $(BINDIR)/$(CONFIG)/fling
fling_stream: $(BINDIR)/$(CONFIG)/fling_stream
fling_stream_test: $(BINDIR)/$(CONFIG)/fling_stream_test
fling_test: $(BINDIR)/$(CONFIG)/fling_test
fork_test: $(BINDIR)/$(CONFIG)/fork_test
format_request_test: $(BINDIR)/$(CONFIG)/format_request_test
frame_handler_test: $(BINDIR)/$(CONFIG)/frame_handler_test
@ -1072,8 +1072,8 @@ grpc_alts_credentials_options_test: $(BINDIR)/$(CONFIG)/grpc_alts_credentials_op
grpc_byte_buffer_reader_test: $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test
grpc_completion_queue_test: $(BINDIR)/$(CONFIG)/grpc_completion_queue_test
grpc_ipv6_loopback_available_test: $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test
handshake_server_with_readahead_handshaker: $(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker
handshake_verify_peer_options: $(BINDIR)/$(CONFIG)/handshake_verify_peer_options
handshake_server_with_readahead_handshaker_test: $(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker_test
handshake_verify_peer_options_test: $(BINDIR)/$(CONFIG)/handshake_verify_peer_options_test
histogram_test: $(BINDIR)/$(CONFIG)/histogram_test
host_port_test: $(BINDIR)/$(CONFIG)/host_port_test
hpack_encoder_test: $(BINDIR)/$(CONFIG)/hpack_encoder_test
@ -1114,7 +1114,7 @@ secure_endpoint_test: $(BINDIR)/$(CONFIG)/secure_endpoint_test
security_connector_test: $(BINDIR)/$(CONFIG)/security_connector_test
sequential_connectivity_test: $(BINDIR)/$(CONFIG)/sequential_connectivity_test
server_chttp2_test: $(BINDIR)/$(CONFIG)/server_chttp2_test
server_ssl: $(BINDIR)/$(CONFIG)/server_ssl
server_ssl_test: $(BINDIR)/$(CONFIG)/server_ssl_test
server_test: $(BINDIR)/$(CONFIG)/server_test
slice_buffer_test: $(BINDIR)/$(CONFIG)/slice_buffer_test
slice_string_helpers_test: $(BINDIR)/$(CONFIG)/slice_string_helpers_test
@ -1417,7 +1417,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/channel_stack_test \
$(BINDIR)/$(CONFIG)/check_gcp_environment_linux_test \
$(BINDIR)/$(CONFIG)/check_gcp_environment_windows_test \
$(BINDIR)/$(CONFIG)/client_ssl \
$(BINDIR)/$(CONFIG)/client_ssl_test \
$(BINDIR)/$(CONFIG)/cmdline_test \
$(BINDIR)/$(CONFIG)/combiner_test \
$(BINDIR)/$(CONFIG)/completion_queue_threading_test \
@ -1439,8 +1439,8 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/fake_transport_security_test \
$(BINDIR)/$(CONFIG)/fd_conservation_posix_test \
$(BINDIR)/$(CONFIG)/fd_posix_test \
$(BINDIR)/$(CONFIG)/fling \
$(BINDIR)/$(CONFIG)/fling_stream \
$(BINDIR)/$(CONFIG)/fling_stream_test \
$(BINDIR)/$(CONFIG)/fling_test \
$(BINDIR)/$(CONFIG)/fork_test \
$(BINDIR)/$(CONFIG)/format_request_test \
$(BINDIR)/$(CONFIG)/frame_handler_test \
@ -1449,8 +1449,8 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test \
$(BINDIR)/$(CONFIG)/grpc_completion_queue_test \
$(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test \
$(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker \
$(BINDIR)/$(CONFIG)/handshake_verify_peer_options \
$(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker_test \
$(BINDIR)/$(CONFIG)/handshake_verify_peer_options_test \
$(BINDIR)/$(CONFIG)/histogram_test \
$(BINDIR)/$(CONFIG)/host_port_test \
$(BINDIR)/$(CONFIG)/hpack_encoder_test \
@ -1491,7 +1491,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/security_connector_test \
$(BINDIR)/$(CONFIG)/sequential_connectivity_test \
$(BINDIR)/$(CONFIG)/server_chttp2_test \
$(BINDIR)/$(CONFIG)/server_ssl \
$(BINDIR)/$(CONFIG)/server_ssl_test \
$(BINDIR)/$(CONFIG)/server_test \
$(BINDIR)/$(CONFIG)/slice_buffer_test \
$(BINDIR)/$(CONFIG)/slice_string_helpers_test \
@ -1910,8 +1910,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/check_gcp_environment_linux_test || ( echo test check_gcp_environment_linux_test failed ; exit 1 )
$(E) "[RUN] Testing check_gcp_environment_windows_test"
$(Q) $(BINDIR)/$(CONFIG)/check_gcp_environment_windows_test || ( echo test check_gcp_environment_windows_test failed ; exit 1 )
$(E) "[RUN] Testing client_ssl"
$(Q) $(BINDIR)/$(CONFIG)/client_ssl || ( echo test client_ssl failed ; exit 1 )
$(E) "[RUN] Testing client_ssl_test"
$(Q) $(BINDIR)/$(CONFIG)/client_ssl_test || ( echo test client_ssl_test failed ; exit 1 )
$(E) "[RUN] Testing cmdline_test"
$(Q) $(BINDIR)/$(CONFIG)/cmdline_test || ( echo test cmdline_test failed ; exit 1 )
$(E) "[RUN] Testing combiner_test"
@ -1954,10 +1954,10 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/fd_conservation_posix_test || ( echo test fd_conservation_posix_test failed ; exit 1 )
$(E) "[RUN] Testing fd_posix_test"
$(Q) $(BINDIR)/$(CONFIG)/fd_posix_test || ( echo test fd_posix_test failed ; exit 1 )
$(E) "[RUN] Testing fling"
$(Q) $(BINDIR)/$(CONFIG)/fling || ( echo test fling failed ; exit 1 )
$(E) "[RUN] Testing fling_stream"
$(Q) $(BINDIR)/$(CONFIG)/fling_stream || ( echo test fling_stream failed ; exit 1 )
$(E) "[RUN] Testing fling_stream_test"
$(Q) $(BINDIR)/$(CONFIG)/fling_stream_test || ( echo test fling_stream_test failed ; exit 1 )
$(E) "[RUN] Testing fling_test"
$(Q) $(BINDIR)/$(CONFIG)/fling_test || ( echo test fling_test failed ; exit 1 )
$(E) "[RUN] Testing fork_test"
$(Q) $(BINDIR)/$(CONFIG)/fork_test || ( echo test fork_test failed ; exit 1 )
$(E) "[RUN] Testing format_request_test"
@ -1974,10 +1974,10 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/grpc_completion_queue_test || ( echo test grpc_completion_queue_test failed ; exit 1 )
$(E) "[RUN] Testing grpc_ipv6_loopback_available_test"
$(Q) $(BINDIR)/$(CONFIG)/grpc_ipv6_loopback_available_test || ( echo test grpc_ipv6_loopback_available_test failed ; exit 1 )
$(E) "[RUN] Testing handshake_server_with_readahead_handshaker"
$(Q) $(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker || ( echo test handshake_server_with_readahead_handshaker failed ; exit 1 )
$(E) "[RUN] Testing handshake_verify_peer_options"
$(Q) $(BINDIR)/$(CONFIG)/handshake_verify_peer_options || ( echo test handshake_verify_peer_options failed ; exit 1 )
$(E) "[RUN] Testing handshake_server_with_readahead_handshaker_test"
$(Q) $(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker_test || ( echo test handshake_server_with_readahead_handshaker_test failed ; exit 1 )
$(E) "[RUN] Testing handshake_verify_peer_options_test"
$(Q) $(BINDIR)/$(CONFIG)/handshake_verify_peer_options_test || ( echo test handshake_verify_peer_options_test failed ; exit 1 )
$(E) "[RUN] Testing histogram_test"
$(Q) $(BINDIR)/$(CONFIG)/histogram_test || ( echo test histogram_test failed ; exit 1 )
$(E) "[RUN] Testing host_port_test"
@ -2054,12 +2054,10 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/secure_endpoint_test || ( echo test secure_endpoint_test failed ; exit 1 )
$(E) "[RUN] Testing security_connector_test"
$(Q) $(BINDIR)/$(CONFIG)/security_connector_test || ( echo test security_connector_test failed ; exit 1 )
$(E) "[RUN] Testing sequential_connectivity_test"
$(Q) $(BINDIR)/$(CONFIG)/sequential_connectivity_test || ( echo test sequential_connectivity_test failed ; exit 1 )
$(E) "[RUN] Testing server_chttp2_test"
$(Q) $(BINDIR)/$(CONFIG)/server_chttp2_test || ( echo test server_chttp2_test failed ; exit 1 )
$(E) "[RUN] Testing server_ssl"
$(Q) $(BINDIR)/$(CONFIG)/server_ssl || ( echo test server_ssl failed ; exit 1 )
$(E) "[RUN] Testing server_ssl_test"
$(Q) $(BINDIR)/$(CONFIG)/server_ssl_test || ( echo test server_ssl_test failed ; exit 1 )
$(E) "[RUN] Testing server_test"
$(Q) $(BINDIR)/$(CONFIG)/server_test || ( echo test server_test failed ; exit 1 )
$(E) "[RUN] Testing slice_buffer_test"
@ -2186,6 +2184,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/bm_fullstack_streaming_ping_pong || ( echo test bm_fullstack_streaming_ping_pong failed ; exit 1 )
$(E) "[RUN] Testing bm_fullstack_unary_ping_pong"
$(Q) $(BINDIR)/$(CONFIG)/bm_fullstack_unary_ping_pong || ( echo test bm_fullstack_unary_ping_pong failed ; exit 1 )
$(E) "[RUN] Testing bm_metadata"
$(Q) $(BINDIR)/$(CONFIG)/bm_metadata || ( echo test bm_metadata failed ; exit 1 )
$(E) "[RUN] Testing bm_pollset"
$(Q) $(BINDIR)/$(CONFIG)/bm_pollset || ( echo test bm_pollset failed ; exit 1 )
$(E) "[RUN] Testing bm_timer"
@ -2206,8 +2206,6 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/channelz_registry_test || ( echo test channelz_registry_test failed ; exit 1 )
$(E) "[RUN] Testing channelz_service_test"
$(Q) $(BINDIR)/$(CONFIG)/channelz_service_test || ( echo test channelz_service_test failed ; exit 1 )
$(E) "[RUN] Testing channelz_test"
$(Q) $(BINDIR)/$(CONFIG)/channelz_test || ( echo test channelz_test failed ; exit 1 )
$(E) "[RUN] Testing cli_call_test"
$(Q) $(BINDIR)/$(CONFIG)/cli_call_test || ( echo test cli_call_test failed ; exit 1 )
$(E) "[RUN] Testing client_callback_end2end_test"
@ -2216,8 +2214,6 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/client_channel_stress_test || ( echo test client_channel_stress_test failed ; exit 1 )
$(E) "[RUN] Testing client_interceptors_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/client_interceptors_end2end_test || ( echo test client_interceptors_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing client_lb_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/client_lb_end2end_test || ( echo test client_lb_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing codegen_test_full"
$(Q) $(BINDIR)/$(CONFIG)/codegen_test_full || ( echo test codegen_test_full failed ; exit 1 )
$(E) "[RUN] Testing codegen_test_minimal"
@ -2234,8 +2230,6 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/destroy_grpclb_channel_with_active_connect_stress_test || ( echo test destroy_grpclb_channel_with_active_connect_stress_test failed ; exit 1 )
$(E) "[RUN] Testing duplicate_header_bad_client_test"
$(Q) $(BINDIR)/$(CONFIG)/duplicate_header_bad_client_test || ( echo test duplicate_header_bad_client_test failed ; exit 1 )
$(E) "[RUN] Testing end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/end2end_test || ( echo test end2end_test failed ; exit 1 )
$(E) "[RUN] Testing error_details_test"
$(Q) $(BINDIR)/$(CONFIG)/error_details_test || ( echo test error_details_test failed ; exit 1 )
$(E) "[RUN] Testing eventmanager_libuv_test"
@ -3655,6 +3649,7 @@ LIBGRPC_SRC = \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
@ -4283,6 +4278,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
@ -7619,34 +7615,34 @@ endif
endif
CLIENT_SSL_SRC = \
CLIENT_SSL_TEST_SRC = \
test/core/handshake/client_ssl.cc \
CLIENT_SSL_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CLIENT_SSL_SRC))))
CLIENT_SSL_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CLIENT_SSL_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/client_ssl: openssl_dep_error
$(BINDIR)/$(CONFIG)/client_ssl_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/client_ssl: $(CLIENT_SSL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(BINDIR)/$(CONFIG)/client_ssl_test: $(CLIENT_SSL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(CLIENT_SSL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/client_ssl
$(Q) $(LDXX) $(LDFLAGS) $(CLIENT_SSL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/client_ssl_test
endif
$(OBJDIR)/$(CONFIG)/test/core/handshake/client_ssl.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
deps_client_ssl: $(CLIENT_SSL_OBJS:.o=.dep)
deps_client_ssl_test: $(CLIENT_SSL_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(CLIENT_SSL_OBJS:.o=.dep)
-include $(CLIENT_SSL_TEST_OBJS:.o=.dep)
endif
endif
@ -8353,28 +8349,28 @@ endif
endif
FLING_SRC = \
FLING_STREAM_TEST_SRC = \
test/core/end2end/data/client_certs.cc \
test/core/end2end/data/server1_cert.cc \
test/core/end2end/data/server1_key.cc \
test/core/end2end/data/test_root_cert.cc \
test/core/fling/fling_test.cc \
test/core/fling/fling_stream_test.cc \
FLING_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(FLING_SRC))))
FLING_STREAM_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(FLING_STREAM_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/fling: openssl_dep_error
$(BINDIR)/$(CONFIG)/fling_stream_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/fling: $(FLING_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(BINDIR)/$(CONFIG)/fling_stream_test: $(FLING_STREAM_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(FLING_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/fling
$(Q) $(LDXX) $(LDFLAGS) $(FLING_STREAM_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/fling_stream_test
endif
@ -8386,39 +8382,39 @@ $(OBJDIR)/$(CONFIG)/test/core/end2end/data/server1_key.o: $(LIBDIR)/$(CONFIG)/l
$(OBJDIR)/$(CONFIG)/test/core/end2end/data/test_root_cert.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(OBJDIR)/$(CONFIG)/test/core/fling/fling_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(OBJDIR)/$(CONFIG)/test/core/fling/fling_stream_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
deps_fling: $(FLING_OBJS:.o=.dep)
deps_fling_stream_test: $(FLING_STREAM_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(FLING_OBJS:.o=.dep)
-include $(FLING_STREAM_TEST_OBJS:.o=.dep)
endif
endif
FLING_STREAM_SRC = \
FLING_TEST_SRC = \
test/core/end2end/data/client_certs.cc \
test/core/end2end/data/server1_cert.cc \
test/core/end2end/data/server1_key.cc \
test/core/end2end/data/test_root_cert.cc \
test/core/fling/fling_stream_test.cc \
test/core/fling/fling_test.cc \
FLING_STREAM_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(FLING_STREAM_SRC))))
FLING_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(FLING_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/fling_stream: openssl_dep_error
$(BINDIR)/$(CONFIG)/fling_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/fling_stream: $(FLING_STREAM_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(BINDIR)/$(CONFIG)/fling_test: $(FLING_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(FLING_STREAM_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/fling_stream
$(Q) $(LDXX) $(LDFLAGS) $(FLING_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/fling_test
endif
@ -8430,13 +8426,13 @@ $(OBJDIR)/$(CONFIG)/test/core/end2end/data/server1_key.o: $(LIBDIR)/$(CONFIG)/l
$(OBJDIR)/$(CONFIG)/test/core/end2end/data/test_root_cert.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(OBJDIR)/$(CONFIG)/test/core/fling/fling_stream_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(OBJDIR)/$(CONFIG)/test/core/fling/fling_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
deps_fling_stream: $(FLING_STREAM_OBJS:.o=.dep)
deps_fling_test: $(FLING_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(FLING_STREAM_OBJS:.o=.dep)
-include $(FLING_TEST_OBJS:.o=.dep)
endif
endif
@ -8715,25 +8711,25 @@ endif
endif
HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_SRC = \
HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_TEST_SRC = \
test/core/handshake/readahead_handshaker_server_ssl.cc \
test/core/handshake/server_ssl_common.cc \
HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_SRC))))
HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker: openssl_dep_error
$(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker: $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker_test: $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker
$(Q) $(LDXX) $(LDFLAGS) $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker_test
endif
@ -8741,43 +8737,43 @@ $(OBJDIR)/$(CONFIG)/test/core/handshake/readahead_handshaker_server_ssl.o: $(LI
$(OBJDIR)/$(CONFIG)/test/core/handshake/server_ssl_common.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
deps_handshake_server_with_readahead_handshaker: $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_OBJS:.o=.dep)
deps_handshake_server_with_readahead_handshaker_test: $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_OBJS:.o=.dep)
-include $(HANDSHAKE_SERVER_WITH_READAHEAD_HANDSHAKER_TEST_OBJS:.o=.dep)
endif
endif
HANDSHAKE_VERIFY_PEER_OPTIONS_SRC = \
HANDSHAKE_VERIFY_PEER_OPTIONS_TEST_SRC = \
test/core/handshake/verify_peer_options.cc \
HANDSHAKE_VERIFY_PEER_OPTIONS_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(HANDSHAKE_VERIFY_PEER_OPTIONS_SRC))))
HANDSHAKE_VERIFY_PEER_OPTIONS_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(HANDSHAKE_VERIFY_PEER_OPTIONS_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/handshake_verify_peer_options: openssl_dep_error
$(BINDIR)/$(CONFIG)/handshake_verify_peer_options_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/handshake_verify_peer_options: $(HANDSHAKE_VERIFY_PEER_OPTIONS_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(BINDIR)/$(CONFIG)/handshake_verify_peer_options_test: $(HANDSHAKE_VERIFY_PEER_OPTIONS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(HANDSHAKE_VERIFY_PEER_OPTIONS_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/handshake_verify_peer_options
$(Q) $(LDXX) $(LDFLAGS) $(HANDSHAKE_VERIFY_PEER_OPTIONS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/handshake_verify_peer_options_test
endif
$(OBJDIR)/$(CONFIG)/test/core/handshake/verify_peer_options.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
deps_handshake_verify_peer_options: $(HANDSHAKE_VERIFY_PEER_OPTIONS_OBJS:.o=.dep)
deps_handshake_verify_peer_options_test: $(HANDSHAKE_VERIFY_PEER_OPTIONS_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(HANDSHAKE_VERIFY_PEER_OPTIONS_OBJS:.o=.dep)
-include $(HANDSHAKE_VERIFY_PEER_OPTIONS_TEST_OBJS:.o=.dep)
endif
endif
@ -10137,25 +10133,25 @@ endif
endif
SERVER_SSL_SRC = \
SERVER_SSL_TEST_SRC = \
test/core/handshake/server_ssl.cc \
test/core/handshake/server_ssl_common.cc \
SERVER_SSL_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(SERVER_SSL_SRC))))
SERVER_SSL_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(SERVER_SSL_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/server_ssl: openssl_dep_error
$(BINDIR)/$(CONFIG)/server_ssl_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/server_ssl: $(SERVER_SSL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(BINDIR)/$(CONFIG)/server_ssl_test: $(SERVER_SSL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(SERVER_SSL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/server_ssl
$(Q) $(LDXX) $(LDFLAGS) $(SERVER_SSL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/server_ssl_test
endif
@ -10163,11 +10159,11 @@ $(OBJDIR)/$(CONFIG)/test/core/handshake/server_ssl.o: $(LIBDIR)/$(CONFIG)/libgr
$(OBJDIR)/$(CONFIG)/test/core/handshake/server_ssl_common.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a
deps_server_ssl: $(SERVER_SSL_OBJS:.o=.dep)
deps_server_ssl_test: $(SERVER_SSL_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(SERVER_SSL_OBJS:.o=.dep)
-include $(SERVER_SSL_TEST_OBJS:.o=.dep)
endif
endif

@ -28,6 +28,7 @@ For instructions on how to use the language-specific gRPC runtime for a project,
* [Dart](https://github.com/grpc/grpc-dart): pub package `grpc`
* [Go](https://github.com/grpc/grpc-go): `go get google.golang.org/grpc`
* [Java](https://github.com/grpc/grpc-java): Use JARs from Maven Central Repository
* [Kotlin](https://github.com/grpc/grpc-kotlin): Use JARs from Maven Central Repository
* [Node](https://github.com/grpc/grpc-node): `npm install grpc`
* [Objective-C](src/objective-c): Add `gRPC-ProtoRPC` dependency to podspec
* [PHP](src/php): `pecl install grpc`
@ -77,8 +78,9 @@ Libraries in different languages may be in various states of development. We are
| Language | Source repo |
|-------------------------|------------------------------------------------------|
| Java | [grpc-java](https://github.com/grpc/grpc-java) |
| Go | [grpc-go](https://github.com/grpc/grpc-go) |
| Java | [grpc-java](https://github.com/grpc/grpc-java) |
| Kotlin | [grpc-kotlin](https://github.com/grpc/grpc-kotlin) |
| Go | [grpc-go](https://github.com/grpc/grpc-go) |
| NodeJS | [grpc-node](https://github.com/grpc/grpc-node) |
| WebJS | [grpc-web](https://github.com/grpc/grpc-web) |
| Dart | [grpc-dart](https://github.com/grpc/grpc-dart) |

@ -63,12 +63,15 @@ def pyx_library(name, deps = [], py_deps = [], srcs = [], **kwargs):
)
shared_objects.append(shared_object_name)
data = shared_objects[:]
data += kwargs.pop("data", [])
# Now create a py_library with these shared objects as data.
native.py_library(
name = name,
srcs = py_srcs,
deps = py_deps,
srcs_version = "PY2AND3",
data = shared_objects,
data = data,
**kwargs
)

@ -385,6 +385,7 @@ libs:
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@ -742,6 +743,7 @@ libs:
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@ -1279,6 +1281,7 @@ libs:
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@ -1571,6 +1574,7 @@ libs:
- src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
- src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@ -3043,7 +3047,7 @@ targets:
- gpr
- address_sorting
- upb
- name: client_ssl
- name: client_ssl_test
build: test
language: c
headers: []
@ -3357,7 +3361,7 @@ targets:
- linux
- posix
- mac
- name: fling
- name: fling_stream_test
build: test
language: c
headers:
@ -3367,7 +3371,7 @@ targets:
- test/core/end2end/data/server1_cert.cc
- test/core/end2end/data/server1_key.cc
- test/core/end2end/data/test_root_cert.cc
- test/core/fling/fling_test.cc
- test/core/fling/fling_stream_test.cc
deps:
- grpc_test_util
- grpc
@ -3378,7 +3382,7 @@ targets:
- linux
- posix
- mac
- name: fling_stream
- name: fling_test
build: test
language: c
headers:
@ -3388,7 +3392,7 @@ targets:
- test/core/end2end/data/server1_cert.cc
- test/core/end2end/data/server1_key.cc
- test/core/end2end/data/test_root_cert.cc
- test/core/fling/fling_stream_test.cc
- test/core/fling/fling_test.cc
deps:
- grpc_test_util
- grpc
@ -3510,7 +3514,7 @@ targets:
- gpr
- address_sorting
- upb
- name: handshake_server_with_readahead_handshaker
- name: handshake_server_with_readahead_handshaker_test
build: test
language: c
headers:
@ -3528,7 +3532,7 @@ targets:
- linux
- posix
- mac
- name: handshake_verify_peer_options
- name: handshake_verify_peer_options_test
build: test
language: c
headers: []
@ -4085,6 +4089,7 @@ targets:
- upb
- name: sequential_connectivity_test
build: test
run: false
language: c
headers:
- test/core/end2end/data/ssl_test_data.h
@ -4112,7 +4117,7 @@ targets:
- gpr
- address_sorting
- upb
- name: server_ssl
- name: server_ssl_test
build: test
language: c
headers:
@ -5196,7 +5201,6 @@ targets:
- posix
- name: bm_metadata
build: test
run: false
language: c++
headers: []
src:
@ -5441,6 +5445,7 @@ targets:
- name: channelz_test
gtest: true
build: test
run: false
language: c++
headers:
- test/cpp/util/channel_trace_proto_helper.h
@ -5578,6 +5583,7 @@ targets:
- name: client_lb_end2end_test
gtest: true
build: test
run: false
language: c++
headers:
- test/core/util/test_lb_policies.h
@ -5732,6 +5738,7 @@ targets:
- name: end2end_test
gtest: true
build: test
run: false
language: c++
headers:
- test/cpp/end2end/interceptors_util.h

@ -53,6 +53,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \

@ -22,6 +22,7 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\lb_policy\\child_policy_handler.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\client_load_reporting_filter.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_balancer_addresses.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_channel_secure.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\grpclb_client_stats.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\grpclb\\load_balancer_api.cc " +

@ -85,7 +85,7 @@ int main(int argc, char** argv) {
args.SetCompressionAlgorithm(GRPC_COMPRESS_GZIP);
GreeterClient greeter(grpc::CreateCustomChannel(
"localhost:50051", grpc::InsecureChannelCredentials(), args));
std::string user("world");
std::string user("world world world world");
std::string reply = greeter.SayHello(user);
std::cout << "Greeter received: " << reply << std::endl;
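For reference, the Python examples elsewhere in this change expose a similar channel-level compression option; a hedged sketch of the equivalent client-side gzip setting (illustrative only, not part of this commit, and assuming the generated helloworld stubs from the Python example):

```python
# Hypothetical Python analogue of the C++ compression change above (not part
# of this commit): request gzip compression for all calls on the channel.
import grpc

import helloworld_pb2
import helloworld_pb2_grpc

with grpc.insecure_channel("localhost:50051",
                           compression=grpc.Compression.Gzip) as channel:
    stub = helloworld_pb2_grpc.GreeterStub(channel)
    reply = stub.SayHello(
        helloworld_pb2.HelloRequest(name="world world world world"))
    print("Greeter received:", reply.message)
```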

@ -60,7 +60,7 @@ if(GRPC_AS_SUBMODULE)
else()
set(_PROTOBUF_PROTOC $<TARGET_FILE:protobuf::protoc>)
endif()
set(_GRPC_GRPCPP_UNSECURE grpc++_unsecure)
set(_GRPC_GRPCPP grpc++)
if(CMAKE_CROSSCOMPILING)
find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin)
else()
@ -87,7 +87,7 @@ elseif(GRPC_FETCHCONTENT)
set(_PROTOBUF_LIBPROTOBUF libprotobuf)
set(_REFLECTION grpc++_reflection)
set(_PROTOBUF_PROTOC $<TARGET_FILE:protoc>)
set(_GRPC_GRPCPP_UNSECURE grpc++_unsecure)
set(_GRPC_GRPCPP grpc++)
if(CMAKE_CROSSCOMPILING)
find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin)
else()
@ -116,7 +116,7 @@ else()
find_package(gRPC CONFIG REQUIRED)
message(STATUS "Using gRPC ${gRPC_VERSION}")
set(_GRPC_GRPCPP_UNSECURE gRPC::grpc++_unsecure)
set(_GRPC_GRPCPP gRPC::grpc++)
if(CMAKE_CROSSCOMPILING)
find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin)
else()
@ -155,6 +155,6 @@ foreach(_target
${hw_grpc_srcs})
target_link_libraries(${_target}
${_REFLECTION}
${_GRPC_GRPCPP_UNSECURE}
${_GRPC_GRPCPP}
${_PROTOBUF_LIBPROTOBUF})
endforeach()

@ -0,0 +1,68 @@
gRPC Hostname Example
=====================
The hostname example is a Hello World server whose response includes its
hostname. It also supports health and reflection services. This makes it a good
server to test infrastructure, like load balancing.
The example requires grpc to already be built. You are strongly encouraged
to check out a git release tag, since there will already be a build of gRPC
available.
### Run the example
1. Navigate to this directory:
```sh
cd grpc/examples/python/xds
```
2. Run the server
```sh
virtualenv venv -p python3
source venv/bin/activate
pip install -r requirements.txt
python server.py
```
3. Verify the Server
This step is not strictly necessary, but you can use it as a sanity check if
you'd like. If you don't have it, install
[`grpcurl`](https://github.com/fullstorydev/grpcurl/releases). This will allow
you to manually test the service.
Exercise your server's application-layer service:
```sh
> grpcurl --plaintext -d '{"name": "you"}' localhost:50051
{
"message": "Hello you from rbell.svl.corp.google.com!"
}
```
Make sure that all of your server's services are available via reflection:
```sh
> grpcurl --plaintext localhost:50051 list
grpc.health.v1.Health
grpc.reflection.v1alpha.ServerReflection
helloworld.Greeter
```
Make sure that your services are reporting healthy:
```sh
> grpcurl --plaintext -d '{"service": "helloworld.Greeter"}' localhost:50051
grpc.health.v1.Health/Check
{
"status": "SERVING"
}
> grpcurl --plaintext -d '{"service": ""}' localhost:50051
grpc.health.v1.Health/Check
{
"status": "SERVING"
}
```
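The same checks can also be scripted in Python rather than grpcurl. A minimal sketch (illustrative only, not part of this commit) that queries the health service added by this example, assuming the server above is listening on localhost:50051:

```python
# Illustrative health-check client for the example server (not part of this
# commit). Requires the grpcio-health-checking package from requirements.txt.
import grpc
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc


def check(service: str) -> None:
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = health_pb2_grpc.HealthStub(channel)
        # An empty service name asks for the overall server status.
        response = stub.Check(health_pb2.HealthCheckRequest(service=service))
        status = health_pb2.HealthCheckResponse.ServingStatus.Name(response.status)
        print(f"{service or '<server>'}: {status}")


if __name__ == "__main__":
    check("helloworld.Greeter")
    check("")
```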

@ -0,0 +1,134 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: helloworld.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='helloworld.proto',
package='helloworld',
syntax='proto3',
serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3')
)
_HELLOREQUEST = _descriptor.Descriptor(
name='HelloRequest',
full_name='helloworld.HelloRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='helloworld.HelloRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=60,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='helloworld.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='helloworld.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=91,
)
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
DESCRIPTOR = _HELLOREQUEST,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
))
_sym_db.RegisterMessage(HelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
_GREETER = _descriptor.ServiceDescriptor(
name='Greeter',
full_name='helloworld.Greeter',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=93,
serialized_end=166,
methods=[
_descriptor.MethodDescriptor(
name='SayHello',
full_name='helloworld.Greeter.SayHello',
index=0,
containing_service=None,
input_type=_HELLOREQUEST,
output_type=_HELLOREPLY,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_GREETER)
DESCRIPTOR.services_by_name['Greeter'] = _GREETER
# @@protoc_insertion_point(module_scope)

@ -0,0 +1,46 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import helloworld_pb2 as helloworld__pb2
class GreeterStub(object):
"""The greeting service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SayHello = channel.unary_unary(
'/helloworld.Greeter/SayHello',
request_serializer=helloworld__pb2.HelloRequest.SerializeToString,
response_deserializer=helloworld__pb2.HelloReply.FromString,
)
class GreeterServicer(object):
"""The greeting service definition.
"""
def SayHello(self, request, context):
"""Sends a greeting
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GreeterServicer_to_server(servicer, server):
rpc_method_handlers = {
'SayHello': grpc.unary_unary_rpc_method_handler(
servicer.SayHello,
request_deserializer=helloworld__pb2.HelloRequest.FromString,
response_serializer=helloworld__pb2.HelloReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'helloworld.Greeter', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))

@ -0,0 +1,5 @@
grpcio>=1.28.0
protobuf
grpcio-reflection
grpcio-health-checking

@ -0,0 +1,94 @@
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter server."""
from concurrent import futures
import argparse
import logging
import multiprocessing
import socket
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
from grpc_reflection.v1alpha import reflection
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
_DESCRIPTION = "A general purpose dummy server."
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def __init__(self, hostname: str):
self._hostname = hostname if hostname else socket.gethostname()
def SayHello(self, request: helloworld_pb2.HelloRequest,
context: grpc.ServicerContext) -> helloworld_pb2.HelloReply:
return helloworld_pb2.HelloReply(
message=f"Hello {request.name} from {self._hostname}!")
def serve(port: int, hostname: str):
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()))
# Add the application servicer to the server.
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(hostname), server)
# Create a health check servicer. We use the non-blocking implementation
# to avoid thread starvation.
health_servicer = health.HealthServicer(
experimental_non_blocking=True,
experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
health_pb2_grpc.add_HealthServicer_to_server(health_servicer, server)
# Create a tuple of all of the services we want to export via reflection.
services = tuple(
service.full_name
for service in helloworld_pb2.DESCRIPTOR.services_by_name.values()) + (
reflection.SERVICE_NAME, health.SERVICE_NAME)
# Add the reflection service to the server.
reflection.enable_server_reflection(services, server)
server.add_insecure_port(f"[::]:{port}")
server.start()
# Mark all services as healthy.
overall_server_health = ""
for service in services + (overall_server_health,):
health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
# Park the main application thread.
server.wait_for_termination()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=_DESCRIPTION)
parser.add_argument("port",
default=50051,
type=int,
nargs="?",
help="The port on which to listen.")
parser.add_argument("hostname",
type=str,
default=None,
nargs="?",
help="The name clients will see in responses.")
args = parser.parse_args()
logging.basicConfig()
serve(args.port, args.hostname)
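A minimal companion client for the server above, shown only as a sketch (it is not part of this change); it assumes the generated `helloworld_pb2` / `helloworld_pb2_grpc` modules from this example are importable and that the server is listening on port 50051.

```python
# Hypothetical client exercising the dummy server above.
# Assumes the server was started as: python3 server.py 50051 my-host
import grpc
import helloworld_pb2
import helloworld_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = helloworld_pb2_grpc.GreeterStub(channel)
    reply = stub.SayHello(helloworld_pb2.HelloRequest(name="world"))
    # Prints e.g. "Hello world from my-host!"
    print(reply.message)
```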

@ -236,6 +236,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
@ -685,6 +686,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',

@ -212,6 +212,8 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
@ -1033,6 +1035,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',

@ -134,6 +134,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc )

@ -445,6 +445,7 @@
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
@ -934,6 +935,7 @@
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',

@ -714,6 +714,10 @@ GRPCAPI grpc_server_credentials* grpc_local_server_credentials_create(
/** --- TLS channel/server credentials ---
* It is used for experimental purpose for now and subject to change. */
/** Struct for indicating errors. It is used for
* experimental purpose for now and subject to change. */
typedef struct grpc_tls_error_details grpc_tls_error_details;
/** Config for TLS key materials. It is used for
* experimental purpose for now and subject to change. */
typedef struct grpc_tls_key_materials_config grpc_tls_key_materials_config;
@ -857,7 +861,7 @@ struct grpc_tls_credential_reload_arg {
void* cb_user_data;
grpc_tls_key_materials_config* key_materials_config;
grpc_ssl_certificate_config_reload_status status;
const char* error_details;
grpc_tls_error_details* error_details;
grpc_tls_credential_reload_config* config;
void* context;
void (*destroy_context)(void* ctx);
@ -935,7 +939,7 @@ struct grpc_tls_server_authorization_check_arg {
const char* peer_cert;
const char* peer_cert_full_chain;
grpc_status_code status;
const char* error_details;
grpc_tls_error_details* error_details;
grpc_tls_server_authorization_check_config* config;
void* context;
void (*destroy_context)(void* ctx);

@ -272,7 +272,10 @@ class ClientBidiReactor {
void RemoveHold() { stream_->RemoveHold(); }
/// Notifies the application that all operations associated with this RPC
/// have completed and provides the RPC status outcome.
/// have completed and all Holds have been removed. OnDone provides the RPC
/// status outcome for both successful and failed RPCs and will be called in
/// all cases. If it is not called, it indicates an application-level problem
/// (like failure to remove a hold).
///
/// \param[in] s The status outcome of this RPC
virtual void OnDone(const ::grpc::Status& /*s*/) {}
@ -283,19 +286,21 @@ class ClientBidiReactor {
/// call of OnReadDone or OnDone.
///
/// \param[in] ok Was the initial metadata read successfully? If false, no
/// new read/write operation will succeed.
/// new read/write operation will succeed, and any further
/// Start* operations should not be called.
virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
/// Notifies the application that a StartRead operation completed.
///
/// \param[in] ok Was it successful? If false, no new read/write operation
/// will succeed.
/// will succeed, and any further Start* should not be called.
virtual void OnReadDone(bool /*ok*/) {}
/// Notifies the application that a StartWrite operation completed.
/// Notifies the application that a StartWrite or StartWriteLast operation
/// completed.
///
/// \param[in] ok Was it successful? If false, no new read/write operation
/// will succeed.
/// will succeed, and any further Start* should not be called.
virtual void OnWriteDone(bool /*ok*/) {}
/// Notifies the application that a StartWritesDone operation completed. Note
@ -303,7 +308,8 @@ class ClientBidiReactor {
/// those that are implicitly invoked as part of a StartWriteLast.
///
/// \param[in] ok Was it successful? If false, the application will later see
/// the failure reflected as a bad status in OnDone.
/// the failure reflected as a bad status in OnDone and no
/// further Start* should be called.
virtual void OnWritesDoneDone(bool /*ok*/) {}
private:

@ -303,10 +303,13 @@ class BidiStreamingHandler
::grpc_impl::ServerReaderWriter<ResponseType, RequestType>*)>
func,
ServiceType* service)
// TODO(vjpai): When gRPC supports C++14, move-capture func in the below
: TemplatedBidiStreamingHandler<
::grpc_impl::ServerReaderWriter<ResponseType, RequestType>, false>(
std::bind(func, service, std::placeholders::_1,
std::placeholders::_2)) {}
[func, service](
::grpc_impl::ServerContext* ctx,
::grpc_impl::ServerReaderWriter<ResponseType, RequestType>*
streamer) { return func(service, ctx, streamer); }) {}
};
template <class RequestType, class ResponseType>
@ -321,7 +324,7 @@ class StreamedUnaryHandler
func)
: TemplatedBidiStreamingHandler<
::grpc_impl::ServerUnaryStreamer<RequestType, ResponseType>, true>(
func) {}
std::move(func)) {}
};
template <class RequestType, class ResponseType>
@ -336,7 +339,7 @@ class SplitServerStreamingHandler
func)
: TemplatedBidiStreamingHandler<
::grpc_impl::ServerSplitStreamer<RequestType, ResponseType>, false>(
func) {}
std::move(func)) {}
};
/// General method handler class for errors that prevent real method use

@ -419,7 +419,7 @@ class ClientReaderWriterInterface : public internal::ClientStreamingInterface,
virtual void WaitForInitialMetadata() = 0;
/// Half close writing from the client. (signal that the stream of messages
/// coming from the clinet is complete).
/// coming from the client is complete).
/// Blocks until currently-pending writes are completed.
/// Thread-safe with respect to \a ReaderInterface::Read
///

@ -114,6 +114,8 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc" role="src" />

@ -1343,11 +1343,14 @@ void PrintHeaderServerMethodStreamedUnary(
printer->Print(*vars,
"WithStreamedUnaryMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::internal::StreamedUnaryHandler< $Request$, "
"$Response$>(std::bind"
"(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
"std::placeholders::_2)));\n"
" new ::grpc::internal::StreamedUnaryHandler<\n"
" $Request$, $Response$>(\n"
" [this](::grpc_impl::ServerContext* context,\n"
" ::grpc_impl::ServerUnaryStreamer<\n"
" $Request$, $Response$>* streamer) {\n"
" return this->Streamed$Method$(context,\n"
" streamer);\n"
" }));\n"
"}\n");
printer->Print(*vars,
"~WithStreamedUnaryMethod_$Method$() override {\n"
@ -1391,16 +1394,18 @@ void PrintHeaderServerMethodSplitStreaming(
"{}\n");
printer->Print(" public:\n");
printer->Indent();
printer->Print(
*vars,
"WithSplitStreamingMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::internal::SplitServerStreamingHandler< $Request$, "
"$Response$>(std::bind"
"(&WithSplitStreamingMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
"std::placeholders::_2)));\n"
"}\n");
printer->Print(*vars,
"WithSplitStreamingMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::internal::SplitServerStreamingHandler<\n"
" $Request$, $Response$>(\n"
" [this](::grpc_impl::ServerContext* context,\n"
" ::grpc_impl::ServerSplitStreamer<\n"
" $Request$, $Response$>* streamer) {\n"
" return this->Streamed$Method$(context,\n"
" streamer);\n"
" }));\n"
"}\n");
printer->Print(*vars,
"~WithSplitStreamingMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
@ -2251,7 +2256,12 @@ void PrintSourceService(grpc_generator::Printer* printer,
" new ::grpc::internal::RpcMethodHandler< $ns$$Service$::Service, "
"$Request$, "
"$Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
" []($ns$$Service$::Service* service,\n"
" ::grpc_impl::ServerContext* ctx,\n"
" const $Request$* req,\n"
" $Response$* resp) {\n"
" return service->$Method$(ctx, req, resp);\n"
" }, this)));\n");
} else if (ClientOnlyStreaming(method.get())) {
printer->Print(
*vars,
@ -2260,7 +2270,12 @@ void PrintSourceService(grpc_generator::Printer* printer,
" ::grpc::internal::RpcMethod::CLIENT_STREAMING,\n"
" new ::grpc::internal::ClientStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
" []($ns$$Service$::Service* service,\n"
" ::grpc_impl::ServerContext* ctx,\n"
" ::grpc_impl::ServerReader<$Request$>* reader,\n"
" $Response$* resp) {\n"
" return service->$Method$(ctx, reader, resp);\n"
" }, this)));\n");
} else if (ServerOnlyStreaming(method.get())) {
printer->Print(
*vars,
@ -2269,16 +2284,25 @@ void PrintSourceService(grpc_generator::Printer* printer,
" ::grpc::internal::RpcMethod::SERVER_STREAMING,\n"
" new ::grpc::internal::ServerStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
" []($ns$$Service$::Service* service,\n"
" ::grpc_impl::ServerContext* ctx,\n"
" const $Request$* req,\n"
" ::grpc_impl::ServerWriter<$Response$>* writer) {\n"
" return service->$Method$(ctx, req, writer);\n"
" }, this)));\n");
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::internal::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::internal::BidiStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
printer->Print(*vars,
"AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
" ::grpc::internal::RpcMethod::BIDI_STREAMING,\n"
" new ::grpc::internal::BidiStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" []($ns$$Service$::Service* service,\n"
" ::grpc_impl::ServerContext* ctx,\n"
" ::grpc_impl::ServerReaderWriter<$Response$,\n"
" $Request$>* stream) {\n"
" return service->$Method$(ctx, stream);\n"
" }, this)));\n");
}
}
printer->Outdent();

@ -1630,32 +1630,13 @@ void ChannelData::ProcessLbPolicy(
// If not, try the setting from channel args.
const char* policy_name = nullptr;
if (parsed_service_config != nullptr &&
parsed_service_config->parsed_deprecated_lb_policy() != nullptr) {
policy_name = parsed_service_config->parsed_deprecated_lb_policy();
!parsed_service_config->parsed_deprecated_lb_policy().empty()) {
policy_name = parsed_service_config->parsed_deprecated_lb_policy().c_str();
} else {
const grpc_arg* channel_arg =
grpc_channel_args_find(resolver_result.args, GRPC_ARG_LB_POLICY_NAME);
policy_name = grpc_channel_arg_get_string(channel_arg);
}
// Special case: If at least one balancer address is present, we use
// the grpclb policy, regardless of what the resolver has returned.
bool found_balancer_address = false;
for (size_t i = 0; i < resolver_result.addresses.size(); ++i) {
const ServerAddress& address = resolver_result.addresses[i];
if (address.IsBalancer()) {
found_balancer_address = true;
break;
}
}
if (found_balancer_address) {
if (policy_name != nullptr && strcmp(policy_name, "grpclb") != 0) {
gpr_log(GPR_INFO,
"resolver requested LB policy %s but provided at least one "
"balancer address -- forcing use of grpclb LB policy",
policy_name);
}
policy_name = "grpclb";
}
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
if (policy_name == nullptr) policy_name = "pick_first";
@ -3975,8 +3956,10 @@ bool CallData::PickSubchannelLocked(grpc_call_element* elem,
if (pick_queued_) RemoveCallFromQueuedPicksLocked(elem);
// Handle drops.
if (GPR_UNLIKELY(result.subchannel == nullptr)) {
result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy");
result.error = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
} else {
// Grab a ref to the connected subchannel while we're still
// holding the data plane mutex.

@ -16,6 +16,8 @@
#include <grpc/support/port_platform.h>
#include <cstring>
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "absl/strings/str_cat.h"
@ -138,8 +140,6 @@ void ChildPolicyHandler::ShutdownLocked() {
}
void ChildPolicyHandler::UpdateLocked(UpdateArgs args) {
// The name of the policy that this update wants us to use.
const char* child_policy_name = args.config->name();
// If the child policy name changes, we need to create a new child
// policy. When this happens, we leave child_policy_ as-is and store
// the new child policy in pending_child_policy_. Once the new child
@ -166,10 +166,10 @@ void ChildPolicyHandler::UpdateLocked(UpdateArgs args) {
// previous update that changed the policy name, or we have already
// finished swapping in the new policy; in this case, child_policy_
// is non-null but pending_child_policy_ is null). In this case:
// a. If child_policy_->name() equals child_policy_name, then we
// update the existing child policy.
// b. If child_policy_->name() does not equal child_policy_name,
// we create a new policy. The policy will be stored in
// a. If going from the current config to the new config does not
// require a new policy, then we update the existing child policy.
// b. If going from the current config to the new config does require a
// new policy, we create a new policy. The policy will be stored in
// pending_child_policy_ and will later be swapped into
// child_policy_ by the helper when the new child transitions
// into state READY.
@ -180,10 +180,11 @@ void ChildPolicyHandler::UpdateLocked(UpdateArgs args) {
// not yet transitioned into state READY and been swapped into
// child_policy_; in this case, both child_policy_ and
// pending_child_policy_ are non-null). In this case:
// a. If pending_child_policy_->name() equals child_policy_name,
// then we update the existing pending child policy.
// b. If pending_child_policy->name() does not equal
// child_policy_name, then we create a new policy. The new
// a. If going from the current config to the new config does not
// require a new policy, then we update the existing pending
// child policy.
// b. If going from the current config to the new config does require a
// new child policy, then we create a new policy. The new
// policy is stored in pending_child_policy_ (replacing the one
// that was there before, which will be immediately shut down)
// and will later be swapped into child_policy_ by the helper
@ -191,12 +192,10 @@ void ChildPolicyHandler::UpdateLocked(UpdateArgs args) {
const bool create_policy =
// case 1
child_policy_ == nullptr ||
// case 2b
(pending_child_policy_ == nullptr &&
strcmp(child_policy_->name(), child_policy_name) != 0) ||
// case 3b
(pending_child_policy_ != nullptr &&
strcmp(pending_child_policy_->name(), child_policy_name) != 0);
// cases 2b and 3b
ConfigChangeRequiresNewPolicyInstance(current_config_.get(),
args.config.get());
current_config_ = args.config;
LoadBalancingPolicy* policy_to_update = nullptr;
if (create_policy) {
// Cases 1, 2b, and 3b: create a new child policy.
@ -205,11 +204,11 @@ void ChildPolicyHandler::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO,
"[child_policy_handler %p] creating new %schild policy %s", this,
child_policy_ == nullptr ? "" : "pending ", child_policy_name);
child_policy_ == nullptr ? "" : "pending ", args.config->name());
}
auto& lb_policy =
child_policy_ == nullptr ? child_policy_ : pending_child_policy_;
lb_policy = CreateChildPolicy(child_policy_name, *args.args);
lb_policy = CreateChildPolicy(args.config->name(), *args.args);
policy_to_update = lb_policy.get();
} else {
// Cases 2a and 3a: update an existing policy.
@ -257,8 +256,7 @@ OrphanablePtr<LoadBalancingPolicy> ChildPolicyHandler::CreateChildPolicy(
std::unique_ptr<ChannelControlHelper>(helper);
lb_policy_args.args = &args;
OrphanablePtr<LoadBalancingPolicy> lb_policy =
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
child_policy_name, std::move(lb_policy_args));
CreateLoadBalancingPolicy(child_policy_name, std::move(lb_policy_args));
if (GPR_UNLIKELY(lb_policy == nullptr)) {
gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", child_policy_name);
return nullptr;
@ -277,4 +275,17 @@ OrphanablePtr<LoadBalancingPolicy> ChildPolicyHandler::CreateChildPolicy(
return lb_policy;
}
bool ChildPolicyHandler::ConfigChangeRequiresNewPolicyInstance(
LoadBalancingPolicy::Config* old_config,
LoadBalancingPolicy::Config* new_config) const {
return strcmp(old_config->name(), new_config->name()) != 0;
}
OrphanablePtr<LoadBalancingPolicy>
ChildPolicyHandler::CreateLoadBalancingPolicy(
const char* name, LoadBalancingPolicy::Args args) const {
return LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
name, std::move(args));
}
} // namespace grpc_core

@ -42,6 +42,18 @@ class ChildPolicyHandler : public LoadBalancingPolicy {
void ExitIdleLocked() override;
void ResetBackoffLocked() override;
// Returns true if transitioning from the old config to the new config
// requires instantiating a new policy object.
virtual bool ConfigChangeRequiresNewPolicyInstance(
LoadBalancingPolicy::Config* old_config,
LoadBalancingPolicy::Config* new_config) const;
// Instantiates a new policy of the specified name.
// May be overridden by subclasses to avoid recursion when an LB
// policy factory returns a ChildPolicyHandler.
virtual OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
const char* name, LoadBalancingPolicy::Args args) const;
private:
class Helper;
@ -55,6 +67,11 @@ class ChildPolicyHandler : public LoadBalancingPolicy {
bool shutting_down_ = false;
// The most recent config passed to UpdateLocked().
// If pending_child_policy_ is non-null, this is the config passed to
// pending_child_policy_; otherwise, it's the config passed to child_policy_.
RefCountedPtr<LoadBalancingPolicy::Config> current_config_;
// Child LB policy.
OrphanablePtr<LoadBalancingPolicy> child_policy_;
OrphanablePtr<LoadBalancingPolicy> pending_child_policy_;

@ -74,6 +74,7 @@
#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
@ -1241,25 +1242,11 @@ void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
// helper code for creating balancer channel
//
ServerAddressList ExtractBalancerAddresses(const ServerAddressList& addresses) {
ServerAddressList balancer_addresses;
for (size_t i = 0; i < addresses.size(); ++i) {
if (addresses[i].IsBalancer()) {
// Strip out the is_balancer channel arg, since we don't want to
// recursively use the grpclb policy in the channel used to talk to
// the balancers. Note that we do NOT strip out the balancer_name
// channel arg, since we need that to set the authority correctly
// to talk to the balancers.
static const char* args_to_remove[] = {
GRPC_ARG_ADDRESS_IS_BALANCER,
};
balancer_addresses.emplace_back(
addresses[i].address(),
grpc_channel_args_copy_and_remove(addresses[i].args(), args_to_remove,
GPR_ARRAY_SIZE(args_to_remove)));
}
}
return balancer_addresses;
ServerAddressList ExtractBalancerAddresses(const grpc_channel_args& args) {
const ServerAddressList* addresses =
FindGrpclbBalancerAddressesInChannelArgs(args);
if (addresses != nullptr) return *addresses;
return ServerAddressList();
}
/* Returns the channel args for the LB channel, used to create a bidirectional
@ -1452,27 +1439,25 @@ void GrpcLb::UpdateLocked(UpdateArgs args) {
// helpers for UpdateLocked()
//
// Returns the backend addresses extracted from the given addresses.
ServerAddressList ExtractBackendAddresses(const ServerAddressList& addresses) {
ServerAddressList AddNullLbTokenToAddresses(
const ServerAddressList& addresses) {
static const char* lb_token = "";
grpc_arg arg = grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_GRPCLB_ADDRESS_LB_TOKEN),
const_cast<char*>(lb_token), &lb_token_arg_vtable);
ServerAddressList backend_addresses;
ServerAddressList addresses_out;
for (size_t i = 0; i < addresses.size(); ++i) {
if (!addresses[i].IsBalancer()) {
backend_addresses.emplace_back(
addresses[i].address(),
grpc_channel_args_copy_and_add(addresses[i].args(), &arg, 1));
}
addresses_out.emplace_back(
addresses[i].address(),
grpc_channel_args_copy_and_add(addresses[i].args(), &arg, 1));
}
return backend_addresses;
return addresses_out;
}
void GrpcLb::ProcessAddressesAndChannelArgsLocked(
const ServerAddressList& addresses, const grpc_channel_args& args) {
// Update fallback address list.
fallback_backend_addresses_ = ExtractBackendAddresses(addresses);
fallback_backend_addresses_ = AddNullLbTokenToAddresses(addresses);
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
@ -1482,7 +1467,7 @@ void GrpcLb::ProcessAddressesAndChannelArgsLocked(
args_ = grpc_channel_args_copy_and_add_and_remove(
&args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
// Construct args for balancer channel.
ServerAddressList balancer_addresses = ExtractBalancerAddresses(addresses);
ServerAddressList balancer_addresses = ExtractBalancerAddresses(args);
grpc_channel_args* lb_channel_args = BuildBalancerChannelArgs(
balancer_addresses, response_generator_.get(), &args);
// Create balancer channel if needed.

@ -0,0 +1,89 @@
//
// Copyright 2019 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/useful.h"
// Channel arg key for the list of balancer addresses.
#define GRPC_ARG_GRPCLB_BALANCER_ADDRESSES "grpc.grpclb_balancer_addresses"
// Channel arg key for a string indicating an address's balancer name.
#define GRPC_ARG_ADDRESS_BALANCER_NAME "grpc.address_balancer_name"
namespace grpc_core {
namespace {
void* BalancerAddressesArgCopy(void* p) {
ServerAddressList* address_list = static_cast<ServerAddressList*>(p);
return new ServerAddressList(*address_list);
}
void BalancerAddressesArgDestroy(void* p) {
ServerAddressList* address_list = static_cast<ServerAddressList*>(p);
delete address_list;
}
int BalancerAddressesArgCmp(void* p, void* q) {
ServerAddressList* address_list1 = static_cast<ServerAddressList*>(p);
ServerAddressList* address_list2 = static_cast<ServerAddressList*>(q);
if (address_list1 == nullptr || address_list2 == nullptr) {
return GPR_ICMP(address_list1, address_list2);
}
if (address_list1->size() > address_list2->size()) return 1;
if (address_list1->size() < address_list2->size()) return -1;
for (size_t i = 0; i < address_list1->size(); ++i) {
int retval = (*address_list1)[i].Cmp((*address_list2)[i]);
if (retval != 0) return retval;
}
return 0;
}
const grpc_arg_pointer_vtable kBalancerAddressesArgVtable = {
BalancerAddressesArgCopy, BalancerAddressesArgDestroy,
BalancerAddressesArgCmp};
} // namespace
grpc_arg CreateGrpclbBalancerAddressesArg(
const ServerAddressList* address_list) {
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_GRPCLB_BALANCER_ADDRESSES),
const_cast<ServerAddressList*>(address_list),
&kBalancerAddressesArgVtable);
}
const ServerAddressList* FindGrpclbBalancerAddressesInChannelArgs(
const grpc_channel_args& args) {
return grpc_channel_args_find_pointer<const ServerAddressList>(
&args, const_cast<char*>(GRPC_ARG_GRPCLB_BALANCER_ADDRESSES));
}
grpc_arg CreateGrpclbBalancerNameArg(const char* balancer_name) {
return grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_ADDRESS_BALANCER_NAME),
const_cast<char*>(balancer_name));
}
const char* FindGrpclbBalancerNameInChannelArgs(const grpc_channel_args& args) {
return grpc_channel_args_find_string(
&args, const_cast<char*>(GRPC_ARG_ADDRESS_BALANCER_NAME));
}
} // namespace grpc_core

@ -0,0 +1,40 @@
//
// Copyright 2019 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_BALANCER_ADDRESSES_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_BALANCER_ADDRESSES_H
#include <grpc/support/port_platform.h>
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/ext/filters/client_channel/server_address.h"
namespace grpc_core {
grpc_arg CreateGrpclbBalancerAddressesArg(
const ServerAddressList* address_list);
const ServerAddressList* FindGrpclbBalancerAddressesInChannelArgs(
const grpc_channel_args& args);
grpc_arg CreateGrpclbBalancerNameArg(const char* balancer_name);
const char* FindGrpclbBalancerNameInChannelArgs(const grpc_channel_args& args);
} // namespace grpc_core
#endif /* \
GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_BALANCER_ADDRESSES_H \
*/

@ -27,6 +27,7 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
@ -55,8 +56,8 @@ RefCountedPtr<TargetAuthorityTable> CreateTargetAuthorityTable(
grpc_sockaddr_to_string(&addr_str, &addresses[i].address(), true) > 0);
target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str);
gpr_free(addr_str);
char* balancer_name = grpc_channel_arg_get_string(grpc_channel_args_find(
addresses[i].args(), GRPC_ARG_ADDRESS_BALANCER_NAME));
const char* balancer_name =
FindGrpclbBalancerNameInChannelArgs(*addresses[i].args());
target_authority_entries[i].value.reset(gpr_strdup(balancer_name));
}
RefCountedPtr<TargetAuthorityTable> target_authority_table =

@ -370,13 +370,6 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
GRPC_ARG_SERVICE_CONFIG};
// Create a subchannel for each address.
for (size_t i = 0; i < addresses.size(); i++) {
// TODO(roth): we should ideally hide this from the LB policy code. In
// principle, if we're dealing with this special case in the client_channel
// code for selecting grpclb, then we should also strip out these addresses
// there if we're not using grpclb.
if (addresses[i].IsBalancer()) {
continue;
}
InlinedVector<grpc_arg, 3> args_to_add;
const size_t subchannel_address_arg_index = args_to_add.size();
args_to_add.emplace_back(

@ -25,6 +25,8 @@
#include <limits.h>
#include <string.h>
#include "absl/types/optional.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
@ -80,7 +82,7 @@ class XdsConfig : public LoadBalancingPolicy::Config {
XdsConfig(RefCountedPtr<LoadBalancingPolicy::Config> child_policy,
RefCountedPtr<LoadBalancingPolicy::Config> fallback_policy,
std::string eds_service_name,
Optional<std::string> lrs_load_reporting_server_name)
absl::optional<std::string> lrs_load_reporting_server_name)
: child_policy_(std::move(child_policy)),
fallback_policy_(std::move(fallback_policy)),
eds_service_name_(std::move(eds_service_name)),
@ -101,7 +103,7 @@ class XdsConfig : public LoadBalancingPolicy::Config {
return eds_service_name_.empty() ? nullptr : eds_service_name_.c_str();
};
const Optional<std::string>& lrs_load_reporting_server_name() const {
const absl::optional<std::string>& lrs_load_reporting_server_name() const {
return lrs_load_reporting_server_name_;
};
@ -109,7 +111,7 @@ class XdsConfig : public LoadBalancingPolicy::Config {
RefCountedPtr<LoadBalancingPolicy::Config> child_policy_;
RefCountedPtr<LoadBalancingPolicy::Config> fallback_policy_;
std::string eds_service_name_;
Optional<std::string> lrs_load_reporting_server_name_;
absl::optional<std::string> lrs_load_reporting_server_name_;
};
class XdsLb : public LoadBalancingPolicy {
@ -494,6 +496,16 @@ XdsLb::PickResult XdsLb::LocalityPicker::Pick(PickArgs args) {
result.type = PickResult::PICK_COMPLETE;
return result;
}
// If we didn't drop, we better have some localities to pick from.
if (pickers_.empty()) { // Should never happen.
PickResult result;
result.type = PickResult::PICK_FAILED;
result.error =
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"xds picker not given any localities"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL);
return result;
}
// Generate a random number in [0, total weight).
const uint32_t key = rand() % pickers_[pickers_.size() - 1].first;
// Forward pick to whichever locality maps to the range in which the
@ -568,7 +580,7 @@ class XdsLb::EndpointWatcher : public XdsClient::EndpointWatcherInterface {
}
// If the balancer tells us to drop all the calls, we should exit fallback
// mode immediately.
if (update.drop_all) xds_policy_->MaybeExitFallbackMode();
if (update.drop_config->drop_all()) xds_policy_->MaybeExitFallbackMode();
// Update the drop config.
const bool drop_config_changed =
xds_policy_->drop_config_ == nullptr ||
@ -723,7 +735,6 @@ void XdsLb::UpdateLocked(UpdateArgs args) {
}
const bool is_initial_update = args_ == nullptr;
// Update config.
const char* old_eds_service_name = eds_service_name();
auto old_config = std::move(config_);
config_ = std::move(args.config);
// Update fallback address list.
@ -771,30 +782,8 @@ void XdsLb::UpdateLocked(UpdateArgs args) {
eds_service_name(), eds_service_name());
}
}
// Update priority list.
// Note that this comes after updating drop_stats_, since we want that
// to be used by any new picker we create here.
// No need to do this on the initial update, since there won't be any
// priorities to update yet.
if (!is_initial_update) {
const bool update_locality_stats =
config_->lrs_load_reporting_server_name() !=
old_config->lrs_load_reporting_server_name() ||
strcmp(old_eds_service_name, eds_service_name()) != 0;
UpdatePrioritiesLocked(update_locality_stats);
}
// Update endpoint watcher if needed.
if (is_initial_update ||
strcmp(old_eds_service_name, eds_service_name()) != 0) {
if (!is_initial_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
gpr_log(GPR_INFO, "[xdslb %p] cancelling watch for %s", this,
old_eds_service_name);
}
xds_client()->CancelEndpointDataWatch(StringView(old_eds_service_name),
endpoint_watcher_,
/*delay_unsubscription=*/true);
}
// On the initial update, create the endpoint watcher.
if (is_initial_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
gpr_log(GPR_INFO, "[xdslb %p] starting watch for %s", this,
eds_service_name());
@ -804,6 +793,16 @@ void XdsLb::UpdateLocked(UpdateArgs args) {
endpoint_watcher_ = watcher.get();
xds_client()->WatchEndpointData(StringView(eds_service_name()),
std::move(watcher));
} else {
// Update priority list.
// Note that this comes after updating drop_stats_, since we want that
// to be used by any new picker we create here.
// No need to do this on the initial update, since there won't be any
// priorities to update yet.
const bool update_locality_stats =
config_->lrs_load_reporting_server_name() !=
old_config->lrs_load_reporting_server_name();
UpdatePrioritiesLocked(update_locality_stats);
}
}
@ -930,13 +929,24 @@ void XdsLb::UpdatePrioritiesLocked(bool update_locality_stats) {
void XdsLb::UpdateXdsPickerLocked() {
// If we are in fallback mode, don't generate an xds picker from localities.
if (fallback_policy_ != nullptr) return;
if (current_priority_ == UINT32_MAX) {
grpc_error* error = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("no ready locality map"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
// If we're dropping all calls, report READY, even though we won't
// have a selected priority.
if (drop_config_ != nullptr && drop_config_->drop_all()) {
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE,
absl::make_unique<TransientFailurePicker>(error));
GRPC_CHANNEL_READY,
absl::make_unique<LocalityPicker>(this, LocalityPicker::PickerList{}));
return;
}
// If we don't have a selected priority, report TRANSIENT_FAILURE.
if (current_priority_ == UINT32_MAX) {
if (fallback_policy_ == nullptr) {
grpc_error* error = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("no ready locality map"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE,
absl::make_unique<TransientFailurePicker>(error));
}
return;
}
priorities_[current_priority_]->UpdateXdsPickerLocked();
@ -998,7 +1008,16 @@ OrphanablePtr<XdsLb::LocalityMap::Locality> XdsLb::ExtractLocalityLocked(
if (priority == exclude_priority) continue;
LocalityMap* locality_map = priorities_[priority].get();
auto locality = locality_map->ExtractLocalityLocked(name);
if (locality != nullptr) return locality;
if (locality != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
gpr_log(GPR_INFO,
"[xdslb %p] moving locality %p %s to new priority (%" PRIu32
" -> %" PRIu32 ")",
this, locality.get(), name->AsHumanReadableString(),
exclude_priority, priority);
}
return locality;
}
}
return nullptr;
}
@ -1024,7 +1043,7 @@ XdsLb::LocalityMap::LocalityMap(RefCountedPtr<XdsLb> xds_policy,
&on_failover_timer_);
failover_timer_callback_pending_ = true;
// This is the first locality map ever created, report CONNECTING.
if (priority_ == 0) {
if (priority_ == 0 && xds_policy_->fallback_policy_ == nullptr) {
xds_policy_->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING,
absl::make_unique<QueuePicker>(
@ -1158,6 +1177,10 @@ XdsLb::LocalityMap::ExtractLocalityLocked(
}
void XdsLb::LocalityMap::DeactivateLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
gpr_log(GPR_INFO, "[xdslb %p] deactivating priority %" PRIu32, xds_policy(),
priority_);
}
// If already deactivated, don't do it again.
if (delayed_removal_timer_callback_pending_) return;
MaybeCancelFailoverTimerLocked();
@ -1182,6 +1205,10 @@ bool XdsLb::LocalityMap::MaybeReactivateLocked() {
// Don't reactivate a priority that is not higher than the current one.
if (priority_ >= xds_policy_->current_priority_) return false;
// Reactivate this priority by cancelling deletion timer.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
gpr_log(GPR_INFO, "[xdslb %p] reactivating priority %" PRIu32, xds_policy(),
priority_);
}
if (delayed_removal_timer_callback_pending_) {
grpc_timer_cancel(&delayed_removal_timer_);
}
@ -1438,6 +1465,10 @@ void XdsLb::LocalityMap::Locality::UpdateLocked(uint32_t locality_weight,
// Update locality weight.
weight_ = locality_weight;
if (delayed_removal_timer_callback_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
gpr_log(GPR_INFO, "[xdslb %p] Locality %p %s: reactivating", xds_policy(),
this, name_->AsHumanReadableString());
}
grpc_timer_cancel(&delayed_removal_timer_);
}
// Update locality stats.
@ -1495,6 +1526,10 @@ void XdsLb::LocalityMap::Locality::Orphan() {
void XdsLb::LocalityMap::Locality::DeactivateLocked() {
// If already deactivated, don't do that again.
if (weight_ == 0) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
gpr_log(GPR_INFO, "[xdslb %p] Locality %p %s: deactivating", xds_policy(),
this, name_->AsHumanReadableString());
}
// Set the locality weight to 0 so that future xds picker won't contain this
// locality.
weight_ = 0;
@ -1572,7 +1607,7 @@ class XdsFactory : public LoadBalancingPolicyFactory {
public:
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const override {
return MakeOrphanable<XdsLb>(std::move(args));
return MakeOrphanable<XdsChildHandler>(std::move(args), &grpc_lb_xds_trace);
}
const char* name() const override { return kXds; }
@ -1656,7 +1691,7 @@ class XdsFactory : public LoadBalancingPolicyFactory {
}
}
if (error_list.empty()) {
Optional<std::string> optional_lrs_load_reporting_server_name;
absl::optional<std::string> optional_lrs_load_reporting_server_name;
if (lrs_load_reporting_server_name != nullptr) {
optional_lrs_load_reporting_server_name.emplace(
std::string(lrs_load_reporting_server_name));
@ -1670,6 +1705,36 @@ class XdsFactory : public LoadBalancingPolicyFactory {
return nullptr;
}
}
private:
class XdsChildHandler : public ChildPolicyHandler {
public:
XdsChildHandler(Args args, TraceFlag* tracer)
: ChildPolicyHandler(std::move(args), tracer) {}
bool ConfigChangeRequiresNewPolicyInstance(
LoadBalancingPolicy::Config* old_config,
LoadBalancingPolicy::Config* new_config) const override {
GPR_ASSERT(old_config->name() == kXds);
GPR_ASSERT(new_config->name() == kXds);
XdsConfig* old_xds_config = static_cast<XdsConfig*>(old_config);
XdsConfig* new_xds_config = static_cast<XdsConfig*>(new_config);
const char* old_eds_service_name =
old_xds_config->eds_service_name() == nullptr
? ""
: old_xds_config->eds_service_name();
const char* new_eds_service_name =
new_xds_config->eds_service_name() == nullptr
? ""
: new_xds_config->eds_service_name();
return strcmp(old_eds_service_name, new_eds_service_name) != 0;
}
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
const char* name, LoadBalancingPolicy::Args args) const override {
return MakeOrphanable<XdsLb>(std::move(args));
}
};
};
} // namespace
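The weighted pick in XdsLb::LocalityPicker::Pick above chooses among ready localities by weight: each locality owns a contiguous range of the cumulative weight space, and a random key in [0, total weight) selects the range it falls into. A language-agnostic sketch of that selection, for illustration only (not the gRPC implementation):

```python
# Illustrative sketch of the cumulative-weight pick used by the xds LocalityPicker.
# pickers is a list of (cumulative_weight, picker) pairs; e.g. weights 30/50/20
# become [(30, a), (80, b), (100, c)].
import random

def pick_locality(pickers):
    if not pickers:
        raise RuntimeError("picker not given any localities")
    total_weight = pickers[-1][0]
    key = random.randrange(total_weight)  # random number in [0, total weight)
    for upper_bound, picker in pickers:
        if key < upper_bound:
            return picker
    return pickers[-1][1]  # unreachable if cumulative weights are well formed

print(pick_locality([(30, "locality-a"), (80, "locality-b"), (100, "locality-c")]))
```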

@ -30,6 +30,7 @@
#include <address_sorting/address_sorting.h>
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.h"
@ -107,8 +108,10 @@ class AresDnsResolver : public Resolver {
grpc_millis last_resolution_timestamp_ = -1;
/// retry backoff state
BackOff backoff_;
/// currently resolving addresses
/// currently resolving backend addresses
std::unique_ptr<ServerAddressList> addresses_;
/// currently resolving balancer addresses
std::unique_ptr<ServerAddressList> balancer_addresses_;
/// currently resolving service config
char* service_config_json_ = nullptr;
// has shutdown been initiated
@ -328,9 +331,11 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
r->Unref(DEBUG_LOCATION, "OnResolvedLocked() shutdown");
return;
}
if (r->addresses_ != nullptr) {
if (r->addresses_ != nullptr || r->balancer_addresses_ != nullptr) {
Result result;
result.addresses = std::move(*r->addresses_);
if (r->addresses_ != nullptr) {
result.addresses = std::move(*r->addresses_);
}
if (r->service_config_json_ != nullptr) {
std::string service_config_string = ChooseServiceConfig(
r->service_config_json_, &result.service_config_error);
@ -343,9 +348,16 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
service_config_string, &result.service_config_error);
}
}
result.args = grpc_channel_args_copy(r->channel_args_);
InlinedVector<grpc_arg, 1> new_args;
if (r->balancer_addresses_ != nullptr) {
new_args.push_back(
CreateGrpclbBalancerAddressesArg(r->balancer_addresses_.get()));
}
result.args = grpc_channel_args_copy_and_add(
r->channel_args_, new_args.data(), new_args.size());
r->result_handler()->ReturnResult(std::move(result));
r->addresses_.reset();
r->balancer_addresses_.reset();
// Reset backoff state so that we start from the beginning when the
// next request gets triggered.
r->backoff_.Reset();
@ -424,7 +436,8 @@ void AresDnsResolver::StartResolvingLocked() {
GRPC_CLOSURE_INIT(&on_resolved_, OnResolved, this, grpc_schedule_on_exec_ctx);
pending_request_ = grpc_dns_lookup_ares_locked(
dns_server_, name_to_resolve_, kDefaultPort, interested_parties_,
&on_resolved_, &addresses_, enable_srv_queries_ /* check_grpclb */,
&on_resolved_, &addresses_,
enable_srv_queries_ ? &balancer_addresses_ : nullptr,
request_service_config_ ? &service_config_json_ : nullptr,
query_timeout_ms_, combiner());
last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();

@ -33,6 +33,7 @@
#include <grpc/support/time.h>
#include <address_sorting/address_sorting.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
#include "src/core/lib/gpr/string.h"
@ -60,6 +61,8 @@ struct grpc_ares_request {
grpc_closure* on_done;
/** the pointer to receive the resolved addresses */
std::unique_ptr<grpc_core::ServerAddressList>* addresses_out;
/** the pointer to receive the resolved balancer addresses */
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses_out;
/** the pointer to receive the service config in JSON */
char** service_config_json_out;
/** the event driver used by this request */
@ -184,17 +187,17 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
GRPC_CARES_TRACE_LOG(
"request:%p on_hostbyname_done_locked host=%s ARES_SUCCESS", r,
hr->host);
if (*r->addresses_out == nullptr) {
*r->addresses_out = absl::make_unique<ServerAddressList>();
std::unique_ptr<ServerAddressList>* address_list_ptr =
hr->is_balancer ? r->balancer_addresses_out : r->addresses_out;
if (*address_list_ptr == nullptr) {
*address_list_ptr = absl::make_unique<ServerAddressList>();
}
ServerAddressList& addresses = **r->addresses_out;
ServerAddressList& addresses = **address_list_ptr;
for (size_t i = 0; hostent->h_addr_list[i] != nullptr; ++i) {
grpc_core::InlinedVector<grpc_arg, 2> args_to_add;
grpc_core::InlinedVector<grpc_arg, 1> args_to_add;
if (hr->is_balancer) {
args_to_add.emplace_back(grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_BALANCER), 1));
args_to_add.emplace_back(grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_ADDRESS_BALANCER_NAME), hr->host));
args_to_add.emplace_back(
grpc_core::CreateGrpclbBalancerNameArg(hr->host));
}
grpc_channel_args* args = grpc_channel_args_copy_and_add(
nullptr, args_to_add.data(), args_to_add.size());
@ -350,7 +353,7 @@ done:
void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
grpc_ares_request* r, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
bool check_grpclb, int query_timeout_ms, grpc_core::Combiner* combiner) {
int query_timeout_ms, grpc_core::Combiner* combiner) {
grpc_error* error = GRPC_ERROR_NONE;
grpc_ares_hostbyname_request* hr = nullptr;
ares_channel* channel = nullptr;
@ -425,7 +428,7 @@ void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
/*is_balancer=*/false);
ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_locked,
hr);
if (check_grpclb) {
if (r->balancer_addresses_out != nullptr) {
/* Query the SRV record */
grpc_ares_request_ref_locked(r);
char* service_name;
@ -588,7 +591,8 @@ static bool grpc_ares_maybe_resolve_localhost_manually_locked(
static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addrs, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) {
grpc_ares_request* r =
@ -596,6 +600,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
r->ev_driver = nullptr;
r->on_done = on_done;
r->addresses_out = addrs;
r->balancer_addresses_out = balancer_addrs;
r->service_config_json_out = service_config_json;
r->error = GRPC_ERROR_NONE;
r->pending_queries = 0;
@ -618,20 +623,21 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
// as to cut down on lookups over the network, especially in tests:
// https://github.com/grpc/proposal/pull/79
if (target_matches_localhost(name)) {
check_grpclb = false;
r->balancer_addresses_out = nullptr;
r->service_config_json_out = nullptr;
}
// Look up name using c-ares lib.
grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
r, dns_server, name, default_port, interested_parties, check_grpclb,
query_timeout_ms, combiner);
r, dns_server, name, default_port, interested_parties, query_timeout_ms,
combiner);
return r;
}
grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addrs, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
@ -709,7 +715,6 @@ static void on_dns_lookup_done_locked(void* arg, grpc_error* error) {
static_cast<grpc_resolved_address*>(gpr_zalloc(
sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs));
for (size_t i = 0; i < (*resolved_addresses)->naddrs; ++i) {
GPR_ASSERT(!(*r->addresses)[i].IsBalancer());
memcpy(&(*resolved_addresses)->addrs[i], &(*r->addresses)[i].address(),
sizeof(grpc_resolved_address));
}
@ -736,9 +741,9 @@ static void grpc_resolve_address_invoke_dns_lookup_ares_locked(
grpc_schedule_on_exec_ctx);
r->ares_request = grpc_dns_lookup_ares_locked(
nullptr /* dns_server */, r->name, r->default_port, r->interested_parties,
&r->on_dns_lookup_done_locked, &r->addresses, false /* check_grpclb */,
nullptr /* service_config_json */, GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS,
r->combiner);
&r->on_dns_lookup_done_locked, &r->addresses,
nullptr /* balancer_addresses */, nullptr /* service_config_json */,
GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, r->combiner);
}
static void grpc_resolve_address_ares_impl(const char* name,

@ -63,7 +63,8 @@ extern void (*grpc_resolve_address_ares)(const char* name,
extern grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addresses, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addresses,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addresses,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner);

@ -29,7 +29,8 @@ struct grpc_ares_request {
static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addrs, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) {
return NULL;
@ -38,7 +39,8 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
std::unique_ptr<grpc_core::ServerAddressList>* addrs, bool check_grpclb,
std::unique_ptr<grpc_core::ServerAddressList>* addrs,
std::unique_ptr<grpc_core::ServerAddressList>* balancer_addrs,
char** service_config_json, int query_timeout_ms,
grpc_core::Combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
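Editor's note: callers of the c-ares resolver now pass a separate output list for balancer addresses instead of the old check_grpclb flag. A minimal sketch of the reshaped call, assuming locals named addresses, balancer_addresses, on_resolved, interested_parties, and combiner already exist (all illustrative, not from the tree):
  // Sketch only: mirrors the new grpc_dns_lookup_ares_locked() signature.
  std::unique_ptr<grpc_core::ServerAddressList> addresses;
  std::unique_ptr<grpc_core::ServerAddressList> balancer_addresses;
  grpc_ares_request* req = grpc_dns_lookup_ares_locked(
      /*dns_server=*/nullptr, "server.example.com", "443", interested_parties,
      &on_resolved, &addresses,
      &balancer_addresses,  // replaces the former bool check_grpclb parameter
      /*service_config_json=*/nullptr, GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS,
      combiner);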

@ -317,7 +317,7 @@ ClientChannelServiceConfigParser::ParseGlobalParams(const Json& json,
GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
std::vector<grpc_error*> error_list;
RefCountedPtr<LoadBalancingPolicy::Config> parsed_lb_config;
grpc_core::UniquePtr<char> lb_policy_name;
std::string lb_policy_name;
Optional<ClientChannelGlobalParsedConfig::RetryThrottling> retry_throttling;
const char* health_check_service_name = nullptr;
// Parse LB config.
@ -340,16 +340,13 @@ ClientChannelServiceConfigParser::ParseGlobalParams(const Json& json,
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingPolicy error:type should be string"));
} else {
lb_policy_name.reset(gpr_strdup(it->second.string_value().c_str()));
char* lb_policy = lb_policy_name.get();
if (lb_policy != nullptr) {
for (size_t i = 0; i < strlen(lb_policy); ++i) {
lb_policy[i] = tolower(lb_policy[i]);
}
lb_policy_name = it->second.string_value();
for (size_t i = 0; i < lb_policy_name.size(); ++i) {
lb_policy_name[i] = tolower(lb_policy_name[i]);
}
bool requires_config = false;
if (!LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(
lb_policy, &requires_config)) {
lb_policy_name.c_str(), &requires_config)) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:loadBalancingPolicy error:Unknown lb policy"));
} else if (requires_config) {
@ -357,7 +354,7 @@ ClientChannelServiceConfigParser::ParseGlobalParams(const Json& json,
gpr_asprintf(&error_msg,
"field:loadBalancingPolicy error:%s requires a config. "
"Please use loadBalancingConfig instead.",
lb_policy);
lb_policy_name.c_str());
error_list.push_back(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg));
gpr_free(error_msg);
}

@ -46,7 +46,7 @@ class ClientChannelGlobalParsedConfig : public ServiceConfig::ParsedConfig {
ClientChannelGlobalParsedConfig(
RefCountedPtr<LoadBalancingPolicy::Config> parsed_lb_config,
grpc_core::UniquePtr<char> parsed_deprecated_lb_policy,
std::string parsed_deprecated_lb_policy,
const Optional<RetryThrottling>& retry_throttling,
const char* health_check_service_name)
: parsed_lb_config_(std::move(parsed_lb_config)),
@ -62,8 +62,8 @@ class ClientChannelGlobalParsedConfig : public ServiceConfig::ParsedConfig {
return parsed_lb_config_;
}
const char* parsed_deprecated_lb_policy() const {
return parsed_deprecated_lb_policy_.get();
const std::string& parsed_deprecated_lb_policy() const {
return parsed_deprecated_lb_policy_;
}
const char* health_check_service_name() const {
@ -72,7 +72,7 @@ class ClientChannelGlobalParsedConfig : public ServiceConfig::ParsedConfig {
private:
RefCountedPtr<LoadBalancingPolicy::Config> parsed_lb_config_;
grpc_core::UniquePtr<char> parsed_deprecated_lb_policy_;
std::string parsed_deprecated_lb_policy_;
Optional<RetryThrottling> retry_throttling_;
const char* health_check_service_name_;
};

@ -37,15 +37,12 @@ ServerAddress::ServerAddress(const void* address, size_t address_len,
address_.len = static_cast<socklen_t>(address_len);
}
bool ServerAddress::operator==(const ServerAddress& other) const {
return address_.len == other.address_.len &&
memcmp(address_.addr, other.address_.addr, address_.len) == 0 &&
grpc_channel_args_compare(args_, other.args_) == 0;
}
bool ServerAddress::IsBalancer() const {
return grpc_channel_arg_get_bool(
grpc_channel_args_find(args_, GRPC_ARG_ADDRESS_IS_BALANCER), false);
int ServerAddress::Cmp(const ServerAddress& other) const {
if (address_.len > other.address_.len) return 1;
if (address_.len < other.address_.len) return -1;
int retval = memcmp(address_.addr, other.address_.addr, address_.len);
if (retval != 0) return retval;
return grpc_channel_args_compare(args_, other.args_);
}
} // namespace grpc_core
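Editor's note: a hedged sketch of the new comparison semantics, assuming addr and addr_len hold a resolved sockaddr (both illustrative):
  grpc_core::ServerAddress a(addr, addr_len, /*args=*/nullptr);
  grpc_core::ServerAddress b = a;
  GPR_ASSERT(a.Cmp(b) == 0);  // orders by length, then raw bytes, then args
  GPR_ASSERT(a == b);         // operator== is now defined as Cmp() == 0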

@ -25,13 +25,6 @@
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/iomgr/resolve_address.h"
// Channel arg key for a bool indicating whether an address is a grpclb
// load balancer (as opposed to a backend).
#define GRPC_ARG_ADDRESS_IS_BALANCER "grpc.address_is_balancer"
// Channel arg key for a string indicating an address's balancer name.
#define GRPC_ARG_ADDRESS_BALANCER_NAME "grpc.address_balancer_name"
namespace grpc_core {
//
@ -73,13 +66,13 @@ class ServerAddress {
return *this;
}
bool operator==(const ServerAddress& other) const;
bool operator==(const ServerAddress& other) const { return Cmp(other) == 0; }
int Cmp(const ServerAddress& other) const;
const grpc_resolved_address& address() const { return address_; }
const grpc_channel_args* args() const { return args_; }
bool IsBalancer() const;
private:
grpc_resolved_address address_;
grpc_channel_args* args_;

@ -20,6 +20,8 @@
#include <string.h>
#include "absl/strings/str_cat.h"
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@ -27,9 +29,7 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/json/json.h"
#include "src/core/lib/slice/slice_hash_table.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
namespace grpc_core {
@ -77,6 +77,12 @@ ServiceConfig::ServiceConfig(std::string json_string, Json json,
}
}
ServiceConfig::~ServiceConfig() {
for (auto& p : parsed_method_configs_map_) {
grpc_slice_unref_internal(p.first);
}
}
grpc_error* ServiceConfig::ParseGlobalParams() {
std::vector<grpc_error*> error_list;
for (size_t i = 0; i < g_registered_parsers->size(); i++) {
@ -91,10 +97,8 @@ grpc_error* ServiceConfig::ParseGlobalParams() {
return GRPC_ERROR_CREATE_FROM_VECTOR("Global Params", &error_list);
}
grpc_error* ServiceConfig::ParseJsonMethodConfigToServiceConfigVectorTable(
const Json& json,
InlinedVector<SliceHashTable<const ParsedConfigVector*>::Entry, 10>*
entries) {
grpc_error* ServiceConfig::ParseJsonMethodConfig(const Json& json) {
// Parse method config with each registered parser.
auto objs_vector = absl::make_unique<ParsedConfigVector>();
InlinedVector<grpc_error*, 4> error_list;
for (size_t i = 0; i < g_registered_parsers->size(); i++) {
@ -108,8 +112,8 @@ grpc_error* ServiceConfig::ParseJsonMethodConfigToServiceConfigVectorTable(
}
parsed_method_config_vectors_storage_.push_back(std::move(objs_vector));
const auto* vector_ptr = parsed_method_config_vectors_storage_.back().get();
// Construct list of paths.
InlinedVector<UniquePtr<char>, 10> paths;
// Add an entry for each path.
bool found_name = false;
auto it = json.object_value().find("name");
if (it != json.object_value().end()) {
if (it->second.type() != Json::Type::ARRAY) {
@ -120,29 +124,42 @@ grpc_error* ServiceConfig::ParseJsonMethodConfigToServiceConfigVectorTable(
const Json::Array& name_array = it->second.array_value();
for (const Json& name : name_array) {
grpc_error* parse_error = GRPC_ERROR_NONE;
UniquePtr<char> path = ParseJsonMethodName(name, &parse_error);
if (path == nullptr) {
std::string path = ParseJsonMethodName(name, &parse_error);
if (parse_error != GRPC_ERROR_NONE) {
error_list.push_back(parse_error);
} else {
GPR_DEBUG_ASSERT(parse_error == GRPC_ERROR_NONE);
paths.push_back(std::move(path));
found_name = true;
if (path.empty()) {
if (default_method_config_vector_ != nullptr) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error:multiple default method configs"));
}
default_method_config_vector_ = vector_ptr;
} else {
grpc_slice key = grpc_slice_from_copied_string(path.c_str());
// If the key is not already present in the map, this will
// store a ref to the key in the map.
auto& value = parsed_method_configs_map_[key];
if (value != nullptr) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error:multiple method configs with same name"));
// The map entry already existed, so we need to unref the
// key we just created.
grpc_slice_unref_internal(key);
} else {
value = vector_ptr;
}
}
}
}
}
if (paths.size() == 0) {
error_list.push_back(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No names specified"));
}
// Add entry for each path.
for (size_t i = 0; i < paths.size(); ++i) {
entries->push_back(
{grpc_slice_from_copied_string(paths[i].get()), vector_ptr});
if (!found_name) {
parsed_method_config_vectors_storage_.pop_back();
}
return GRPC_ERROR_CREATE_FROM_VECTOR("methodConfig", &error_list);
}
grpc_error* ServiceConfig::ParsePerMethodParams() {
InlinedVector<SliceHashTable<const ParsedConfigVector*>::Entry, 10> entries;
std::vector<grpc_error*> error_list;
auto it = json_.object_value().find("methodConfig");
if (it != json_.object_value().end()) {
@ -156,91 +173,80 @@ grpc_error* ServiceConfig::ParsePerMethodParams() {
"field:methodConfig error:not of type Object"));
continue;
}
grpc_error* error = ParseJsonMethodConfigToServiceConfigVectorTable(
method_config, &entries);
grpc_error* error = ParseJsonMethodConfig(method_config);
if (error != GRPC_ERROR_NONE) {
error_list.push_back(error);
}
}
}
if (!entries.empty()) {
parsed_method_configs_table_ =
SliceHashTable<const ParsedConfigVector*>::Create(
entries.size(), entries.data(), nullptr);
}
return GRPC_ERROR_CREATE_FROM_VECTOR("Method Params", &error_list);
}
UniquePtr<char> ServiceConfig::ParseJsonMethodName(const Json& json,
grpc_error** error) {
std::string ServiceConfig::ParseJsonMethodName(const Json& json,
grpc_error** error) {
if (json.type() != Json::Type::OBJECT) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error:type is not object");
return nullptr;
return "";
}
// Find service name.
const std::string* service_name = nullptr;
auto it = json.object_value().find("service");
if (it == json.object_value().end()) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:service error:not found");
return nullptr; // Required field.
}
if (it->second.type() != Json::Type::STRING) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:service error:not of type string");
return nullptr;
}
if (it->second.string_value().empty()) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:service error:empty value");
return nullptr;
if (it != json.object_value().end() &&
it->second.type() != Json::Type::JSON_NULL) {
if (it->second.type() != Json::Type::STRING) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:service error:not of type string");
return "";
}
if (!it->second.string_value().empty()) {
service_name = &it->second.string_value();
}
}
const char* service_name = it->second.string_value().c_str();
const char* method_name = nullptr;
const std::string* method_name = nullptr;
// Find method name.
it = json.object_value().find("method");
if (it != json.object_value().end()) {
if (it != json.object_value().end() &&
it->second.type() != Json::Type::JSON_NULL) {
if (it->second.type() != Json::Type::STRING) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:method error:not of type string");
return nullptr;
return "";
}
if (!it->second.string_value().empty()) {
method_name = &it->second.string_value();
}
if (it->second.string_value().empty()) {
}
// If neither service nor method are specified, it's the default.
// Method name may not be specified without service name.
if (service_name == nullptr) {
if (method_name != nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:method error:empty value");
return nullptr;
"field:name error:method name populated without service name");
}
method_name = it->second.string_value().c_str();
return "";
}
char* path;
gpr_asprintf(&path, "/%s/%s", service_name,
method_name == nullptr ? "" : method_name);
return grpc_core::UniquePtr<char>(path);
// Construct path.
return absl::StrCat("/", *service_name, "/",
method_name == nullptr ? "" : *method_name);
}
const ServiceConfig::ParsedConfigVector*
ServiceConfig::GetMethodParsedConfigVector(const grpc_slice& path) {
if (parsed_method_configs_table_.get() == nullptr) {
return nullptr;
}
const auto* value = parsed_method_configs_table_->Get(path);
ServiceConfig::GetMethodParsedConfigVector(const grpc_slice& path) const {
// Try looking up the full path in the map.
auto it = parsed_method_configs_map_.find(path);
if (it != parsed_method_configs_map_.end()) return it->second;
// If we didn't find a match for the path, try looking for a wildcard
// entry (i.e., change "/service/method" to "/service/").
if (value == nullptr) {
char* path_str = grpc_slice_to_c_string(path);
const char* sep = strrchr(path_str, '/') + 1;
const size_t len = (size_t)(sep - path_str);
char* buf = (char*)gpr_malloc(len + 1); // trailing NUL
memcpy(buf, path_str, len);
buf[len] = '\0';
grpc_slice wildcard_path = grpc_slice_from_copied_string(buf);
gpr_free(buf);
value = parsed_method_configs_table_->Get(wildcard_path);
grpc_slice_unref_internal(wildcard_path);
gpr_free(path_str);
if (value == nullptr) return nullptr;
}
return *value;
UniquePtr<char> path_str(grpc_slice_to_c_string(path));
  char* sep = strrchr(path_str.get(), '/');
  if (sep == nullptr) return nullptr;  // Shouldn't ever happen.
  ++sep;  // Keep the trailing '/' in the wildcard key.
*sep = '\0';
grpc_slice wildcard_path = grpc_slice_from_static_string(path_str.get());
it = parsed_method_configs_map_.find(wildcard_path);
if (it != parsed_method_configs_map_.end()) return it->second;
// Try default method config, if set.
return default_method_config_vector_;
}
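Editor's note: a hedged sketch of the lookup order this implements; the path and the service_config pointer are illustrative:
  // 1. exact match for "/MyService/Foo"
  // 2. wildcard match for "/MyService/" (method name stripped)
  // 3. the default config, if one was parsed from a name with no service
  grpc_slice path = grpc_slice_from_static_string("/MyService/Foo");
  const ServiceConfig::ParsedConfigVector* configs =
      service_config->GetMethodParsedConfigVector(path);
  if (configs == nullptr) {
    // No exact, wildcard, or default entry matched.
  }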
size_t ServiceConfig::RegisterParser(std::unique_ptr<Parser> parser) {

@ -19,6 +19,8 @@
#include <grpc/support/port_platform.h>
#include <unordered_map>
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/string_util.h>
@ -27,7 +29,7 @@
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/json/json.h"
#include "src/core/lib/slice/slice_hash_table.h"
#include "src/core/lib/slice/slice_internal.h"
// The main purpose of the code here is to parse the service config in
// JSON form, which will look like this:
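Editor's note: the header's own JSON example falls outside this hunk; purely for orientation, a representative service config (field values illustrative, shown here as a C++ string literal) looks roughly like:
  constexpr char kExampleServiceConfig[] = R"json({
    "loadBalancingConfig": [ { "round_robin": {} } ],
    "methodConfig": [ {
      "name": [ { "service": "MyService", "method": "Foo" } ],
      "waitForReady": true
    } ]
  })json";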
@ -129,6 +131,7 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
grpc_error** error);
ServiceConfig(std::string json_string, Json json, grpc_error** error);
~ServiceConfig();
const std::string& json_string() const { return json_string_; }
@ -143,7 +146,8 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
/// Retrieves the vector of parsed configs for the method identified
/// by \a path. The lifetime of the returned vector and contained objects
/// is tied to the lifetime of the ServiceConfig object.
const ParsedConfigVector* GetMethodParsedConfigVector(const grpc_slice& path);
const ParsedConfigVector* GetMethodParsedConfigVector(
const grpc_slice& path) const;
/// Globally register a service config parser. On successful registration, it
/// returns the index at which the parser was registered. On failure, -1 is
@ -162,15 +166,11 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
grpc_error* ParseGlobalParams();
grpc_error* ParsePerMethodParams();
// Returns a path string for the JSON name object specified by \a json.
// Returns null on error, and stores error in \a error.
static UniquePtr<char> ParseJsonMethodName(const Json& json,
grpc_error** error);
// Returns a path string for the JSON name object specified by json.
// Sets *error on error.
static std::string ParseJsonMethodName(const Json& json, grpc_error** error);
grpc_error* ParseJsonMethodConfigToServiceConfigVectorTable(
const Json& json,
InlinedVector<SliceHashTable<const ParsedConfigVector*>::Entry, 10>*
entries);
grpc_error* ParseJsonMethodConfig(const Json& json);
std::string json_string_;
Json json_;
@ -180,8 +180,10 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
// A map from the method name to the parsed config vector. Note that we are
// using a raw pointer and not a unique pointer so that we can use the same
// vector for multiple names.
RefCountedPtr<SliceHashTable<const ParsedConfigVector*>>
parsed_method_configs_table_;
std::unordered_map<grpc_slice, const ParsedConfigVector*, SliceHash>
parsed_method_configs_map_;
// Default method config.
const ParsedConfigVector* default_method_config_vector_ = nullptr;
  // Storage for all the vectors that are being used in
  // parsed_method_configs_map_ and default_method_config_vector_.
InlinedVector<std::unique_ptr<ParsedConfigVector>, 32>

@ -1336,7 +1336,7 @@ grpc_error* LocalityParse(
grpc_error* DropParseAndAppend(
const envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload* drop_overload,
XdsApi::DropConfig* drop_config, bool* drop_all) {
XdsApi::DropConfig* drop_config) {
// Get the category.
upb_strview category =
envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload_category(
@ -1367,13 +1367,12 @@ grpc_error* DropParseAndAppend(
}
// Cap numerator to 1000000.
numerator = GPR_MIN(numerator, 1000000);
if (numerator == 1000000) *drop_all = true;
drop_config->AddCategory(std::string(category.data, category.size),
numerator);
return GRPC_ERROR_NONE;
}
grpc_error* EdsResponsedParse(
grpc_error* EdsResponseParse(
XdsClient* client, TraceFlag* tracer,
const envoy_api_v2_DiscoveryResponse* response,
const std::set<StringView>& expected_eds_service_names,
@ -1423,6 +1422,14 @@ grpc_error* EdsResponsedParse(
if (locality.lb_weight == 0) continue;
eds_update.priority_list_update.Add(locality);
}
for (uint32_t priority = 0;
priority < eds_update.priority_list_update.size(); ++priority) {
auto* locality_map = eds_update.priority_list_update.Find(priority);
if (locality_map == nullptr || locality_map->size() == 0) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"EDS update includes sparse priority list");
}
}
// Get the drop config.
eds_update.drop_config = MakeRefCounted<XdsApi::DropConfig>();
const envoy_api_v2_ClusterLoadAssignment_Policy* policy =
@ -1435,13 +1442,13 @@ grpc_error* EdsResponsedParse(
policy, &drop_size);
for (size_t j = 0; j < drop_size; ++j) {
grpc_error* error =
DropParseAndAppend(drop_overload[j], eds_update.drop_config.get(),
&eds_update.drop_all);
DropParseAndAppend(drop_overload[j], eds_update.drop_config.get());
if (error != GRPC_ERROR_NONE) return error;
}
}
// Validate the update content.
if (eds_update.priority_list_update.empty() && !eds_update.drop_all) {
if (eds_update.priority_list_update.empty() &&
!eds_update.drop_config->drop_all()) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"EDS response doesn't contain any valid "
"locality but doesn't require to drop all calls.");
@ -1497,9 +1504,9 @@ grpc_error* XdsApi::ParseAdsResponse(
return CdsResponseParse(client_, tracer_, response, expected_cluster_names,
cds_update_map, arena.ptr());
} else if (*type_url == kEdsTypeUrl) {
return EdsResponsedParse(client_, tracer_, response,
expected_eds_service_names, eds_update_map,
arena.ptr());
return EdsResponseParse(client_, tracer_, response,
expected_eds_service_names, eds_update_map,
arena.ptr());
} else {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Unsupported ADS resource type.");

@ -25,12 +25,13 @@
#include <set>
#include "absl/types/optional.h"
#include <grpc/slice_buffer.h>
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/xds/xds_bootstrap.h"
#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
#include "src/core/lib/gprpp/optional.h"
namespace grpc_core {
@ -47,18 +48,35 @@ class XdsApi {
std::string service;
std::string method;
std::string cluster_name;
bool operator==(const RdsRoute& other) const {
return (service == other.service &&
method == other.method &&
cluster_name == other.cluster_name);
}
};
struct RdsUpdate {
std::vector<RdsRoute> routes;
bool operator==(const RdsUpdate& other) const {
return routes == other.routes;
}
};
// TODO(roth): When we can use absl::variant<>, consider using that
// here, to enforce the fact that only one of the two fields can be set.
struct LdsUpdate {
// The name to use in the RDS request.
std::string route_config_name;
    // The RouteConfiguration, present only if it was inlined in the LDS
    // response.
Optional<RdsUpdate> rds_update;
absl::optional<RdsUpdate> rds_update;
bool operator==(const LdsUpdate& other) const {
return route_config_name == other.route_config_name &&
rds_update == other.rds_update;
}
};
using LdsUpdateMap = std::map<std::string /*server_name*/, LdsUpdate>;
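Editor's note: a hedged sketch of the new equality operators (route values are illustrative):
  XdsApi::RdsUpdate update;
  update.routes.push_back({"MyService", "Foo", "cluster_0"});
  XdsApi::RdsUpdate copy = update;
  GPR_ASSERT(update == copy);  // compares the routes vectors element-wise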
@ -73,7 +91,7 @@ class XdsApi {
// If not set, load reporting will be disabled.
    // If set to the empty string, the client will use the same server from
    // which it obtained the CDS data.
Optional<std::string> lrs_load_reporting_server_name;
absl::optional<std::string> lrs_load_reporting_server_name;
};
using CdsUpdateMap = std::map<std::string /*cluster_name*/, CdsUpdate>;
@ -157,6 +175,7 @@ class XdsApi {
void AddCategory(std::string name, uint32_t parts_per_million) {
drop_category_list_.emplace_back(
DropCategory{std::move(name), parts_per_million});
if (parts_per_million == 1000000) drop_all_ = true;
}
// The only method invoked from the data plane combiner.
@ -166,6 +185,8 @@ class XdsApi {
return drop_category_list_;
}
bool drop_all() const { return drop_all_; }
bool operator==(const DropConfig& other) const {
return drop_category_list_ == other.drop_category_list_;
}
@ -173,19 +194,19 @@ class XdsApi {
private:
DropCategoryList drop_category_list_;
bool drop_all_ = false;
};
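Editor's note: a hedged sketch of how drop_all_ is now derived from the categories themselves (category names and percentages are illustrative):
  auto drop_config = grpc_core::MakeRefCounted<XdsApi::DropConfig>();
  drop_config->AddCategory("throttle", 250000);   // drop 25% of calls
  GPR_ASSERT(!drop_config->drop_all());
  drop_config->AddCategory("lb_drop", 1000000);   // 100%: marks drop-all
  GPR_ASSERT(drop_config->drop_all());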
struct EdsUpdate {
PriorityListUpdate priority_list_update;
RefCountedPtr<DropConfig> drop_config;
bool drop_all = false;
};
using EdsUpdateMap = std::map<std::string /*eds_service_name*/, EdsUpdate>;
struct ClusterLoadReport {
XdsClusterDropStats::DroppedRequestsMap dropped_requests;
std::map<XdsLocalityName*, XdsClusterLocalityStats::Snapshot,
std::map<RefCountedPtr<XdsLocalityName>, XdsClusterLocalityStats::Snapshot,
XdsLocalityName::Less>
locality_stats;
grpc_millis load_report_interval;

@ -30,8 +30,12 @@ grpc_channel_args* ModifyXdsChannelArgs(grpc_channel_args* args) {
grpc_channel* CreateXdsChannel(const XdsBootstrap& bootstrap,
const grpc_channel_args& args,
grpc_error** /*error*/) {
if (!bootstrap.server().channel_creds.empty()) return nullptr;
grpc_error** error) {
if (!bootstrap.server().channel_creds.empty()) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"credential specified but gRPC not built with security");
return nullptr;
}
return grpc_insecure_channel_create(bootstrap.server().server_uri.c_str(),
&args, nullptr);
}

@ -332,7 +332,6 @@ class XdsClient::ChannelState::LrsCallState
void Orphan() override;
void MaybeStartReportingLocked();
bool ShouldSendLoadReports(const StringView& cluster_name) const;
RetryableCall<LrsCallState>* parent() { return parent_.get(); }
ChannelState* chand() const { return parent_->chand(); }
@ -735,7 +734,8 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
grpc_op* op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY |
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
op->reserved = nullptr;
op++;
call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
@ -746,6 +746,11 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
grpc_schedule_on_exec_ctx);
if (xds_client()->service_config_watcher_ != nullptr) {
Subscribe(XdsApi::kLdsTypeUrl, xds_client()->server_name_);
if (xds_client()->lds_result_.has_value() &&
!xds_client()->lds_result_->route_config_name.empty()) {
Subscribe(XdsApi::kRdsTypeUrl,
xds_client()->lds_result_->route_config_name);
}
}
for (const auto& p : xds_client()->cluster_map_) {
Subscribe(XdsApi::kCdsTypeUrl, std::string(p.first));
@ -831,11 +836,12 @@ void XdsClient::ChannelState::AdsCallState::SendMessageLocked(
GRPC_ERROR_REF(state.error), !sent_initial_message_);
state.subscribed_resources[xds_client()->server_name_]->Start(Ref());
} else if (type_url == XdsApi::kRdsTypeUrl) {
resource_names.insert(xds_client()->route_config_name_);
resource_names.insert(xds_client()->lds_result_->route_config_name);
request_payload_slice = xds_client()->api_.CreateRdsRequest(
xds_client()->route_config_name_, state.version, state.nonce,
GRPC_ERROR_REF(state.error), !sent_initial_message_);
state.subscribed_resources[xds_client()->route_config_name_]->Start(Ref());
xds_client()->lds_result_->route_config_name, state.version,
state.nonce, GRPC_ERROR_REF(state.error), !sent_initial_message_);
state.subscribed_resources[xds_client()->lds_result_->route_config_name]
->Start(Ref());
} else if (type_url == XdsApi::kCdsTypeUrl) {
resource_names = ClusterNamesForRequest();
request_payload_slice = xds_client()->api_.CreateCdsRequest(
@ -921,7 +927,10 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate(
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] LDS update received: route_config_name=%s",
xds_client(), lds_update->route_config_name.c_str());
xds_client(),
(!lds_update->route_config_name.empty()
? lds_update->route_config_name.c_str()
: "<inlined>"));
if (lds_update->rds_update.has_value()) {
for (const auto& route : lds_update->rds_update.value().routes) {
gpr_log(GPR_INFO,
@ -935,18 +944,28 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate(
auto& lds_state = state_map_[XdsApi::kLdsTypeUrl];
auto& state = lds_state.subscribed_resources[xds_client()->server_name_];
if (state != nullptr) state->Finish();
if (!xds_client()->route_config_name_.empty()) {
// Ignore identical update.
if (xds_client()->lds_result_ == lds_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] LDS update identical to current, ignoring.",
xds_client());
}
return;
}
if (xds_client()->lds_result_.has_value() &&
!xds_client()->lds_result_->route_config_name.empty()) {
Unsubscribe(
XdsApi::kRdsTypeUrl, xds_client()->route_config_name_,
XdsApi::kRdsTypeUrl, xds_client()->lds_result_->route_config_name,
/*delay_unsubscription=*/!lds_update->route_config_name.empty());
}
xds_client()->route_config_name_ = lds_update->route_config_name;
if (lds_update->rds_update.has_value()) {
// If the RouteConfiguration was found inlined in LDS response, notify the
// watcher immediately.
xds_client()->lds_result_ = std::move(lds_update);
if (xds_client()->lds_result_->rds_update.has_value()) {
// If the RouteConfiguration was found inlined in LDS response, notify
// the watcher immediately.
RefCountedPtr<ServiceConfig> service_config;
grpc_error* error = xds_client()->CreateServiceConfig(
lds_update->rds_update.value(), &service_config);
xds_client()->lds_result_->rds_update.value(), &service_config);
if (error == GRPC_ERROR_NONE) {
xds_client()->service_config_watcher_->OnServiceConfigChanged(
std::move(service_config));
@ -955,7 +974,8 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate(
}
} else {
// Send RDS request for dynamic resolution.
Subscribe(XdsApi::kRdsTypeUrl, xds_client()->route_config_name_);
Subscribe(XdsApi::kRdsTypeUrl,
xds_client()->lds_result_->route_config_name);
}
}
@ -972,12 +992,23 @@ void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdate(
}
auto& rds_state = state_map_[XdsApi::kRdsTypeUrl];
auto& state =
rds_state.subscribed_resources[xds_client()->route_config_name_];
rds_state
.subscribed_resources[xds_client()->lds_result_->route_config_name];
if (state != nullptr) state->Finish();
// Ignore identical update.
if (xds_client()->rds_result_ == rds_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] RDS update identical to current, ignoring.",
xds_client());
}
return;
}
xds_client()->rds_result_ = std::move(rds_update);
// Notify the watcher.
RefCountedPtr<ServiceConfig> service_config;
grpc_error* error =
xds_client()->CreateServiceConfig(rds_update.value(), &service_config);
grpc_error* error = xds_client()->CreateServiceConfig(
xds_client()->rds_result_.value(), &service_config);
if (error == GRPC_ERROR_NONE) {
xds_client()->service_config_watcher_->OnServiceConfigChanged(
std::move(service_config));
@ -1074,7 +1105,7 @@ void XdsClient::ChannelState::AdsCallState::AcceptEdsUpdate(
" drop categories received (drop_all=%d)",
xds_client(), eds_update.priority_list_update.size(),
eds_update.drop_config->drop_category_list().size(),
eds_update.drop_all);
eds_update.drop_config->drop_all());
for (size_t priority = 0;
priority < eds_update.priority_list_update.size(); ++priority) {
const auto* locality_map_update = eds_update.priority_list_update.Find(
@ -1222,7 +1253,10 @@ void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked(
std::string type_url;
// Note that ParseAdsResponse() also validates the response.
grpc_error* parse_error = xds_client->api_.ParseAdsResponse(
response_slice, xds_client->server_name_, xds_client->route_config_name_,
response_slice, xds_client->server_name_,
(xds_client->lds_result_.has_value()
? xds_client->lds_result_->route_config_name
: ""),
ads_calld->ClusterNamesForRequest(),
ads_calld->EdsServiceNamesForRequest(), &lds_update, &rds_update,
&cds_update_map, &eds_update_map, &version, &nonce, &type_url);
@ -1416,7 +1450,7 @@ bool LoadReportCountersAreZero(const XdsApi::ClusterLoadReportMap& snapshot) {
void XdsClient::ChannelState::LrsCallState::Reporter::SendReportLocked() {
// Construct snapshot from all reported stats.
XdsApi::ClusterLoadReportMap snapshot =
xds_client()->BuildLoadReportSnapshot();
xds_client()->BuildLoadReportSnapshot(parent_->cluster_names_);
// Skip client load report if the counters were all zero in the last
// report and they are still zero in this one.
const bool old_val = last_report_counters_were_zero_;
@ -1462,6 +1496,12 @@ void XdsClient::ChannelState::LrsCallState::Reporter::OnReportDoneLocked(
Reporter* self = static_cast<Reporter*>(arg);
grpc_byte_buffer_destroy(self->parent_->send_message_payload_);
self->parent_->send_message_payload_ = nullptr;
// If there are no more registered stats to report, cancel the call.
if (self->xds_client()->load_report_map_.empty()) {
self->parent_->chand()->StopLrsCall();
self->Unref(DEBUG_LOCATION, "Reporter+report_done+no_more_reporters");
return;
}
if (error != GRPC_ERROR_NONE || !self->IsCurrentReporterOnCall()) {
// If this reporter is no longer the current one on the call, the reason
// might be that it was orphaned for a new one due to config update.
@ -1517,7 +1557,8 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
grpc_op* op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY |
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
op->reserved = nullptr;
op++;
// Op: send request message.
@ -1615,13 +1656,6 @@ void XdsClient::ChannelState::LrsCallState::MaybeStartReportingLocked() {
Ref(DEBUG_LOCATION, "LRS+load_report+start"), load_reporting_interval_);
}
bool XdsClient::ChannelState::LrsCallState::ShouldSendLoadReports(
const StringView& cluster_name) const {
// Only send load reports for the clusters that are asked for by the LRS
// server.
return cluster_names_.find(std::string(cluster_name)) != cluster_names_.end();
}
void XdsClient::ChannelState::LrsCallState::OnInitialRequestSent(
void* arg, grpc_error* error) {
LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
@ -1968,19 +2002,14 @@ void XdsClient::RemoveClusterDropStats(
LoadReportState& load_report_state = load_report_it->second;
// TODO(roth): When we add support for direct federation, use the
// server name specified in lrs_server.
// TODO(roth): In principle, we should try to send a final load report
// containing whatever final stats have been accumulated since the
// last load report.
auto it = load_report_state.drop_stats.find(cluster_drop_stats);
if (it != load_report_state.drop_stats.end()) {
load_report_state.drop_stats.erase(it);
if (load_report_state.drop_stats.empty() &&
load_report_state.locality_stats.empty()) {
load_report_map_.erase(load_report_it);
if (chand_ != nullptr && load_report_map_.empty()) {
chand_->StopLrsCall();
}
// Record final drop stats in deleted_drop_stats, which will be
// added to the next load report.
for (const auto& p : cluster_drop_stats->GetSnapshotAndReset()) {
load_report_state.deleted_drop_stats[p.first] += p.second;
}
load_report_state.drop_stats.erase(it);
}
}
@ -2001,7 +2030,7 @@ RefCountedPtr<XdsClusterLocalityStats> XdsClient::AddClusterLocalityStats(
Ref(DEBUG_LOCATION, "LocalityStats"), lrs_server,
it->first.first /*cluster_name*/, it->first.second /*eds_service_name*/,
locality);
it->second.locality_stats[std::move(locality)].insert(
it->second.locality_stats[std::move(locality)].locality_stats.insert(
cluster_locality_stats.get());
chand_->MaybeStartLrsCall();
return cluster_locality_stats;
@ -2017,25 +2046,16 @@ void XdsClient::RemoveClusterLocalityStats(
LoadReportState& load_report_state = load_report_it->second;
// TODO(roth): When we add support for direct federation, use the
// server name specified in lrs_server.
// TODO(roth): In principle, we should try to send a final load report
// containing whatever final stats have been accumulated since the
// last load report.
auto locality_it = load_report_state.locality_stats.find(locality);
if (locality_it == load_report_state.locality_stats.end()) return;
auto& locality_set = locality_it->second;
auto& locality_set = locality_it->second.locality_stats;
auto it = locality_set.find(cluster_locality_stats);
if (it != locality_set.end()) {
// Record final snapshot in deleted_locality_stats, which will be
// added to the next load report.
locality_it->second.deleted_locality_stats.emplace_back(
cluster_locality_stats->GetSnapshotAndReset());
locality_set.erase(it);
if (locality_set.empty()) {
load_report_state.locality_stats.erase(locality_it);
if (load_report_state.locality_stats.empty() &&
load_report_state.drop_stats.empty()) {
load_report_map_.erase(load_report_it);
if (chand_ != nullptr && load_report_map_.empty()) {
chand_->StopLrsCall();
}
}
}
}
}
@ -2088,32 +2108,70 @@ grpc_error* XdsClient::CreateServiceConfig(
return error;
}
XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshot() {
XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshot(
const std::set<std::string>& clusters) {
XdsApi::ClusterLoadReportMap snapshot_map;
for (auto& p : load_report_map_) {
const auto& cluster_key = p.first; // cluster and EDS service name
LoadReportState& load_report = p.second;
XdsApi::ClusterLoadReport& snapshot = snapshot_map[cluster_key];
for (auto load_report_it = load_report_map_.begin();
load_report_it != load_report_map_.end();) {
// Cluster key is cluster and EDS service name.
const auto& cluster_key = load_report_it->first;
LoadReportState& load_report = load_report_it->second;
// If the CDS response for a cluster indicates to use LRS but the
// LRS server does not say that it wants reports for this cluster,
// then we'll have stats objects here whose data we're not going to
// include in the load report. However, we still need to clear out
// the data from the stats objects, so that if the LRS server starts
// asking for the data in the future, we don't incorrectly include
// data from previous reporting intervals in that future report.
const bool record_stats =
clusters.find(cluster_key.first) != clusters.end();
XdsApi::ClusterLoadReport snapshot;
// Aggregate drop stats.
snapshot.dropped_requests = std::move(load_report.deleted_drop_stats);
for (auto& drop_stats : load_report.drop_stats) {
for (const auto& p : drop_stats->GetSnapshotAndReset()) {
snapshot.dropped_requests[p.first] += p.second;
}
}
// Aggregate locality stats.
for (auto& p : load_report.locality_stats) {
XdsLocalityName* locality_name = p.first.get();
auto& locality_stats_set = p.second;
for (auto it = load_report.locality_stats.begin();
it != load_report.locality_stats.end();) {
const RefCountedPtr<XdsLocalityName>& locality_name = it->first;
auto& locality_state = it->second;
XdsClusterLocalityStats::Snapshot& locality_snapshot =
snapshot.locality_stats[locality_name];
for (auto& locality_stats : locality_stats_set) {
for (auto& locality_stats : locality_state.locality_stats) {
locality_snapshot += locality_stats->GetSnapshotAndReset();
}
// Add final snapshots from recently deleted locality stats objects.
for (auto& deleted_locality_stats :
locality_state.deleted_locality_stats) {
locality_snapshot += deleted_locality_stats;
}
locality_state.deleted_locality_stats.clear();
// If the only thing left in this entry was final snapshots from
// deleted locality stats objects, remove the entry.
if (locality_state.locality_stats.empty()) {
it = load_report.locality_stats.erase(it);
} else {
++it;
}
}
if (record_stats) {
// Compute load report interval.
const grpc_millis now = ExecCtx::Get()->Now();
snapshot.load_report_interval = now - load_report.last_report_time;
load_report.last_report_time = now;
// Record snapshot.
snapshot_map[cluster_key] = std::move(snapshot);
}
// If the only thing left in this entry was final snapshots from
// deleted stats objects, remove the entry.
if (load_report.locality_stats.empty() && load_report.drop_stats.empty()) {
load_report_it = load_report_map_.erase(load_report_it);
} else {
++load_report_it;
}
// Compute load report interval.
const grpc_millis now = ExecCtx::Get()->Now();
snapshot.load_report_interval = now - load_report.last_report_time;
load_report.last_report_time = now;
}
return snapshot_map;
}
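Editor's note: a hedged sketch of the accumulate-and-reset pattern the snapshot relies on (the stats object and counters are illustrative):
  // Each call drains the counters, so data is never double-counted across
  // reporting intervals, even when a cluster is not currently being reported.
  XdsClusterLocalityStats::Snapshot total;
  total += locality_stats->GetSnapshotAndReset();  // first interval
  total += locality_stats->GetSnapshotAndReset();  // second interval (now zero)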

@ -21,13 +21,14 @@
#include <set>
#include "absl/types/optional.h"
#include "src/core/ext/filters/client_channel/service_config.h"
#include "src/core/ext/filters/client_channel/xds/xds_api.h"
#include "src/core/ext/filters/client_channel/xds/xds_bootstrap.h"
#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
#include "src/core/lib/gprpp/map.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/optional.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
@ -208,8 +209,14 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
};
struct LoadReportState {
struct LocalityState {
std::set<XdsClusterLocalityStats*> locality_stats;
std::vector<XdsClusterLocalityStats::Snapshot> deleted_locality_stats;
};
std::set<XdsClusterDropStats*> drop_stats;
std::map<RefCountedPtr<XdsLocalityName>, std::set<XdsClusterLocalityStats*>,
XdsClusterDropStats::DroppedRequestsMap deleted_drop_stats;
std::map<RefCountedPtr<XdsLocalityName>, LocalityState,
XdsLocalityName::Less>
locality_stats;
grpc_millis last_report_time = ExecCtx::Get()->Now();
@ -222,7 +229,8 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
const XdsApi::RdsUpdate& rds_update,
RefCountedPtr<ServiceConfig>* service_config) const;
XdsApi::ClusterLoadReportMap BuildLoadReportSnapshot();
XdsApi::ClusterLoadReportMap BuildLoadReportSnapshot(
const std::set<std::string>& clusters);
// Channel arg vtable functions.
static void* ChannelArgCopy(void* p);
@ -246,7 +254,9 @@ class XdsClient : public InternallyRefCounted<XdsClient> {
// The channel for communicating with the xds server.
OrphanablePtr<ChannelState> chand_;
std::string route_config_name_;
absl::optional<XdsApi::LdsUpdate> lds_result_;
absl::optional<XdsApi::RdsUpdate> rds_result_;
// One entry for each watched CDS resource.
std::map<std::string /*cluster_name*/, ClusterState> cluster_map_;
// One entry for each watched EDS resource.

@ -27,6 +27,19 @@
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/security/security_connector/ssl_utils.h"
struct grpc_tls_error_details
: public grpc_core::RefCounted<grpc_tls_error_details> {
public:
grpc_tls_error_details() : error_details_("") {}
void set_error_details(const char* err_details) {
error_details_ = err_details;
}
const std::string& error_details() { return error_details_; }
private:
std::string error_details_;
};
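Editor's note: a hedged sketch of the new ownership model for error details; the arg variable and message are illustrative. Callers allocate the object, set a message through set_error_details() instead of assigning a gpr_strdup()'d char*, and delete it when the arg is destroyed:
  auto* arg = new grpc_tls_credential_reload_arg();
  arg->error_details = new grpc_tls_error_details();
  arg->error_details->set_error_details("credential reload failed");
  gpr_log(GPR_ERROR, "%s", arg->error_details->error_details().c_str());
  delete arg->error_details;
  delete arg;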
/** TLS key materials config. **/
struct grpc_tls_key_materials_config
: public grpc_core::RefCounted<grpc_tls_key_materials_config> {
@ -93,8 +106,8 @@ struct grpc_tls_credential_reload_config
gpr_log(GPR_ERROR, "schedule API is nullptr");
if (arg != nullptr) {
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL;
arg->error_details =
gpr_strdup("schedule API in credential reload config is nullptr");
arg->error_details->set_error_details(
"schedule API in credential reload config is nullptr");
}
return 1;
}
@ -108,8 +121,8 @@ struct grpc_tls_credential_reload_config
gpr_log(GPR_ERROR, "cancel API is nullptr.");
if (arg != nullptr) {
arg->status = GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL;
arg->error_details =
gpr_strdup("cancel API in credential reload config is nullptr");
arg->error_details->set_error_details(
"cancel API in credential reload config is nullptr");
}
return;
}
@ -169,7 +182,7 @@ struct grpc_tls_server_authorization_check_config
gpr_log(GPR_ERROR, "schedule API is nullptr");
if (arg != nullptr) {
arg->status = GRPC_STATUS_NOT_FOUND;
arg->error_details = gpr_strdup(
arg->error_details->set_error_details(
"schedule API in server authorization check config is nullptr");
}
return 1;
@ -185,7 +198,7 @@ struct grpc_tls_server_authorization_check_config
gpr_log(GPR_ERROR, "cancel API is nullptr.");
if (arg != nullptr) {
arg->status = GRPC_STATUS_NOT_FOUND;
arg->error_details = gpr_strdup(
arg->error_details->set_error_details(
"schedule API in server authorization check config is nullptr");
}
return;

@ -190,9 +190,10 @@ class grpc_ssl_channel_security_connector final
grpc_auth_context* auth_context,
grpc_closure* /*on_call_host_checked*/,
grpc_error** error) override {
return grpc_ssl_check_call_host(host, target_name_.get(),
overridden_target_name_.get(), auth_context,
error);
return grpc_ssl_check_call_host(
host, target_name_.get(),
overridden_target_name_ != nullptr ? overridden_target_name_.get() : "",
auth_context, error);
}
void cancel_check_call_host(grpc_closure* /*on_call_host_checked*/,

@ -88,6 +88,7 @@ grpc_status_code TlsFetchKeyMaterials(
if (credential_reload_config != nullptr) {
grpc_tls_credential_reload_arg* arg = new grpc_tls_credential_reload_arg();
arg->key_materials_config = key_materials_config.get();
arg->error_details = new grpc_tls_error_details();
int result = credential_reload_config->Schedule(arg);
if (result) {
/** Credential reloading is performed async. This is not yet supported.
@ -105,13 +106,13 @@ grpc_status_code TlsFetchKeyMaterials(
} else if (arg->status == GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL) {
gpr_log(GPR_ERROR, "Credential reload failed with an error:");
if (arg->error_details != nullptr) {
gpr_log(GPR_ERROR, "%s", arg->error_details);
gpr_log(GPR_ERROR, "%s", arg->error_details->error_details().c_str());
}
reload_status =
is_key_materials_empty ? GRPC_STATUS_INTERNAL : GRPC_STATUS_OK;
}
}
gpr_free((void*)arg->error_details);
delete arg->error_details;
/** If the credential reload config was constructed via a wrapped language,
* then |arg->context| and |arg->destroy_context| will not be nullptr. In
* this case, we must destroy |arg->context|, which stores the wrapped
@ -406,14 +407,14 @@ grpc_error* TlsChannelSecurityConnector::ProcessServerAuthorizationCheckResult(
gpr_asprintf(&msg,
"Server authorization check is cancelled by the caller with "
"error: %s",
arg->error_details);
arg->error_details->error_details().c_str());
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
} else if (arg->status == GRPC_STATUS_OK) {
/* Server authorization check completed successfully but returned check
* failure. */
if (!arg->success) {
gpr_asprintf(&msg, "Server authorization check failed with error: %s",
arg->error_details);
arg->error_details->error_details().c_str());
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
}
/* Server authorization check did not complete correctly. */
@ -421,7 +422,7 @@ grpc_error* TlsChannelSecurityConnector::ProcessServerAuthorizationCheckResult(
gpr_asprintf(
&msg,
"Server authorization check did not finish correctly with error: %s",
arg->error_details);
arg->error_details->error_details().c_str());
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
}
gpr_free(msg);
@ -433,6 +434,7 @@ TlsChannelSecurityConnector::ServerAuthorizationCheckArgCreate(
void* user_data) {
grpc_tls_server_authorization_check_arg* arg =
new grpc_tls_server_authorization_check_arg();
arg->error_details = new grpc_tls_error_details();
arg->cb = ServerAuthorizationCheckDone;
arg->cb_user_data = user_data;
arg->status = GRPC_STATUS_OK;
@ -447,7 +449,7 @@ void TlsChannelSecurityConnector::ServerAuthorizationCheckArgDestroy(
gpr_free((void*)arg->target_name);
gpr_free((void*)arg->peer_cert);
if (arg->peer_cert_full_chain) gpr_free((void*)arg->peer_cert_full_chain);
gpr_free((void*)arg->error_details);
delete arg->error_details;
if (arg->destroy_context != nullptr) {
arg->destroy_context(arg->context);
}

@ -347,4 +347,18 @@ size_t grpc_slice_memory_usage(grpc_slice s);
grpc_core::UnmanagedMemorySlice grpc_slice_sub_no_ref(
const grpc_core::UnmanagedMemorySlice& source, size_t begin, size_t end);
namespace grpc_core {
struct SliceHash {
std::size_t operator()(const grpc_slice& slice) const {
return grpc_slice_hash_internal(slice);
}
};
} // namespace grpc_core
inline bool operator==(const grpc_slice& s1, const grpc_slice& s2) {
return grpc_slice_eq(s1, s2);
}
#endif /* GRPC_CORE_LIB_SLICE_SLICE_INTERNAL_H */
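Editor's note: these two additions are what allow ServiceConfig to key a std::unordered_map by grpc_slice. A minimal hedged sketch (map contents are illustrative):
  std::unordered_map<grpc_slice, int, grpc_core::SliceHash> counts;
  grpc_slice key = grpc_slice_from_static_string("/MyService/Foo");
  counts[key] = 1;                               // hashed via grpc_slice_hash_internal()
  GPR_ASSERT(counts.find(key) != counts.end());  // equality via grpc_slice_eq()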

@ -35,6 +35,8 @@
#include <sys/socket.h>
#endif
#include "absl/strings/match.h"
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@ -1658,7 +1660,7 @@ static int does_entry_match_name(grpc_core::StringView entry,
if (entry.empty()) return 0;
}
if (name == entry) {
if (absl::EqualsIgnoreCase(name, entry)) {
return 1; /* Perfect match. */
}
if (entry.front() != '*') return 0;
@ -1685,7 +1687,7 @@ static int does_entry_match_name(grpc_core::StringView entry,
if (name_subdomain.back() == '.') {
name_subdomain.remove_suffix(1);
}
return !entry.empty() && name_subdomain == entry;
return !entry.empty() && absl::EqualsIgnoreCase(name_subdomain, entry);
}
static int ssl_server_handshaker_factory_servername_callback(SSL* ssl,
@ -1707,7 +1709,7 @@ static int ssl_server_handshaker_factory_servername_callback(SSL* ssl,
}
}
gpr_log(GPR_ERROR, "No match found for server name: %s.", servername);
return SSL_TLSEXT_ERR_ALERT_WARNING;
return SSL_TLSEXT_ERR_NOACK;
}
#if TSI_OPENSSL_ALPN_SUPPORT

@ -6,7 +6,7 @@ Bad credentials (badclient.* / badserver.*):
These are self-signed certificates:
$ openssl req -x509 -newkey rsa:1024 -keyout badserver.key -out badserver.pem \
$ openssl req -x509 -newkey rsa:2048 -keyout badserver.key -out badserver.pem \
-days 3650 -nodes
When prompted for certificate information, everything is default except the
@ -19,44 +19,71 @@ Valid test credentials:
The ca is self-signed:
----------------------
$ openssl req -x509 -new -newkey rsa:1024 -nodes -out ca.pem -config ca-openssl.cnf -days 3650 -extensions v3_req
$ openssl req -x509 -new -newkey rsa:2048 -nodes -keyout ca.key -out ca.pem \
-config ca-openssl.cnf -days 3650 -extensions v3_req
When prompted for certificate information, everything is default.
client is issued by CA:
-----------------------
$ openssl genrsa -out client.key.rsa 1024
$ openssl genrsa -out client.key.rsa 2048
$ openssl pkcs8 -topk8 -in client.key.rsa -out client.key -nocrypt
$ rm client.key.rsa
$ openssl req -new -key client.key -out client.csr
When prompted for certificate information, everything is default except the
common name which is set to testclient.
$ openssl ca -in client.csr -out client.pem
$ openssl x509 -req -CA ca.pem -CAkey ca.key -CAcreateserial -in client.csr \
-out client.pem -days 3650
server0 is issued by CA:
------------------------
$ openssl genrsa -out server0.key.rsa 1024
$ openssl genrsa -out server0.key.rsa 2048
$ openssl pkcs8 -topk8 -in server0.key.rsa -out server0.key -nocrypt
$ rm server0.key.rsa
$ openssl req -new -key server0.key -out server0.csr
When prompted for certificate information, everything is default except the
common name which is set to *.test.google.com.au.
$ openssl ca -in server0.csr -out server0.pem
$ openssl x509 -req -CA ca.pem -CAkey ca.key -CAcreateserial -in server0.csr \
-out server0.pem -days 3650
server1 is issued by CA with a special config for subject alternative names:
----------------------------------------------------------------------------
$ openssl genrsa -out server1.key.rsa 1024
$ openssl genrsa -out server1.key.rsa 2048
$ openssl pkcs8 -topk8 -in server1.key.rsa -out server1.key -nocrypt
$ rm server1.key.rsa
$ openssl req -new -key server1.key -out server1.csr -config server1-openssl.cnf
When prompted for certificate information, everything is default except the
common name which is set to *.test.google.com.
$ openssl ca -in server1.csr -out server1.pem
$ openssl x509 -req -CA ca.pem -CAkey ca.key -CAcreateserial -in server1.csr \
-out server1.pem -extensions req_ext -extfile server1-openssl.cnf -days 3650
Clean up:
---------
$ rm *.rsa
$ rm *.csr
$ rm ca.srl
Sync up with other repositories
===============================
Copies of these keys exist in multiple locations across the gRPC repositories
(a partial list follows). Be careful to keep all of these copies in sync when
updating the keys.
grpc-dart/interop/
grpc-dotnet/testassets/Certs/InteropTests/
grpc-go/testdata/
grpc-java/testing/src/main/resources/certs/
grpc-node/test/data/
src/csharp/Grpc.IntegrationTesting/data/
src/objective-c/tests/TestCertificates.bundle/
src/php/tests/data/
src/python/grpcio_tests/tests/interop/credentials/
src/python/grpcio_tests/tests/unit/credentials/
src/ruby/spec/testdata/
test/core/end2end/data/

@ -1,16 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALJfYnFn4nkj52WF
E5W2qUxCfjsEFyuXYYKS/07UPWsv3gpZhtjXgdeGL+dpwEBC0IRDBfGnkMp6YY5S
O7rnEz0X3r/fvgYy+dEl2jnaA6zgc7RzMGl9U11d56gP9FiDC2190mvP/hpq2xLZ
CTbIximpmaoQyxuuH1bbYunesIG/AgMBAAECgYAdqJCEzMIyZE7oaW0tOpcB0BiP
FYoIvH4BKRH8eHvR476mt+YdDhBP1scGUmYeCT4Ej+RgHv2LPTgVYwT9eciP2+E/
CBCNRel0Sw9JepwW0r+jWJtDY1pp6YXAgNRGX2UflvUsT+o9lZvagf9moLTMyGvU
uLFnsyfLim1B4vXvWQJBANouZllXGZoSrZLtR3VgV4tzRQvJxu84kLeIk64Ov47X
pHVBMTRBfzPEhbBodjr1m5OLaVLqkFcXftzRCrbWoKsCQQDRSoLLXOiLrtJ3DLJC
rX7Y8wrHZrqk5bMdZLGa/UX8RanhVw3+Xp+urd1711umeNJfzu/MCk4a1KkG/CU0
rqs9AkA4cSx1DD1JSG+yxMNpsAS1xJomFIrsM9vsPt7FdndDwrF+y+CovhDkGYDk
RAHh+svGfZg/pQK2JRPimAmHhzqFAkEAu6Ya70s2FUeB3Mu9aJs2CD6hg3dQEVkB
53DI7TX48d9kGW58VX1xnqS02LyWqAPcW5qm1kLHFLdndaPNmBaj4QJBAJugl367
9d9t/QLTSuULLaoYv2vJT3s1y9HN89EoaDDEkPVfQu6GVEXgIBtim1sI/VPSzI8H
aXvaTUwblFWSM70=
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDvdzKDTYvRgjBO
UOrzDwkAZGwNFHHlMYyMGI5tItj3tCzXkbpM0uz3ZjHVahu+eYc+KvYApM64F2dB
b16hs713FCk8mihYABjnSndrQsl/U2v8YFT7DipfLReqqaOGu2o9HdvWfiUlaiC/
UGGfR+YblpK7CG+7/hvTXtUsMw+OppoeH9z87rhOJMxtiC7XwU5rhEmab/1f1XM/
nLoZrfDAcTbDywoeu826SJ3mifajq7oK3LDdNLjWZwfEsCO1qp2C4gLvBlOOKsWO
LNby6ByxCOPlCTa0UCaVuoNclYol71jyi17KW+Nk0nNe9yaVcyr6H0z3bImfJhbS
u4rzI93nAgMBAAECggEBAOIPOJRTpGaH7GpCYUpLK0g/hPFkF5EyEWg/1lSYzRIp
+RsX6zOS+zkiNHEv1jkeKNo7XDiHXM7U6RkQtdkZAQdk9PjM3sEUdm4CEnIjfmzA
p/R8TD0kxkNLIkhuFH2gd05y3ZHDS/XiFkAE9eOT0FrC7om6ESD7ZfFIWR18pncW
ZGq7tFAZZRmpkum2D+MJy1gWxIXBxt5madTEpRxQd56toEnfx372F0y4zkcX3pnE
4H6FaJUBjdvKl2QzF5c0jBqgxMRvWP5YfNu8+dmaQORPkpzSptOPmZM9VKV+tJVS
1xnOI6DtrnNZRojegR/E6KhNyiPTYy97UgYzdKS+SSECgYEA+wgSIqrfkeqqotJx
cGxF4x9v/ldKr5hlhJNoKXLkepkcrvhhxfHKgjWz1nZY/+Rpg42GFMvxWRrGTMIJ
ddiOr24p0HCkusWRMKQL7XxvuHDq0ro8SGqXzqWGuH31R+YNP8dy2pqd3OlwzTgg
8v0wwzx8AuyP5Ys4M20Ewv7Xuy0CgYEA9DSGMU8jmjxJ/uPDCXWOEAqtE78wTtIw
uMBv+ge0inc37xf+fN6D/ziTrJvgw/XyT15pmQdOlXx3Sg1h9XBZeIlaeCdFWrFB
oYrVsiuoXRswfkFwA0yOkCsHyGiI4TE0W1rGbqP158IjwXPczBswWI7i/D6LpINL
BD7YYpfHmeMCgYB08AiKr7Cf54H/gSqo5TcVGzLvdzhqXgKEZKp0DHpUhfivpTLe
o8jjKSMSN2U0JvHj/0xDadGO4YMYhJcll3C4VggSejaybpA46WJJCdt9PtSUv36P
eWAoOkFstfhJuufXGxDstnPtUa1jW881gi5x9D4MmqhZlKXkhtdeApr6LQKBgQDd
ItsJt9JTjpirGfC5lhwI5sIICa9jEO9RveEoluWkJYUfG6k1xgHdkYwYWCdXDFZa
DPKuwnEk6MrU4f181joO7sJf35/sGmuGL0SHzQTvGvn0uqkGM8M9RdoMXqzkzzvM
Jg1ej1bUgXcDbTnaEhzbdLiTFsg5NzMtKwOjdDIpZQKBgEIHeJIqiGjYgf7mUlX2
vNWgFNlzApkFSCQ8TkzkDOjtCdSHfdRDJ6+q8cS2TSQ7QPoAlI1woS0G48TNbVSo
wD0jNVRTdpA6R5FPsg09ohB/caSn0zlGVha2GS08ceYrn7nn4PSZ/UIYTm3pjUlV
H5tvHv0gG2C5vy3tIYQtSQCk
-----END PRIVATE KEY-----

@ -1,17 +1,22 @@
-----BEGIN CERTIFICATE-----
MIICoDCCAgmgAwIBAgIJANIz2/zoRiapMA0GCSqGSIb3DQEBBQUAMGkxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxIjAgBgNVBAMMGWJhZGNsaWVudC50ZXN0Lmdvb2dsZS5j
b20wHhcNMTQwNzI4MjAwODI1WhcNMjQwNzI1MjAwODI1WjBpMQswCQYDVQQGEwJB
VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
cyBQdHkgTHRkMSIwIAYDVQQDDBliYWRjbGllbnQudGVzdC5nb29nbGUuY29tMIGf
MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCyX2JxZ+J5I+dlhROVtqlMQn47BBcr
l2GCkv9O1D1rL94KWYbY14HXhi/nacBAQtCEQwXxp5DKemGOUju65xM9F96/374G
MvnRJdo52gOs4HO0czBpfVNdXeeoD/RYgwttfdJrz/4aatsS2Qk2yMYpqZmqEMsb
rh9W22Lp3rCBvwIDAQABo1AwTjAdBgNVHQ4EFgQU523AJMR8Ds9V8fhf7gu1i0MM
UqAwHwYDVR0jBBgwFoAU523AJMR8Ds9V8fhf7gu1i0MMUqAwDAYDVR0TBAUwAwEB
/zANBgkqhkiG9w0BAQUFAAOBgQCI/tvSBYH1iyfLaCTBKwpdj36+MkR9EeJJmImx
X+bjhKWXwsBX4PDMWvdusr++QGUYtyoya+hfYMXRhXua39mD54xgloQNuu9REDwX
Ffto+aOw3BcYducz6ofxicFK/Y2VeXDurSMpRv5TfGf2Qr6eOOdaRhj6ed7BibHk
X1VGZA==
MIIDszCCApugAwIBAgIUONWbkUn1obHCw9L7lMNEE5REvb8wDQYJKoZIhvcNAQEL
BQAwaTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEiMCAGA1UEAwwZYmFkY2xpZW50LnRl
c3QuZ29vZ2xlLmNvbTAeFw0yMDAzMTcxNzQzMjNaFw0zMDAzMTUxNzQzMjNaMGkx
CzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
cm5ldCBXaWRnaXRzIFB0eSBMdGQxIjAgBgNVBAMMGWJhZGNsaWVudC50ZXN0Lmdv
b2dsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDvdzKDTYvR
gjBOUOrzDwkAZGwNFHHlMYyMGI5tItj3tCzXkbpM0uz3ZjHVahu+eYc+KvYApM64
F2dBb16hs713FCk8mihYABjnSndrQsl/U2v8YFT7DipfLReqqaOGu2o9HdvWfiUl
aiC/UGGfR+YblpK7CG+7/hvTXtUsMw+OppoeH9z87rhOJMxtiC7XwU5rhEmab/1f
1XM/nLoZrfDAcTbDywoeu826SJ3mifajq7oK3LDdNLjWZwfEsCO1qp2C4gLvBlOO
KsWOLNby6ByxCOPlCTa0UCaVuoNclYol71jyi17KW+Nk0nNe9yaVcyr6H0z3bImf
JhbSu4rzI93nAgMBAAGjUzBRMB0GA1UdDgQWBBTKJskEYd2ndrwihPTg2PzYF/kP
gzAfBgNVHSMEGDAWgBTKJskEYd2ndrwihPTg2PzYF/kPgzAPBgNVHRMBAf8EBTAD
AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBoGwWR0pLM1icX4bIJ6yduFU/A4jSiqET6
gvJhwgErilqTKfH6Y89rqtzW8k4UurAOCsE4FA6wbkHWwrUMnClY4lkHJh+MuNaJ
nCGrK8wRKGb/mqW9d5pP72Et1Q6OW6DAKqGfjDWh2MzSPHBxcCLeyigO1wqd4W1T
nvvql6l4L+B5IT/c+/EHO3PwbI9v6MGTtLjsZgkRKItaPh+YeJdmBYhRD1BvWb6s
VwEb7aQ1oSF+esUvMmjGVuHXuQvWJahnjYdYT2DikyqR+AwaKzre4GJMHsX3/Cf8
qdxyI+B1jUwNr7sLA2EYDjnUR0jEHcrOBSpIQyRMGWduj0P16yb9
-----END CERTIFICATE-----

@ -1,16 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAKeZ1e1y29cmBKaW
oIUwJ5neOJUjx+eD/3nRPe+dvLXEd9+db0fG5RYRR0S3mF1Ywuj4PIxlTW2YprUS
oGSw+tcqWNIzxv94HjwYFkkvER3AblXcDBh0P2zAkzg+nf9AcAsMh0QpDTyrXtMl
gqryjq1/vkhFofKMMbY+aXJdG6OBAgMBAAECgYAAgaB51S0A22aMMkxN2rVj6530
JWWHN4jgD1fGj41wZyWNkWYyq1Ep3ed/N6bIMWp1VbqpGe0/9YQba/D8HOTFHGRt
72YXnP1e/ds8cxU4x4j1vvqSPtXpMmkiXfXijOvCl9mrMH2xjghFAt6/1Nb9xo1m
VdcOB8OdSuOIw6CI+QJBAN5FZUbS+bRXDWII/FaAih1DBpwCxhYEN+TXPJBxSen6
kOzGt5g+mB6YqRMZ/qshshwPq7bsgFGfJ2lIdS2t3GsCQQDBCKifV5AAkOdOUrkK
HvoX3qnVmyIA8CyvWLcIWpfZ76QAYh0q0StedKdOMXaB1jTeSJ2KU1nlss7UD1Yw
VbrDAkAwjMHpbW3jiVw//Kx5jIwehiRscWKpLnSzBJyTBFvbwsJjJai2lX2OuVO8
+2GYKb0Iyhd81j3VFkl6grwtpRtPAkB7+n+yt555fpfRKjhGU9b09cHGu7h/OcK5
bBVCfE0DYHLI/DsXgPiF1g6Onh4rDdUu3xyv9xDKAqnscV099hHZAkEAvcFBfXZs
tk18N+bUcvXTdZjzZbfLCHlJmwPIspZ8G/6Pn63deg4GVYoCvTwGruah+8y734Ph
7PskfPgUQlB7Ag==
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDRY2Z886nT6KF4
tjgJTX0l1M4j8bQp+jKfXz+hwUZbn/PnCXJlu/5denpyu4XrLxr6Ix4Il97SrKfQ
iGaSZQ8hcq6WQdEDfuo/U7R/dk6lYG7q+yg7+xHm02DzVdPLp09kLhU+fWH3Wek0
9GCk9iC1/sVTIomBYpar61Ris04iA1QveR+LZKNkQ8rL2i191Djs8cdrn9yhWdfJ
Ai89lLl6S6d8cXru1LwtEe0ejctnKf6ANqMnmyWTbHV8h0Cc3fbAnx92HsWDMJKe
8mI0CClauxrlagMHyw10NuFb9/MBEkFPJfxcUyW6F45LmqGHVfcxx6/BU7XRbWx8
aQM/pt2LAgMBAAECggEBAKWpei3D7op9FDvYF0+s4iXrT0j682r+y8xx5HtK2iql
y6fwPnUlHqPAwl5B5TtkonhjDmEIH0AZYBBJyrVqhWUWQfEZk4+rexOtWzI5XRHU
0QzSt0t1Yf15IcyEDDSlY9fD6gTt2HOFzE+cRVZecRTsxBv5SEd4w/KzFqmcaWXY
Q7mLvCs6eQ55LBQ6EMweZ3XE57qPf71oV8Ckxv/jstLlkE+3JICgEAaiOEzi7oCm
hYbkoU2VNewx5EA5ka52DQzbVYYYuDbjqtVPXCmlVdejBBmUCAlhdjAIDBYq/RMf
sVMagAo19Wt5lYuNGD9qzMUmzZPaVmkg4yUmU8EYFVkCgYEA8Tyup/0yx+/tp8KQ
cLyGc4RDUTfabL8mlvxcbEge9fQ12aHE3cA/hkHCI7AZxwrHYwb1hxzLaOmKYfFC
oLxfzx81p5BO0lQWcKiFZ6ISiku4TPdmBaauKKxd62kFUPO4Q6Zk1MFHMXrvZUxZ
BsK058HZ5JALDdQ5wBfJE5P58rcCgYEA3jPDMiXsH1s5gM/bh0s+cC1AFSst6YM3
rRPmHrqJJhKgU6gSB0d0LCUdj4/NkQT/Bw8DrfxLIqytsfRLKCx85K6lk8GfCk6T
1OhPKRp8bgg6WDQiJfJMokJN5zrnC02ns1cVdQSPY8bFxB++tv3du6DKLYx0e46D
Q9ojYqWHh80CgYEA0Shh7nkTrFKUZZ3GClkK4eFNVH/uu9bIKKTJpYCqh2mjvvwJ
apKjAU7GepbW4sKvuWZxPyJyIpZKSz0ZHa/2CejvZkcycB5EDo2ujPnyxUF9nA3s
wP2RhuZb0B4QY+3MV6tPRUAG8Bm8ssGNdtUecMqclxVk4Cqfn7N/vZ/RWOUCgYAL
i2rv1xKOioHRVHtWay1iTKeQsf6frEafQnJpVE294afc0NWm9SpvBLqlc9Y9W6IY
bspFJt+MfKZFoaip/K28f+pwY9XshiqeHDfIreybFuhZHtRLXmxm3cUIZ4ILj0xQ
QA0IWGVOzMwHpZKWFViI4BDBDxQaO0xMoS/Hd0w0XQKBgF5uZXXrNLmCeU6oco1R
gjGJE4gRwaSVcVJbs/VLbBmHT1VhBGsiluBuTpbmzDfyHWHJprnthlSTgqHXSax1
6GvHZ2NHBqmD2uxEGuwBffzhwWVxHpgSrRgvnnaeIph2Iv92/ATN5LCc5vF+SNGx
2kKWYTDSRu9q1xHpXcax+nmJ
-----END PRIVATE KEY-----

@ -1,17 +1,22 @@
-----BEGIN CERTIFICATE-----
MIICoDCCAgmgAwIBAgIJAPdqwqsKNy81MA0GCSqGSIb3DQEBBQUAMGkxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxIjAgBgNVBAMMGWJhZHNlcnZlci50ZXN0Lmdvb2dsZS5j
b20wHhcNMTQwNzI4MjAwODU0WhcNMjQwNzI1MjAwODU0WjBpMQswCQYDVQQGEwJB
VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
cyBQdHkgTHRkMSIwIAYDVQQDDBliYWRzZXJ2ZXIudGVzdC5nb29nbGUuY29tMIGf
MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCnmdXtctvXJgSmlqCFMCeZ3jiVI8fn
g/950T3vnby1xHffnW9HxuUWEUdEt5hdWMLo+DyMZU1tmKa1EqBksPrXKljSM8b/
eB48GBZJLxEdwG5V3AwYdD9swJM4Pp3/QHALDIdEKQ08q17TJYKq8o6tf75IRaHy
jDG2PmlyXRujgQIDAQABo1AwTjAdBgNVHQ4EFgQU3u/qvHr9knMBeZyAD7mAA/ec
8cUwHwYDVR0jBBgwFoAU3u/qvHr9knMBeZyAD7mAA/ec8cUwDAYDVR0TBAUwAwEB
/zANBgkqhkiG9w0BAQUFAAOBgQA/FmR1SGLguxCCfhp4CYCbrAePSyPWDi48gTwj
vVZf/OMxdVu/H8sBYFf27BjbrEugAw16DElFtgTZ83pLb2BvkUgb6vBUK5sEkgmh
z88zBsgDp8aCf4STDOLFZMBh/E9ZKkm1zogbEmlTjFp/ceSpa2gNv7OuN4WiorOh
Wvw40g==
MIIDszCCApugAwIBAgIULEum14ranwlUZjuZchSWaHtj8Z4wDQYJKoZIhvcNAQEL
BQAwaTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEiMCAGA1UEAwwZYmFkc2VydmVyLnRl
c3QuZ29vZ2xlLmNvbTAeFw0yMDAzMTcxNzE5NTRaFw0zMDAzMTUxNzE5NTRaMGkx
CzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
cm5ldCBXaWRnaXRzIFB0eSBMdGQxIjAgBgNVBAMMGWJhZHNlcnZlci50ZXN0Lmdv
b2dsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDRY2Z886nT
6KF4tjgJTX0l1M4j8bQp+jKfXz+hwUZbn/PnCXJlu/5denpyu4XrLxr6Ix4Il97S
rKfQiGaSZQ8hcq6WQdEDfuo/U7R/dk6lYG7q+yg7+xHm02DzVdPLp09kLhU+fWH3
Wek09GCk9iC1/sVTIomBYpar61Ris04iA1QveR+LZKNkQ8rL2i191Djs8cdrn9yh
WdfJAi89lLl6S6d8cXru1LwtEe0ejctnKf6ANqMnmyWTbHV8h0Cc3fbAnx92HsWD
MJKe8mI0CClauxrlagMHyw10NuFb9/MBEkFPJfxcUyW6F45LmqGHVfcxx6/BU7XR
bWx8aQM/pt2LAgMBAAGjUzBRMB0GA1UdDgQWBBTYP9Av5QoPxsDRE33wQedENOke
wDAfBgNVHSMEGDAWgBTYP9Av5QoPxsDRE33wQedENOkewDAPBgNVHRMBAf8EBTAD
AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCXA/Ewb5laDDxJi4YJxnmqQsb4WSsm65Hj
MX21Ii2vzf4XZ+i8c9xBezCae85Bkhtb/oMC/V15DshjVkkJNmdQfAlYD1NASSrN
hTaiQ4AfXWjO7H8o2B/rneZtA21NDCwvFxTXeJzAVnBkpIePR//KmuHjtCMjsrtP
ovckcTRGmhWJJ9sRx4HCsJXygBvnCIIIYC585aU4+nE53UDNT2T+Bd4b1vPmwf9R
9XgbyN6AhQ+0F11zlnftwsJ23nbnXqX/fpG/YZuhnPwaUILRodc6HZQtf/8xpRcA
0dKMdnL2YtBjuL5QFJMLT0mdsmnXj3h/oK8894nYBZYSmlb3bzZK
-----END CERTIFICATE-----

@ -1,16 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMBA3wVeTGHZR1Ry
e/i+J8a2cu5gXwFV6TnObzGM7bLFCO5i9v4mLo4iFzPsHmWDUxKS3Y8iXbu0eYBl
LoNY0lSvxDx33O+DuwMmVN+DzSD+Eod9zfvwOWHsazYCZT2PhNxnVWIuJXViY4JA
HUGodjx+QAi6yCAurUZGvYXGgZSBAgMBAAECgYAxRi8i9BlFlufGSBVoGmydbJOm
bwLKl9dP3o33ODSP9hok5y6A0w5plWk3AJSF1hPLleK9VcSKYGYnt0clmPVHF35g
bx2rVK8dOT0mn7rz9Zr70jcSz1ETA2QonHZ+Y+niLmcic9At6hRtWiewblUmyFQm
GwggIzi7LOyEUHrEcQJBAOXxyQvnLvtKzXiqcsW/K6rExqVJVk+KF0fzzVyMzTJx
HRBxUVgvGdEJT7j+7P2kcTyafve0BBzDSPIaDyiJ+Y0CQQDWCb7jASFSbu5M3Zcd
Gkr4ZKN1XO3VLQX10b22bQYdF45hrTN2tnzRvVUR4q86VVnXmiGiTqmLkXcA2WWf
pHfFAkAhv9olUBo6MeF0i3frBEMRfm41hk0PwZHnMqZ6pgPcGnQMnMU2rzsXzkkQ
OwJnvAIOxhJKovZTjmofdqmw5odlAkBYVUdRWjsNUTjJwj3GRf6gyq/nFMYWz3EB
RWFdM1ttkDYzu45ctO2IhfHg4sPceDMO1s6AtKQmNI9/azkUjITdAkApNa9yFRzc
TBaDNPd5KVd58LVIzoPQ6i7uMHteLXJUWqSroji6S3s4gKMFJ/dO+ZXIlgQgfJJJ
ZDL4cdrdkeoM
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCwYvShd+UXQvOg
z4GH6pRT3KGrPDbDw45fma7+I0LJQ4GupoeLuYYfHvcYPTV2I3MLO+VxCp00gfo1
BIvsNOkGNxrrqNhP27ve9l7YwOuvWdVu4u9+73znRx3GJQ4ie/nF/z6xMcbQL5r5
UC8yGwuJGOyr6VcpEnKTnORtuwRPJuqnGgn4rsKhLLfJz+RAhjdOKnAS3CQo/iHP
KjoqIZ38M97GJ7icFQic3dtLUFR41nnN5ogLZ6DduR55btypPnlv5h6foLFjRMST
MEroAq39ZSJqUoyBPTBtPFFk7uRQIfdKrp7/Bd4V0n4e91Us+UCDlOcxo2lF1CKH
/ydEWmx3AgMBAAECggEAKrDosKQKKKUlvkg6+6CFIf8GiiFax+ru7KiPuCbkpT3X
h2P67pCKq8Gc4Jr/84YE9DUdBU0iW3ESE/7ztsnflIeF1n/ZSwrN39sVfbTD1n8R
r3LxsHFac8e8pxaU4zfKbmemztBTZFQBWFJV+fSdyCLmNX2WgPRcEuooR366PkWv
xZLAxeDGqpnsa62o1GdGmalxx8aljLN/QcbQi73mR9Osim1OtSd1cyDlZ/8x6OoV
Ae5GDN3Bj0hO9ZKzNWTbQpRw9SHKU6sWXtHlcDx4xi5kN/n9aptn7kixbY9y8uOM
5zjErVGWvxdP94IvlSkrkenwnIjlHBtdlAjVuCFioQKBgQDoJLyfHNWPBnjPGVnK
xcbIIwmf4C9UnZBbHRD3YxU/GBpsPgPh9EwhQTAXlGQGHeuslxCIh4cEfbOIrJ9b
/s3OqeL9CSUaz/N+1av1ZuwOI9CEvNPi51IK+rXNRmVJG8pG6RaKNx57pXaFtmqq
FUtC7twbPECvjspapn61nZYSiQKBgQDCg1tpGwZJJOCIkhYH4wFc4j4p0LxIcBJ2
E3L9VnQ+APT/x8uitkZsuRY9tmWcHK8/zWTc1GpFdwGUJ9+Yzvprtej+P/buxM9J
Y6ZJZdCIHWDuh3eq+sXS4lwr5fi7ir5m97npG1bXPlOoYIJ7p172EyoNmurRIgiP
LWnzK0jG/wKBgQCRQtOouNFFcyZLaTCPutxdRddy7ESRrRq0eOax9pVH6tw12URy
snyk3naqepdwYG6li82zsSKig8nA/0uktDeyVwoLjhpiwbc7KZc1sxaI7o4/US1B
McBb0G/MqH0elz4myxnomP8BHhOhLflmvnZexrqCbFyJvk8PFFn7aUWMCQKBgDvX
9BCzOszYJqh94X9NrQapqJxu1u6mZFelhjRBHARTgQ0MqC8IS0R58UjNTBeqj5Re
mdCDHar/gSHW3qkBzPPEhMlsXol5TZjzqp5cT7sA5uicDwowmxpVgCwVVeBFQG0n
fDAmtCIGz/A2uQ5YIRQuMzr6VZJAGUgLndQtlfd7AoGBAMq1imggFKd1rt49XCnO
t97lpWOT+TlWYblHr01tOw+esawG5MFucqVI6tGpBSccTRQw6orWf4GK3KmkgQ6J
UgHKjwYsA0sf4U5vppkdkbAbM/WwUPOTQpGFRERyJqMqFGIc4wMtZOJBxXwf+9iD
l8tvan8w/6HugqnI7qqkTgLq
-----END PRIVATE KEY-----

@ -1,15 +1,20 @@
-----BEGIN CERTIFICATE-----
MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla
Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0
YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT
BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7
+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu
g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd
Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV
HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau
sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m
oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG
Dfcog5wrJytaQ6UA0wE=
MIIDWjCCAkKgAwIBAgIUWrP0VvHcy+LP6UuYNtiL9gBhD5owDQYJKoZIhvcNAQEL
BQAwVjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTIw
MDMxNzE4NTk1MVoXDTMwMDMxNTE4NTk1MVowVjELMAkGA1UEBhMCQVUxEzARBgNV
BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0
ZDEPMA0GA1UEAwwGdGVzdGNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAsGL0oXflF0LzoM+Bh+qUU9yhqzw2w8OOX5mu/iNCyUOBrqaHi7mGHx73GD01
diNzCzvlcQqdNIH6NQSL7DTpBjca66jYT9u73vZe2MDrr1nVbuLvfu9850cdxiUO
Inv5xf8+sTHG0C+a+VAvMhsLiRjsq+lXKRJyk5zkbbsETybqpxoJ+K7CoSy3yc/k
QIY3TipwEtwkKP4hzyo6KiGd/DPexie4nBUInN3bS1BUeNZ5zeaIC2eg3bkeeW7c
qT55b+Yen6CxY0TEkzBK6AKt/WUialKMgT0wbTxRZO7kUCH3Sq6e/wXeFdJ+HvdV
LPlAg5TnMaNpRdQih/8nRFpsdwIDAQABoyAwHjAMBgNVHRMEBTADAQH/MA4GA1Ud
DwEB/wQEAwICBDANBgkqhkiG9w0BAQsFAAOCAQEAkTrKZjBrJXHps/HrjNCFPb5a
THuGPCSsepe1wkKdSp1h4HGRpLoCgcLysCJ5hZhRpHkRihhef+rFHEe60UePQO3S
CVTtdJB4CYWpcNyXOdqefrbJW5QNljxgi6Fhvs7JJkBqdXIkWXtFk2eRgOIP2Eo9
/OHQHlYnwZFrk6sp4wPyR+A95S0toZBcyDVz7u+hOW0pGK3wviOe9lvRgj/H3Pwt
bewb0l+MhRig0/DVHamyVxrDRbqInU1/GTNCwcZkXKYFWSf92U+kIcTth24Q1gcw
eZiLl5FfrWokUNytFElXob0V0a5/kbhiLc3yWmvWqHTpqCALbVyF+rKJo2f5Kw==
-----END CERTIFICATE-----

@ -1,16 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIICeQIBADANBgkqhkiG9w0BAQEFAASCAmMwggJfAgEAAoGBAOxUR9uhvhbeVUIM
s5WbH0px0mehl2+6sZpNjzvE2KimZpHzMJHukVH0Ffkvhs0b8+S5Ut9VNUAqd3IM
JCCAEGtRNoQhM1t9Yr2zAckSvbRacp+FL/Cj9eDmyo00KsVGaeefA4Dh4OW+ZhkT
NKcldXqkSuj1sEf244JZYuqZp6/tAgMBAAECgYEAi2NSVqpZMafE5YYUTcMGe6QS
k2jtpsqYgggI2RnLJ/2tNZwYI5pwP8QVSbnMaiF4gokD5hGdrNDfTnb2v+yIwYEH
0w8+oG7Z81KodsiZSIDJfTGsAZhVNwOz9y0VD8BBZZ1/274Zh52AUKLjZS/ZwIbS
W2ywya855dPnH/wj+0ECQQD9X8D920kByTNHhBG18biAEZ4pxs9f0OAG8333eVcI
w2lJDLsYDZrCB2ocgA3lUdozlzPC7YDYw8reg0tkiRY5AkEA7sdNzOeQsQRn7++5
0bP9DtT/iON1gbfxRzCfCfXdoOtfQWIzTePWtURt9X/5D9NofI0Rg5W2oGy/MLe5
/sXHVQJBAIup5XrJDkQywNZyAUU2ecn2bCWBFjwtqd+LBmuMciI9fOKsZtEKZrz/
U0lkeMRoSwvXE8wmGLjjrAbdfohrXFkCQQDZEx/LtIl6JINJQiswVe0tWr6k+ASP
1WXoTm+HYpoF/XUvv9LccNF1IazFj34hwRQwhx7w/V52Ieb+p0jUMYGxAkEAjDhd
9pBO1fKXWiXzi9ZKfoyTNcUq3eBSVKwPG2nItg5ycXengjT5sgcWDnciIzW7BIVI
JiqOszq9GWESErAatg==
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCyqYRp+DXVp72N
FbQH8hdhTZLycZXOlJhmMsrJmrjn2p7pI/8mTZ/0FC+SGWBGZV+ELiHrmCX5zfaI
Lr9Iuw7Ghr3Vzoefi8r62rLupVPNi/qdqyjWk2dECHC9Z3+Ag3KzKTyerXWjKcvy
KVmM0ZxE0RXhDW/RoQbqZsU2GKg1B2rhUU8KN0gVmKn0rJHOxzRVSYeYLYp5Yn7K
rtPJcKyo9aVuEr7dGANzpyF6lg/nYBWc+9SGwkoLdFvKvABYJMyrbNhHUQfv0fza
Z0P86dfTENrDxzALrzGnqcx3KTrwJjkZ/aSr1tyD0/tXvukRFiPxWBJhjHQ70GqT
FQY19RbhAgMBAAECggEAIL8JUhL4awyvpWhQ8xPgTSlWwbEn8BE0TacJnCILuhNM
BRdf8LlRk/8PKQwVpVF3TFbYSMI+U6b4hMVssfv3HVQc/083dHq+3XOwUCVlUstR
SAzTE2E5EDMr1stdh0SQhV4Nilfos9s5Uk1Z6IGSztoz1GgOErIc/mGPy/aA/hbr
fRWHvTp35+MbCJSvZuOeevX2iLs0dNzqdk6DiOWIH/BVGirVPtO6ykrkuTj1FWiN
hyZ3MBChShlNH2poNX46ntOc7nEus0qteOgxBK8lummFEtlehCA7hd/8xuvYlP0k
7aN684LCRDajmAGpoZO57NSDYQhAFGZeUZ93SMFucQKBgQDe7GGkzZFEiv91u1q9
lgMy1h5dZjIZKgQaOarPC6wCQMUdqCf6cSLsAPr4T8EDoWsnY7dSnrTZ6YCIFL1T
idg8M3BQXipICCJkFORS76pKKZ0wMn3/NgkSepsmNct91WHr6okvx4tOaoRCtdzU
g7jt4Mr3sfLCiZtqTQyySdMUEwKBgQDNK+ZFKL0XhkWZP+PGKjWG8LWpPiK3d78/
wYBFXzSTGlkr6FvRmYtZeNwXWRYLB4UxZ9At4hbJVEdi/2dITOz/sehVDyCAjjs3
gycsc3UJqiZbcw5XKhI5TWBuWxkKENdbMSayogVbp2aSYoRblH764//t0ACmbfTW
KUQRQPB/uwKBgQC5QjjjfPL8w4cJkGoYpFKELO2PMR7xSrmeEc6hwlFwjeNCgjy3
JM6g0y++rIj7O2qRkY0IXFxvvF3UuWedxTCu1xC/uYHp2ti506LsScB7YZoAM/YB
4iYn9Tx6xLoYGP0H0iGwU2SyBlNkHT8oXU+SYP5MWtYkVbeS3/VtNWz1gQKBgQCA
6Nk4kN0mH7YxEKRzSOfyzeDF4oV7kuB2FYUbkTL+TirC3K58JiYY5Egc31trOKFm
Jlz1xz0b6DkmKWTiV3r9OPHKJ8P7IeJxAZWmZzCdDuwkv0i+WW+z0zsIe3JjEavN
3zb6O7R0HtziksWoqMeTqZeO+wa9iw6vVKQw1wWEqwKBgFHfahFs0DZ5cUTpGpBt
F/AQG7ukgipB6N6AkB9kDbgCs1FLgd199MQrEncug5hfpq8QerbyMatmA+GXoGMb
7vztKEH85yzp4n02FNL6H7xL4VVILvyZHdolmiORJ4qT2hZnl8pEQ2TYuF4RlHUd
nSwXX+2o0J/nF85fm4AwWKAc
-----END PRIVATE KEY-----

@ -1,14 +1,20 @@
-----BEGIN CERTIFICATE-----
MIICHzCCAYgCAQEwDQYJKoZIhvcNAQEFBQAwVjELMAkGA1UEBhMCQVUxEzARBgNV
BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0
ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTE0MDcxNzIzNTYwMloXDTI0MDcxNDIzNTYw
MlowWjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDETMBEGA1UEAwwKdGVzdGNsaWVudDCB
nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA7FRH26G+Ft5VQgyzlZsfSnHSZ6GX
b7qxmk2PO8TYqKZmkfMwke6RUfQV+S+GzRvz5LlS31U1QCp3cgwkIIAQa1E2hCEz
W31ivbMByRK9tFpyn4Uv8KP14ObKjTQqxUZp558DgOHg5b5mGRM0pyV1eqRK6PWw
R/bjglli6pmnr+0CAwEAATANBgkqhkiG9w0BAQUFAAOBgQAStSm5PM7ubROiKK6/
T2FkKlhiTOx+Ryenm3Eio59emq+jXl+1nhPySX5G2PQzSR5vd1dIhwgZSR4Gyttk
tRZ57k/NI1brUW8joiEOMJA/Mr7H7asx7wIRYDE91Fs8GkKWd5LhoPAQj+qdG35C
OO+svdkmqH0KZo320ZUqdl2ooQ==
MIIDNzCCAh8CFGyX00RCepOv/qCJ1oVdTtY92U83MA0GCSqGSIb3DQEBCwUAMFYx
CzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
cm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMMBnRlc3RjYTAeFw0yMDAzMTgw
MTA2MTBaFw0zMDAzMTYwMTA2MTBaMFoxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApT
b21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxEzAR
BgNVBAMMCnRlc3RjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQCyqYRp+DXVp72NFbQH8hdhTZLycZXOlJhmMsrJmrjn2p7pI/8mTZ/0FC+SGWBG
ZV+ELiHrmCX5zfaILr9Iuw7Ghr3Vzoefi8r62rLupVPNi/qdqyjWk2dECHC9Z3+A
g3KzKTyerXWjKcvyKVmM0ZxE0RXhDW/RoQbqZsU2GKg1B2rhUU8KN0gVmKn0rJHO
xzRVSYeYLYp5Yn7KrtPJcKyo9aVuEr7dGANzpyF6lg/nYBWc+9SGwkoLdFvKvABY
JMyrbNhHUQfv0fzaZ0P86dfTENrDxzALrzGnqcx3KTrwJjkZ/aSr1tyD0/tXvukR
FiPxWBJhjHQ70GqTFQY19RbhAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFXCewK8
cWT+zWxXyGFnouFSBzTi0BMBJRrhsiNoiQxkqityJHWFExiQZie+7CA+EabXCQUB
+JwMSWM29j3mSw10DTfmC3rhheQqGxy304BZyUpdpvI2dt3p/mcsE7O+p4sQrSep
gijiDssKAfxTAmUM93N6+Q8yJK5immxlbeYfijoBvmkzyB/B+qNRPsx0n7aFGnfv
oWfkW296iPhWLiwknpC3xB6oK3vRbK4Zj1OaGb0grK7VN8EyhBix2xVF61i4dzCK
kMIpl7CUpw1Mb2z8q3F2bHBS7iF7g1Ccn5VGcO+aJ+6PWydaeqJ6VEBF0Nwv9woe
mL5AluNRLaqjZvE=
-----END CERTIFICATE-----

@ -1,16 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANOmffupIGC8YDau
rOF4eKnHwPszgpkkhWzKsVxhNDBxCVYx4TEjG0XWIO0iyRXupZbUC+7N/8HnEVNa
8F1jYhng14Iiq99cNQbbnuHHhIztmpocrJTxmnhGzoAnRa1Tb+GnAuRoIHRA/V2c
VUE9tbikQugFx/SPgXAw6tfWB+YvAgMBAAECgYEAoEq9qzUBgoHoVEGiSPiWWe8g
5p6yUA1qx2QTQyWTAwT4z0DjjfVKmG99bFsl8+hTnJFnoCp/gnjflEOROwkjp5kG
m0drqOPx1jeipJjpXYTBu49h+WpZ1PF+KhVtxsIm3OOCvh67iWaKyyOVb5Og8aiR
jl6dn/TdG/dlGD8AfUECQQDuNMle6p0oU8amC6O9wIMBroxx2nFstzE6O35PLEzG
/tj0kxxn9Jp2TS9mGaLCzSuXmpjlF4+NOWiBPkrLC2TfAkEA43Xg7uEUkaJAz2/W
m1lIBTLt+4rIQY/2emh33bDcA+rv8rwwrMMIv17/xPx7bs49YqGG5xufD+Rwl6TL
qFXYsQJAPrOwagax1aKvwJeBw3oAQhoTKAkLIEXcdGqipe6QSzVcIIz0xjxxyEAr
AOIwoLxnBCISqwMXq2H4K0UdZPMb2wJAdhdYLY1L6YRMk6XjzImg25oidisKZweA
FvMv8DgHMj2CUAqmVrt3SivfLH1M9C09L3zfFhOAFHcsgX58gav4MQJBANSBnrHj
tIq4l8z79CPUIuu3QyeEh+XwY8s5qE5CNTck0U59lzp9NvENHbkx3KO896TTerko
+8bXHMLkJkHPXms=
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCei9aKutDNg2mr
COICW4hT6+LVJfI5J6DZ3yqb6MBzbc//VeUj1OCX/vt5vvPm1Qb5XNk1MOIFPEW6
t2/0Mhj2VbQfjDe/PhZRhwu4PBtuoJBDeBsKkdnxD0I+4G0XRbeTtUsGMGBgWSAd
qHMD2HnEhgydRw0krYjZp4a/HMCnZZ1WFamw1PCvQK6AuWSSk4iHGaYklXUaKd3r
Rlkujr7//ihcvqweJ+OdaTwXD8z4P0YfFe/bkWWpuqIZz1Or/nxiNBtvWKlI8pR2
K3wkoWsW+6bnFN8u81nstHDardR6zy3B7QXke02q4sddUERcwNFyPqpscetMpnhg
lhjrmnAXAgMBAAECggEAA6pB5GUbLJUMHUsQRnOtPBto2/qLleynmEHDb2U7BbAV
LdbjfCeQpZLcZ10VsFFeXudZkhQ2NV7GUeGpseBymUcz6cLJCx+5Tlsr1y90huMp
UpX1MhJbEmqC4oc3nmEbNEvtlxAJOlD1IBpjxkP71KIwqnYZBK8KSdXIlKRqg7QZ
VUgjA08TmWlZSxnOt1hpt2ZVjTOn7973YoTb4D7SZydMuVjTkwv9YjPFZOZ/wIP4
JTZczY/bJjEF7QBYL/wtir/vNJlxxi+FunJdoO3blhf8li5QU0iPd/YsyBFBBWfF
vD7QslaB7wQ8zyWxWpPLiWeD83XGE+7CY2+8EpG3AQKBgQDMK6N7jDQCq9F7n+5B
R8YPDMdINpcVsN8bIeEVKSxYE86oADQg/0jPUct+4liUS1dv0DUmUP1U0zbvupX7
NxE+gI8KFwCyq8nqZ1guW9oO00ZAGo4Rn0TIeoHWVgsE2tDqBFeC2wWYle1AaZLx
ZtFH6Ya4Q3a4xvjkXXabhbBDlwKBgQDGyzuNCGT1Xa1DXKLzyKspahdm9r7QXifo
jjZkcmzwItC535MBbQMq5+THD+WUbWrZ/rJ8KaSsoGmnjaWguSG0WLFpH3UiGn1W
FOSG2UGc0mWyz2p/j97EuhK12fabzn8rkuiohiFXjJDYrAIulcM++0ar3q2LyqXr
gleBEHLHgQKBgEAt44j9rIe+bO44etOIdUjb0nTvvBR0cd18i910AN169HY5Ainx
NXj+FELBcejDuiuKvnpZ8RhOALHg7C54w/HqxYv9aRnBCIqni7+e3e/VF/sknc4K
S7vdTp0KlRIkmpFFZiDbKmopjte1mBxMHrNFRDT99/7jhO98NcFzh9HnAoGAMf62
sVdlHJg8lO5dRPY4pae6zvhLMNgdLU1mvIhSgWogGD70F6202DuNu8pxsIx8DOsT
NEq80XVeXPcwqmUk5thPdeKlcLg8wUNr3cYRzEDVtsyXOhGSsuMhBX8VmEWskebW
gFuLUxtU6kkIG3MqsVI8icjs2HVUmRAktZ7PXwECgYA9V/zZe2DpP36gp63wRk6S
FI7bDbLPQCKah23mwp3WeP5T+/HmFFRrl0OCaDLwudTolqgPa47CV7JYa9LmJAPj
QBxcnL4CxjlaaS3V9kxVWOXabMEtwSUurELJwFKTEC/AFN9dR/nv4AzXInZznotG
7qDX8EhfjbFVJw4riAmlEw==
-----END PRIVATE KEY-----

@ -1,14 +1,20 @@
-----BEGIN CERTIFICATE-----
MIICHDCCAYUCAQQwDQYJKoZIhvcNAQEFBQAwVjELMAkGA1UEBhMCQVUxEzARBgNV
BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0
ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTE0MDcyMjE3NTk0OVoXDTI0MDcxOTE3NTk0
OVowVzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxFDASBgNVBAoM
C0dvb2dsZSBJbmMuMR0wGwYDVQQDDBQqLnRlc3QuZ29vZ2xlLmNvbS5hdTCBnzAN
BgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA06Z9+6kgYLxgNq6s4Xh4qcfA+zOCmSSF
bMqxXGE0MHEJVjHhMSMbRdYg7SLJFe6lltQL7s3/wecRU1rwXWNiGeDXgiKr31w1
Btue4ceEjO2amhyslPGaeEbOgCdFrVNv4acC5GggdED9XZxVQT21uKRC6AXH9I+B
cDDq19YH5i8CAwEAATANBgkqhkiG9w0BAQUFAAOBgQBtfR5qXG9TTI8YcYh7sA4V
GeNoplp0x6p7OG0NLvbJqAkUnkvjIkk1m1R2AUHhbkxzx6G75JIOoNJcWrCzywBA
BIsaTdmnNysf/s1hQJuD3IHiVb+7Ji0jhttnJlYcMid4o0tJO/a2E9YUxR+9cg0i
obb+Ql3qsvKdWBC1dDLDLw==
MIIDQTCCAikCFGyX00RCepOv/qCJ1oVdTtY92U84MA0GCSqGSIb3DQEBCwUAMFYx
CzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
cm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMMBnRlc3RjYTAeFw0yMDAzMTgw
MTA3MzhaFw0zMDAzMTYwMTA3MzhaMGQxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApT
b21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxHTAb
BgNVBAMMFCoudGVzdC5nb29nbGUuY29tLmF1MIIBIjANBgkqhkiG9w0BAQEFAAOC
AQ8AMIIBCgKCAQEAnovWirrQzYNpqwjiAluIU+vi1SXyOSeg2d8qm+jAc23P/1Xl
I9Tgl/77eb7z5tUG+VzZNTDiBTxFurdv9DIY9lW0H4w3vz4WUYcLuDwbbqCQQ3gb
CpHZ8Q9CPuBtF0W3k7VLBjBgYFkgHahzA9h5xIYMnUcNJK2I2aeGvxzAp2WdVhWp
sNTwr0CugLlkkpOIhxmmJJV1Gind60ZZLo6+//4oXL6sHifjnWk8Fw/M+D9GHxXv
25FlqbqiGc9Tq/58YjQbb1ipSPKUdit8JKFrFvum5xTfLvNZ7LRw2q3Ues8twe0F
5HtNquLHXVBEXMDRcj6qbHHrTKZ4YJYY65pwFwIDAQABMA0GCSqGSIb3DQEBCwUA
A4IBAQCCGvZpM+t83xWPCsz5FyuCqA6LI+j0NMMmKpe1wJ8JcK2o9Qw4d0wPxWdy
0O7Ti2YlJS3gups00zsaFhQymIKUBi5Gc+1VC7qHUUrVtkoIRe6QSpcVlxPVczlD
If1egkjBCUZKVSWqYRKB6AMqjpp7/dF06j6zAaAH54jaLv9VmiBtsFyd017AsC9W
+OG2ke2dNtXySfVX4VusCcji86qb5sr6hNIQWMXk6dZoLDsZvwvVi7KnrqQOza8J
klcJXV8Hsnq/faHr/ZmsIA65N0+H8KuYfbO+s5nKPG9th6ZZAu4aY2VDei++TH+H
EAQhivPNUC1DgCmx0P7vKLhgka7S
-----END CERTIFICATE-----

@ -1,16 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD
M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf
3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY
AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm
V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY
tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p
dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q
K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR
81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff
DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd
aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2
ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3
XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe
F98XJ7tIFfJq
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDnE443EknxvxBq
6+hvn/t09hl8hx366EBYvZmVM/NC+7igXRAjiJiA/mIaCvL3MS0Iz5hBLxSGICU+
WproA3GCIFITIwcf/ETyWj/5xpgZ4AKrLrjQmmX8mhwUajfF3UvwMJrCOVqPp67t
PtP+2kBXaqrXdvnvXR41FsIB8V7zIAuIZB6bHQhiGVlc1sgZYsE2EGG9WMmHtS86
qkAOTjG2XyjmPTGAwhGDpYkYrpzp99IiDh4/Veai81hn0ssQkbry0XRD/Ig3jcHh
23WiriPNJ0JsbgXUSLKRPZObA9VgOLy2aXoN84IMaeK3yy+cwSYG/99w93fUZJte
MXwz4oYZAgMBAAECggEBAIVn2Ncai+4xbH0OLWckabwgyJ4IM9rDc0LIU368O1kU
koais8qP9dujAWgfoh3sGh/YGgKn96VnsZjKHlyMgF+r4TaDJn3k2rlAOWcurGlj
1qaVlsV4HiEzp7pxiDmHhWvp4672Bb6iBG+bsjCUOEk/n9o9KhZzIBluRhtxCmw5
nw4Do7z00PTvN81260uPWSc04IrytvZUiAIx/5qxD72bij2xJ8t/I9GI8g4FtoVB
8pB6S/hJX1PZhh9VlU6Yk+TOfOVnbebG4W5138LkB835eqk3Zz0qsbc2euoi8Hxi
y1VGwQEmMQ63jXz4c6g+X55ifvUK9Jpn5E8pq+pMd7ECgYEA93lYq+Cr54K4ey5t
sWMa+ye5RqxjzgXj2Kqr55jb54VWG7wp2iGbg8FMlkQwzTJwebzDyCSatguEZLuB
gRGroRnsUOy9vBvhKPOch9bfKIl6qOgzMJB267fBVWx5ybnRbWN/I7RvMQf3k+9y
biCIVnxDLEEYyx7z85/5qxsXg/MCgYEA7wmWKtCTn032Hy9P8OL49T0X6Z8FlkDC
Rk42ygrc/MUbugq9RGUxcCxoImOG9JXUpEtUe31YDm2j+/nbvrjl6/bP2qWs0V7l
dTJl6dABP51pCw8+l4cWgBBX08Lkeen812AAFNrjmDCjX6rHjWHLJcpS18fnRRkP
V1d/AHWX7MMCgYEA6Gsw2guhp0Zf2GCcaNK5DlQab8OL4Hwrpttzo4kuTlwtqNKp
Q9H4al9qfF4Cr1TFya98+EVYf8yFRM3NLNjZpe3gwYf2EerlJj7VLcahw0KKzoN1
QBENfwgPLRk5sDkx9VhSmcfl/diLroZdpAwtv3vo4nEoxeuGFbKTGx3Qkf0CgYEA
xyR+dcb05Ygm3w4klHQTowQ10s1H80iaUcZBgQuR1ghEtDbUPZHsoR5t1xCB02ys
DgAwLv1bChIvxvH/L6KM8ovZ2LekBX4AviWxoBxJnfz/EVau98B0b1auRN6eSC83
FRuGldlSOW1z/nSh8ViizSYE5H5HX1qkXEippvFRE88CgYB3Bfu3YQY60ITWIShv
nNkdcbTT9eoP9suaRJjw92Ln+7ZpALYlQMKUZmJ/5uBmLs4RFwUTQruLOPL4yLTH
awADWUzs3IRr1fwn9E+zM8JVyKCnUEM3w4N5UZskGO2klashAd30hWO+knRv/y0r
uGIYs9Ek7YXlXIRVrzMwcsrt1w==
-----END PRIVATE KEY-----

@ -1,16 +1,22 @@
-----BEGIN CERTIFICATE-----
MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET
MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ
dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx
MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV
BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50
ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco
LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg
zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd
9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw
CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy
em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G
CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6
hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh
y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8
MIIDtDCCApygAwIBAgIUbJfTREJ6k6/+oInWhV1O1j3ZT0IwDQYJKoZIhvcNAQEL
BQAwVjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTIw
MDMxODAzMTA0MloXDTMwMDMxNjAzMTA0MlowZTELMAkGA1UEBhMCVVMxETAPBgNV
BAgMCElsbGlub2lzMRAwDgYDVQQHDAdDaGljYWdvMRUwEwYDVQQKDAxFeGFtcGxl
LCBDby4xGjAYBgNVBAMMESoudGVzdC5nb29nbGUuY29tMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEA5xOONxJJ8b8Qauvob5/7dPYZfIcd+uhAWL2ZlTPz
Qvu4oF0QI4iYgP5iGgry9zEtCM+YQS8UhiAlPlqa6ANxgiBSEyMHH/xE8lo/+caY
GeACqy640Jpl/JocFGo3xd1L8DCawjlaj6eu7T7T/tpAV2qq13b5710eNRbCAfFe
8yALiGQemx0IYhlZXNbIGWLBNhBhvVjJh7UvOqpADk4xtl8o5j0xgMIRg6WJGK6c
6ffSIg4eP1XmovNYZ9LLEJG68tF0Q/yIN43B4dt1oq4jzSdCbG4F1EiykT2TmwPV
YDi8tml6DfOCDGnit8svnMEmBv/fcPd31GSbXjF8M+KGGQIDAQABo2swaTAJBgNV
HRMEAjAAMAsGA1UdDwQEAwIF4DBPBgNVHREESDBGghAqLnRlc3QuZ29vZ2xlLmZy
ghh3YXRlcnpvb2kudGVzdC5nb29nbGUuYmWCEioudGVzdC55b3V0dWJlLmNvbYcE
wKgBAzANBgkqhkiG9w0BAQsFAAOCAQEAS8hDQA8PSgipgAml7Q3/djwQ644ghWQv
C2Kb+r30RCY1EyKNhnQnIIh/OUbBZvh0M0iYsy6xqXgfDhCB93AA6j0i5cS8fkhH
Jl4RK0tSkGQ3YNY4NzXwQP/vmUgfkw8VBAZ4Y4GKxppdATjffIW+srbAmdDruIRM
wPeikgOoRrXf0LA1fi4TqxARzeRwenQpayNfGHTvVF9aJkl8HoaMunTAdG5pIVcr
9GKi/gEMpXUJbbVv3U5frX1Wo4CFo+rZWJ/LyCMeb0jciNLxSdMwj/E/ZuExlyeZ
gc9ctPjSMvgSyXEKv6Vwobleeg88V2ZgzenziORoWj4KszG/lbQZvg==
-----END CERTIFICATE-----

@ -157,12 +157,12 @@ You can find out how to build and run our simplest gRPC C++ example in our
For more detailed documentation on using gRPC in C++, see our main
documentation site at [grpc.io](https://grpc.io), specifically:
* [Overview](https://grpc.io/docs/): An introduction to gRPC with a simple
* [Overview](https://grpc.io/docs): An introduction to gRPC with a simple
Hello World example in all our supported languages, including C++.
* [gRPC Basics - C++](https://grpc.io/docs/tutorials/basic/c.html):
* [gRPC Basics - C++](https://grpc.io/docs/tutorials/basic/cpp):
A tutorial that steps you through creating a simple gRPC C++ example
application.
* [Asynchronous Basics - C++](https://grpc.io/docs/tutorials/async/helloasync-cpp.html):
* [Asynchronous Basics - C++](https://grpc.io/docs/tutorials/async/helloasync-cpp):
A tutorial that shows you how to use gRPC C++'s asynchronous/non-blocking
APIs.

@ -68,8 +68,7 @@ grpc_ssl_certificate_config_reload_status TlsCredentialReloadArg::status()
}
grpc::string TlsCredentialReloadArg::error_details() const {
grpc::string cpp_error_details(c_arg_->error_details);
return cpp_error_details;
return c_arg_->error_details->error_details();
}
void TlsCredentialReloadArg::set_cb_user_data(void* cb_user_data) {
@ -159,7 +158,7 @@ void TlsCredentialReloadArg::set_status(
void TlsCredentialReloadArg::set_error_details(
const grpc::string& error_details) {
c_arg_->error_details = gpr_strdup(error_details.c_str());
c_arg_->error_details->set_error_details(error_details.c_str());
}
void TlsCredentialReloadArg::OnCredentialReloadDoneCallback() {
@ -221,8 +220,7 @@ grpc_status_code TlsServerAuthorizationCheckArg::status() const {
}
grpc::string TlsServerAuthorizationCheckArg::error_details() const {
grpc::string cpp_error_details(c_arg_->error_details);
return cpp_error_details;
return c_arg_->error_details->error_details();
}
void TlsServerAuthorizationCheckArg::set_cb_user_data(void* cb_user_data) {
@ -254,7 +252,7 @@ void TlsServerAuthorizationCheckArg::set_status(grpc_status_code status) {
void TlsServerAuthorizationCheckArg::set_error_details(
const grpc::string& error_details) {
c_arg_->error_details = gpr_strdup(error_details.c_str());
c_arg_->error_details->set_error_details(error_details.c_str());
}
void TlsServerAuthorizationCheckArg::OnServerAuthorizationCheckDoneCallback() {

@ -1241,6 +1241,12 @@ void Server::Start(grpc::ServerCompletionQueue** cqs, size_t num_cqs) {
RegisterCallbackGenericService(unimplemented_service_.get());
}
#ifndef NDEBUG
for (size_t i = 0; i < num_cqs; i++) {
cq_list_.push_back(cqs[i]);
}
#endif
grpc_server_start(server_);
if (!has_async_generic_service_ && !has_callback_generic_service_) {
@ -1249,9 +1255,6 @@ void Server::Start(grpc::ServerCompletionQueue** cqs, size_t num_cqs) {
}
for (size_t i = 0; i < num_cqs; i++) {
#ifndef NDEBUG
cq_list_.push_back(cqs[i]);
#endif
if (cqs[i]->IsFrequentlyPolled()) {
new UnimplementedAsyncRequest(this, cqs[i]);
}

@ -1,15 +1,20 @@
-----BEGIN CERTIFICATE-----
MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla
Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0
YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT
BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7
+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu
g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd
Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV
HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau
sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m
oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG
Dfcog5wrJytaQ6UA0wE=
MIIDWjCCAkKgAwIBAgIUWrP0VvHcy+LP6UuYNtiL9gBhD5owDQYJKoZIhvcNAQEL
BQAwVjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTIw
MDMxNzE4NTk1MVoXDTMwMDMxNTE4NTk1MVowVjELMAkGA1UEBhMCQVUxEzARBgNV
BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0
ZDEPMA0GA1UEAwwGdGVzdGNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAsGL0oXflF0LzoM+Bh+qUU9yhqzw2w8OOX5mu/iNCyUOBrqaHi7mGHx73GD01
diNzCzvlcQqdNIH6NQSL7DTpBjca66jYT9u73vZe2MDrr1nVbuLvfu9850cdxiUO
Inv5xf8+sTHG0C+a+VAvMhsLiRjsq+lXKRJyk5zkbbsETybqpxoJ+K7CoSy3yc/k
QIY3TipwEtwkKP4hzyo6KiGd/DPexie4nBUInN3bS1BUeNZ5zeaIC2eg3bkeeW7c
qT55b+Yen6CxY0TEkzBK6AKt/WUialKMgT0wbTxRZO7kUCH3Sq6e/wXeFdJ+HvdV
LPlAg5TnMaNpRdQih/8nRFpsdwIDAQABoyAwHjAMBgNVHRMEBTADAQH/MA4GA1Ud
DwEB/wQEAwICBDANBgkqhkiG9w0BAQsFAAOCAQEAkTrKZjBrJXHps/HrjNCFPb5a
THuGPCSsepe1wkKdSp1h4HGRpLoCgcLysCJ5hZhRpHkRihhef+rFHEe60UePQO3S
CVTtdJB4CYWpcNyXOdqefrbJW5QNljxgi6Fhvs7JJkBqdXIkWXtFk2eRgOIP2Eo9
/OHQHlYnwZFrk6sp4wPyR+A95S0toZBcyDVz7u+hOW0pGK3wviOe9lvRgj/H3Pwt
bewb0l+MhRig0/DVHamyVxrDRbqInU1/GTNCwcZkXKYFWSf92U+kIcTth24Q1gcw
eZiLl5FfrWokUNytFElXob0V0a5/kbhiLc3yWmvWqHTpqCALbVyF+rKJo2f5Kw==
-----END CERTIFICATE-----

@ -1,16 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD
M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf
3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY
AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm
V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY
tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p
dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q
K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR
81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff
DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd
aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2
ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3
XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe
F98XJ7tIFfJq
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDnE443EknxvxBq
6+hvn/t09hl8hx366EBYvZmVM/NC+7igXRAjiJiA/mIaCvL3MS0Iz5hBLxSGICU+
WproA3GCIFITIwcf/ETyWj/5xpgZ4AKrLrjQmmX8mhwUajfF3UvwMJrCOVqPp67t
PtP+2kBXaqrXdvnvXR41FsIB8V7zIAuIZB6bHQhiGVlc1sgZYsE2EGG9WMmHtS86
qkAOTjG2XyjmPTGAwhGDpYkYrpzp99IiDh4/Veai81hn0ssQkbry0XRD/Ig3jcHh
23WiriPNJ0JsbgXUSLKRPZObA9VgOLy2aXoN84IMaeK3yy+cwSYG/99w93fUZJte
MXwz4oYZAgMBAAECggEBAIVn2Ncai+4xbH0OLWckabwgyJ4IM9rDc0LIU368O1kU
koais8qP9dujAWgfoh3sGh/YGgKn96VnsZjKHlyMgF+r4TaDJn3k2rlAOWcurGlj
1qaVlsV4HiEzp7pxiDmHhWvp4672Bb6iBG+bsjCUOEk/n9o9KhZzIBluRhtxCmw5
nw4Do7z00PTvN81260uPWSc04IrytvZUiAIx/5qxD72bij2xJ8t/I9GI8g4FtoVB
8pB6S/hJX1PZhh9VlU6Yk+TOfOVnbebG4W5138LkB835eqk3Zz0qsbc2euoi8Hxi
y1VGwQEmMQ63jXz4c6g+X55ifvUK9Jpn5E8pq+pMd7ECgYEA93lYq+Cr54K4ey5t
sWMa+ye5RqxjzgXj2Kqr55jb54VWG7wp2iGbg8FMlkQwzTJwebzDyCSatguEZLuB
gRGroRnsUOy9vBvhKPOch9bfKIl6qOgzMJB267fBVWx5ybnRbWN/I7RvMQf3k+9y
biCIVnxDLEEYyx7z85/5qxsXg/MCgYEA7wmWKtCTn032Hy9P8OL49T0X6Z8FlkDC
Rk42ygrc/MUbugq9RGUxcCxoImOG9JXUpEtUe31YDm2j+/nbvrjl6/bP2qWs0V7l
dTJl6dABP51pCw8+l4cWgBBX08Lkeen812AAFNrjmDCjX6rHjWHLJcpS18fnRRkP
V1d/AHWX7MMCgYEA6Gsw2guhp0Zf2GCcaNK5DlQab8OL4Hwrpttzo4kuTlwtqNKp
Q9H4al9qfF4Cr1TFya98+EVYf8yFRM3NLNjZpe3gwYf2EerlJj7VLcahw0KKzoN1
QBENfwgPLRk5sDkx9VhSmcfl/diLroZdpAwtv3vo4nEoxeuGFbKTGx3Qkf0CgYEA
xyR+dcb05Ygm3w4klHQTowQ10s1H80iaUcZBgQuR1ghEtDbUPZHsoR5t1xCB02ys
DgAwLv1bChIvxvH/L6KM8ovZ2LekBX4AviWxoBxJnfz/EVau98B0b1auRN6eSC83
FRuGldlSOW1z/nSh8ViizSYE5H5HX1qkXEippvFRE88CgYB3Bfu3YQY60ITWIShv
nNkdcbTT9eoP9suaRJjw92Ln+7ZpALYlQMKUZmJ/5uBmLs4RFwUTQruLOPL4yLTH
awADWUzs3IRr1fwn9E+zM8JVyKCnUEM3w4N5UZskGO2klashAd30hWO+knRv/y0r
uGIYs9Ek7YXlXIRVrzMwcsrt1w==
-----END PRIVATE KEY-----

@ -1,16 +1,22 @@
-----BEGIN CERTIFICATE-----
MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET
MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ
dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx
MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV
BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50
ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco
LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg
zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd
9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw
CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy
em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G
CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6
hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh
y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8
MIIDtDCCApygAwIBAgIUbJfTREJ6k6/+oInWhV1O1j3ZT0IwDQYJKoZIhvcNAQEL
BQAwVjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTIw
MDMxODAzMTA0MloXDTMwMDMxNjAzMTA0MlowZTELMAkGA1UEBhMCVVMxETAPBgNV
BAgMCElsbGlub2lzMRAwDgYDVQQHDAdDaGljYWdvMRUwEwYDVQQKDAxFeGFtcGxl
LCBDby4xGjAYBgNVBAMMESoudGVzdC5nb29nbGUuY29tMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEA5xOONxJJ8b8Qauvob5/7dPYZfIcd+uhAWL2ZlTPz
Qvu4oF0QI4iYgP5iGgry9zEtCM+YQS8UhiAlPlqa6ANxgiBSEyMHH/xE8lo/+caY
GeACqy640Jpl/JocFGo3xd1L8DCawjlaj6eu7T7T/tpAV2qq13b5710eNRbCAfFe
8yALiGQemx0IYhlZXNbIGWLBNhBhvVjJh7UvOqpADk4xtl8o5j0xgMIRg6WJGK6c
6ffSIg4eP1XmovNYZ9LLEJG68tF0Q/yIN43B4dt1oq4jzSdCbG4F1EiykT2TmwPV
YDi8tml6DfOCDGnit8svnMEmBv/fcPd31GSbXjF8M+KGGQIDAQABo2swaTAJBgNV
HRMEAjAAMAsGA1UdDwQEAwIF4DBPBgNVHREESDBGghAqLnRlc3QuZ29vZ2xlLmZy
ghh3YXRlcnpvb2kudGVzdC5nb29nbGUuYmWCEioudGVzdC55b3V0dWJlLmNvbYcE
wKgBAzANBgkqhkiG9w0BAQsFAAOCAQEAS8hDQA8PSgipgAml7Q3/djwQ644ghWQv
C2Kb+r30RCY1EyKNhnQnIIh/OUbBZvh0M0iYsy6xqXgfDhCB93AA6j0i5cS8fkhH
Jl4RK0tSkGQ3YNY4NzXwQP/vmUgfkw8VBAZ4Y4GKxppdATjffIW+srbAmdDruIRM
wPeikgOoRrXf0LA1fi4TqxARzeRwenQpayNfGHTvVF9aJkl8HoaMunTAdG5pIVcr
9GKi/gEMpXUJbbVv3U5frX1Wo4CFo+rZWJ/LyCMeb0jciNLxSdMwj/E/ZuExlyeZ
gc9ctPjSMvgSyXEKv6Vwobleeg88V2ZgzenziORoWj4KszG/lbQZvg==
-----END CERTIFICATE-----

@ -137,7 +137,7 @@
<ProtoRoot>%(RelativeDir)</ProtoRoot>
</Protobuf_Compile>
<!-- Remove files not for compile. -->
<Protobuf_Compile Remove="@(Protobuf_Compile)" Condition=" !%(ProtoCompile) " />
<Protobuf_Compile Remove="@(Protobuf_Compile)" Condition=" '%(ProtoCompile)' != 'true' " />
<!-- Ensure invariant Source=%(Identity). -->
<Protobuf_Compile>
<Source>%(Identity)</Source>

@ -1,7 +1,7 @@
[![Cocoapods](https://img.shields.io/cocoapods/v/gRPC.svg)](https://cocoapods.org/pods/gRPC)
# gRPC Objective-C with CFStream
gRPC Objective-C library now provides the option to use Apple's CFStream API (rather than TCP
gRPC now provides the option to use Apple's CFStream API (rather than TCP
sockets) for networking. Using CFStream resolves a bunch of network connectivity transition issues
(see the [doc](https://github.com/grpc/grpc/blob/master/src/objective-c/NetworkTransitionBehavior.md)
for more information).
@ -14,8 +14,11 @@ As of v1.21.0, CFStream integration is now the default networking stack being used by gRPC
Objective-C on iOS layer. You get to use it automatically without special configuration needed. See
below on how to disable CFStream in case of problem.
As of v1.23.0, CFStream is enabled by default on iOS for all wrapped languages. See below on how to
disable CFStream in case of a problem.
## Usage
If you use gRPC Objective-C library on iOS, CFStream is on automatically. If you use it on other
If you use gRPC on iOS, CFStream is on automatically. If you use it on other
platforms, you can turn it on with the macro `GRPC_CFSTREAM=1` for the pods 'gRPC-Core' and 'gRPC'. If
you run into a problem and want to disable CFStream on iOS, you can set the environment variable
"grpc\_cfstream=0".

@ -39,6 +39,7 @@
#define TEST_TIMEOUT 32
static const int kTestRetries = 3;
extern const char *kCFStreamVarName;
// Convenience constructors for the generated proto messages:
@ -382,6 +383,36 @@ static dispatch_once_t initGlobalInterceptorFactory;
RMTTestService *_service;
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Warc-performSelector-leaks"
- (void)retriableTest:(SEL)selector retries:(int)retries timeout:(NSTimeInterval)timeout {
for (int i = 0; i < retries; i++) {
NSDate *waitUntil = [[NSDate date] dateByAddingTimeInterval:timeout];
NSCondition *cv = [[NSCondition alloc] init];
__block BOOL done = NO;
[cv lock];
dispatch_async(dispatch_get_global_queue(QOS_CLASS_BACKGROUND, 0), ^{
[self performSelector:selector];
[cv lock];
done = YES;
[cv signal];
[cv unlock];
});
while (!done && [waitUntil timeIntervalSinceNow] > 0) {
[cv waitUntilDate:waitUntil];
}
if (done) {
[cv unlock];
break;
} else {
[cv unlock];
[self tearDown];
[self setUp];
}
}
}
#pragma clang diagnostic pop
+ (XCTestSuite *)defaultTestSuite {
if (self == [InteropTests class]) {
return [XCTestSuite testSuiteWithName:@"InteropTestsEmptySuite"];
@ -721,14 +752,13 @@ static dispatch_once_t initGlobalInterceptorFactory;
[self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
}
- (void)testConcurrentRPCsWithErrors {
NSMutableArray *completeExpectations = [NSMutableArray array];
int num_rpcs = 10;
for (int i = 0; i < num_rpcs; ++i) {
[completeExpectations
addObject:[self expectationWithDescription:
[NSString stringWithFormat:@"Received trailer for RPC %d", i]]];
- (void)concurrentRPCsWithErrors {
const int kNumRpcs = 10;
__block int completedCallCount = 0;
NSCondition *cv = [[NSCondition alloc] init];
NSDate *waitUntil = [[NSDate date] dateByAddingTimeInterval:TEST_TIMEOUT];
[cv lock];
for (int i = 0; i < kNumRpcs; ++i) {
RMTSimpleRequest *request = [RMTSimpleRequest message];
request.responseType = RMTPayloadType_Compressable;
request.responseSize = 314159;
@ -739,20 +769,33 @@ static dispatch_once_t initGlobalInterceptorFactory;
request.responseStatus.code = GRPC_STATUS_CANCELLED;
}
[_service unaryCallWithRequest:request
handler:^(RMTSimpleResponse *response, NSError *error) {
if (error == nil) {
RMTSimpleResponse *expectedResponse = [RMTSimpleResponse message];
expectedResponse.payload.type = RMTPayloadType_Compressable;
expectedResponse.payload.body =
[NSMutableData dataWithLength:314159];
XCTAssertEqualObjects(response, expectedResponse);
}
[completeExpectations[i] fulfill];
}];
GRPCProtoCall *call = [_service
RPCToUnaryCallWithRequest:request
handler:^(RMTSimpleResponse *response, NSError *error) {
if (error == nil) {
RMTSimpleResponse *expectedResponse = [RMTSimpleResponse message];
expectedResponse.payload.type = RMTPayloadType_Compressable;
expectedResponse.payload.body = [NSMutableData dataWithLength:314159];
XCTAssertEqualObjects(response, expectedResponse);
}
// DEBUG
[cv lock];
if (++completedCallCount == kNumRpcs) {
[cv signal];
}
[cv unlock];
}];
[call setResponseDispatchQueue:dispatch_queue_create(NULL, DISPATCH_QUEUE_SERIAL)];
[call start];
}
while (completedCallCount < kNumRpcs && [waitUntil timeIntervalSinceNow] > 0) {
[cv waitUntilDate:waitUntil];
}
[cv unlock];
}
[self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
- (void)testConcurrentRPCsWithErrors {
[self retriableTest:@selector(concurrentRPCsWithErrors) retries:kTestRetries timeout:10];
}
- (void)testPacketCoalescing {

@ -1,15 +1,20 @@
-----BEGIN CERTIFICATE-----
MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla
Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0
YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT
BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7
+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu
g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd
Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV
HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau
sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m
oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG
Dfcog5wrJytaQ6UA0wE=
MIIDWjCCAkKgAwIBAgIUWrP0VvHcy+LP6UuYNtiL9gBhD5owDQYJKoZIhvcNAQEL
BQAwVjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTIw
MDMxNzE4NTk1MVoXDTMwMDMxNTE4NTk1MVowVjELMAkGA1UEBhMCQVUxEzARBgNV
BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0
ZDEPMA0GA1UEAwwGdGVzdGNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAsGL0oXflF0LzoM+Bh+qUU9yhqzw2w8OOX5mu/iNCyUOBrqaHi7mGHx73GD01
diNzCzvlcQqdNIH6NQSL7DTpBjca66jYT9u73vZe2MDrr1nVbuLvfu9850cdxiUO
Inv5xf8+sTHG0C+a+VAvMhsLiRjsq+lXKRJyk5zkbbsETybqpxoJ+K7CoSy3yc/k
QIY3TipwEtwkKP4hzyo6KiGd/DPexie4nBUInN3bS1BUeNZ5zeaIC2eg3bkeeW7c
qT55b+Yen6CxY0TEkzBK6AKt/WUialKMgT0wbTxRZO7kUCH3Sq6e/wXeFdJ+HvdV
LPlAg5TnMaNpRdQih/8nRFpsdwIDAQABoyAwHjAMBgNVHRMEBTADAQH/MA4GA1Ud
DwEB/wQEAwICBDANBgkqhkiG9w0BAQsFAAOCAQEAkTrKZjBrJXHps/HrjNCFPb5a
THuGPCSsepe1wkKdSp1h4HGRpLoCgcLysCJ5hZhRpHkRihhef+rFHEe60UePQO3S
CVTtdJB4CYWpcNyXOdqefrbJW5QNljxgi6Fhvs7JJkBqdXIkWXtFk2eRgOIP2Eo9
/OHQHlYnwZFrk6sp4wPyR+A95S0toZBcyDVz7u+hOW0pGK3wviOe9lvRgj/H3Pwt
bewb0l+MhRig0/DVHamyVxrDRbqInU1/GTNCwcZkXKYFWSf92U+kIcTth24Q1gcw
eZiLl5FfrWokUNytFElXob0V0a5/kbhiLc3yWmvWqHTpqCALbVyF+rKJo2f5Kw==
-----END CERTIFICATE-----

@ -86,7 +86,7 @@ if test "$PHP_GRPC" != "no"; then
PHP_NEW_EXTENSION(grpc, byte_buffer.c call.c call_credentials.c channel.c \
channel_credentials.c completion_queue.c timeval.c server.c \
server_credentials.c php_grpc.c, $ext_shared, , -Wall -Werror -std=c11)
server_credentials.c php_grpc.c, $ext_shared, , -Wall -Werror -std=c11 -DGRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK=1)
fi
if test "$PHP_COVERAGE" = "yes"; then

@ -1,15 +1,20 @@
-----BEGIN CERTIFICATE-----
MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla
Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0
YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT
BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7
+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu
g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd
Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV
HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau
sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m
oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG
Dfcog5wrJytaQ6UA0wE=
MIIDWjCCAkKgAwIBAgIUWrP0VvHcy+LP6UuYNtiL9gBhD5owDQYJKoZIhvcNAQEL
BQAwVjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTIw
MDMxNzE4NTk1MVoXDTMwMDMxNTE4NTk1MVowVjELMAkGA1UEBhMCQVUxEzARBgNV
BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0
ZDEPMA0GA1UEAwwGdGVzdGNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAsGL0oXflF0LzoM+Bh+qUU9yhqzw2w8OOX5mu/iNCyUOBrqaHi7mGHx73GD01
diNzCzvlcQqdNIH6NQSL7DTpBjca66jYT9u73vZe2MDrr1nVbuLvfu9850cdxiUO
Inv5xf8+sTHG0C+a+VAvMhsLiRjsq+lXKRJyk5zkbbsETybqpxoJ+K7CoSy3yc/k
QIY3TipwEtwkKP4hzyo6KiGd/DPexie4nBUInN3bS1BUeNZ5zeaIC2eg3bkeeW7c
qT55b+Yen6CxY0TEkzBK6AKt/WUialKMgT0wbTxRZO7kUCH3Sq6e/wXeFdJ+HvdV
LPlAg5TnMaNpRdQih/8nRFpsdwIDAQABoyAwHjAMBgNVHRMEBTADAQH/MA4GA1Ud
DwEB/wQEAwICBDANBgkqhkiG9w0BAQsFAAOCAQEAkTrKZjBrJXHps/HrjNCFPb5a
THuGPCSsepe1wkKdSp1h4HGRpLoCgcLysCJ5hZhRpHkRihhef+rFHEe60UePQO3S
CVTtdJB4CYWpcNyXOdqefrbJW5QNljxgi6Fhvs7JJkBqdXIkWXtFk2eRgOIP2Eo9
/OHQHlYnwZFrk6sp4wPyR+A95S0toZBcyDVz7u+hOW0pGK3wviOe9lvRgj/H3Pwt
bewb0l+MhRig0/DVHamyVxrDRbqInU1/GTNCwcZkXKYFWSf92U+kIcTth24Q1gcw
eZiLl5FfrWokUNytFElXob0V0a5/kbhiLc3yWmvWqHTpqCALbVyF+rKJo2f5Kw==
-----END CERTIFICATE-----

@ -1,16 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD
M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf
3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY
AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm
V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY
tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p
dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q
K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR
81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff
DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd
aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2
ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3
XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe
F98XJ7tIFfJq
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDnE443EknxvxBq
6+hvn/t09hl8hx366EBYvZmVM/NC+7igXRAjiJiA/mIaCvL3MS0Iz5hBLxSGICU+
WproA3GCIFITIwcf/ETyWj/5xpgZ4AKrLrjQmmX8mhwUajfF3UvwMJrCOVqPp67t
PtP+2kBXaqrXdvnvXR41FsIB8V7zIAuIZB6bHQhiGVlc1sgZYsE2EGG9WMmHtS86
qkAOTjG2XyjmPTGAwhGDpYkYrpzp99IiDh4/Veai81hn0ssQkbry0XRD/Ig3jcHh
23WiriPNJ0JsbgXUSLKRPZObA9VgOLy2aXoN84IMaeK3yy+cwSYG/99w93fUZJte
MXwz4oYZAgMBAAECggEBAIVn2Ncai+4xbH0OLWckabwgyJ4IM9rDc0LIU368O1kU
koais8qP9dujAWgfoh3sGh/YGgKn96VnsZjKHlyMgF+r4TaDJn3k2rlAOWcurGlj
1qaVlsV4HiEzp7pxiDmHhWvp4672Bb6iBG+bsjCUOEk/n9o9KhZzIBluRhtxCmw5
nw4Do7z00PTvN81260uPWSc04IrytvZUiAIx/5qxD72bij2xJ8t/I9GI8g4FtoVB
8pB6S/hJX1PZhh9VlU6Yk+TOfOVnbebG4W5138LkB835eqk3Zz0qsbc2euoi8Hxi
y1VGwQEmMQ63jXz4c6g+X55ifvUK9Jpn5E8pq+pMd7ECgYEA93lYq+Cr54K4ey5t
sWMa+ye5RqxjzgXj2Kqr55jb54VWG7wp2iGbg8FMlkQwzTJwebzDyCSatguEZLuB
gRGroRnsUOy9vBvhKPOch9bfKIl6qOgzMJB267fBVWx5ybnRbWN/I7RvMQf3k+9y
biCIVnxDLEEYyx7z85/5qxsXg/MCgYEA7wmWKtCTn032Hy9P8OL49T0X6Z8FlkDC
Rk42ygrc/MUbugq9RGUxcCxoImOG9JXUpEtUe31YDm2j+/nbvrjl6/bP2qWs0V7l
dTJl6dABP51pCw8+l4cWgBBX08Lkeen812AAFNrjmDCjX6rHjWHLJcpS18fnRRkP
V1d/AHWX7MMCgYEA6Gsw2guhp0Zf2GCcaNK5DlQab8OL4Hwrpttzo4kuTlwtqNKp
Q9H4al9qfF4Cr1TFya98+EVYf8yFRM3NLNjZpe3gwYf2EerlJj7VLcahw0KKzoN1
QBENfwgPLRk5sDkx9VhSmcfl/diLroZdpAwtv3vo4nEoxeuGFbKTGx3Qkf0CgYEA
xyR+dcb05Ygm3w4klHQTowQ10s1H80iaUcZBgQuR1ghEtDbUPZHsoR5t1xCB02ys
DgAwLv1bChIvxvH/L6KM8ovZ2LekBX4AviWxoBxJnfz/EVau98B0b1auRN6eSC83
FRuGldlSOW1z/nSh8ViizSYE5H5HX1qkXEippvFRE88CgYB3Bfu3YQY60ITWIShv
nNkdcbTT9eoP9suaRJjw92Ln+7ZpALYlQMKUZmJ/5uBmLs4RFwUTQruLOPL4yLTH
awADWUzs3IRr1fwn9E+zM8JVyKCnUEM3w4N5UZskGO2klashAd30hWO+knRv/y0r
uGIYs9Ek7YXlXIRVrzMwcsrt1w==
-----END PRIVATE KEY-----

@ -1,16 +1,22 @@
-----BEGIN CERTIFICATE-----
MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET
MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ
dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx
MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV
BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50
ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco
LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg
zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd
9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw
CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy
em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G
CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6
hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh
y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8
MIIDtDCCApygAwIBAgIUbJfTREJ6k6/+oInWhV1O1j3ZT0IwDQYJKoZIhvcNAQEL
BQAwVjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTIw
MDMxODAzMTA0MloXDTMwMDMxNjAzMTA0MlowZTELMAkGA1UEBhMCVVMxETAPBgNV
BAgMCElsbGlub2lzMRAwDgYDVQQHDAdDaGljYWdvMRUwEwYDVQQKDAxFeGFtcGxl
LCBDby4xGjAYBgNVBAMMESoudGVzdC5nb29nbGUuY29tMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEA5xOONxJJ8b8Qauvob5/7dPYZfIcd+uhAWL2ZlTPz
Qvu4oF0QI4iYgP5iGgry9zEtCM+YQS8UhiAlPlqa6ANxgiBSEyMHH/xE8lo/+caY
GeACqy640Jpl/JocFGo3xd1L8DCawjlaj6eu7T7T/tpAV2qq13b5710eNRbCAfFe
8yALiGQemx0IYhlZXNbIGWLBNhBhvVjJh7UvOqpADk4xtl8o5j0xgMIRg6WJGK6c
6ffSIg4eP1XmovNYZ9LLEJG68tF0Q/yIN43B4dt1oq4jzSdCbG4F1EiykT2TmwPV
YDi8tml6DfOCDGnit8svnMEmBv/fcPd31GSbXjF8M+KGGQIDAQABo2swaTAJBgNV
HRMEAjAAMAsGA1UdDwQEAwIF4DBPBgNVHREESDBGghAqLnRlc3QuZ29vZ2xlLmZy
ghh3YXRlcnpvb2kudGVzdC5nb29nbGUuYmWCEioudGVzdC55b3V0dWJlLmNvbYcE
wKgBAzANBgkqhkiG9w0BAQsFAAOCAQEAS8hDQA8PSgipgAml7Q3/djwQ644ghWQv
C2Kb+r30RCY1EyKNhnQnIIh/OUbBZvh0M0iYsy6xqXgfDhCB93AA6j0i5cS8fkhH
Jl4RK0tSkGQ3YNY4NzXwQP/vmUgfkw8VBAZ4Y4GKxppdATjffIW+srbAmdDruIRM
wPeikgOoRrXf0LA1fi4TqxARzeRwenQpayNfGHTvVF9aJkl8HoaMunTAdG5pIVcr
9GKi/gEMpXUJbbVv3U5frX1Wo4CFo+rZWJ/LyCMeb0jciNLxSdMwj/E/ZuExlyeZ
gc9ctPjSMvgSyXEKv6Vwobleeg88V2ZgzenziORoWj4KszG/lbQZvg==
-----END CERTIFICATE-----

@ -628,7 +628,7 @@ class AuthMetadataPlugin(six.with_metaclass(abc.ABCMeta)):
def __call__(self, context, callback):
"""Implements authentication by passing metadata to a callback.
Implementations of this method must not block.
This method will be invoked asynchronously in a separate thread.
Args:
context: An AuthMetadataContext providing information on the RPC that
@ -1076,6 +1076,14 @@ class Channel(six.with_metaclass(abc.ABCMeta)):
"""
raise NotImplementedError()
def __enter__(self):
"""Enters the runtime context related to the channel object."""
raise NotImplementedError()
def __exit__(self, exc_type, exc_val, exc_tb):
"""Exits the runtime context related to the channel object."""
raise NotImplementedError()
########################## Service-Side Context ##############################
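
The `__enter__`/`__exit__` additions above let a `grpc.Channel` be used as a context manager. A minimal usage sketch (the target address is illustrative; the concrete channel implementations close the channel when the block exits):

import grpc

# Illustrative target; exiting the `with` block closes the channel.
with grpc.insecure_channel('localhost:50051') as channel:
    # build stubs and issue RPCs against `channel` here
    grpc.channel_ready_future(channel).result(timeout=5)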

@ -14,7 +14,6 @@
"""GRPCAuthMetadataPlugins for standard authentication."""
import inspect
from concurrent import futures
import grpc
@ -24,43 +23,29 @@ def _sign_request(callback, token, error):
callback(metadata, error)
def _create_get_token_callback(callback):
def get_token_callback(future):
try:
access_token = future.result().access_token
except Exception as exception: # pylint: disable=broad-except
_sign_request(callback, None, exception)
else:
_sign_request(callback, access_token, None)
return get_token_callback
class GoogleCallCredentials(grpc.AuthMetadataPlugin):
"""Metadata wrapper for GoogleCredentials from the oauth2client library."""
def __init__(self, credentials):
self._credentials = credentials
self._pool = futures.ThreadPoolExecutor(max_workers=1)
# Hack to determine if these are JWT creds and we need to pass
# additional_claims when getting a token
self._is_jwt = 'additional_claims' in inspect.getargspec( # pylint: disable=deprecated-method
credentials.get_access_token).args
def __call__(self, context, callback):
# MetadataPlugins cannot block (see grpc.beta.interfaces.py)
if self._is_jwt:
future = self._pool.submit(
self._credentials.get_access_token,
additional_claims={'aud': context.service_url})
try:
if self._is_jwt:
access_token = self._credentials.get_access_token(
additional_claims={
'aud': context.service_url
}).access_token
else:
access_token = self._credentials.get_access_token().access_token
except Exception as exception: # pylint: disable=broad-except
_sign_request(callback, None, exception)
else:
future = self._pool.submit(self._credentials.get_access_token)
future.add_done_callback(_create_get_token_callback(callback))
def __del__(self):
self._pool.shutdown(wait=False)
_sign_request(callback, access_token, None)
class AccessTokenAuthMetadataPlugin(grpc.AuthMetadataPlugin):
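
For context on the plugin contract that `_auth.py` now satisfies synchronously: an `AuthMetadataPlugin` receives a context and a callback, and must hand metadata to the callback without blocking. A minimal sketch using a static token (token value and target are illustrative):

import grpc

class _StaticTokenPlugin(grpc.AuthMetadataPlugin):
    def __init__(self, token):
        self._token = token

    def __call__(self, context, callback):
        # Invoke the callback promptly; plugins must not block.
        callback((('authorization', 'Bearer ' + self._token),), None)

call_creds = grpc.metadata_call_credentials(_StaticTokenPlugin('fake-token'))
channel_creds = grpc.composite_channel_credentials(
    grpc.ssl_channel_credentials(), call_creds)
channel = grpc.secure_channel('example.com:443', channel_creds)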

@ -2,6 +2,13 @@ package(default_visibility = ["//visibility:public"])
load("//bazel:cython_library.bzl", "pyx_library")
genrule(
name = "copy_roots_pem",
srcs = ["//:etc/roots.pem"],
outs = ["_credentials/roots.pem"],
cmd = "cp $(SRCS) $(@)",
)
pyx_library(
name = "cygrpc",
srcs = glob([
@ -9,6 +16,7 @@ pyx_library(
"cygrpc.pxd",
"cygrpc.pyx",
]),
data = [":copy_roots_pem"],
deps = [
"//:grpc",
],

@ -115,7 +115,7 @@ cdef class _AioCall(GrpcCallWrapper):
self._channel.channel,
NULL,
_EMPTY_MASK,
self._channel.cq.c_ptr(),
global_completion_queue(),
method_slice,
NULL,
c_deadline,
@ -125,7 +125,7 @@ cdef class _AioCall(GrpcCallWrapper):
if credentials is not None:
set_credentials_error = grpc_call_set_credentials(self.call, credentials.c())
if set_credentials_error != GRPC_CALL_OK:
raise Exception("Credentials couldn't have been set")
raise InternalError("Credentials couldn't have been set: {0}".format(set_credentials_error))
grpc_slice_unref(method_slice)
@ -178,7 +178,7 @@ cdef class _AioCall(GrpcCallWrapper):
def cancel(self, str details):
"""Cancels the RPC in Core with given RPC status.
The abstractions above must invoke this method to put Core objects into
the proper state.
"""
@ -209,7 +209,7 @@ cdef class _AioCall(GrpcCallWrapper):
def done(self):
"""Returns if the RPC call has finished.
Checks if the status has been provided, either
because the RPC finished or because it was cancelled.
@ -220,7 +220,7 @@ cdef class _AioCall(GrpcCallWrapper):
def cancelled(self):
"""Returns if the RPC was cancelled.
Returns:
True if the RPC was cancelled.
"""
@ -231,7 +231,7 @@ cdef class _AioCall(GrpcCallWrapper):
async def status(self):
"""Returns the status of the RPC call.
It returns the finished status of the RPC. If the RPC
has not finished yet this function will wait until the RPC
gets finished.
@ -254,7 +254,7 @@ cdef class _AioCall(GrpcCallWrapper):
async def initial_metadata(self):
"""Returns the initial metadata of the RPC call.
If the initial metadata has not been received yet this function will
wait until the RPC gets finished.
@ -286,7 +286,7 @@ cdef class _AioCall(GrpcCallWrapper):
bytes request,
tuple outbound_initial_metadata):
"""Performs a unary unary RPC.
Args:
request: the serialized requests in bytes.
outbound_initial_metadata: optional outbound metadata.
@ -420,7 +420,7 @@ cdef class _AioCall(GrpcCallWrapper):
tuple outbound_initial_metadata,
object metadata_sent_observer):
"""Actual implementation of the complete unary-stream call.
Needs to pay extra attention to the raise mechanism. If we want to
propagate the final status exception, then we have to raise it.
Otherwise, it would end normally and raise `StopAsyncIteration()`.
@ -490,7 +490,7 @@ cdef class _AioCall(GrpcCallWrapper):
outbound_initial_metadata,
self._send_initial_metadata_flags,
self._loop)
# Notify upper level that sending messages are allowed now.
# Notify upper level that sending messages are allowed now.
metadata_sent_observer()
# Receives initial metadata.

@ -35,6 +35,7 @@ cdef struct CallbackContext:
# management.
grpc_experimental_completion_queue_functor functor
cpython.PyObject *waiter
cpython.PyObject *loop
cpython.PyObject *failure_handler
cpython.PyObject *callback_wrapper

@ -32,9 +32,10 @@ cdef class CallbackFailureHandler:
cdef class CallbackWrapper:
def __cinit__(self, object future, CallbackFailureHandler failure_handler):
def __cinit__(self, object future, object loop, CallbackFailureHandler failure_handler):
self.context.functor.functor_run = self.functor_run
self.context.waiter = <cpython.PyObject*>future
self.context.loop = <cpython.PyObject*>loop
self.context.failure_handler = <cpython.PyObject*>failure_handler
self.context.callback_wrapper = <cpython.PyObject*>self
# NOTE(lidiz) Not using a list here, because this class is critical in
@ -69,7 +70,8 @@ cdef CallbackFailureHandler CQ_SHUTDOWN_FAILURE_HANDLER = CallbackFailureHandler
InternalError)
class ExecuteBatchError(Exception): pass
class ExecuteBatchError(InternalError):
"""Raised when execute batch returns a failure from Core."""
async def execute_batch(GrpcCallWrapper grpc_call_wrapper,
@ -82,6 +84,7 @@ async def execute_batch(GrpcCallWrapper grpc_call_wrapper,
cdef object future = loop.create_future()
cdef CallbackWrapper wrapper = CallbackWrapper(
future,
loop,
CallbackFailureHandler('execute_batch', operations, ExecuteBatchError))
cdef grpc_call_error error = grpc_call_start_batch(
grpc_call_wrapper.call,
@ -126,7 +129,7 @@ async def _receive_message(GrpcCallWrapper grpc_call_wrapper,
# the callback (e.g. cancelled).
#
# Since they all indicates finish, they are better be merged.
_LOGGER.debug(e)
_LOGGER.debug('Failed to receive any message from Core')
return receive_op.message()

@ -21,7 +21,7 @@ cdef enum AioChannelStatus:
cdef class AioChannel:
cdef:
grpc_channel * channel
BaseCompletionQueue cq
object loop
bytes _target
AioChannelStatus _status
bint _is_secure

@ -27,26 +27,31 @@ cdef CallbackFailureHandler _WATCH_CONNECTIVITY_FAILURE_HANDLER = CallbackFailur
cdef class AioChannel:
def __cinit__(self, bytes target, tuple options, ChannelCredentials credentials, object loop):
init_grpc_aio()
if options is None:
options = ()
cdef _ChannelArgs channel_args = _ChannelArgs(options)
self._target = target
self.cq = create_completion_queue()
self.loop = loop
self._status = AIO_CHANNEL_STATUS_READY
if credentials is None:
self._is_secure = False
self.channel = grpc_insecure_channel_create(
<char *>target,
channel_args.c_args(),
NULL)
else:
self._is_secure = True
self.channel = grpc_secure_channel_create(
<grpc_channel_credentials *> credentials.c(),
<char *>target,
channel_args.c_args(),
NULL)
def __dealloc__(self):
shutdown_grpc_aio()
def __repr__(self):
class_name = self.__class__.__name__
id_ = id(self)
@ -78,12 +83,13 @@ cdef class AioChannel:
cdef object future = self.loop.create_future()
cdef CallbackWrapper wrapper = CallbackWrapper(
future,
self.loop,
_WATCH_CONNECTIVITY_FAILURE_HANDLER)
grpc_channel_watch_connectivity_state(
self.channel,
last_observed_state,
c_deadline,
self.cq.c_ptr(),
global_completion_queue(),
wrapper.c_functor())
try:
@ -111,13 +117,16 @@ cdef class AioChannel:
"""Assembles a Cython Call object.
Returns:
The _AioCall object.
An _AioCall object.
"""
if self.closed():
raise UsageError('Channel is closed.')
cdef CallCredentials cython_call_credentials
if python_call_credentials is not None:
if not self._is_secure:
raise UsageError("Call credentials are only valid on secure channels")
cython_call_credentials = python_call_credentials._credentials
else:
cython_call_credentials = None

@ -99,3 +99,16 @@ class AbortError(BaseError):
class InternalError(BaseError):
"""Raised upon unexpected errors in native code."""
def schedule_coro_threadsafe(object coro, object loop):
try:
return loop.create_task(coro)
except RuntimeError as runtime_error:
if 'Non-thread-safe operation' in str(runtime_error):
return asyncio.run_coroutine_threadsafe(
coro,
loop,
)
else:
raise
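
The helper above exists because `loop.create_task` is only safe from the loop's own thread; when asyncio rejects the call with its 'Non-thread-safe operation' RuntimeError, the coroutine is handed over via `asyncio.run_coroutine_threadsafe` instead. A small self-contained sketch of the cross-thread path (not gRPC API):

import asyncio
import threading

async def work(tag):
    print('ran', tag, 'on', threading.current_thread().name)

async def main():
    loop = asyncio.get_running_loop()
    # From a foreign thread, run_coroutine_threadsafe schedules the
    # coroutine back onto this loop instead of calling create_task.
    t = threading.Thread(
        target=lambda: asyncio.run_coroutine_threadsafe(work('bg'), loop))
    t.start()
    t.join()
    await asyncio.sleep(0.1)  # let the scheduled coroutine run

asyncio.run(main())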

@ -12,6 +12,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(lidiz) Unfortunately, we can't use "cimport" here because Cython
# links it with exception handling. It introduces new dependencies.
cdef extern from "<queue>" namespace "std" nogil:
cdef cppclass queue[T]:
queue()
bint empty()
T& front()
void pop()
void push(T&)
size_t size()
cdef extern from "<mutex>" namespace "std" nogil:
cdef cppclass mutex:
mutex()
void lock()
void unlock()
ctypedef queue[grpc_event] cpp_event_queue
IF UNAME_SYSNAME == "Windows":
cdef extern from "winsock2.h" nogil:
ctypedef uint32_t WIN_SOCKET "SOCKET"
WIN_SOCKET win_socket "socket" (int af, int type, int protocol)
int win_socket_send "send" (WIN_SOCKET s, const char *buf, int len, int flags)
cdef void _unified_socket_write(int fd) nogil
cdef class BaseCompletionQueue:
cdef grpc_completion_queue *_cq
@ -19,12 +51,19 @@ cdef class BaseCompletionQueue:
cdef class PollerCompletionQueue(BaseCompletionQueue):
cdef bint _shutdown
cdef object _shutdown_completed
cdef cpp_event_queue _queue
cdef mutex _queue_mutex
cdef object _poller_thread
cdef int _write_fd
cdef object _read_socket
cdef object _write_socket
cdef object _loop
cdef void _poll(self) except *
cdef void _poll(self) nogil
cdef shutdown(self)
cdef class CallbackCompletionQueue(BaseCompletionQueue):
cdef object _shutdown_completed # asyncio.Future
cdef CallbackWrapper _wrapper
cdef object _loop

@ -12,18 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
cdef gpr_timespec _GPR_INF_FUTURE = gpr_inf_future(GPR_CLOCK_REALTIME)
IF UNAME_SYSNAME == "Windows":
cdef void _unified_socket_write(int fd) nogil:
win_socket_send(<WIN_SOCKET>fd, b"1", 1, 0)
ELSE:
from posix cimport unistd
cdef void _unified_socket_write(int fd) nogil:
unistd.write(fd, b"1", 1)
def _handle_callback_wrapper(CallbackWrapper callback_wrapper, int success):
CallbackWrapper.functor_run(callback_wrapper.c_functor(), success)
cdef class BaseCompletionQueue:
async def shutdown(self):
raise NotImplementedError()
cdef grpc_completion_queue* c_ptr(self):
return self._cq
@ -31,50 +40,87 @@ cdef class BaseCompletionQueue:
cdef class PollerCompletionQueue(BaseCompletionQueue):
def __cinit__(self):
self._loop = asyncio.get_event_loop()
self._cq = grpc_completion_queue_create_for_next(NULL)
self._shutdown = False
self._shutdown_completed = asyncio.get_event_loop().create_future()
self._poller_thread = threading.Thread(target=self._poll_wrapper, daemon=True)
self._poller_thread.start()
cdef void _poll(self) except *:
self._read_socket, self._write_socket = socket.socketpair()
self._write_fd = self._write_socket.fileno()
self._loop.add_reader(self._read_socket, self._handle_events)
self._queue = cpp_event_queue()
cdef void _poll(self) nogil:
cdef grpc_event event
cdef CallbackContext *context
while not self._shutdown:
with nogil:
event = grpc_completion_queue_next(self._cq,
event = grpc_completion_queue_next(self._cq,
_GPR_INF_FUTURE,
NULL)
if event.type == GRPC_QUEUE_TIMEOUT:
raise AssertionError("Core should not return timeout error!")
with gil:
raise AssertionError("Core should not return GRPC_QUEUE_TIMEOUT!")
elif event.type == GRPC_QUEUE_SHUTDOWN:
self._shutdown = True
aio_loop_call_soon_threadsafe(self._shutdown_completed.set_result, None)
else:
context = <CallbackContext *>event.tag
aio_loop_call_soon_threadsafe(
_handle_callback_wrapper,
<CallbackWrapper>context.callback_wrapper,
event.success)
self._queue_mutex.lock()
self._queue.push(event)
self._queue_mutex.unlock()
_unified_socket_write(self._write_fd)
def _poll_wrapper(self):
self._poll()
with nogil:
self._poll()
async def shutdown(self):
cdef shutdown(self):
self._loop.remove_reader(self._read_socket)
# TODO(https://github.com/grpc/grpc/issues/22365) perform graceful shutdown
grpc_completion_queue_shutdown(self._cq)
await self._shutdown_completed
grpc_completion_queue_destroy(self._cq)
self._poller_thread.join()
def _handle_events(self):
cdef bytes data = self._read_socket.recv(1)
cdef grpc_event event
cdef CallbackContext *context
while True:
self._queue_mutex.lock()
if self._queue.empty():
self._queue_mutex.unlock()
break
else:
event = self._queue.front()
self._queue.pop()
self._queue_mutex.unlock()
context = <CallbackContext *>event.tag
loop = <object>context.loop
if loop is self._loop:
# Executes callbacks: complete the future
CallbackWrapper.functor_run(
<grpc_experimental_completion_queue_functor *>event.tag,
event.success
)
else:
loop.call_soon_threadsafe(
_handle_callback_wrapper,
<CallbackWrapper>context.callback_wrapper,
event.success
)
cdef class CallbackCompletionQueue(BaseCompletionQueue):
def __cinit__(self):
self._shutdown_completed = grpc_aio_loop().create_future()
self._loop = asyncio.get_event_loop()
self._shutdown_completed = self._loop.create_future()
self._wrapper = CallbackWrapper(
self._shutdown_completed,
self._loop,
CQ_SHUTDOWN_FAILURE_HANDLER)
self._cq = grpc_completion_queue_create_for_callback(
self._wrapper.c_functor(),
@ -85,12 +131,3 @@ cdef class CallbackCompletionQueue(BaseCompletionQueue):
grpc_completion_queue_shutdown(self._cq)
await self._shutdown_completed
grpc_completion_queue_destroy(self._cq)
cdef BaseCompletionQueue create_completion_queue():
if grpc_aio_engine is AsyncIOEngine.CUSTOM_IO_MANAGER:
return CallbackCompletionQueue()
elif grpc_aio_engine is AsyncIOEngine.POLLER:
return PollerCompletionQueue()
else:
raise ValueError('Unsupported engine type [%s]' % grpc_aio_engine)
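The poller design above is a classic wake-the-loop mechanism: a dedicated thread blocks in grpc_completion_queue_next, parks each event in a mutex-guarded C++ queue, and writes one byte into a socketpair; the asyncio loop watches the read end via add_reader and drains the queue on its own thread. A pure-Python sketch of the same mechanism, assuming a selector-based event loop; the blocking event source and all names are stand-ins, not gRPC Core:

```python
# Sketch: background thread -> queue -> socketpair wakeup -> loop-side drain.
import asyncio
import queue
import socket
import threading

events = queue.Queue()                       # stands in for the mutex-guarded std::queue
read_sock, write_sock = socket.socketpair()


def poller():
    for i in range(3):                       # stands in for grpc_completion_queue_next()
        events.put("event-%d" % i)
        write_sock.send(b"1")                # one wakeup byte per queued event


def drain():
    read_sock.recv(1)                        # consume the wakeup byte on the loop thread
    print("handled", events.get_nowait())    # run the completion callback here


async def main():
    loop = asyncio.get_running_loop()
    loop.add_reader(read_sock, drain)
    threading.Thread(target=poller, daemon=True).start()
    await asyncio.sleep(0.2)                 # let the three events arrive
    loop.remove_reader(read_sock)


asyncio.run(main())
```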

@ -13,14 +13,31 @@
# limitations under the License.
# distutils: language=c++
cdef class _AioState:
cdef object lock # threading.RLock
cdef int refcount
cdef object engine # AsyncIOEngine
cdef BaseCompletionQueue cq
cdef grpc_completion_queue *global_completion_queue()
cpdef init_grpc_aio()
cpdef shutdown_grpc_aio()
cdef extern from "src/core/lib/iomgr/timer_manager.h":
void grpc_timer_manager_set_threading(bint enabled);
void grpc_timer_manager_set_threading(bint enabled)
cdef extern from "src/core/lib/iomgr/iomgr_internal.h":
void grpc_set_default_iomgr_platform();
void grpc_set_default_iomgr_platform()
cdef extern from "src/core/lib/iomgr/executor.h" namespace "grpc_core":
cdef cppclass Executor:
@staticmethod
void SetThreadingAll(bint enable);
void SetThreadingAll(bint enable)

@ -12,98 +12,125 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
cdef bint _grpc_aio_initialized = False
# NOTE(lidiz) Theoretically, applications can run in multiple event loops as
# long as they are in the same thread, with some magic. This is not a supported

# use case. So, the gRPC Python Async Stack should use a single event loop
# picked by "init_grpc_aio".
cdef object _grpc_aio_loop # asyncio.AbstractEventLoop
cdef int64_t _event_loop_thread_ident
cdef str _GRPC_ASYNCIO_ENGINE = os.environ.get('GRPC_ASYNCIO_ENGINE', 'default').lower()
grpc_aio_engine = None
cdef object _grpc_initialization_lock = threading.Lock()
cdef str _GRPC_ASYNCIO_ENGINE = os.environ.get('GRPC_ASYNCIO_ENGINE', 'poller').upper()
cdef _AioState _global_aio_state = _AioState()
class AsyncIOEngine(enum.Enum):
DEFAULT = 'default'
CUSTOM_IO_MANAGER = 'custom'
CUSTOM_IO_MANAGER = 'custom_io_manager'
POLLER = 'poller'
def init_grpc_aio():
global _grpc_aio_initialized
global _grpc_aio_loop
global _event_loop_thread_ident
global grpc_aio_engine
with _grpc_initialization_lock:
# Marks this function as called
if _grpc_aio_initialized:
return
else:
_grpc_aio_initialized = True
# Picks the engine for gRPC AsyncIO Stack
for engine_type in AsyncIOEngine:
if engine_type.value == _GRPC_ASYNCIO_ENGINE:
grpc_aio_engine = engine_type
break
if grpc_aio_engine is None or grpc_aio_engine is AsyncIOEngine.DEFAULT:
grpc_aio_engine = AsyncIOEngine.CUSTOM_IO_MANAGER
# Anchors the event loop that the gRPC library is going to use.
_grpc_aio_loop = asyncio.get_event_loop()
_event_loop_thread_ident = threading.current_thread().ident
if grpc_aio_engine is AsyncIOEngine.CUSTOM_IO_MANAGER:
# Activates asyncio IO manager.
# NOTE(lidiz) Custom IO manager must be activated before the first
# `grpc_init()`. Otherwise, some special configurations in Core won't
# pick up the change, resulting in SEGFAULT or ABORT.
install_asyncio_iomgr()
# TODO(https://github.com/grpc/grpc/issues/22244) we need the
# grpc_shutdown_blocking() counterpart for this call. Otherwise, the gRPC
# library won't shut down cleanly.
grpc_init()
# Timers are triggered by the Asyncio loop. We disable
# the background thread that is being used by the native
# gRPC iomgr.
grpc_timer_manager_set_threading(False)
# gRPC callbacks are executed within the same thread used by the Asyncio
# event loop, just like the other Asyncio callbacks.
Executor.SetThreadingAll(False)
else:
# TODO(https://github.com/grpc/grpc/issues/22244) we need the
# grpc_shutdown_blocking() counterpart for this call. Otherwise, the gRPC
# library won't shut down cleanly.
grpc_init()
def grpc_aio_loop():
"""Returns the one-and-only gRPC Aio event loop."""
return _grpc_aio_loop
def aio_loop_schedule_coroutine(object coro):
"""Thread-safely schedules coroutine to gRPC Aio event loop.
If invoked within the same thread as the event loop, return an
Asyncio.Task. Otherwise, return a concurrent.futures.Future (the sync
Future). For non-asyncio threads, sync Future objects are probably easier
to handle (without worrying other thread-safety stuff).
cdef _default_asyncio_engine():
return AsyncIOEngine.POLLER
cdef grpc_completion_queue *global_completion_queue():
return _global_aio_state.cq.c_ptr()
cdef class _AioState:
def __cinit__(self):
self.lock = threading.RLock()
self.refcount = 0
self.engine = None
self.cq = None
cdef _initialize_custom_io_manager():
# Activates asyncio IO manager.
# NOTE(lidiz) Custom IO manager must be activated before the first
# `grpc_init()`. Otherwise, some special configurations in Core won't
# pick up the change, resulting in SEGFAULT or ABORT.
install_asyncio_iomgr()
# Initializes gRPC Core; must be called before any other Core API
grpc_init()
# Timers are triggered by the Asyncio loop. We disable
# the background thread that is being used by the native
# gRPC iomgr.
grpc_timer_manager_set_threading(False)
# gRPC callbacks are executed within the same thread used by the Asyncio
# event loop, just like the other Asyncio callbacks.
Executor.SetThreadingAll(False)
# Creates the only completion queue
_global_aio_state.cq = CallbackCompletionQueue()
cdef _initialize_poller():
# Initializes gRPC Core; must be called before any other Core API
grpc_init()
# Creates the only completion queue
_global_aio_state.cq = PollerCompletionQueue()
cdef _actual_aio_initialization():
# Picks the engine for gRPC AsyncIO Stack
_global_aio_state.engine = AsyncIOEngine.__members__.get(
_GRPC_ASYNCIO_ENGINE,
_default_asyncio_engine(),
)
_LOGGER.debug('Using %s as I/O engine', _global_aio_state.engine)
# Initializes the process-level state accordingly
if _global_aio_state.engine is AsyncIOEngine.CUSTOM_IO_MANAGER:
_initialize_custom_io_manager()
elif _global_aio_state.engine is AsyncIOEngine.POLLER:
_initialize_poller()
else:
raise ValueError('Unsupported engine type [%s]' % _global_aio_state.engine)
def _grpc_shutdown_wrapper(_):
"""A thin Python wrapper of Core's shutdown function.
Defining functions is not allowed inside "cdef" functions, and Cython complains
about using a simple lambda as a C function.
"""
if _event_loop_thread_ident != threading.current_thread().ident:
return asyncio.run_coroutine_threadsafe(coro, _grpc_aio_loop)
grpc_shutdown_blocking()
cdef _actual_aio_shutdown():
if _global_aio_state.engine is AsyncIOEngine.CUSTOM_IO_MANAGER:
future = schedule_coro_threadsafe(
_global_aio_state.cq.shutdown(),
(<CallbackCompletionQueue>_global_aio_state.cq)._loop
)
future.add_done_callback(_grpc_shutdown_wrapper)
elif _global_aio_state.engine is AsyncIOEngine.POLLER:
_global_aio_state.cq.shutdown()
grpc_shutdown_blocking()
else:
return _grpc_aio_loop.create_task(coro)
raise ValueError('Unsupported engine type [%s]' % _global_aio_state.engine)
def aio_loop_call_soon_threadsafe(object func, *args):
# TODO(lidiz) After we are confident, we can drop this assert. Otherwise,
# we should limit this function to non-grpc-event-loop thread.
assert _event_loop_thread_ident != threading.current_thread().ident
return _grpc_aio_loop.call_soon_threadsafe(func, *args)
cpdef init_grpc_aio():
"""Initializes the gRPC AsyncIO module.
Expected to be invoked on critical class constructors.
E.g., AioChannel, AioServer.
"""
with _global_aio_state.lock:
_global_aio_state.refcount += 1
if _global_aio_state.refcount == 1:
_actual_aio_initialization()
cpdef shutdown_grpc_aio():
"""Shuts down the gRPC AsyncIO module.
Expected to be invoked on critical class destructors.
E.g., AioChannel, AioServer.
"""
with _global_aio_state.lock:
assert _global_aio_state.refcount > 0
_global_aio_state.refcount -= 1
if not _global_aio_state.refcount:
_actual_aio_shutdown()
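The init/shutdown pair above is a plain reference count guarded by an RLock: the first caller performs the real initialization and the last caller tears everything down, so channels and servers can be created and destroyed in any order. A compact sketch of the same pattern; _setup/_teardown are placeholders for grpc_init and the completion-queue shutdown, not real API:

```python
# Sketch of the refcounting pattern used by init_grpc_aio/shutdown_grpc_aio.
import threading


class _State:
    def __init__(self):
        self.lock = threading.RLock()
        self.refcount = 0


_state = _State()


def _setup():
    print("grpc_init(), create the single completion queue")


def _teardown():
    print("shut the completion queue down, grpc_shutdown_blocking()")


def init():
    with _state.lock:
        _state.refcount += 1
        if _state.refcount == 1:   # only the first user initializes
            _setup()


def shutdown():
    with _state.lock:
        assert _state.refcount > 0
        _state.refcount -= 1
        if _state.refcount == 0:   # only the last user tears down
            _teardown()
```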

@ -212,7 +212,18 @@ cdef void asyncio_run_loop(size_t timeout_ms) with gil:
pass
def _auth_plugin_callback_wrapper(object cb,
str service_url,
str method_name,
object callback):
asyncio.get_event_loop().call_soon(cb, service_url, method_name, callback)
def install_asyncio_iomgr():
# Auth plugins invoke user-provided logic in another thread by default. We
# need to override that behavior by scheduling the call onto the event loop.
set_async_callback_func(_auth_plugin_callback_wrapper)
asyncio_resolver_vtable.resolve = asyncio_resolve
asyncio_resolver_vtable.resolve_async = asyncio_resolve_async
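For context, the plugin callbacks being rerouted here come from the public metadata-plugin API. A hedged sketch of such a plugin (the claim that its callback lands on the loop thread under the asyncio engine is an assumption drawn from the wrapper above):

```python
# Sketch: a metadata plugin whose completion callback, under the custom IO
# manager, is delivered via the event loop rather than a Core worker thread.
import grpc


class TokenAuth(grpc.AuthMetadataPlugin):
    def __call__(self, context, callback):
        # callback(metadata, error): metadata is a sequence of 2-tuples with
        # lowercase keys; None means no error.
        callback((("authorization", "Bearer example-token"),), None)


call_creds = grpc.metadata_call_credentials(TokenAuth())
```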
