Merge branch 'master' into grpc_namespace_serverbuilder

pull/18242/head
Karthik Ravi Shankar 6 years ago
commit 322e720fe6
  1. .github/mergeable.yml (24)
  2. BUILD (32)
  3. CMakeLists.txt (191)
  4. Makefile (171)
  5. bazel/grpc_build_system.bzl (3)
  6. bazel/grpc_deps.bzl (10)
  7. build.yaml (41)
  8. config.m4 (4)
  9. config.w32 (4)
  10. doc/core/grpc-polling-engines.md (6)
  11. doc/core/pending_api_cleanups.md (1)
  12. doc/environment_variables.md (5)
  13. doc/naming.md (12)
  14. doc/statuscodes.md (32)
  15. examples/cpp/helloworld/README.md (2)
  16. examples/python/multiprocessing/BUILD (59)
  17. examples/python/multiprocessing/README.md (67)
  18. examples/python/multiprocessing/client.py (95)
  19. examples/python/multiprocessing/prime.proto (35)
  20. examples/python/multiprocessing/server.py (123)
  21. examples/python/multiprocessing/test/_multiprocessing_example_test.py (74)
  22. gRPC-C++.podspec (4)
  23. gRPC-Core.podspec (9)
  24. grpc.def (1)
  25. grpc.gemspec (8)
  26. grpc.gyp (24)
  27. include/grpc/grpc_security.h (60)
  28. include/grpc/impl/codegen/byte_buffer.h (13)
  29. include/grpc/impl/codegen/grpc_types.h (4)
  30. include/grpcpp/impl/codegen/async_generic_service.h (51)
  31. include/grpcpp/impl/codegen/client_callback.h (47)
  32. include/grpcpp/impl/codegen/core_codegen.h (2)
  33. include/grpcpp/impl/codegen/core_codegen_interface.h (2)
  34. include/grpcpp/impl/codegen/interceptor_common.h (1)
  35. include/grpcpp/impl/codegen/proto_buffer_reader.h (18)
  36. include/grpcpp/impl/codegen/server_context.h (10)
  37. include/grpcpp/impl/codegen/server_interface.h (23)
  38. include/grpcpp/server.h (46)
  39. include/grpcpp/server_builder_impl.h (8)
  40. package.xml (6)
  41. src/core/ext/filters/client_channel/client_channel.cc (116)
  42. src/core/ext/filters/client_channel/http_connect_handshaker.cc (4)
  43. src/core/ext/filters/client_channel/lb_policy.h (7)
  44. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (465)
  45. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (61)
  46. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (969)
  47. src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc (43)
  48. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (47)
  49. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc (34)
  50. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h (5)
  51. src/core/ext/filters/client_channel/resolver_result_parsing.cc (4)
  52. src/core/ext/filters/client_channel/resolving_lb_policy.cc (273)
  53. src/core/ext/filters/client_channel/resolving_lb_policy.h (13)
  54. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (10)
  55. src/core/ext/transport/chttp2/transport/flow_control.cc (2)
  56. src/core/ext/upb-generated/google/protobuf/any.upb.c (27)
  57. src/core/ext/upb-generated/google/protobuf/any.upb.h (59)
  58. src/core/ext/upb-generated/google/protobuf/descriptor.upb.c (485)
  59. src/core/ext/upb-generated/google/protobuf/descriptor.upb.h (1691)
  60. src/core/ext/upb-generated/google/protobuf/duration.upb.c (27)
  61. src/core/ext/upb-generated/google/protobuf/duration.upb.h (59)
  62. src/core/ext/upb-generated/google/protobuf/struct.upb.c (79)
  63. src/core/ext/upb-generated/google/protobuf/struct.upb.h (216)
  64. src/core/ext/upb-generated/google/protobuf/timestamp.upb.c (27)
  65. src/core/ext/upb-generated/google/protobuf/timestamp.upb.h (59)
  66. src/core/ext/upb-generated/google/protobuf/wrappers.upb.c (106)
  67. src/core/ext/upb-generated/google/protobuf/wrappers.upb.h (239)
  68. src/core/lib/compression/stream_compression_gzip.cc (2)
  69. src/core/lib/gpr/cpu_posix.cc (8)
  70. src/core/lib/http/httpcli.cc (2)
  71. src/core/lib/iomgr/endpoint.cc (4)
  72. src/core/lib/iomgr/endpoint.h (5)
  73. src/core/lib/iomgr/endpoint_cfstream.cc (2)
  74. src/core/lib/iomgr/ev_poll_posix.cc (486)
  75. src/core/lib/iomgr/ev_posix.cc (5)
  76. src/core/lib/iomgr/internal_errqueue.cc (6)
  77. src/core/lib/iomgr/port.h (3)
  78. src/core/lib/iomgr/tcp_custom.cc (2)
  79. src/core/lib/iomgr/tcp_posix.cc (206)
  80. src/core/lib/iomgr/tcp_windows.cc (2)
  81. src/core/lib/iomgr/wakeup_fd_cv.cc (107)
  82. src/core/lib/iomgr/wakeup_fd_cv.h (69)
  83. src/core/lib/iomgr/wakeup_fd_posix.cc (20)
  84. src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h (9)
  85. src/core/lib/security/credentials/tls/spiffe_credentials.cc (129)
  86. src/core/lib/security/credentials/tls/spiffe_credentials.h (62)
  87. src/core/lib/security/security_connector/fake/fake_security_connector.cc (9)
  88. src/core/lib/security/security_connector/ssl/ssl_security_connector.cc (237)
  89. src/core/lib/security/security_connector/ssl_utils.cc (134)
  90. src/core/lib/security/security_connector/ssl_utils.h (32)
  91. src/core/lib/security/security_connector/tls/spiffe_security_connector.cc (426)
  92. src/core/lib/security/security_connector/tls/spiffe_security_connector.h (122)
  93. src/core/lib/security/transport/secure_endpoint.cc (4)
  94. src/core/lib/security/transport/security_handshaker.cc (7)
  95. src/core/lib/surface/byte_buffer_reader.cc (17)
  96. src/core/tsi/ssl_transport_security.cc (24)
  97. src/cpp/common/core_codegen.cc (5)
  98. src/cpp/server/server_builder.cc (24)
  99. src/cpp/server/server_cc.cc (174)
  100. src/cpp/server/server_context.cc (30)
Some files were not shown because too many files have changed in this diff.

@ -1,14 +1,18 @@
mergeable:
pull_requests:
label:
or:
- and:
and:
- must_exclude:
regex: '^disposition/DO NOT MERGE'
message: 'Pull request marked not mergeable'
- or:
- and:
- must_include:
regex: 'release notes: yes'
message: 'Please include release note: yes'
- must_include:
regex: '^lang\/'
message: 'Please include a language label'
- must_include:
regex: 'release notes: yes'
message: 'Please include release note: yes'
- must_include:
regex: '^lang\/'
message: 'Please include a language label'
- must_include:
regex: 'release notes: no'
message: 'Please include release note: no'
regex: 'release notes: no'
message: 'Please include release note: no'

BUILD (32)

@ -308,6 +308,7 @@ grpc_cc_library(
public_hdrs = GRPC_PUBLIC_HDRS + GRPC_SECURE_PUBLIC_HDRS,
standalone = True,
deps = [
"grpc_cfstream",
"grpc_common",
"grpc_lb_policy_grpclb_secure",
"grpc_lb_policy_xds_secure",
@ -801,7 +802,6 @@ grpc_cc_library(
"src/core/lib/iomgr/udp_server.cc",
"src/core/lib/iomgr/unix_sockets_posix.cc",
"src/core/lib/iomgr/unix_sockets_posix_noop.cc",
"src/core/lib/iomgr/wakeup_fd_cv.cc",
"src/core/lib/iomgr/wakeup_fd_eventfd.cc",
"src/core/lib/iomgr/wakeup_fd_nospecial.cc",
"src/core/lib/iomgr/wakeup_fd_pipe.cc",
@ -942,7 +942,6 @@ grpc_cc_library(
"src/core/lib/iomgr/timer_manager.h",
"src/core/lib/iomgr/udp_server.h",
"src/core/lib/iomgr/unix_sockets_posix.h",
"src/core/lib/iomgr/wakeup_fd_cv.h",
"src/core/lib/iomgr/wakeup_fd_pipe.h",
"src/core/lib/iomgr/wakeup_fd_posix.h",
"src/core/lib/json/json.h",
@ -1620,6 +1619,7 @@ grpc_cc_library(
"src/core/lib/security/credentials/plugin/plugin_credentials.cc",
"src/core/lib/security/credentials/ssl/ssl_credentials.cc",
"src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc",
"src/core/lib/security/credentials/tls/spiffe_credentials.cc",
"src/core/lib/security/security_connector/alts/alts_security_connector.cc",
"src/core/lib/security/security_connector/fake/fake_security_connector.cc",
"src/core/lib/security/security_connector/load_system_roots_fallback.cc",
@ -1628,6 +1628,7 @@ grpc_cc_library(
"src/core/lib/security/security_connector/security_connector.cc",
"src/core/lib/security/security_connector/ssl/ssl_security_connector.cc",
"src/core/lib/security/security_connector/ssl_utils.cc",
"src/core/lib/security/security_connector/tls/spiffe_security_connector.cc",
"src/core/lib/security/transport/client_auth_filter.cc",
"src/core/lib/security/transport/secure_endpoint.cc",
"src/core/lib/security/transport/security_handshaker.cc",
@ -1655,6 +1656,7 @@ grpc_cc_library(
"src/core/lib/security/credentials/plugin/plugin_credentials.h",
"src/core/lib/security/credentials/ssl/ssl_credentials.h",
"src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h",
"src/core/lib/security/credentials/tls/spiffe_credentials.h",
"src/core/lib/security/security_connector/alts/alts_security_connector.h",
"src/core/lib/security/security_connector/fake/fake_security_connector.h",
"src/core/lib/security/security_connector/load_system_roots.h",
@ -1663,6 +1665,7 @@ grpc_cc_library(
"src/core/lib/security/security_connector/security_connector.h",
"src/core/lib/security/security_connector/ssl/ssl_security_connector.h",
"src/core/lib/security/security_connector/ssl_utils.h",
"src/core/lib/security/security_connector/tls/spiffe_security_connector.h",
"src/core/lib/security/transport/auth_filters.h",
"src/core/lib/security/transport/secure_endpoint.h",
"src/core/lib/security/transport/security_handshaker.h",
@ -2309,4 +2312,29 @@ grpc_cc_library(
],
)
#TODO: Get this into build.yaml once we start using it.
grpc_cc_library(
name = "google_protobuf_upb",
srcs = [
"src/core/ext/upb-generated/google/protobuf/any.upb.c",
"src/core/ext/upb-generated/google/protobuf/descriptor.upb.c",
"src/core/ext/upb-generated/google/protobuf/duration.upb.c",
"src/core/ext/upb-generated/google/protobuf/struct.upb.c",
"src/core/ext/upb-generated/google/protobuf/timestamp.upb.c",
"src/core/ext/upb-generated/google/protobuf/wrappers.upb.c",
],
hdrs = [
"src/core/ext/upb-generated/google/protobuf/any.upb.h",
"src/core/ext/upb-generated/google/protobuf/descriptor.upb.h",
"src/core/ext/upb-generated/google/protobuf/duration.upb.h",
"src/core/ext/upb-generated/google/protobuf/struct.upb.h",
"src/core/ext/upb-generated/google/protobuf/timestamp.upb.h",
"src/core/ext/upb-generated/google/protobuf/wrappers.upb.h",
],
language = "c++",
external_deps = [
"upb_lib",
],
)
grpc_generate_one_off_targets()

@ -437,9 +437,6 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c udp_server_test)
endif()
add_dependencies(buildtests_c uri_parser_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_c wakeup_fd_cv_test)
endif()
add_dependencies(buildtests_c public_headers_must_be_c89)
add_dependencies(buildtests_c badreq_bad_client_test)
add_dependencies(buildtests_c connection_prefix_bad_client_test)
@ -485,6 +482,7 @@ add_dependencies(buildtests_c h2_proxy_test)
add_dependencies(buildtests_c h2_sockpair_test)
add_dependencies(buildtests_c h2_sockpair+trace_test)
add_dependencies(buildtests_c h2_sockpair_1byte_test)
add_dependencies(buildtests_c h2_spiffe_test)
add_dependencies(buildtests_c h2_ssl_test)
add_dependencies(buildtests_c h2_ssl_proxy_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
@ -720,6 +718,7 @@ add_dependencies(buildtests_cxx transport_security_common_api_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx writes_per_rpc_test)
endif()
add_dependencies(buildtests_cxx xds_end2end_test)
add_dependencies(buildtests_cxx resolver_component_test_unsecure)
add_dependencies(buildtests_cxx resolver_component_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
@ -1071,7 +1070,6 @@ add_library(grpc
src/core/lib/iomgr/udp_server.cc
src/core/lib/iomgr/unix_sockets_posix.cc
src/core/lib/iomgr/unix_sockets_posix_noop.cc
src/core/lib/iomgr/wakeup_fd_cv.cc
src/core/lib/iomgr/wakeup_fd_eventfd.cc
src/core/lib/iomgr/wakeup_fd_nospecial.cc
src/core/lib/iomgr/wakeup_fd_pipe.cc
@ -1167,6 +1165,7 @@ add_library(grpc
src/core/lib/security/credentials/plugin/plugin_credentials.cc
src/core/lib/security/credentials/ssl/ssl_credentials.cc
src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc
src/core/lib/security/credentials/tls/spiffe_credentials.cc
src/core/lib/security/security_connector/alts/alts_security_connector.cc
src/core/lib/security/security_connector/fake/fake_security_connector.cc
src/core/lib/security/security_connector/load_system_roots_fallback.cc
@ -1175,6 +1174,7 @@ add_library(grpc
src/core/lib/security/security_connector/security_connector.cc
src/core/lib/security/security_connector/ssl/ssl_security_connector.cc
src/core/lib/security/security_connector/ssl_utils.cc
src/core/lib/security/security_connector/tls/spiffe_security_connector.cc
src/core/lib/security/transport/client_auth_filter.cc
src/core/lib/security/transport/secure_endpoint.cc
src/core/lib/security/transport/security_handshaker.cc
@ -1496,7 +1496,6 @@ add_library(grpc_cronet
src/core/lib/iomgr/udp_server.cc
src/core/lib/iomgr/unix_sockets_posix.cc
src/core/lib/iomgr/unix_sockets_posix_noop.cc
src/core/lib/iomgr/wakeup_fd_cv.cc
src/core/lib/iomgr/wakeup_fd_eventfd.cc
src/core/lib/iomgr/wakeup_fd_nospecial.cc
src/core/lib/iomgr/wakeup_fd_pipe.cc
@ -1625,6 +1624,7 @@ add_library(grpc_cronet
src/core/lib/security/credentials/plugin/plugin_credentials.cc
src/core/lib/security/credentials/ssl/ssl_credentials.cc
src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc
src/core/lib/security/credentials/tls/spiffe_credentials.cc
src/core/lib/security/security_connector/alts/alts_security_connector.cc
src/core/lib/security/security_connector/fake/fake_security_connector.cc
src/core/lib/security/security_connector/load_system_roots_fallback.cc
@ -1633,6 +1633,7 @@ add_library(grpc_cronet
src/core/lib/security/security_connector/security_connector.cc
src/core/lib/security/security_connector/ssl/ssl_security_connector.cc
src/core/lib/security/security_connector/ssl_utils.cc
src/core/lib/security/security_connector/tls/spiffe_security_connector.cc
src/core/lib/security/transport/client_auth_filter.cc
src/core/lib/security/transport/secure_endpoint.cc
src/core/lib/security/transport/security_handshaker.cc
@ -1906,7 +1907,6 @@ add_library(grpc_test_util
src/core/lib/iomgr/udp_server.cc
src/core/lib/iomgr/unix_sockets_posix.cc
src/core/lib/iomgr/unix_sockets_posix_noop.cc
src/core/lib/iomgr/wakeup_fd_cv.cc
src/core/lib/iomgr/wakeup_fd_eventfd.cc
src/core/lib/iomgr/wakeup_fd_nospecial.cc
src/core/lib/iomgr/wakeup_fd_pipe.cc
@ -2231,7 +2231,6 @@ add_library(grpc_test_util_unsecure
src/core/lib/iomgr/udp_server.cc
src/core/lib/iomgr/unix_sockets_posix.cc
src/core/lib/iomgr/unix_sockets_posix_noop.cc
src/core/lib/iomgr/wakeup_fd_cv.cc
src/core/lib/iomgr/wakeup_fd_eventfd.cc
src/core/lib/iomgr/wakeup_fd_nospecial.cc
src/core/lib/iomgr/wakeup_fd_pipe.cc
@ -2532,7 +2531,6 @@ add_library(grpc_unsecure
src/core/lib/iomgr/udp_server.cc
src/core/lib/iomgr/unix_sockets_posix.cc
src/core/lib/iomgr/unix_sockets_posix_noop.cc
src/core/lib/iomgr/wakeup_fd_cv.cc
src/core/lib/iomgr/wakeup_fd_eventfd.cc
src/core/lib/iomgr/wakeup_fd_nospecial.cc
src/core/lib/iomgr/wakeup_fd_pipe.cc
@ -3420,7 +3418,6 @@ add_library(grpc++_cronet
src/core/lib/iomgr/udp_server.cc
src/core/lib/iomgr/unix_sockets_posix.cc
src/core/lib/iomgr/unix_sockets_posix_noop.cc
src/core/lib/iomgr/wakeup_fd_cv.cc
src/core/lib/iomgr/wakeup_fd_eventfd.cc
src/core/lib/iomgr/wakeup_fd_nospecial.cc
src/core/lib/iomgr/wakeup_fd_pipe.cc
@ -3838,6 +3835,7 @@ foreach(_hdr
endforeach()
endif (gRPC_BUILD_CODEGEN)
if (gRPC_BUILD_CODEGEN)
if (gRPC_INSTALL)
install(TARGETS grpc++_error_details EXPORT gRPCTargets
@ -3847,6 +3845,7 @@ if (gRPC_INSTALL)
)
endif()
endif (gRPC_BUILD_CODEGEN)
if (gRPC_BUILD_TESTS)
if (gRPC_BUILD_CODEGEN)
@ -3968,6 +3967,7 @@ foreach(_hdr
endforeach()
endif (gRPC_BUILD_CODEGEN)
if (gRPC_BUILD_CODEGEN)
if (gRPC_INSTALL)
install(TARGETS grpc++_reflection EXPORT gRPCTargets
@ -3977,6 +3977,7 @@ if (gRPC_INSTALL)
)
endif()
endif (gRPC_BUILD_CODEGEN)
if (gRPC_BUILD_TESTS)
add_library(grpc++_test_config
@ -4970,6 +4971,7 @@ foreach(_hdr
endforeach()
endif (gRPC_BUILD_CODEGEN)
if (gRPC_BUILD_CODEGEN)
if (gRPC_INSTALL)
install(TARGETS grpcpp_channelz EXPORT gRPCTargets
@ -4979,6 +4981,7 @@ if (gRPC_INSTALL)
)
endif()
endif (gRPC_BUILD_CODEGEN)
if (gRPC_BUILD_TESTS)
if (gRPC_BUILD_CODEGEN)
@ -5505,6 +5508,58 @@ endif()
endif (gRPC_BUILD_CSHARP_EXT)
if (gRPC_BUILD_TESTS)
add_library(upb
third_party/upb/google/protobuf/descriptor.upb.c
third_party/upb/upb/decode.c
third_party/upb/upb/def.c
third_party/upb/upb/encode.c
third_party/upb/upb/handlers.c
third_party/upb/upb/msg.c
third_party/upb/upb/msgfactory.c
third_party/upb/upb/sink.c
third_party/upb/upb/table.c
third_party/upb/upb/upb.c
)
if(WIN32 AND MSVC)
set_target_properties(upb PROPERTIES COMPILE_PDB_NAME "upb"
COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}"
)
if (gRPC_INSTALL)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/upb.pdb
DESTINATION ${gRPC_INSTALL_LIBDIR} OPTIONAL
)
endif()
endif()
target_include_directories(upb
PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR}
)
# avoid dependency on libstdc++
if (_gRPC_CORE_NOSTDCXX_FLAGS)
set_target_properties(upb PROPERTIES LINKER_LANGUAGE C)
# only use the flags for C++ source files
target_compile_options(upb PRIVATE $<$<COMPILE_LANGUAGE:CXX>:${_gRPC_CORE_NOSTDCXX_FLAGS}>)
endif()
target_link_libraries(upb
${_gRPC_SSL_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_library(bad_client_test
test/core/bad_client/bad_client.cc
)
@ -10455,42 +10510,6 @@ target_link_libraries(uri_parser_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(wakeup_fd_cv_test
test/core/iomgr/wakeup_fd_cv_test.cc
)
target_include_directories(wakeup_fd_cv_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR}
)
target_link_libraries(wakeup_fd_cv_test
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc
gpr
)
# avoid dependency on libstdc++
if (_gRPC_CORE_NOSTDCXX_FLAGS)
set_target_properties(wakeup_fd_cv_test PROPERTIES LINKER_LANGUAGE C)
target_compile_options(wakeup_fd_cv_test PRIVATE $<$<COMPILE_LANGUAGE:CXX>:${_gRPC_CORE_NOSTDCXX_FLAGS}>)
endif()
endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(alarm_test
test/cpp/common/alarm_test.cc
@ -16235,6 +16254,53 @@ endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(xds_end2end_test
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.grpc.pb.h
test/cpp/end2end/xds_end2end_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
protobuf_generate_grpc_cpp(
src/proto/grpc/lb/v1/load_balancer.proto
)
target_include_directories(xds_end2end_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR}
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(xds_end2end_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_util
grpc_test_util
grpc++
grpc
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(public_headers_must_be_c89
test/core/surface/public_headers_must_be_c89.c
)
@ -17411,6 +17477,41 @@ target_link_libraries(h2_sockpair_1byte_test
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(h2_spiffe_test
test/core/end2end/fixtures/h2_spiffe.cc
)
target_include_directories(h2_spiffe_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR}
)
target_link_libraries(h2_spiffe_test
${_gRPC_ALLTARGETS_LIBRARIES}
end2end_tests
grpc_test_util
grpc
gpr
)
# avoid dependency on libstdc++
if (_gRPC_CORE_NOSTDCXX_FLAGS)
set_target_properties(h2_spiffe_test PROPERTIES LINKER_LANGUAGE C)
target_compile_options(h2_spiffe_test PRIVATE $<$<COMPILE_LANGUAGE:CXX>:${_gRPC_CORE_NOSTDCXX_FLAGS}>)
endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(h2_ssl_test
test/core/end2end/fixtures/h2_ssl.cc
)

@ -1140,7 +1140,6 @@ transport_security_test: $(BINDIR)/$(CONFIG)/transport_security_test
udp_server_test: $(BINDIR)/$(CONFIG)/udp_server_test
uri_fuzzer_test: $(BINDIR)/$(CONFIG)/uri_fuzzer_test
uri_parser_test: $(BINDIR)/$(CONFIG)/uri_parser_test
wakeup_fd_cv_test: $(BINDIR)/$(CONFIG)/wakeup_fd_cv_test
alarm_test: $(BINDIR)/$(CONFIG)/alarm_test
alts_counter_test: $(BINDIR)/$(CONFIG)/alts_counter_test
alts_crypt_test: $(BINDIR)/$(CONFIG)/alts_crypt_test
@ -1276,6 +1275,7 @@ time_change_test: $(BINDIR)/$(CONFIG)/time_change_test
transport_pid_controller_test: $(BINDIR)/$(CONFIG)/transport_pid_controller_test
transport_security_common_api_test: $(BINDIR)/$(CONFIG)/transport_security_common_api_test
writes_per_rpc_test: $(BINDIR)/$(CONFIG)/writes_per_rpc_test
xds_end2end_test: $(BINDIR)/$(CONFIG)/xds_end2end_test
public_headers_must_be_c89: $(BINDIR)/$(CONFIG)/public_headers_must_be_c89
gen_hpack_tables: $(BINDIR)/$(CONFIG)/gen_hpack_tables
gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
@ -1361,6 +1361,7 @@ h2_proxy_test: $(BINDIR)/$(CONFIG)/h2_proxy_test
h2_sockpair_test: $(BINDIR)/$(CONFIG)/h2_sockpair_test
h2_sockpair+trace_test: $(BINDIR)/$(CONFIG)/h2_sockpair+trace_test
h2_sockpair_1byte_test: $(BINDIR)/$(CONFIG)/h2_sockpair_1byte_test
h2_spiffe_test: $(BINDIR)/$(CONFIG)/h2_spiffe_test
h2_ssl_test: $(BINDIR)/$(CONFIG)/h2_ssl_test
h2_ssl_proxy_test: $(BINDIR)/$(CONFIG)/h2_ssl_proxy_test
h2_uds_test: $(BINDIR)/$(CONFIG)/h2_uds_test
@ -1443,7 +1444,7 @@ plugins: $(PROTOC_PLUGINS)
privatelibs: privatelibs_c privatelibs_cxx
privatelibs_c: $(LIBDIR)/$(CONFIG)/libalts_test_util.a $(LIBDIR)/$(CONFIG)/libcxxabi.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libreconnect_server.a $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a $(LIBDIR)/$(CONFIG)/libz.a $(LIBDIR)/$(CONFIG)/libares.a $(LIBDIR)/$(CONFIG)/libbad_client_test.a $(LIBDIR)/$(CONFIG)/libbad_ssl_test_server.a $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a
privatelibs_c: $(LIBDIR)/$(CONFIG)/libalts_test_util.a $(LIBDIR)/$(CONFIG)/libcxxabi.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libreconnect_server.a $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a $(LIBDIR)/$(CONFIG)/libupb.a $(LIBDIR)/$(CONFIG)/libz.a $(LIBDIR)/$(CONFIG)/libares.a $(LIBDIR)/$(CONFIG)/libbad_client_test.a $(LIBDIR)/$(CONFIG)/libbad_ssl_test_server.a $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a
pc_c: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc.pc $(LIBDIR)/$(CONFIG)/pkgconfig/gpr.pc
pc_c_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc $(LIBDIR)/$(CONFIG)/pkgconfig/gpr.pc
@ -1592,7 +1593,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/transport_security_test \
$(BINDIR)/$(CONFIG)/udp_server_test \
$(BINDIR)/$(CONFIG)/uri_parser_test \
$(BINDIR)/$(CONFIG)/wakeup_fd_cv_test \
$(BINDIR)/$(CONFIG)/public_headers_must_be_c89 \
$(BINDIR)/$(CONFIG)/badreq_bad_client_test \
$(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test \
@ -1624,6 +1624,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/h2_sockpair_test \
$(BINDIR)/$(CONFIG)/h2_sockpair+trace_test \
$(BINDIR)/$(CONFIG)/h2_sockpair_1byte_test \
$(BINDIR)/$(CONFIG)/h2_spiffe_test \
$(BINDIR)/$(CONFIG)/h2_ssl_test \
$(BINDIR)/$(CONFIG)/h2_ssl_proxy_test \
$(BINDIR)/$(CONFIG)/h2_uds_test \
@ -1787,6 +1788,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/transport_pid_controller_test \
$(BINDIR)/$(CONFIG)/transport_security_common_api_test \
$(BINDIR)/$(CONFIG)/writes_per_rpc_test \
$(BINDIR)/$(CONFIG)/xds_end2end_test \
$(BINDIR)/$(CONFIG)/boringssl_crypto_test_data \
$(BINDIR)/$(CONFIG)/boringssl_asn1_test \
$(BINDIR)/$(CONFIG)/boringssl_base64_test \
@ -1976,6 +1978,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/transport_pid_controller_test \
$(BINDIR)/$(CONFIG)/transport_security_common_api_test \
$(BINDIR)/$(CONFIG)/writes_per_rpc_test \
$(BINDIR)/$(CONFIG)/xds_end2end_test \
$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure \
$(BINDIR)/$(CONFIG)/resolver_component_test \
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure \
@ -2240,8 +2243,6 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/udp_server_test || ( echo test udp_server_test failed ; exit 1 )
$(E) "[RUN] Testing uri_parser_test"
$(Q) $(BINDIR)/$(CONFIG)/uri_parser_test || ( echo test uri_parser_test failed ; exit 1 )
$(E) "[RUN] Testing wakeup_fd_cv_test"
$(Q) $(BINDIR)/$(CONFIG)/wakeup_fd_cv_test || ( echo test wakeup_fd_cv_test failed ; exit 1 )
$(E) "[RUN] Testing public_headers_must_be_c89"
$(Q) $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 || ( echo test public_headers_must_be_c89 failed ; exit 1 )
$(E) "[RUN] Testing badreq_bad_client_test"
@ -2500,6 +2501,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/transport_security_common_api_test || ( echo test transport_security_common_api_test failed ; exit 1 )
$(E) "[RUN] Testing writes_per_rpc_test"
$(Q) $(BINDIR)/$(CONFIG)/writes_per_rpc_test || ( echo test writes_per_rpc_test failed ; exit 1 )
$(E) "[RUN] Testing xds_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/xds_end2end_test || ( echo test xds_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing resolver_component_tests_runner_invoker_unsecure"
$(Q) $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure || ( echo test resolver_component_tests_runner_invoker_unsecure failed ; exit 1 )
$(E) "[RUN] Testing resolver_component_tests_runner_invoker"
@ -3612,7 +3615,6 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/udp_server.cc \
src/core/lib/iomgr/unix_sockets_posix.cc \
src/core/lib/iomgr/unix_sockets_posix_noop.cc \
src/core/lib/iomgr/wakeup_fd_cv.cc \
src/core/lib/iomgr/wakeup_fd_eventfd.cc \
src/core/lib/iomgr/wakeup_fd_nospecial.cc \
src/core/lib/iomgr/wakeup_fd_pipe.cc \
@ -3708,6 +3710,7 @@ LIBGRPC_SRC = \
src/core/lib/security/credentials/plugin/plugin_credentials.cc \
src/core/lib/security/credentials/ssl/ssl_credentials.cc \
src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc \
src/core/lib/security/credentials/tls/spiffe_credentials.cc \
src/core/lib/security/security_connector/alts/alts_security_connector.cc \
src/core/lib/security/security_connector/fake/fake_security_connector.cc \
src/core/lib/security/security_connector/load_system_roots_fallback.cc \
@ -3716,6 +3719,7 @@ LIBGRPC_SRC = \
src/core/lib/security/security_connector/security_connector.cc \
src/core/lib/security/security_connector/ssl/ssl_security_connector.cc \
src/core/lib/security/security_connector/ssl_utils.cc \
src/core/lib/security/security_connector/tls/spiffe_security_connector.cc \
src/core/lib/security/transport/client_auth_filter.cc \
src/core/lib/security/transport/secure_endpoint.cc \
src/core/lib/security/transport/security_handshaker.cc \
@ -4031,7 +4035,6 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/iomgr/udp_server.cc \
src/core/lib/iomgr/unix_sockets_posix.cc \
src/core/lib/iomgr/unix_sockets_posix_noop.cc \
src/core/lib/iomgr/wakeup_fd_cv.cc \
src/core/lib/iomgr/wakeup_fd_eventfd.cc \
src/core/lib/iomgr/wakeup_fd_nospecial.cc \
src/core/lib/iomgr/wakeup_fd_pipe.cc \
@ -4160,6 +4163,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/security/credentials/plugin/plugin_credentials.cc \
src/core/lib/security/credentials/ssl/ssl_credentials.cc \
src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc \
src/core/lib/security/credentials/tls/spiffe_credentials.cc \
src/core/lib/security/security_connector/alts/alts_security_connector.cc \
src/core/lib/security/security_connector/fake/fake_security_connector.cc \
src/core/lib/security/security_connector/load_system_roots_fallback.cc \
@ -4168,6 +4172,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/security/security_connector/security_connector.cc \
src/core/lib/security/security_connector/ssl/ssl_security_connector.cc \
src/core/lib/security/security_connector/ssl_utils.cc \
src/core/lib/security/security_connector/tls/spiffe_security_connector.cc \
src/core/lib/security/transport/client_auth_filter.cc \
src/core/lib/security/transport/secure_endpoint.cc \
src/core/lib/security/transport/security_handshaker.cc \
@ -4434,7 +4439,6 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/iomgr/udp_server.cc \
src/core/lib/iomgr/unix_sockets_posix.cc \
src/core/lib/iomgr/unix_sockets_posix_noop.cc \
src/core/lib/iomgr/wakeup_fd_cv.cc \
src/core/lib/iomgr/wakeup_fd_eventfd.cc \
src/core/lib/iomgr/wakeup_fd_nospecial.cc \
src/core/lib/iomgr/wakeup_fd_pipe.cc \
@ -4746,7 +4750,6 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/lib/iomgr/udp_server.cc \
src/core/lib/iomgr/unix_sockets_posix.cc \
src/core/lib/iomgr/unix_sockets_posix_noop.cc \
src/core/lib/iomgr/wakeup_fd_cv.cc \
src/core/lib/iomgr/wakeup_fd_eventfd.cc \
src/core/lib/iomgr/wakeup_fd_nospecial.cc \
src/core/lib/iomgr/wakeup_fd_pipe.cc \
@ -5021,7 +5024,6 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/iomgr/udp_server.cc \
src/core/lib/iomgr/unix_sockets_posix.cc \
src/core/lib/iomgr/unix_sockets_posix_noop.cc \
src/core/lib/iomgr/wakeup_fd_cv.cc \
src/core/lib/iomgr/wakeup_fd_eventfd.cc \
src/core/lib/iomgr/wakeup_fd_nospecial.cc \
src/core/lib/iomgr/wakeup_fd_pipe.cc \
@ -5886,7 +5888,6 @@ LIBGRPC++_CRONET_SRC = \
src/core/lib/iomgr/udp_server.cc \
src/core/lib/iomgr/unix_sockets_posix.cc \
src/core/lib/iomgr/unix_sockets_posix_noop.cc \
src/core/lib/iomgr/wakeup_fd_cv.cc \
src/core/lib/iomgr/wakeup_fd_eventfd.cc \
src/core/lib/iomgr/wakeup_fd_nospecial.cc \
src/core/lib/iomgr/wakeup_fd_pipe.cc \
@ -10196,6 +10197,41 @@ ifneq ($(NO_DEPS),true)
endif
LIBUPB_SRC = \
third_party/upb/google/protobuf/descriptor.upb.c \
third_party/upb/upb/decode.c \
third_party/upb/upb/def.c \
third_party/upb/upb/encode.c \
third_party/upb/upb/handlers.c \
third_party/upb/upb/msg.c \
third_party/upb/upb/msgfactory.c \
third_party/upb/upb/sink.c \
third_party/upb/upb/table.c \
third_party/upb/upb/upb.c \
PUBLIC_HEADERS_C += \
LIBUPB_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBUPB_SRC))))
$(LIBUPB_OBJS): CFLAGS += -Ithird_party/upb -Wno-sign-conversion -Wno-shadow -Wno-conversion -Wno-implicit-fallthrough -Wno-sign-compare -Wno-missing-field-initializers
$(LIBDIR)/$(CONFIG)/libupb.a: $(ZLIB_DEP) $(CARES_DEP) $(ADDRESS_SORTING_DEP) $(LIBUPB_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
$(Q) rm -f $(LIBDIR)/$(CONFIG)/libupb.a
$(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libupb.a $(LIBUPB_OBJS)
ifeq ($(SYSTEM),Darwin)
$(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libupb.a
endif
ifneq ($(NO_DEPS),true)
-include $(LIBUPB_OBJS:.o=.dep)
endif
LIBZ_SRC = \
third_party/zlib/adler32.c \
third_party/zlib/compress.c \
@ -15403,38 +15439,6 @@ endif
endif
WAKEUP_FD_CV_TEST_SRC = \
test/core/iomgr/wakeup_fd_cv_test.cc \
WAKEUP_FD_CV_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(WAKEUP_FD_CV_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/wakeup_fd_cv_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/wakeup_fd_cv_test: $(WAKEUP_FD_CV_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(WAKEUP_FD_CV_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/wakeup_fd_cv_test
endif
$(OBJDIR)/$(CONFIG)/test/core/iomgr/wakeup_fd_cv_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_wakeup_fd_cv_test: $(WAKEUP_FD_CV_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(WAKEUP_FD_CV_TEST_OBJS:.o=.dep)
endif
endif
ALARM_TEST_SRC = \
test/cpp/common/alarm_test.cc \
@ -21311,6 +21315,53 @@ endif
endif
XDS_END2END_TEST_SRC = \
$(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc \
test/cpp/end2end/xds_end2end_test.cc \
XDS_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(XDS_END2END_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/xds_end2end_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.5.0+.
$(BINDIR)/$(CONFIG)/xds_end2end_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/xds_end2end_test: $(PROTOBUF_DEP) $(XDS_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(XDS_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/xds_end2end_test
endif
endif
$(OBJDIR)/$(CONFIG)/src/proto/grpc/lb/v1/load_balancer.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_xds_end2end_test: $(XDS_END2END_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(XDS_END2END_TEST_OBJS:.o=.dep)
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc
PUBLIC_HEADERS_MUST_BE_C89_SRC = \
test/core/surface/public_headers_must_be_c89.c \
@ -24311,6 +24362,38 @@ endif
endif
H2_SPIFFE_TEST_SRC = \
test/core/end2end/fixtures/h2_spiffe.cc \
H2_SPIFFE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(H2_SPIFFE_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/h2_spiffe_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/h2_spiffe_test: $(H2_SPIFFE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(H2_SPIFFE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/h2_spiffe_test
endif
$(OBJDIR)/$(CONFIG)/test/core/end2end/fixtures/h2_spiffe.o: $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_h2_spiffe_test: $(H2_SPIFFE_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(H2_SPIFFE_TEST_OBJS:.o=.dep)
endif
endif
H2_SSL_TEST_SRC = \
test/core/end2end/fixtures/h2_ssl.cc \
@ -25530,6 +25613,7 @@ src/core/lib/security/credentials/oauth2/oauth2_credentials.cc: $(OPENSSL_DEP)
src/core/lib/security/credentials/plugin/plugin_credentials.cc: $(OPENSSL_DEP)
src/core/lib/security/credentials/ssl/ssl_credentials.cc: $(OPENSSL_DEP)
src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc: $(OPENSSL_DEP)
src/core/lib/security/credentials/tls/spiffe_credentials.cc: $(OPENSSL_DEP)
src/core/lib/security/security_connector/alts/alts_security_connector.cc: $(OPENSSL_DEP)
src/core/lib/security/security_connector/fake/fake_security_connector.cc: $(OPENSSL_DEP)
src/core/lib/security/security_connector/load_system_roots_fallback.cc: $(OPENSSL_DEP)
@ -25538,6 +25622,7 @@ src/core/lib/security/security_connector/local/local_security_connector.cc: $(OP
src/core/lib/security/security_connector/security_connector.cc: $(OPENSSL_DEP)
src/core/lib/security/security_connector/ssl/ssl_security_connector.cc: $(OPENSSL_DEP)
src/core/lib/security/security_connector/ssl_utils.cc: $(OPENSSL_DEP)
src/core/lib/security/security_connector/tls/spiffe_security_connector.cc: $(OPENSSL_DEP)
src/core/lib/security/transport/client_auth_filter.cc: $(OPENSSL_DEP)
src/core/lib/security/transport/secure_endpoint.cc: $(OPENSSL_DEP)
src/core/lib/security/transport/security_handshaker.cc: $(OPENSSL_DEP)

@ -26,7 +26,7 @@
load("//bazel:cc_grpc_library.bzl", "cc_grpc_library")
# The set of pollers to test against if a test exercises polling
POLLERS = ["epollex", "epoll1", "poll", "poll-cv"]
POLLERS = ["epollex", "epoll1", "poll"]
def if_not_windows(a):
return select({
@ -113,6 +113,7 @@ def grpc_cc_library(
linkopts = linkopts,
includes = [
"include",
"src/core/ext/upb-generated",
],
alwayslink = alwayslink,
data = data,

@ -12,7 +12,7 @@ def grpc_deps():
)
native.bind(
name = "upblib",
name = "upb_lib",
actual = "@upb//:upb",
)
@ -124,8 +124,8 @@ def grpc_deps():
if "com_google_protobuf" not in native.existing_rules():
http_archive(
name = "com_google_protobuf",
strip_prefix = "protobuf-66dc42d891a4fc8e9190c524fd67961688a37bbe",
url = "https://github.com/google/protobuf/archive/66dc42d891a4fc8e9190c524fd67961688a37bbe.tar.gz",
strip_prefix = "protobuf-582743bf40c5d3639a70f98f183914a2c0cd0680",
url = "https://github.com/google/protobuf/archive/582743bf40c5d3639a70f98f183914a2c0cd0680.tar.gz",
)
if "com_github_nanopb_nanopb" not in native.existing_rules():
@ -203,8 +203,8 @@ def grpc_deps():
if "upb" not in native.existing_rules():
http_archive(
name = "upb",
strip_prefix = "upb-9ce4a77f61c134bbed28bfd5be5cd7dc0e80f5e3",
url = "https://github.com/google/upb/archive/9ce4a77f61c134bbed28bfd5be5cd7dc0e80f5e3.tar.gz",
strip_prefix = "upb-ed9faae0993704b033c594b072d65e1bf19207fa",
url = "https://github.com/google/upb/archive/ed9faae0993704b033c594b072d65e1bf19207fa.tar.gz",
)
# TODO: move some dependencies from "grpc_deps" here?

@ -333,7 +333,6 @@ filegroups:
- src/core/lib/iomgr/udp_server.cc
- src/core/lib/iomgr/unix_sockets_posix.cc
- src/core/lib/iomgr/unix_sockets_posix_noop.cc
- src/core/lib/iomgr/wakeup_fd_cv.cc
- src/core/lib/iomgr/wakeup_fd_eventfd.cc
- src/core/lib/iomgr/wakeup_fd_nospecial.cc
- src/core/lib/iomgr/wakeup_fd_pipe.cc
@ -498,7 +497,6 @@ filegroups:
- src/core/lib/iomgr/timer_manager.h
- src/core/lib/iomgr/udp_server.h
- src/core/lib/iomgr/unix_sockets_posix.h
- src/core/lib/iomgr/wakeup_fd_cv.h
- src/core/lib/iomgr/wakeup_fd_pipe.h
- src/core/lib/iomgr/wakeup_fd_posix.h
- src/core/lib/json/json.h
@ -835,6 +833,7 @@ filegroups:
- src/core/lib/security/credentials/plugin/plugin_credentials.h
- src/core/lib/security/credentials/ssl/ssl_credentials.h
- src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h
- src/core/lib/security/credentials/tls/spiffe_credentials.h
- src/core/lib/security/security_connector/alts/alts_security_connector.h
- src/core/lib/security/security_connector/fake/fake_security_connector.h
- src/core/lib/security/security_connector/load_system_roots.h
@ -843,6 +842,7 @@ filegroups:
- src/core/lib/security/security_connector/security_connector.h
- src/core/lib/security/security_connector/ssl/ssl_security_connector.h
- src/core/lib/security/security_connector/ssl_utils.h
- src/core/lib/security/security_connector/tls/spiffe_security_connector.h
- src/core/lib/security/transport/auth_filters.h
- src/core/lib/security/transport/secure_endpoint.h
- src/core/lib/security/transport/security_handshaker.h
@ -868,6 +868,7 @@ filegroups:
- src/core/lib/security/credentials/plugin/plugin_credentials.cc
- src/core/lib/security/credentials/ssl/ssl_credentials.cc
- src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc
- src/core/lib/security/credentials/tls/spiffe_credentials.cc
- src/core/lib/security/security_connector/alts/alts_security_connector.cc
- src/core/lib/security/security_connector/fake/fake_security_connector.cc
- src/core/lib/security/security_connector/load_system_roots_fallback.cc
@ -876,6 +877,7 @@ filegroups:
- src/core/lib/security/security_connector/security_connector.cc
- src/core/lib/security/security_connector/ssl/ssl_security_connector.cc
- src/core/lib/security/security_connector/ssl_utils.cc
- src/core/lib/security/security_connector/tls/spiffe_security_connector.cc
- src/core/lib/security/transport/client_auth_filter.cc
- src/core/lib/security/transport/secure_endpoint.cc
- src/core/lib/security/transport/security_handshaker.cc
@ -3741,21 +3743,6 @@ targets:
- grpc_test_util
- grpc
- gpr
- name: wakeup_fd_cv_test
build: test
language: c
src:
- test/core/iomgr/wakeup_fd_cv_test.cc
deps:
- grpc_test_util
- grpc
- gpr
exclude_iomgrs:
- uv
platforms:
- mac
- linux
- posix
- name: alarm_test
gtest: true
build: test
@ -4181,7 +4168,6 @@ targets:
defaults: benchmark
excluded_poll_engines:
- poll
- poll-cv
platforms:
- mac
- linux
@ -4207,7 +4193,6 @@ targets:
defaults: benchmark
excluded_poll_engines:
- poll
- poll-cv
platforms:
- mac
- linux
@ -4233,7 +4218,6 @@ targets:
- tsan
excluded_poll_engines:
- poll
- poll-cv
platforms:
- mac
- linux
@ -4259,7 +4243,6 @@ targets:
defaults: benchmark
excluded_poll_engines:
- poll
- poll-cv
platforms:
- mac
- linux
@ -5645,6 +5628,19 @@ targets:
- mac
- linux
- posix
- name: xds_end2end_test
gtest: true
build: test
language: c++
src:
- src/proto/grpc/lb/v1/load_balancer.proto
- test/cpp/end2end/xds_end2end_test.cc
deps:
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr
- name: public_headers_must_be_c89
build: test
language: c89
@ -5846,6 +5842,9 @@ defaults:
-Wno-deprecated-declarations -Ithird_party/nanopb -DPB_FIELD_32BIT
CXXFLAGS: -Wnon-virtual-dtor
LDFLAGS: -g
upb:
CFLAGS: -Ithird_party/upb -Wno-sign-conversion -Wno-shadow -Wno-conversion -Wno-implicit-fallthrough
-Wno-sign-compare -Wno-missing-field-initializers
zlib:
CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-implicit-function-declaration
-Wno-implicit-fallthrough $(W_NO_SHIFT_NEGATIVE_VALUE) -fvisibility=hidden

@ -187,7 +187,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/udp_server.cc \
src/core/lib/iomgr/unix_sockets_posix.cc \
src/core/lib/iomgr/unix_sockets_posix_noop.cc \
src/core/lib/iomgr/wakeup_fd_cv.cc \
src/core/lib/iomgr/wakeup_fd_eventfd.cc \
src/core/lib/iomgr/wakeup_fd_nospecial.cc \
src/core/lib/iomgr/wakeup_fd_pipe.cc \
@ -283,6 +282,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/security/credentials/plugin/plugin_credentials.cc \
src/core/lib/security/credentials/ssl/ssl_credentials.cc \
src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc \
src/core/lib/security/credentials/tls/spiffe_credentials.cc \
src/core/lib/security/security_connector/alts/alts_security_connector.cc \
src/core/lib/security/security_connector/fake/fake_security_connector.cc \
src/core/lib/security/security_connector/load_system_roots_fallback.cc \
@ -291,6 +291,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/security/security_connector/security_connector.cc \
src/core/lib/security/security_connector/ssl/ssl_security_connector.cc \
src/core/lib/security/security_connector/ssl_utils.cc \
src/core/lib/security/security_connector/tls/spiffe_security_connector.cc \
src/core/lib/security/transport/client_auth_filter.cc \
src/core/lib/security/transport/secure_endpoint.cc \
src/core/lib/security/transport/security_handshaker.cc \
@ -734,6 +735,7 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/security_connector/fake)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/security_connector/local)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/security_connector/ssl)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/security_connector/tls)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/transport)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/util)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/slice)

@ -162,7 +162,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\iomgr\\udp_server.cc " +
"src\\core\\lib\\iomgr\\unix_sockets_posix.cc " +
"src\\core\\lib\\iomgr\\unix_sockets_posix_noop.cc " +
"src\\core\\lib\\iomgr\\wakeup_fd_cv.cc " +
"src\\core\\lib\\iomgr\\wakeup_fd_eventfd.cc " +
"src\\core\\lib\\iomgr\\wakeup_fd_nospecial.cc " +
"src\\core\\lib\\iomgr\\wakeup_fd_pipe.cc " +
@ -258,6 +257,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\security\\credentials\\plugin\\plugin_credentials.cc " +
"src\\core\\lib\\security\\credentials\\ssl\\ssl_credentials.cc " +
"src\\core\\lib\\security\\credentials\\tls\\grpc_tls_credentials_options.cc " +
"src\\core\\lib\\security\\credentials\\tls\\spiffe_credentials.cc " +
"src\\core\\lib\\security\\security_connector\\alts\\alts_security_connector.cc " +
"src\\core\\lib\\security\\security_connector\\fake\\fake_security_connector.cc " +
"src\\core\\lib\\security\\security_connector\\load_system_roots_fallback.cc " +
@ -266,6 +266,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\security\\security_connector\\security_connector.cc " +
"src\\core\\lib\\security\\security_connector\\ssl\\ssl_security_connector.cc " +
"src\\core\\lib\\security\\security_connector\\ssl_utils.cc " +
"src\\core\\lib\\security\\security_connector\\tls\\spiffe_security_connector.cc " +
"src\\core\\lib\\security\\transport\\client_auth_filter.cc " +
"src\\core\\lib\\security\\transport\\secure_endpoint.cc " +
"src\\core\\lib\\security\\transport\\security_handshaker.cc " +
@ -749,6 +750,7 @@ if (PHP_GRPC != "no") {
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\security_connector\\fake");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\security_connector\\local");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\security_connector\\ssl");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\security_connector\\tls");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\transport");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\util");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\slice");

@ -23,11 +23,9 @@ There are multiple polling engine implementations depending on the OS and the OS
- **`epollex`** (default but requires kernel version >= 4.5),
- `epoll1` (If `epollex` is not available and glibc version >= 2.9)
- `poll` (If kernel does not have epoll support)
- `poll-cv` (If explicitly configured)
- Mac: **`poll`** (default), `poll-cv` (If explicitly configured)
- Mac: **`poll`** (default)
- Windows: (no name)
- One-off polling engines:
- AppEngine platform: **`poll-cv`** (default)
- NodeJS : `libuv` polling engine implementation (requires different compile `#define`s)
## Polling Engine Interface
@ -87,7 +85,7 @@ Add/Remove fd to the `grpc_pollset_set`
- **grpc\_pollset\_set_[add|del]\_pollset**
- Signature: `grpc_pollset_set_[add|del]_pollset(grpc_pollset_set* pss, grpc_pollset* ps)`
- What does adding a pollset to a pollset_set mean ?
- It means that calling `grpc_pollset_work()` on the pollset will also poll all the fds in the pollset_set i.e semantically, it is similar to adding all the fds inside pollset_set to the pollset.
- It means that calling `grpc_pollset_work()` on the pollset will also poll all the fds in the pollset_set i.e semantically, it is similar to adding all the fds inside pollset_set to the pollset.
- This guarantee is no longer true once the pollset is removed from the pollset_set
- **grpc\_pollset\_set_[add|del]\_pollset\_set**
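The guarantee described above for `grpc_pollset_set_[add|del]_pollset` can be pictured with a small toy model. This is purely illustrative (hypothetical `Pollset`/`PollsetSet` types, not the real iomgr API): working on a pollset covers its own fds plus the fds of every pollset_set attached to it.

```cpp
#include <set>
#include <vector>

// Toy stand-ins for grpc_pollset_set / grpc_pollset, used only to model the
// semantics described in the doc text above.
struct PollsetSet {
  std::set<int> fds;  // fds registered with the pollset_set
};

struct Pollset {
  std::set<int> own_fds;
  std::vector<const PollsetSet*> attached_sets;  // added via "add_pollset"

  // Everything a call like grpc_pollset_work() would have to watch:
  // the pollset's own fds plus all fds of attached pollset_sets.
  std::set<int> FdsToPoll() const {
    std::set<int> all = own_fds;
    for (const PollsetSet* pss : attached_sets) {
      all.insert(pss->fds.begin(), pss->fds.end());
    }
    return all;
  }
};
```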

@ -15,3 +15,4 @@ number:
`include/grpc/impl/codegen/grpc_types.h` (commit `af00d8b`)
(cannot be done until after next grpc release, so that TensorFlow can
use the same code both internally and externally)
- require a C++ runtime for all languages wrapping core.

@ -45,8 +45,9 @@ some configuration as environment variables that can be set.
- cares_address_sorting - traces operations of the c-ares based DNS
resolver's resolved address sorter
- channel - traces operations on the C core channel stack
- client_channel - traces client channel activity, including resolver
and load balancing policy interaction
- client_channel_call - traces client channel call batch activity
- client_channel_routing - traces client channel call routing, including
resolver and load balancing policy interaction
- compression - traces compression operations
- connectivity_state - traces connectivity state changes to channels
- executor - traces grpc's internal thread pool ('the executor')
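As a quick illustration of how these tracers are typically switched on, here is a minimal sketch assuming a POSIX environment; `GRPC_TRACE` and `GRPC_VERBOSITY` are the variables documented in this file, and the chosen tracer names are just examples from the list above.

```cpp
#include <cstdlib>

int main() {
  // Enable tracers before gRPC is initialized so they are picked up;
  // multiple tracers can be combined with commas.
  setenv("GRPC_TRACE", "client_channel_routing,compression", /*overwrite=*/1);
  setenv("GRPC_VERBOSITY", "DEBUG", /*overwrite=*/1);
  // ... create channels/servers as usual; trace output goes to stderr.
  return 0;
}
```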

@ -67,14 +67,10 @@ Resolvers should be able to contact the authority and get a resolution
that they return back to the gRPC client library. The returned contents
include:
- A list of resolved addresses, each of which has three attributes:
- The address itself, including both IP address and port.
- A boolean indicating whether the address is a backend address (i.e.,
the address to use to contact the server directly) or a balancer
address (for cases where [external load balancing](load-balancing.md)
is in use).
- The name of the balancer, if the address is a balancer address.
This will be used to perform peer authorization.
- A list of resolved addresses (both IP address and port). Each address
may have a set of arbitrary attributes (key/value pairs) associated with
it, which can be used to communicate information from the resolver to the
[load balancing](load-balancing.md) policy.
- A [service config](service_config.md).
The plugin API allows the resolvers to continuously watch an endpoint
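A rough sketch of the shape of the data described above, under the new model: the type names below are hypothetical stand-ins for illustration only, not the actual resolver API in core.

```cpp
#include <map>
#include <string>
#include <vector>

// Hypothetical sketch of what a resolver hands back to the client channel.
struct ResolvedAddress {
  std::string address;  // IP address and port, e.g. "10.0.0.1:443"
  // Arbitrary key/value attributes passed through to the LB policy.
  std::map<std::string, std::string> attributes;
};

struct ResolutionResult {
  std::vector<ResolvedAddress> addresses;
  std::string service_config_json;  // see service_config.md
};
```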

@ -1,13 +1,35 @@
# Status codes and their use in gRPC
gRPC uses a set of well defined status codes as part of the RPC API. All
RPCs started at a client return a `status` object composed of an integer
gRPC uses a set of well defined status codes as part of the RPC API. These
statuses are defined as such:
| Code | Number | Description | Closest HTTP Mapping |
|------------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------|
| OK | 0 | Not an error; returned on success. | 200 OK |
| CANCELLED | 1 | The operation was cancelled, typically by the caller. | 499 Client Closed Request |
| UNKNOWN | 2 | Unknown error. For example, this error may be returned when a `Status` value received from another address space belongs to an error space that is not known in this address space. Also errors raised by APIs that do not return enough error information may be converted to this error. | 500 Internal Server Error |
| INVALID_ARGUMENT | 3 | The client specified an invalid argument. Note that this differs from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments that are problematic regardless of the state of the system (e.g., a malformed file name). | 400 Bad Request |
| DEADLINE_EXCEEDED | 4 | The deadline expired before the operation could complete. For operations that change the state of the system, this error may be returned even if the operation has completed successfully. For example, a successful response from a server could have been delayed long | 504 Gateway Timeout |
| NOT_FOUND | 5 | Some requested entity (e.g., file or directory) was not found. Note to server developers: if a request is denied for an entire class of users, such as gradual feature rollout or undocumented whitelist, `NOT_FOUND` may be used. If a request is denied for some users within a class of users, such as user-based access control, `PERMISSION_DENIED` must be used. | 404 Not Found |
| ALREADY_EXISTS | 6 | The entity that a client attempted to create (e.g., file or directory) already exists. | 409 Conflict |
| PERMISSION_DENIED | 7 | The caller does not have permission to execute the specified operation. `PERMISSION_DENIED` must not be used for rejections caused by exhausting some resource (use `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` must not be used if the caller can not be identified (use `UNAUTHENTICATED` instead for those errors). This error code does not imply the request is valid or the requested entity exists or satisfies other pre-conditions. | 403 Forbidden |
| UNAUTHENTICATED | 16 | The request does not have valid authentication credentials for the operation. | 401 Unauthorized |
| RESOURCE_EXHAUSTED | 8 | Some resource has been exhausted, perhaps a per-user quota, or perhaps the entire file system is out of space. | 429 Too Many Requests |
| FAILED_PRECONDITION | 9 | The operation was rejected because the system is not in a state required for the operation's execution. For example, the directory to be deleted is non-empty, an rmdir operation is applied to a non-directory, etc. Service implementors can use the following guidelines to decide between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can retry just the failing call. (b) Use `ABORTED` if the client should retry at a higher level (e.g., when a client-specified test-and-set fails, indicating the client should restart a read-modify-write sequence). (c) Use `FAILED_PRECONDITION` if the client should not retry until the system state has been explicitly fixed. E.g., if an "rmdir" fails because the directory is non-empty, `FAILED_PRECONDITION` should be returned since the client should not retry unless the files are deleted from the directory. | 400 Bad Request |
| ABORTED | 10 | The operation was aborted, typically due to a concurrency issue such as a sequencer check failure or transaction abort. See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. | 409 Conflict |
| OUT_OF_RANGE | 11 | The operation was attempted past the valid range. E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, this error indicates a problem that may be fixed if the system state changes. For example, a 32-bit file system will generate `INVALID_ARGUMENT` if asked to read at an offset that is not in the range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to read from an offset past the current file size. There is a fair bit of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific error) when it applies so that callers who are iterating through a space can easily look for an `OUT_OF_RANGE` error to detect when they are done. | 400 Bad Request |
| UNIMPLEMENTED | 12 | The operation is not implemented or is not supported/enabled in this service. | 501 Not Implemented |
| INTERNAL | 13 | Internal errors. This means that some invariants expected by the underlying system have been broken. This error code is reserved for serious errors. | 500 Internal Server Error |
| UNAVAILABLE | 14 | The service is currently unavailable. This is most likely a transient condition, which can be corrected by retrying with a backoff. | 503 Service Unavailable |
| DATA_LOSS | 15 | Unrecoverable data loss or corruption. | 500 Internal Server Error |
All RPCs started at a client return a `status` object composed of an integer
`code` and a string `message`. The server-side can choose the status it
returns for a given RPC.
The gRPC client and server-side implementations may also generate and
return `status` on their own when errors happen. Only a subset of
the pre-defined status codes are generated by the gRPC libraries. This
allows applications to be sure that any other code they see was actually
returned by the application (although it is also possible for the
server-side to return one of the codes generated by the gRPC libraries).
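To illustrate the server side choosing its status, here is a minimal, hypothetical Python sketch; the service, method, and `FEATURE_DB` lookup are illustrative only and are not part of this change:

```python
import grpc

class RouteGuideServicer(object):  # hypothetical servicer for illustration
    def GetFeature(self, request, context):
        feature = FEATURE_DB.get(request.name)  # FEATURE_DB is a stand-in lookup
        if feature is None:
            # Ends the RPC with NOT_FOUND and a human-readable message.
            context.abort(grpc.StatusCode.NOT_FOUND, 'Feature not found.')
        return feature
```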
@ -49,4 +71,4 @@ The following status codes are never generated by the library:
- OUT_OF_RANGE
- DATA_LOSS
Applications that may wish to [retry](https://github.com/grpc/proposal/blob/master/A6-client-retries.md) failed RPCs must decide which status codes to retry on. As shown in the table above, the gRPC library can generate the same status code for different cases, and server applications can also return those same status codes. There is therefore no fixed list of status codes on which it is appropriate to retry in all applications; each application must make its own determination as to which status codes should cause an RPC to be retried.
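For illustration only, a minimal hand-rolled sketch of such an application-level policy in Python might look like the following; the set of retryable codes and the backoff are assumptions chosen for the example, not recommendations:

```python
import time

import grpc

_RETRYABLE_CODES = (grpc.StatusCode.UNAVAILABLE,)  # an application-specific choice

def call_with_retries(stub_method, request, max_attempts=3):
    """Invoke a unary-unary stub method, retrying on selected status codes."""
    for attempt in range(max_attempts):
        try:
            return stub_method(request)
        except grpc.RpcError as rpc_error:
            if rpc_error.code() not in _RETRYABLE_CODES or attempt == max_attempts - 1:
                raise
            time.sleep(2 ** attempt)  # simple exponential backoff between attempts
```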

@ -98,7 +98,7 @@ $ protoc -I ../../protos/ --cpp_out=. ../../protos/helloworld.proto
```
- Create a stub. A stub implements the rpc methods of a service and in the
generated code, a method is provided to create a stub with a channel:
```cpp
auto stub = helloworld::Greeter::NewStub(channel);

@ -0,0 +1,59 @@
# gRPC Bazel BUILD file.
#
# Copyright 2019 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@grpc_python_dependencies//:requirements.bzl", "requirement")
load("@org_pubref_rules_protobuf//python:rules.bzl", "py_proto_library")
py_proto_library(
name = "prime_proto",
protos = ["prime.proto",],
deps = [requirement("protobuf")],
)
py_binary(
name = "client",
testonly = 1,
srcs = ["client.py"],
deps = [
"//src/python/grpcio/grpc:grpcio",
":prime_proto",
],
default_python_version = "PY3",
)
py_binary(
name = "server",
testonly = 1,
srcs = ["server.py"],
deps = [
"//src/python/grpcio/grpc:grpcio",
":prime_proto"
] + select({
"//conditions:default": [requirement("futures")],
"//:python3": [],
}),
default_python_version = "PY3",
)
py_test(
name = "test/_multiprocessing_example_test",
srcs = ["test/_multiprocessing_example_test.py"],
data = [
":client",
":server"
],
size = "small",
)

@ -0,0 +1,67 @@
## Multiprocessing with gRPC Python
Multiprocessing allows application developers to sidestep the Python global
interpreter lock and achieve true concurrency on multicore systems.
Unfortunately, using multiprocessing and gRPC Python is not yet as simple as
instantiating your server with a `futures.ProcessPoolExecutor`.
The library is implemented as a C extension, maintaining much of the state that
drives the system in native code. As such, upon calling
[`fork`](http://man7.org/linux/man-pages/man2/fork.2.html), much of the
state copied into the child process is invalid, leading to hangs and crashes.
However, calling `fork` without `exec` in your Python process is supported
*before* any gRPC servers have been instantiated. Application developers can
take advantage of this to parallelize their CPU-intensive operations.
## Calculating Prime Numbers with Multiple Processes
This example determines the primality of the first 10,000 integers, issuing one RPC per candidate. We instantiate
one server per subprocess, balancing requests between the servers using the
[`SO_REUSEPORT`](https://lwn.net/Articles/542629/) socket option. Note that this
option is not available in the `manylinux1` wheels, which are, as of this
writing, the only gRPC Python wheels available on PyPI. To take advantage of this
feature, you'll need to build from source, either using bazel (as we do for
these examples) or via pip, using `pip install grpcio --no-binary grpcio`.
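At the socket level, the mechanism the example relies on looks roughly like the sketch below; it is a condensed version of what the `_reserve_port` helper in `server.py` (shown later in this change) does:

```python
import socket

sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
# With SO_REUSEPORT set, every worker process can bind the same port and the
# kernel distributes incoming connections across them.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind(('', 0))  # reserve an ephemeral port that the children will reuse
```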
```python
_PROCESS_COUNT = multiprocessing.cpu_count()
```
On the server side, we detect the number of CPUs available on the system and
spawn exactly that many child processes. If we spin up fewer, we won't be taking
full advantage of the hardware resources available.
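Condensed, the startup logic in `server.py` (shown later in this change) amounts to the following, where `_run_server` creates one gRPC server bound to the shared address:

```python
import multiprocessing

workers = []
for _ in range(_PROCESS_COUNT):
    # Fork each worker before any gRPC server exists in the parent process.
    worker = multiprocessing.Process(target=_run_server, args=(bind_address,))
    worker.start()
    workers.append(worker)
for worker in workers:
    worker.join()
```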
## Running the Example
To run the server,
[ensure `bazel` is installed](https://docs.bazel.build/versions/master/install.html)
and run:
```
bazel run //examples/python/multiprocessing:server &
```
Note the address at which the server is running. For example,
```
...
[PID 107153] Binding to '[::]:33915'
[PID 107507] Starting new server.
[PID 107508] Starting new server.
...
```
Note that several servers have been started, each with its own PID.
Now, start the client by running
```
bazel run //examples/python/multiprocessing:client -- [SERVER_ADDRESS]
```
For example,
```
bazel run //examples/python/multiprocessing:client -- [::]:33915
```

@ -0,0 +1,95 @@
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of multiprocessing concurrency with gRPC."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import atexit
import logging
import multiprocessing
import operator
import sys
import grpc
from examples.python.multiprocessing import prime_pb2
from examples.python.multiprocessing import prime_pb2_grpc
_PROCESS_COUNT = 8
_MAXIMUM_CANDIDATE = 10000
# Each worker process initializes a single channel after forking.
# It's regrettable, but to ensure that each subprocess only has to instantiate
# a single channel to be reused across all RPCs, we use globals.
_worker_channel_singleton = None
_worker_stub_singleton = None
_LOGGER = logging.getLogger(__name__)
def _shutdown_worker():
_LOGGER.info('Shutting worker process down.')
if _worker_channel_singleton is not None:
_worker_channel_singleton.close()
def _initialize_worker(server_address):
global _worker_channel_singleton # pylint: disable=global-statement
global _worker_stub_singleton # pylint: disable=global-statement
_LOGGER.info('Initializing worker process.')
_worker_channel_singleton = grpc.insecure_channel(server_address)
_worker_stub_singleton = prime_pb2_grpc.PrimeCheckerStub(
_worker_channel_singleton)
atexit.register(_shutdown_worker)
def _run_worker_query(primality_candidate):
_LOGGER.info('Checking primality of %s.', primality_candidate)
return _worker_stub_singleton.check(
prime_pb2.PrimeCandidate(candidate=primality_candidate))
def _calculate_primes(server_address):
worker_pool = multiprocessing.Pool(
processes=_PROCESS_COUNT,
initializer=_initialize_worker,
initargs=(server_address,))
check_range = range(2, _MAXIMUM_CANDIDATE)
primality = worker_pool.map(_run_worker_query, check_range)
primes = zip(check_range, map(operator.attrgetter('isPrime'), primality))
return tuple(primes)
def main():
msg = 'Determine the primality of the first {} integers.'.format(
_MAXIMUM_CANDIDATE)
parser = argparse.ArgumentParser(description=msg)
parser.add_argument(
'server_address',
help='The address of the server (e.g. localhost:50051)')
args = parser.parse_args()
primes = _calculate_primes(args.server_address)
print(primes)
if __name__ == '__main__':
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('[PID %(process)d] %(message)s')
handler.setFormatter(formatter)
_LOGGER.addHandler(handler)
_LOGGER.setLevel(logging.INFO)
main()

@ -0,0 +1,35 @@
// Copyright 2019 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package prime;
// A candidate integer for primality testing.
message PrimeCandidate {
// The candidate.
int64 candidate = 1;
}
// The primality of the requested integer candidate.
message Primality {
// Is the candidate prime?
bool isPrime = 1;
}
// Service to check primality.
service PrimeChecker {
// Determines the primality of an integer.
rpc check (PrimeCandidate) returns (Primality) {}
}

@ -0,0 +1,123 @@
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of multiprocess concurrency with gRPC."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import contextlib
import datetime
import logging
import math
import multiprocessing
import time
import socket
import sys
import grpc
from examples.python.multiprocessing import prime_pb2
from examples.python.multiprocessing import prime_pb2_grpc
_LOGGER = logging.getLogger(__name__)
_ONE_DAY = datetime.timedelta(days=1)
_PROCESS_COUNT = multiprocessing.cpu_count()
_THREAD_CONCURRENCY = _PROCESS_COUNT
def is_prime(n):
for i in range(2, int(math.sqrt(n)) + 1):  # include sqrt(n) so perfect squares are not reported as prime
if n % i == 0:
return False
else:
return True
class PrimeChecker(prime_pb2_grpc.PrimeCheckerServicer):
def check(self, request, context):
_LOGGER.info('Determining primality of %s', request.candidate)
return prime_pb2.Primality(isPrime=is_prime(request.candidate))
def _wait_forever(server):
try:
while True:
time.sleep(_ONE_DAY.total_seconds())
except KeyboardInterrupt:
server.stop(None)
def _run_server(bind_address):
"""Start a server in a subprocess."""
_LOGGER.info('Starting new server.')
options = (('grpc.so_reuseport', 1),)
# WARNING: This example takes advantage of SO_REUSEPORT. Due to the
# limitations of manylinux1, none of our precompiled Linux wheels currently
# support this option. (https://github.com/grpc/grpc/issues/18210). To take
# advantage of this feature, install from source with
# `pip install grpcio --no-binary grpcio`.
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=_THREAD_CONCURRENCY,),
options=options)
prime_pb2_grpc.add_PrimeCheckerServicer_to_server(PrimeChecker(), server)
server.add_insecure_port(bind_address)
server.start()
_wait_forever(server)
@contextlib.contextmanager
def _reserve_port():
"""Find and reserve a port for all subprocesses to use."""
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) != 1:
raise RuntimeError("Failed to set SO_REUSEPORT.")
sock.bind(('', 0))
try:
yield sock.getsockname()[1]
finally:
sock.close()
def main():
with _reserve_port() as port:
bind_address = 'localhost:{}'.format(port)
_LOGGER.info("Binding to '%s'", bind_address)
sys.stdout.flush()
workers = []
for _ in range(_PROCESS_COUNT):
# NOTE: It is imperative that the worker subprocesses be forked before
# any gRPC servers start up. See
# https://github.com/grpc/grpc/issues/16001 for more details.
worker = multiprocessing.Process(
target=_run_server, args=(bind_address,))
worker.start()
workers.append(worker)
for worker in workers:
worker.join()
if __name__ == '__main__':
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('[PID %(process)d] %(message)s')
handler.setFormatter(formatter)
_LOGGER.addHandler(handler)
_LOGGER.setLevel(logging.INFO)
main()

@ -0,0 +1,74 @@
# Copyright 2019 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for multiprocessing example."""
import ast
import logging
import math
import os
import re
import subprocess
import tempfile
import unittest
_BINARY_DIR = os.path.realpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
_SERVER_PATH = os.path.join(_BINARY_DIR, 'server')
_CLIENT_PATH = os.path.join(_BINARY_DIR, 'client')
def is_prime(n):
for i in range(2, int(math.sqrt(n)) + 1):  # include sqrt(n) so perfect squares are not reported as prime
if n % i == 0:
return False
else:
return True
def _get_server_address(server_stream):
while True:
server_stream.seek(0)
line = server_stream.readline()
while line:
matches = re.search('Binding to \'(.+)\'', line)
if matches is not None:
return matches.groups()[0]
line = server_stream.readline()
class MultiprocessingExampleTest(unittest.TestCase):
def test_multiprocessing_example(self):
server_stdout = tempfile.TemporaryFile(mode='r')
server_process = subprocess.Popen((_SERVER_PATH,), stdout=server_stdout)
server_address = _get_server_address(server_stdout)
client_stdout = tempfile.TemporaryFile(mode='r')
client_process = subprocess.Popen(
(
_CLIENT_PATH,
server_address,
), stdout=client_stdout)
client_process.wait()
server_process.terminate()
client_stdout.seek(0)
results = ast.literal_eval(client_stdout.read().strip().split('\n')[-1])
values = tuple(result[0] for result in results)
self.assertSequenceEqual(range(2, 10000), values)
for result in results:
self.assertEqual(is_prime(result[0]), result[1])
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)

@ -300,6 +300,7 @@ Pod::Spec.new do |s|
'src/core/lib/security/credentials/plugin/plugin_credentials.h',
'src/core/lib/security/credentials/ssl/ssl_credentials.h',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h',
'src/core/lib/security/credentials/tls/spiffe_credentials.h',
'src/core/lib/security/security_connector/alts/alts_security_connector.h',
'src/core/lib/security/security_connector/fake/fake_security_connector.h',
'src/core/lib/security/security_connector/load_system_roots.h',
@ -308,6 +309,7 @@ Pod::Spec.new do |s|
'src/core/lib/security/security_connector/security_connector.h',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.h',
'src/core/lib/security/security_connector/ssl_utils.h',
'src/core/lib/security/security_connector/tls/spiffe_security_connector.h',
'src/core/lib/security/transport/auth_filters.h',
'src/core/lib/security/transport/secure_endpoint.h',
'src/core/lib/security/transport/security_handshaker.h',
@ -475,7 +477,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/timer_manager.h',
'src/core/lib/iomgr/udp_server.h',
'src/core/lib/iomgr/unix_sockets_posix.h',
'src/core/lib/iomgr/wakeup_fd_cv.h',
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/json/json.h',
@ -667,7 +668,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/timer_manager.h',
'src/core/lib/iomgr/udp_server.h',
'src/core/lib/iomgr/unix_sockets_posix.h',
'src/core/lib/iomgr/wakeup_fd_cv.h',
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/json/json.h',

@ -293,6 +293,7 @@ Pod::Spec.new do |s|
'src/core/lib/security/credentials/plugin/plugin_credentials.h',
'src/core/lib/security/credentials/ssl/ssl_credentials.h',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h',
'src/core/lib/security/credentials/tls/spiffe_credentials.h',
'src/core/lib/security/security_connector/alts/alts_security_connector.h',
'src/core/lib/security/security_connector/fake/fake_security_connector.h',
'src/core/lib/security/security_connector/load_system_roots.h',
@ -301,6 +302,7 @@ Pod::Spec.new do |s|
'src/core/lib/security/security_connector/security_connector.h',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.h',
'src/core/lib/security/security_connector/ssl_utils.h',
'src/core/lib/security/security_connector/tls/spiffe_security_connector.h',
'src/core/lib/security/transport/auth_filters.h',
'src/core/lib/security/transport/secure_endpoint.h',
'src/core/lib/security/transport/security_handshaker.h',
@ -468,7 +470,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/timer_manager.h',
'src/core/lib/iomgr/udp_server.h',
'src/core/lib/iomgr/unix_sockets_posix.h',
'src/core/lib/iomgr/wakeup_fd_cv.h',
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/json/json.h',
@ -634,7 +635,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/udp_server.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_cv.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
@ -730,6 +730,7 @@ Pod::Spec.new do |s|
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/spiffe_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
@ -738,6 +739,7 @@ Pod::Spec.new do |s|
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/tls/spiffe_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
@ -921,6 +923,7 @@ Pod::Spec.new do |s|
'src/core/lib/security/credentials/plugin/plugin_credentials.h',
'src/core/lib/security/credentials/ssl/ssl_credentials.h',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h',
'src/core/lib/security/credentials/tls/spiffe_credentials.h',
'src/core/lib/security/security_connector/alts/alts_security_connector.h',
'src/core/lib/security/security_connector/fake/fake_security_connector.h',
'src/core/lib/security/security_connector/load_system_roots.h',
@ -929,6 +932,7 @@ Pod::Spec.new do |s|
'src/core/lib/security/security_connector/security_connector.h',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.h',
'src/core/lib/security/security_connector/ssl_utils.h',
'src/core/lib/security/security_connector/tls/spiffe_security_connector.h',
'src/core/lib/security/transport/auth_filters.h',
'src/core/lib/security/transport/secure_endpoint.h',
'src/core/lib/security/transport/security_handshaker.h',
@ -1096,7 +1100,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/timer_manager.h',
'src/core/lib/iomgr/udp_server.h',
'src/core/lib/iomgr/unix_sockets_posix.h',
'src/core/lib/iomgr/wakeup_fd_cv.h',
'src/core/lib/iomgr/wakeup_fd_pipe.h',
'src/core/lib/iomgr/wakeup_fd_posix.h',
'src/core/lib/json/json.h',

@ -149,7 +149,6 @@ EXPORTS
grpc_byte_buffer_reader_init
grpc_byte_buffer_reader_destroy
grpc_byte_buffer_reader_next
grpc_byte_buffer_reader_peek
grpc_byte_buffer_reader_readall
grpc_raw_byte_buffer_from_reader
gpr_log_severity_string

@ -29,7 +29,7 @@ Gem::Specification.new do |s|
s.require_paths = %w( src/ruby/lib src/ruby/bin src/ruby/pb )
s.platform = Gem::Platform::RUBY
s.add_dependency 'google-protobuf', '~> 3.1'
s.add_dependency 'google-protobuf', '~> 3.7'
s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0'
s.add_development_dependency 'bundler', '~> 1.9'
@ -223,6 +223,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/security/credentials/plugin/plugin_credentials.h )
s.files += %w( src/core/lib/security/credentials/ssl/ssl_credentials.h )
s.files += %w( src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h )
s.files += %w( src/core/lib/security/credentials/tls/spiffe_credentials.h )
s.files += %w( src/core/lib/security/security_connector/alts/alts_security_connector.h )
s.files += %w( src/core/lib/security/security_connector/fake/fake_security_connector.h )
s.files += %w( src/core/lib/security/security_connector/load_system_roots.h )
@ -231,6 +232,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/security/security_connector/security_connector.h )
s.files += %w( src/core/lib/security/security_connector/ssl/ssl_security_connector.h )
s.files += %w( src/core/lib/security/security_connector/ssl_utils.h )
s.files += %w( src/core/lib/security/security_connector/tls/spiffe_security_connector.h )
s.files += %w( src/core/lib/security/transport/auth_filters.h )
s.files += %w( src/core/lib/security/transport/secure_endpoint.h )
s.files += %w( src/core/lib/security/transport/security_handshaker.h )
@ -402,7 +404,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/timer_manager.h )
s.files += %w( src/core/lib/iomgr/udp_server.h )
s.files += %w( src/core/lib/iomgr/unix_sockets_posix.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_cv.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.h )
s.files += %w( src/core/lib/iomgr/wakeup_fd_posix.h )
s.files += %w( src/core/lib/json/json.h )
@ -568,7 +569,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/udp_server.cc )
s.files += %w( src/core/lib/iomgr/unix_sockets_posix.cc )
s.files += %w( src/core/lib/iomgr/unix_sockets_posix_noop.cc )
s.files += %w( src/core/lib/iomgr/wakeup_fd_cv.cc )
s.files += %w( src/core/lib/iomgr/wakeup_fd_eventfd.cc )
s.files += %w( src/core/lib/iomgr/wakeup_fd_nospecial.cc )
s.files += %w( src/core/lib/iomgr/wakeup_fd_pipe.cc )
@ -664,6 +664,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/security/credentials/plugin/plugin_credentials.cc )
s.files += %w( src/core/lib/security/credentials/ssl/ssl_credentials.cc )
s.files += %w( src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc )
s.files += %w( src/core/lib/security/credentials/tls/spiffe_credentials.cc )
s.files += %w( src/core/lib/security/security_connector/alts/alts_security_connector.cc )
s.files += %w( src/core/lib/security/security_connector/fake/fake_security_connector.cc )
s.files += %w( src/core/lib/security/security_connector/load_system_roots_fallback.cc )
@ -672,6 +673,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/security/security_connector/security_connector.cc )
s.files += %w( src/core/lib/security/security_connector/ssl/ssl_security_connector.cc )
s.files += %w( src/core/lib/security/security_connector/ssl_utils.cc )
s.files += %w( src/core/lib/security/security_connector/tls/spiffe_security_connector.cc )
s.files += %w( src/core/lib/security/transport/client_auth_filter.cc )
s.files += %w( src/core/lib/security/transport/secure_endpoint.cc )
s.files += %w( src/core/lib/security/transport/security_handshaker.cc )

@ -369,7 +369,6 @@
'src/core/lib/iomgr/udp_server.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_cv.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
@ -465,6 +464,7 @@
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/spiffe_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
@ -473,6 +473,7 @@
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/tls/spiffe_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
@ -735,7 +736,6 @@
'src/core/lib/iomgr/udp_server.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_cv.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
@ -980,7 +980,6 @@
'src/core/lib/iomgr/udp_server.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_cv.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
@ -1201,7 +1200,6 @@
'src/core/lib/iomgr/udp_server.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_cv.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
@ -2643,6 +2641,24 @@
'third_party/benchmark/src/timers.cc',
],
},
{
'target_name': 'upb',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/upb/google/protobuf/descriptor.upb.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/handlers.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/msgfactory.c',
'third_party/upb/upb/sink.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/upb.c',
],
},
{
'target_name': 'z',
'type': 'static_library',

@ -191,6 +191,15 @@ typedef struct {
try to get the roots set by grpc_override_ssl_default_roots. Eventually,
if all these fail, it will try to get the roots from a well-known place on
disk (in the grpc install directory).
gRPC caches the default root certificates if the underlying OpenSSL library
supports it. This cache applies only to the default root certificates, which
are used when this parameter is nullptr. If the user provides their own
pem_root_certs when creating an SSL credential object, gRPC cannot cache
them, and each subchannel will create its own copy of the root store. It is
therefore recommended not to pass large root pem files via the
pem_root_certs parameter, to avoid excessive memory consumption,
particularly on mobile platforms such as iOS.
- pem_key_cert_pair is a pointer on the object containing client's private
key and certificate chain. This parameter can be NULL if the client does
not have such a key/cert pair.
@ -711,7 +720,7 @@ struct grpc_tls_credential_reload_arg {
grpc_tls_on_credential_reload_done_cb cb;
void* cb_user_data;
grpc_tls_key_materials_config* key_materials_config;
grpc_status_code status;
grpc_ssl_certificate_config_reload_status status;
const char* error_details;
};
@ -758,17 +767,19 @@ typedef void (*grpc_tls_on_server_authorization_check_done_cb)(
/** A struct containing all information necessary to schedule/cancel a server
authorization check request. cb and cb_user_data represent a gRPC-provided
callback and an argument passed to it. result will store the result of
server authorization check. target_name is the name of an endpoint the
channel is connecting to and certificate represents a complete certificate
chain including both signing and leaf certificates. status and error_details
contain information about errors occurred when a server authorization check
request is scheduled/cancelled. It is used for experimental purpose for now
and subject to change.*/
callback and an argument passed to it. success will store the result of
server authorization check. That is, if success returns a non-zero value, it
means the authorization check passes and if returning zero, it means the
check fails. target_name is the name of an endpoint the channel is connecting
to and certificate represents a complete certificate chain including both
signing and leaf certificates. status and error_details contain information
about errors occurred when a server authorization check request is
scheduled/cancelled. It is used for experimental purpose for now and subject
to change.*/
struct grpc_tls_server_authorization_check_arg {
grpc_tls_on_server_authorization_check_done_cb cb;
void* cb_user_data;
int result;
int success;
const char* target_name;
const char* peer_cert;
grpc_status_code status;
@ -804,6 +815,37 @@ grpc_tls_server_authorization_check_config_create(
grpc_tls_server_authorization_check_arg* arg),
void (*destruct)(void* config_user_data));
/** --- SPIFFE channel/server credentials --- **/
/**
* This method creates a TLS SPIFFE channel credential object.
* It takes ownership of the options parameter.
*
* - options: grpc TLS credentials options instance.
*
* It returns the created credential object.
*
* It is used for experimental purpose for now and subject
* to change.
*/
grpc_channel_credentials* grpc_tls_spiffe_credentials_create(
grpc_tls_credentials_options* options);
/**
* This method creates a TLS server credential object.
* It takes ownership of the options parameter.
*
* - options: grpc TLS credentials options instance.
*
* It returns the created credential object.
*
* It is used for experimental purpose for now and subject
* to change.
*/
grpc_server_credentials* grpc_tls_spiffe_server_credentials_create(
grpc_tls_credentials_options* options);
#ifdef __cplusplus
}
#endif

@ -73,19 +73,6 @@ GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader);
GRPCAPI int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
grpc_slice* slice);
/** EXPERIMENTAL API - This function may be removed and changed, in the future.
*
* Updates \a slice with the next piece of data from from \a reader and returns
* 1. Returns 0 at the end of the stream. Caller is responsible for making sure
* the slice pointer remains valid when accessed.
*
* NOTE: Do not use this function unless the caller can guarantee that the
* underlying grpc_byte_buffer outlasts the use of the slice. This is only
* safe when the underlying grpc_byte_buffer remains immutable while slice
* is being accessed. */
GRPCAPI int grpc_byte_buffer_reader_peek(grpc_byte_buffer_reader* reader,
grpc_slice** slice);
/** Merge all data from \a reader into single slice */
GRPCAPI grpc_slice
grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader* reader);

@ -317,6 +317,10 @@ typedef struct {
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. Default value is 10000. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the xDS load
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. Default value is 10000. */
#define GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS "grpc.xds_fallback_timeout_ms"
/** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
"grpc.workaround.cronet_compression"

@ -21,6 +21,7 @@
#include <grpcpp/impl/codegen/async_stream.h>
#include <grpcpp/impl/codegen/byte_buffer.h>
#include <grpcpp/impl/codegen/server_callback.h>
struct grpc_server;
@ -41,6 +42,12 @@ class GenericServerContext final : public ServerContext {
friend class Server;
friend class ServerInterface;
void Clear() {
method_.clear();
host_.clear();
ServerContext::Clear();
}
grpc::string method_;
grpc::string host_;
};
@ -76,6 +83,50 @@ class AsyncGenericService final {
Server* server_;
};
namespace experimental {
class ServerGenericBidiReactor
: public ServerBidiReactor<ByteBuffer, ByteBuffer> {
public:
void OnStarted(ServerContext* ctx) final {
OnStarted(static_cast<GenericServerContext*>(ctx));
}
virtual void OnStarted(GenericServerContext* ctx) {}
};
} // namespace experimental
namespace internal {
class UnimplementedGenericBidiReactor
: public experimental::ServerGenericBidiReactor {
public:
void OnDone() override { delete this; }
void OnStarted(GenericServerContext*) override {
this->Finish(Status(StatusCode::UNIMPLEMENTED, ""));
}
};
} // namespace internal
namespace experimental {
class CallbackGenericService {
public:
CallbackGenericService() {}
virtual ~CallbackGenericService() {}
virtual ServerGenericBidiReactor* CreateReactor() {
return new internal::UnimplementedGenericBidiReactor;
}
private:
friend class ::grpc::Server;
internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>* Handler() {
return new internal::CallbackBidiHandler<ByteBuffer, ByteBuffer>(
[this] { return CreateReactor(); });
}
Server* server_{nullptr};
};
} // namespace experimental
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_ASYNC_GENERIC_SERVICE_H

@ -112,6 +112,8 @@ class ClientCallbackReaderWriter {
virtual void Write(const Request* req, WriteOptions options) = 0;
virtual void WritesDone() = 0;
virtual void Read(Response* resp) = 0;
virtual void AddHold(int holds) = 0;
virtual void RemoveHold() = 0;
protected:
void BindReactor(ClientBidiReactor<Request, Response>* reactor) {
@ -125,6 +127,8 @@ class ClientCallbackReader {
virtual ~ClientCallbackReader() {}
virtual void StartCall() = 0;
virtual void Read(Response* resp) = 0;
virtual void AddHold(int holds) = 0;
virtual void RemoveHold() = 0;
protected:
void BindReactor(ClientReadReactor<Response>* reactor) {
@ -144,6 +148,9 @@ class ClientCallbackWriter {
}
virtual void WritesDone() = 0;
virtual void AddHold(int holds) = 0;
virtual void RemoveHold() = 0;
protected:
void BindReactor(ClientWriteReactor<Request>* reactor) {
reactor->BindWriter(this);
@ -174,6 +181,29 @@ class ClientBidiReactor {
}
void StartWritesDone() { stream_->WritesDone(); }
/// Holds are needed if (and only if) this stream has operations that take
/// place on it after StartCall but from outside one of the reactions
/// (OnReadDone, etc). This is _not_ a common use of the streaming API.
///
/// Holds must be added before calling StartCall. If a stream still has a hold
/// in place, its resources will not be destroyed even if the status has
/// already come in from the wire and there are currently no active callbacks
/// outstanding. Similarly, the stream will not call OnDone if there are still
/// holds on it.
///
/// For example, if a StartRead or StartWrite operation is going to be
/// initiated from elsewhere in the application, the application should call
/// AddHold or AddMultipleHolds before StartCall. If there is going to be,
/// for example, a read-flow and a write-flow taking place outside the
/// reactions, then call AddMultipleHolds(2) before StartCall. When the
/// application knows that it won't issue any more Read operations (such as
/// when a read comes back as not ok), it should issue a RemoveHold(). It
/// should also call RemoveHold() again after it does StartWriteLast or
/// StartWritesDone that indicates that there will be no more Write ops.
void AddHold() { AddMultipleHolds(1); }
void AddMultipleHolds(int holds) { stream_->AddHold(holds); }
void RemoveHold() { stream_->RemoveHold(); }
private:
friend class ClientCallbackReaderWriter<Request, Response>;
void BindStream(ClientCallbackReaderWriter<Request, Response>* stream) {
@ -193,6 +223,10 @@ class ClientReadReactor {
void StartCall() { reader_->StartCall(); }
void StartRead(Response* resp) { reader_->Read(resp); }
void AddHold() { AddMultipleHolds(1); }
void AddMultipleHolds(int holds) { reader_->AddHold(holds); }
void RemoveHold() { reader_->RemoveHold(); }
private:
friend class ClientCallbackReader<Response>;
void BindReader(ClientCallbackReader<Response>* reader) { reader_ = reader; }
@ -218,6 +252,10 @@ class ClientWriteReactor {
}
void StartWritesDone() { writer_->WritesDone(); }
void AddHold() { AddMultipleHolds(1); }
void AddMultipleHolds(int holds) { writer_->AddHold(holds); }
void RemoveHold() { writer_->RemoveHold(); }
private:
friend class ClientCallbackWriter<Request>;
void BindWriter(ClientCallbackWriter<Request>* writer) { writer_ = writer; }
@ -374,6 +412,9 @@ class ClientCallbackReaderWriterImpl
}
}
virtual void AddHold(int holds) override { callbacks_outstanding_ += holds; }
virtual void RemoveHold() override { MaybeFinish(); }
private:
friend class ClientCallbackReaderWriterFactory<Request, Response>;
@ -509,6 +550,9 @@ class ClientCallbackReaderImpl
}
}
virtual void AddHold(int holds) override { callbacks_outstanding_ += holds; }
virtual void RemoveHold() override { MaybeFinish(); }
private:
friend class ClientCallbackReaderFactory<Response>;
@ -677,6 +721,9 @@ class ClientCallbackWriterImpl
}
}
virtual void AddHold(int holds) override { callbacks_outstanding_ += holds; }
virtual void RemoveHold() override { MaybeFinish(); }
private:
friend class ClientCallbackWriterFactory<Request>;

@ -85,8 +85,6 @@ class CoreCodegen final : public CoreCodegenInterface {
grpc_byte_buffer_reader* reader) override;
int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
grpc_slice* slice) override;
int grpc_byte_buffer_reader_peek(grpc_byte_buffer_reader* reader,
grpc_slice** slice) override;
grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice,
size_t nslices) override;

@ -92,8 +92,6 @@ class CoreCodegenInterface {
grpc_byte_buffer_reader* reader) = 0;
virtual int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
grpc_slice* slice) = 0;
virtual int grpc_byte_buffer_reader_peek(grpc_byte_buffer_reader* reader,
grpc_slice** slice) = 0;
virtual grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slice,
size_t nslices) = 0;

@ -403,7 +403,6 @@ class InterceptorBatchMethodsImpl
grpc_status_code* code_ = nullptr;
grpc::string* error_details_ = nullptr;
grpc::string* error_message_ = nullptr;
Status send_status_;
std::multimap<grpc::string, grpc::string>* send_trailing_metadata_ = nullptr;

@ -73,7 +73,7 @@ class ProtoBufferReader : public ::grpc::protobuf::io::ZeroCopyInputStream {
}
/// If we have backed up previously, we need to return the backed-up slice
if (backup_count_ > 0) {
*data = GRPC_SLICE_START_PTR(*slice_) + GRPC_SLICE_LENGTH(*slice_) -
*data = GRPC_SLICE_START_PTR(slice_) + GRPC_SLICE_LENGTH(slice_) -
backup_count_;
GPR_CODEGEN_ASSERT(backup_count_ <= INT_MAX);
*size = (int)backup_count_;
@ -81,14 +81,15 @@ class ProtoBufferReader : public ::grpc::protobuf::io::ZeroCopyInputStream {
return true;
}
/// Otherwise get the next slice from the byte buffer reader
if (!g_core_codegen_interface->grpc_byte_buffer_reader_peek(&reader_,
if (!g_core_codegen_interface->grpc_byte_buffer_reader_next(&reader_,
&slice_)) {
return false;
}
*data = GRPC_SLICE_START_PTR(*slice_);
g_core_codegen_interface->grpc_slice_unref(slice_);
*data = GRPC_SLICE_START_PTR(slice_);
// On win x64, int is only 32bit
GPR_CODEGEN_ASSERT(GRPC_SLICE_LENGTH(*slice_) <= INT_MAX);
byte_count_ += * size = (int)GRPC_SLICE_LENGTH(*slice_);
GPR_CODEGEN_ASSERT(GRPC_SLICE_LENGTH(slice_) <= INT_MAX);
byte_count_ += * size = (int)GRPC_SLICE_LENGTH(slice_);
return true;
}
@ -99,7 +100,7 @@ class ProtoBufferReader : public ::grpc::protobuf::io::ZeroCopyInputStream {
/// bytes that have already been returned by the last call of Next.
/// So do the backup and have that ready for a later Next.
void BackUp(int count) override {
GPR_CODEGEN_ASSERT(count <= static_cast<int>(GRPC_SLICE_LENGTH(*slice_)));
GPR_CODEGEN_ASSERT(count <= static_cast<int>(GRPC_SLICE_LENGTH(slice_)));
backup_count_ = count;
}
@ -134,15 +135,14 @@ class ProtoBufferReader : public ::grpc::protobuf::io::ZeroCopyInputStream {
int64_t backup_count() { return backup_count_; }
void set_backup_count(int64_t backup_count) { backup_count_ = backup_count; }
grpc_byte_buffer_reader* reader() { return &reader_; }
grpc_slice* slice() { return slice_; }
grpc_slice** mutable_slice_ptr() { return &slice_; }
grpc_slice* slice() { return &slice_; }
private:
int64_t byte_count_; ///< total bytes read since object creation
int64_t backup_count_; ///< how far backed up in the stream we are
grpc_byte_buffer_reader reader_; ///< internal object to read \a grpc_slice
///< from the \a grpc_byte_buffer
grpc_slice* slice_; ///< current slice passed back to the caller
grpc_slice slice_; ///< current slice passed back to the caller
Status status_; ///< status of the entire object
};

@ -43,6 +43,10 @@ struct census_context;
namespace grpc {
class ClientContext;
class GenericServerContext;
class CompletionQueue;
class Server;
class ServerInterface;
template <class W, class R>
class ServerAsyncReader;
template <class W>
@ -55,6 +59,7 @@ template <class R>
class ServerReader;
template <class W>
class ServerWriter;
namespace internal {
template <class W, class R>
class ServerReaderWriterBody;
@ -82,10 +87,6 @@ class Call;
class ServerReactor;
} // namespace internal
class CompletionQueue;
class Server;
class ServerInterface;
namespace testing {
class InteropServerContextInspector;
class ServerContextTestSpouse;
@ -302,6 +303,7 @@ class ServerContext {
template <StatusCode code>
friend class internal::ErrorMethodHandler;
friend class ::grpc::ClientContext;
friend class ::grpc::GenericServerContext;
/// Prevent copying.
ServerContext(const ServerContext&);

@ -47,6 +47,10 @@ namespace internal {
class ServerAsyncStreamingInterface;
} // namespace internal
namespace experimental {
class CallbackGenericService;
} // namespace experimental
class ServerInterface : public internal::CallHook {
public:
virtual ~ServerInterface() {}
@ -115,6 +119,25 @@ class ServerInterface : public internal::CallHook {
/// service. The service must exist for the lifetime of the Server instance.
virtual void RegisterAsyncGenericService(AsyncGenericService* service) = 0;
/// NOTE: class experimental_registration_interface is not part of the public
/// API of this class
/// TODO(vjpai): Move these contents to public API when no longer experimental
class experimental_registration_interface {
public:
virtual ~experimental_registration_interface() {}
/// May not be abstract since this is a post-1.0 API addition
virtual void RegisterCallbackGenericService(
experimental::CallbackGenericService* service) {}
};
/// NOTE: The function experimental_registration() is not stable public API.
/// It is a view to the experimental components of this class. It may be
/// changed or removed at any time. May not be abstract since this is a
/// post-1.0 API addition
virtual experimental_registration_interface* experimental_registration() {
return nullptr;
}
/// Tries to bind \a server to the given \a addr.
///
/// It can be invoked multiple times.

@ -202,6 +202,8 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
friend class ServerInitializer;
class SyncRequest;
class CallbackRequestBase;
template <class ServerContextType>
class CallbackRequest;
class UnimplementedAsyncRequest;
class UnimplementedAsyncResponse;
@ -216,6 +218,34 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
/// service. The service must exist for the lifetime of the Server instance.
void RegisterAsyncGenericService(AsyncGenericService* service) override;
/// NOTE: class experimental_registration_type is not part of the public API
/// of this class
/// TODO(vjpai): Move these contents to the public API of Server when
/// they are no longer experimental
class experimental_registration_type final
: public experimental_registration_interface {
public:
explicit experimental_registration_type(Server* server) : server_(server) {}
void RegisterCallbackGenericService(
experimental::CallbackGenericService* service) override {
server_->RegisterCallbackGenericService(service);
}
private:
Server* server_;
};
/// TODO(vjpai): Mark this override when experimental type above is deleted
void RegisterCallbackGenericService(
experimental::CallbackGenericService* service);
/// NOTE: The function experimental_registration() is not stable public API.
/// It is a view to the experimental components of this class. It may be
/// changed or removed at any time.
experimental_registration_interface* experimental_registration() override {
return &experimental_registration_;
}
void PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) override;
@ -257,7 +287,11 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
std::vector<gpr_atm> callback_unmatched_reqs_count_;
// List of callback requests to start when server actually starts.
std::list<CallbackRequest*> callback_reqs_to_start_;
std::list<CallbackRequestBase*> callback_reqs_to_start_;
// For registering experimental callback generic service; remove when that
// method is no longer experimental
experimental_registration_type experimental_registration_{this};
// Server status
std::mutex mu_;
@ -281,7 +315,8 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
std::shared_ptr<GlobalCallbacks> global_callbacks_;
std::vector<grpc::string> services_;
bool has_generic_service_;
bool has_async_generic_service_{false};
bool has_callback_generic_service_{false};
// Pointer to the wrapped grpc_server.
grpc_server* server_;
@ -291,9 +326,16 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
std::unique_ptr<HealthCheckServiceInterface> health_check_service_;
bool health_check_service_disabled_;
// When appropriate, use a default callback generic service to handle
// unimplemented methods
std::unique_ptr<experimental::CallbackGenericService> unimplemented_service_;
// A special handler for resource exhausted in sync case
std::unique_ptr<internal::MethodHandler> resource_exhausted_handler_;
// Handler for callback generic service, if any
std::unique_ptr<internal::MethodHandler> generic_handler_;
// callback_cq_ references the callbackable completion queue associated
// with this server (if any). It is set on the first call to CallbackCQ().
// It is _not owned_ by the server; ownership belongs with its internal

@ -48,6 +48,10 @@ class Service;
namespace testing {
class ServerBuilderPluginTest;
} // namespace testing
namespace experimental {
class CallbackGenericService;
}
} // namespace grpc
namespace grpc_impl {
@ -233,6 +237,9 @@ class ServerBuilder {
builder_->interceptor_creators_ = std::move(interceptor_creators);
}
ServerBuilder& RegisterCallbackGenericService(
grpc::experimental::CallbackGenericService* service);
private:
ServerBuilder* builder_;
};
@ -318,6 +325,7 @@ class ServerBuilder {
std::vector<std::unique_ptr<grpc::ServerBuilderPlugin>> plugins_;
grpc_resource_quota* resource_quota_;
grpc::AsyncGenericService* generic_service_;
grpc::experimental::CallbackGenericService* callback_generic_service_{nullptr};
struct {
bool is_set;
grpc_compression_level level;

@ -228,6 +228,7 @@
<file baseinstalldir="/" name="src/core/lib/security/credentials/plugin/plugin_credentials.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/credentials/ssl/ssl_credentials.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/credentials/tls/spiffe_credentials.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/alts/alts_security_connector.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/fake/fake_security_connector.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/load_system_roots.h" role="src" />
@ -236,6 +237,7 @@
<file baseinstalldir="/" name="src/core/lib/security/security_connector/security_connector.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/ssl/ssl_security_connector.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/ssl_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/tls/spiffe_security_connector.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/transport/auth_filters.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/transport/secure_endpoint.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/transport/security_handshaker.h" role="src" />
@ -407,7 +409,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/timer_manager.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/udp_server.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/unix_sockets_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_cv.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/json/json.h" role="src" />
@ -573,7 +574,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/udp_server.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/unix_sockets_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/unix_sockets_posix_noop.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_cv.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_eventfd.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_nospecial.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/wakeup_fd_pipe.cc" role="src" />
@ -669,6 +669,7 @@
<file baseinstalldir="/" name="src/core/lib/security/credentials/plugin/plugin_credentials.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/credentials/ssl/ssl_credentials.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/credentials/tls/spiffe_credentials.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/alts/alts_security_connector.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/fake/fake_security_connector.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/load_system_roots_fallback.cc" role="src" />
@ -677,6 +678,7 @@
<file baseinstalldir="/" name="src/core/lib/security/security_connector/security_connector.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/ssl/ssl_security_connector.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/ssl_utils.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/security_connector/tls/spiffe_security_connector.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/transport/client_auth_filter.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/transport/secure_endpoint.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/transport/security_handshaker.cc" role="src" />

@ -82,7 +82,10 @@ using grpc_core::LoadBalancingPolicy;
// any even moderately compelling reason to do so.
#define RETRY_BACKOFF_JITTER 0.2
grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
grpc_core::TraceFlag grpc_client_channel_call_trace(false,
"client_channel_call");
grpc_core::TraceFlag grpc_client_channel_routing_trace(
false, "client_channel_routing");
/*************************************************************************
* CHANNEL-WIDE FUNCTIONS
@ -219,7 +222,7 @@ class ClientChannelControlHelper
void UpdateState(
grpc_connectivity_state state, grpc_error* state_error,
UniquePtr<LoadBalancingPolicy::SubchannelPicker> picker) override {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
const char* extra = chand_->disconnect_error == GRPC_ERROR_NONE
? ""
: " (ignoring -- channel shutting down)";
@ -256,7 +259,7 @@ static bool process_resolver_result_locked(
ProcessedResolverResult resolver_result(args, chand->enable_retries);
grpc_core::UniquePtr<char> service_config_json =
resolver_result.service_config_json();
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
chand, service_config_json.get());
}
@ -460,8 +463,9 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
grpc_error* error = GRPC_ERROR_NONE;
chand->resolving_lb_policy.reset(
grpc_core::New<grpc_core::ResolvingLoadBalancingPolicy>(
std::move(lb_args), &grpc_client_channel_trace, std::move(target_uri),
process_resolver_result_locked, chand, &error));
std::move(lb_args), &grpc_client_channel_routing_trace,
std::move(target_uri), process_resolver_result_locked, chand,
&error));
grpc_channel_args_destroy(new_args);
if (error != GRPC_ERROR_NONE) {
// Orphan the resolving LB policy and flush the exec_ctx to ensure
@ -480,7 +484,7 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
grpc_pollset_set_add_pollset_set(
chand->resolving_lb_policy->interested_parties(),
chand->interested_parties);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: created resolving_lb_policy=%p", chand,
chand->resolving_lb_policy.get());
}
@ -856,7 +860,7 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
// Frees cached send_initial_metadata.
static void free_cached_send_initial_metadata(channel_data* chand,
call_data* calld) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying calld->send_initial_metadata", chand,
calld);
@ -867,7 +871,7 @@ static void free_cached_send_initial_metadata(channel_data* chand,
// Frees cached send_message at index idx.
static void free_cached_send_message(channel_data* chand, call_data* calld,
size_t idx) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
chand, calld, idx);
@ -878,7 +882,7 @@ static void free_cached_send_message(channel_data* chand, call_data* calld,
// Frees cached send_trailing_metadata.
static void free_cached_send_trailing_metadata(channel_data* chand,
call_data* calld) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying calld->send_trailing_metadata",
chand, calld);
@ -964,7 +968,7 @@ static void pending_batches_add(grpc_call_element* elem,
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
const size_t idx = get_batch_index(batch);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
calld, idx);
@ -993,7 +997,7 @@ static void pending_batches_add(grpc_call_element* elem,
}
if (GPR_UNLIKELY(calld->bytes_buffered_for_retry >
chand->per_rpc_retry_buffer_size)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: exceeded retry buffer size, committing",
chand, calld);
@ -1008,7 +1012,7 @@ static void pending_batches_add(grpc_call_element* elem,
// If we are not going to retry and have not yet started, pretend
// retries are disabled so that we don't bother with retry overhead.
if (calld->num_attempts_completed == 0) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: disabling retries before first attempt",
chand, calld);
@ -1066,7 +1070,7 @@ static void pending_batches_fail(
YieldCallCombinerPredicate yield_call_combiner_predicate) {
GPR_ASSERT(error != GRPC_ERROR_NONE);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
size_t num_batches = 0;
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
@ -1121,7 +1125,7 @@ static void pending_batches_resume(grpc_call_element* elem) {
return;
}
// Retries not enabled; send down batches as-is.
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
size_t num_batches = 0;
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
if (calld->pending_batches[i].batch != nullptr) ++num_batches;
@ -1169,7 +1173,7 @@ static void maybe_clear_pending_batch(grpc_call_element* elem,
(!batch->recv_trailing_metadata ||
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
nullptr)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand,
calld);
}
@ -1189,7 +1193,7 @@ static pending_batch* pending_batch_find(grpc_call_element* elem,
pending_batch* pending = &calld->pending_batches[i];
grpc_transport_stream_op_batch* batch = pending->batch;
if (batch != nullptr && predicate(batch)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: %s pending batch at index %" PRIuPTR, chand,
calld, log_message, i);
@ -1211,7 +1215,7 @@ static void retry_commit(grpc_call_element* elem,
call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->retry_committed) return;
calld->retry_committed = true;
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand, calld);
}
if (retry_state != nullptr) {
@ -1250,7 +1254,7 @@ static void do_retry(grpc_call_element* elem,
}
next_attempt_time = calld->retry_backoff->NextAttemptTime();
}
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand,
calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
@ -1283,7 +1287,7 @@ static bool maybe_retry(grpc_call_element* elem,
retry_state = static_cast<subchannel_call_retry_state*>(
batch_data->subchannel_call->GetParentData());
if (retry_state->retry_dispatched) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: retry already dispatched", chand,
calld);
}
@ -1295,14 +1299,14 @@ static bool maybe_retry(grpc_call_element* elem,
if (calld->retry_throttle_data != nullptr) {
calld->retry_throttle_data->RecordSuccess();
}
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: call succeeded", chand, calld);
}
return false;
}
// Status is not OK. Check whether the status is retryable.
if (!retry_policy->retryable_status_codes.Contains(status)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: status %s not configured as retryable", chand,
calld, grpc_status_code_to_string(status));
@ -1318,14 +1322,14 @@ static bool maybe_retry(grpc_call_element* elem,
// checks, so that we don't fail to record failures due to other factors.
if (calld->retry_throttle_data != nullptr &&
!calld->retry_throttle_data->RecordFailure()) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: retries throttled", chand, calld);
}
return false;
}
// Check whether the call is committed.
if (calld->retry_committed) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: retries already committed", chand,
calld);
}
@ -1334,7 +1338,7 @@ static bool maybe_retry(grpc_call_element* elem,
// Check whether we have retries remaining.
++calld->num_attempts_completed;
if (calld->num_attempts_completed >= retry_policy->max_attempts) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: exceeded %d retry attempts", chand,
calld, retry_policy->max_attempts);
}
@ -1342,7 +1346,7 @@ static bool maybe_retry(grpc_call_element* elem,
}
// If the call was cancelled from the surface, don't retry.
if (calld->cancel_error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: call cancelled from surface, not retrying",
chand, calld);
@ -1355,14 +1359,14 @@ static bool maybe_retry(grpc_call_element* elem,
// If the value is "-1" or any other unparseable string, we do not retry.
uint32_t ms;
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: not retrying due to server push-back",
chand, calld);
}
return false;
} else {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: server push-back: retry in %u ms",
chand, calld, ms);
}
@ -1484,7 +1488,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
grpc_call_element* elem = batch_data->elem;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
chand, calld, grpc_error_string(error));
@ -1508,7 +1512,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
if (GPR_UNLIKELY((retry_state->trailing_metadata_available ||
error != GRPC_ERROR_NONE) &&
!retry_state->completed_recv_trailing_metadata)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring recv_initial_metadata_ready "
"(Trailers-Only)",
@ -1574,7 +1578,7 @@ static void recv_message_ready(void* arg, grpc_error* error) {
grpc_call_element* elem = batch_data->elem;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: got recv_message_ready, error=%s",
chand, calld, grpc_error_string(error));
}
@ -1596,7 +1600,7 @@ static void recv_message_ready(void* arg, grpc_error* error) {
if (GPR_UNLIKELY(
(retry_state->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
!retry_state->completed_recv_trailing_metadata)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring recv_message_ready (nullptr "
"message and recv_trailing_metadata pending)",
@ -1748,7 +1752,7 @@ static void add_closures_to_fail_unstarted_pending_batches(
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
pending_batch* pending = &calld->pending_batches[i];
if (pending_batch_is_unstarted(pending, calld, retry_state)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: failing unstarted pending batch at index "
"%" PRIuPTR,
@ -1797,7 +1801,7 @@ static void recv_trailing_metadata_ready(void* arg, grpc_error* error) {
grpc_call_element* elem = batch_data->elem;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: got recv_trailing_metadata_ready, error=%s",
chand, calld, grpc_error_string(error));
@ -1813,7 +1817,7 @@ static void recv_trailing_metadata_ready(void* arg, grpc_error* error) {
batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata;
get_call_status(elem, md_batch, GRPC_ERROR_REF(error), &status,
&server_pushback_md);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
calld, grpc_status_code_to_string(status));
}
@ -1899,7 +1903,7 @@ static void add_closures_for_replay_or_pending_send_ops(
}
}
if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting next batch for pending send op(s)",
chand, calld);
@ -1919,7 +1923,7 @@ static void on_complete(void* arg, grpc_error* error) {
grpc_call_element* elem = batch_data->elem;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
gpr_log(GPR_INFO, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
chand, calld, grpc_error_string(error), batch_str);
@ -1999,7 +2003,7 @@ static void add_closure_for_subchannel_batch(
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
start_batch_in_call_combiner, batch,
grpc_schedule_on_exec_ctx);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
char* batch_str = grpc_transport_stream_op_batch_string(batch);
gpr_log(GPR_INFO, "chand=%p calld=%p: starting subchannel batch: %s", chand,
calld, batch_str);
@ -2067,7 +2071,7 @@ static void add_retriable_send_message_op(
subchannel_batch_data* batch_data) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
chand, calld, retry_state->started_send_message_count);
@ -2161,7 +2165,7 @@ static void add_retriable_recv_trailing_metadata_op(
static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: call failed but recv_trailing_metadata not "
"started; starting it internally",
@ -2194,7 +2198,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
if (calld->seen_send_initial_metadata &&
!retry_state->started_send_initial_metadata &&
!calld->pending_send_initial_metadata) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed "
"send_initial_metadata op",
@ -2210,7 +2214,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
retry_state->started_send_message_count ==
retry_state->completed_send_message_count &&
!calld->pending_send_message) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed "
"send_message op",
@ -2230,7 +2234,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
retry_state->started_send_message_count == calld->send_messages.size() &&
!retry_state->started_send_trailing_metadata &&
!calld->pending_send_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed "
"send_trailing_metadata op",
@ -2380,7 +2384,7 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: constructing retriable batches",
chand, calld);
}
@ -2405,7 +2409,7 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
// Now add pending batches.
add_subchannel_batches_for_pending_batches(elem, retry_state, &closures);
// Start batches on subchannel call.
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting %" PRIuPTR
" retriable batches on subchannel_call=%p",
@ -2439,7 +2443,7 @@ static void create_subchannel_call(grpc_call_element* elem) {
grpc_error* error = GRPC_ERROR_NONE;
calld->subchannel_call =
calld->pick.pick.connected_subchannel->CreateCall(call_args, &error);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call.get(),
grpc_error_string(error));
@ -2461,7 +2465,7 @@ static void pick_done(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: failed to pick subchannel: error=%s", chand,
calld, grpc_error_string(error));
@ -2493,7 +2497,7 @@ class QueuedPickCanceller {
auto* self = static_cast<QueuedPickCanceller*>(arg);
auto* chand = static_cast<channel_data*>(self->elem_->channel_data);
auto* calld = static_cast<call_data*>(self->elem_->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling queued pick: "
"error=%s self=%p calld->pick_canceller=%p",
@ -2525,7 +2529,7 @@ static void remove_call_from_queued_picks_locked(grpc_call_element* elem) {
for (QueuedPick** pick = &chand->queued_picks; *pick != nullptr;
pick = &(*pick)->next) {
if (*pick == &calld->pick) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: removing from queued picks list",
chand, calld);
}
@ -2545,7 +2549,7 @@ static void remove_call_from_queued_picks_locked(grpc_call_element* elem) {
static void add_call_to_queued_picks_locked(grpc_call_element* elem) {
auto* chand = static_cast<channel_data*>(elem->channel_data);
auto* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: adding to queued picks list", chand,
calld);
}
@ -2567,7 +2571,7 @@ static void add_call_to_queued_picks_locked(grpc_call_element* elem) {
static void apply_service_config_to_call_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
chand, calld);
}
@ -2679,7 +2683,7 @@ static void start_pick_locked(void* arg, grpc_error* error) {
// Attempt pick.
error = GRPC_ERROR_NONE;
auto pick_result = chand->picker->Pick(&calld->pick.pick, &error);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_routing_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: LB pick returned %s (connected_subchannel=%p, "
"error=%s)",
@ -2748,7 +2752,7 @@ static void cc_start_transport_stream_op_batch(
}
// If we've previously been cancelled, immediately fail any new batches.
if (GPR_UNLIKELY(calld->cancel_error != GRPC_ERROR_NONE)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(calld->cancel_error));
}
@ -2767,7 +2771,7 @@ static void cc_start_transport_stream_op_batch(
GRPC_ERROR_UNREF(calld->cancel_error);
calld->cancel_error =
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_string(calld->cancel_error));
}
@ -2795,7 +2799,7 @@ static void cc_start_transport_stream_op_batch(
// the channel combiner, which is more efficient (especially for
// streaming calls).
if (calld->subchannel_call != nullptr) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
calld, calld->subchannel_call.get());
@ -2807,7 +2811,7 @@ static void cc_start_transport_stream_op_batch(
// For batches containing a send_initial_metadata op, enter the channel
// combiner to start a pick.
if (GPR_LIKELY(batch->send_initial_metadata)) {
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: entering client_channel combiner",
chand, calld);
}
@ -2817,7 +2821,7 @@ static void cc_start_transport_stream_op_batch(
GRPC_ERROR_NONE);
} else {
// For all other batches, release the call combiner.
if (grpc_client_channel_trace.enabled()) {
if (grpc_client_channel_call_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: saved batch, yielding call combiner", chand,
calld);

@ -144,7 +144,7 @@ void HttpConnectHandshaker::OnWriteDone(void* arg, grpc_error* error) {
// The read callback inherits our ref to the handshaker.
grpc_endpoint_read(handshaker->args_->endpoint,
handshaker->args_->read_buffer,
&handshaker->response_read_closure_);
&handshaker->response_read_closure_, /*urgent=*/true);
gpr_mu_unlock(&handshaker->mu_);
}
}
@ -207,7 +207,7 @@ void HttpConnectHandshaker::OnReadDone(void* arg, grpc_error* error) {
grpc_slice_buffer_reset_and_unref_internal(handshaker->args_->read_buffer);
grpc_endpoint_read(handshaker->args_->endpoint,
handshaker->args_->read_buffer,
&handshaker->response_read_closure_);
&handshaker->response_read_closure_, /*urgent=*/true);
gpr_mu_unlock(&handshaker->mu_);
return;
}

@ -210,7 +210,8 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
RefCountedPtr<ServiceConfig> service_config)
: json_(lb_config), service_config_(std::move(service_config)) {}
const grpc_json* json() const { return json_; }
const char* name() const { return json_->key; }
const grpc_json* config() const { return json_->child; }
RefCountedPtr<ServiceConfig> service_config() const {
return service_config_;
}
@ -297,8 +298,8 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
grpc_combiner* combiner() const { return combiner_; }
// Note: LB policies MUST NOT call any method on the helper from
// their constructor.
// Note: LB policies MUST NOT call any method on the helper from their
// constructor.
// Note: This will return null after ShutdownLocked() has been called.
ChannelControlHelper* channel_control_helper() const {
return channel_control_helper_.get();

@ -39,15 +39,14 @@
/// the balancer, we update the round_robin policy with the new list of
/// addresses. If we cannot communicate with the balancer on startup,
/// however, we may enter fallback mode, in which case we will populate
/// the RR policy's addresses from the backend addresses returned by the
/// the child policy's addresses from the backend addresses returned by the
/// resolver.
///
/// Once an RR policy instance is in place (and getting updated as described),
/// Once a child policy instance is in place (and getting updated as described),
/// calls for a pick, a ping, or a cancellation will be serviced right
/// away by forwarding them to the RR instance. Any time there's no RR
/// policy available (i.e., right after the creation of the gRPCLB policy),
/// pick and ping requests are added to a list of pending picks and pings
/// to be flushed and serviced when the RR policy instance becomes available.
/// away by forwarding them to the child policy instance. Any time there's no
/// child policy available (i.e., right after the creation of the gRPCLB
/// policy), pick requests are queued.
///
/// \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
/// high level design and details.
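As an aside on the queuing behavior described in the doc comment above, here is a minimal standalone sketch (a hypothetical PickQueue type, not the actual gRPC machinery) of the pattern of holding pick requests until a child policy instance exists and then flushing them:

// Illustrative sketch only; the real grpclb policy queues picks inside the
// client channel machinery rather than with std::function callbacks.
#include <functional>
#include <utility>
#include <vector>

class PickQueue {
 public:
  // Service the pick immediately if a child policy is available,
  // otherwise remember it for later.
  void Pick(std::function<void()> pick) {
    if (child_ready_) {
      pick();
    } else {
      pending_.push_back(std::move(pick));
    }
  }

  // Called once a child policy instance is in place: flush everything
  // that was queued while no child policy existed.
  void OnChildPolicyReady() {
    child_ready_ = true;
    for (auto& pick : pending_) pick();
    pending_.clear();
  }

 private:
  bool child_ready_ = false;
  std::vector<std::function<void()>> pending_;
};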
@ -279,27 +278,37 @@ class GrpcLb : public LoadBalancingPolicy {
UniquePtr<SubchannelPicker> picker) override;
void RequestReresolution() override;
void set_child(LoadBalancingPolicy* child) { child_ = child; }
private:
bool CalledByPendingChild() const;
bool CalledByCurrentChild() const;
RefCountedPtr<GrpcLb> parent_;
LoadBalancingPolicy* child_ = nullptr;
};
~GrpcLb();
void ShutdownLocked() override;
// Helper function used in UpdateLocked().
// Helper functions used in UpdateLocked().
void ProcessChannelArgsLocked(const grpc_channel_args& args);
void ParseLbConfig(Config* grpclb_config);
static void OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error);
// Methods for dealing with the balancer channel and call.
// Methods for dealing with the balancer call.
void StartBalancerCallLocked();
static void OnFallbackTimerLocked(void* arg, grpc_error* error);
void StartBalancerCallRetryTimerLocked();
static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error);
// Methods for dealing with the RR policy.
grpc_channel_args* CreateRoundRobinPolicyArgsLocked();
void CreateRoundRobinPolicyLocked(Args args);
void CreateOrUpdateRoundRobinPolicyLocked();
// Methods for dealing with the child policy.
grpc_channel_args* CreateChildPolicyArgsLocked();
OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
const char* name, const grpc_channel_args* args);
void CreateOrUpdateChildPolicyLocked();
// Who the client is trying to communicate with.
const char* server_name_ = nullptr;
@ -316,6 +325,9 @@ class GrpcLb : public LoadBalancingPolicy {
gpr_atm lb_channel_uuid_ = 0;
// Response generator to inject address updates into lb_channel_.
RefCountedPtr<FakeResolverResponseGenerator> response_generator_;
// Connectivity state notification.
grpc_connectivity_state lb_channel_connectivity_ = GRPC_CHANNEL_IDLE;
grpc_closure lb_channel_on_connectivity_changed_;
// The data associated with the current LB call. It holds a ref to this LB
// policy. It's initialized every time we query for backends. It's reset to
@ -345,8 +357,16 @@ class GrpcLb : public LoadBalancingPolicy {
grpc_timer lb_fallback_timer_;
grpc_closure lb_on_fallback_;
// The RR policy to use for the backends.
OrphanablePtr<LoadBalancingPolicy> rr_policy_;
// Lock held when modifying the value of child_policy_ or
// pending_child_policy_.
gpr_mu child_policy_mu_;
// The child policy to use for the backends.
OrphanablePtr<LoadBalancingPolicy> child_policy_;
// When switching child policies, the new policy will be stored here
// until it reports READY, at which point it will be moved to child_policy_.
OrphanablePtr<LoadBalancingPolicy> pending_child_policy_;
// The child policy config.
RefCountedPtr<Config> child_policy_config_;
};
//
@ -558,14 +578,30 @@ GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick,
// GrpcLb::Helper
//
bool GrpcLb::Helper::CalledByPendingChild() const {
GPR_ASSERT(child_ != nullptr);
return child_ == parent_->pending_child_policy_.get();
}
bool GrpcLb::Helper::CalledByCurrentChild() const {
GPR_ASSERT(child_ != nullptr);
return child_ == parent_->child_policy_.get();
}
Subchannel* GrpcLb::Helper::CreateSubchannel(const grpc_channel_args& args) {
if (parent_->shutting_down_) return nullptr;
if (parent_->shutting_down_ ||
(!CalledByPendingChild() && !CalledByCurrentChild())) {
return nullptr;
}
return parent_->channel_control_helper()->CreateSubchannel(args);
}
grpc_channel* GrpcLb::Helper::CreateChannel(const char* target,
const grpc_channel_args& args) {
if (parent_->shutting_down_) return nullptr;
if (parent_->shutting_down_ ||
(!CalledByPendingChild() && !CalledByCurrentChild())) {
return nullptr;
}
return parent_->channel_control_helper()->CreateChannel(target, args);
}
@ -576,31 +612,54 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
GRPC_ERROR_UNREF(state_error);
return;
}
// If this request is from the pending child policy, ignore it until
// it reports READY, at which point we swap it into place.
if (CalledByPendingChild()) {
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p helper %p] pending child policy %p reports state=%s",
parent_.get(), this, parent_->pending_child_policy_.get(),
grpc_connectivity_state_name(state));
}
if (state != GRPC_CHANNEL_READY) {
GRPC_ERROR_UNREF(state_error);
return;
}
grpc_pollset_set_del_pollset_set(
parent_->child_policy_->interested_parties(),
parent_->interested_parties());
MutexLock lock(&parent_->child_policy_mu_);
parent_->child_policy_ = std::move(parent_->pending_child_policy_);
} else if (!CalledByCurrentChild()) {
// This request is from an outdated child, so ignore it.
GRPC_ERROR_UNREF(state_error);
return;
}
// There are three cases to consider here:
// 1. We're in fallback mode. In this case, we're always going to use
// RR's result, so we pass its picker through as-is.
// the child policy's result, so we pass its picker through as-is.
// 2. The serverlist contains only drop entries. In this case, we
// want to use our own picker so that we can return the drops.
// 3. Not in fallback mode and serverlist is not all drops (i.e., it
// may be empty or contain at least one backend address). There are
// two sub-cases:
// a. RR is reporting state READY. In this case, we wrap RR's
// picker in our own, so that we can handle drops and LB token
// metadata for each pick.
// b. RR is reporting a state other than READY. In this case, we
// don't want to use our own picker, because we don't want to
// process drops for picks that yield a QUEUE result; this would
// a. The child policy is reporting state READY. In this case, we wrap
// the child's picker in our own, so that we can handle drops and LB
// token metadata for each pick.
// b. The child policy is reporting a state other than READY. In this
// case, we don't want to use our own picker, because we don't want
// to process drops for picks that yield a QUEUE result; this would
// result in dropping too many calls, since we will see the
// queued picks multiple times, and we'd consider each one a
// separate call for the drop calculation.
//
// Cases 1 and 3b: return picker from RR as-is.
// Cases 1 and 3b: return picker from the child policy as-is.
if (parent_->serverlist_ == nullptr ||
(!parent_->serverlist_->ContainsAllDropEntries() &&
state != GRPC_CHANNEL_READY)) {
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p helper %p] state=%s passing RR picker %p as-is",
"[grpclb %p helper %p] state=%s passing child picker %p as-is",
parent_.get(), this, grpc_connectivity_state_name(state),
picker.get());
}
@ -608,9 +667,9 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
std::move(picker));
return;
}
// Cases 2 and 3a: wrap picker from RR in our own picker.
// Cases 2 and 3a: wrap picker from the child in our own picker.
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p helper %p] state=%s wrapping RR picker %p",
gpr_log(GPR_INFO, "[grpclb %p helper %p] state=%s wrapping child picker %p",
parent_.get(), this, grpc_connectivity_state_name(state),
picker.get());
}
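The three-case comment above reduces to a single boolean decision; the following is a rough standalone sketch (not gRPC code, with hypothetical parameters standing in for the policy's state) of when the child's picker is wrapped versus passed through:

// Pass the child's picker through as-is in fallback mode (case 1) or when
// the child is not READY (case 3b); wrap it only when drops and LB-token
// metadata must be applied (cases 2 and 3a).
bool ShouldWrapChildPicker(bool have_serverlist, bool serverlist_all_drops,
                           bool child_ready) {
  if (!have_serverlist) return false;                        // case 1
  if (!serverlist_all_drops && !child_ready) return false;   // case 3b
  return true;                                               // cases 2 and 3a
}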
@ -628,15 +687,19 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
void GrpcLb::Helper::RequestReresolution() {
if (parent_->shutting_down_) return;
// If there is a pending child policy, ignore re-resolution requests
// from the current child policy (or any outdated child).
if (parent_->pending_child_policy_ != nullptr && !CalledByPendingChild()) {
return;
}
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Re-resolution requested from the internal RR policy "
"(%p).",
parent_.get(), parent_->rr_policy_.get());
"[grpclb %p] Re-resolution requested from child policy (%p).",
parent_.get(), child_);
}
// If we are talking to a balancer, we expect to get updated addresses
// from the balancer, so we can ignore the re-resolution request from
// the RR policy. Otherwise, pass the re-resolution request up to the
// the child policy. Otherwise, pass the re-resolution request up to the
// channel.
if (parent_->lb_calld_ == nullptr ||
!parent_->lb_calld_->seen_initial_response()) {
@ -975,6 +1038,12 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
} else { // New serverlist.
if (grpclb_policy->serverlist_ == nullptr) {
// Dispose of the fallback.
if (grpclb_policy->child_policy_ != nullptr) {
gpr_log(GPR_INFO,
"[grpclb %p] Received response from balancer; exiting "
"fallback mode",
grpclb_policy);
}
grpclb_policy->fallback_backend_addresses_.reset();
if (grpclb_policy->fallback_timer_callback_pending_) {
grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_);
@ -984,7 +1053,7 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
// instance will be destroyed either upon the next update or when the
// GrpcLb instance is destroyed.
grpclb_policy->serverlist_ = std::move(serverlist_wrapper);
grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked();
grpclb_policy->CreateOrUpdateChildPolicyLocked();
}
} else {
// No valid initial response or serverlist found.
@ -1164,6 +1233,11 @@ GrpcLb::GrpcLb(Args args)
.set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
.set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS *
1000)) {
// Initialization.
GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_,
&GrpcLb::OnBalancerChannelConnectivityChangedLocked, this,
grpc_combiner_scheduler(args.combiner));
gpr_mu_init(&child_policy_mu_);
// Record server name.
const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(arg);
@ -1189,6 +1263,7 @@ GrpcLb::GrpcLb(Args args)
GrpcLb::~GrpcLb() {
gpr_free((void*)server_name_);
grpc_channel_args_destroy(args_);
gpr_mu_destroy(&child_policy_mu_);
}
void GrpcLb::ShutdownLocked() {
@ -1200,7 +1275,19 @@ void GrpcLb::ShutdownLocked() {
if (fallback_timer_callback_pending_) {
grpc_timer_cancel(&lb_fallback_timer_);
}
rr_policy_.reset();
if (child_policy_ != nullptr) {
grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(),
interested_parties());
}
if (pending_child_policy_ != nullptr) {
grpc_pollset_set_del_pollset_set(
pending_child_policy_->interested_parties(), interested_parties());
}
{
MutexLock lock(&child_policy_mu_);
child_policy_.reset();
pending_child_policy_.reset();
}
// We destroy the LB channel here instead of in our destructor because
// destroying the channel triggers a last callback to
// OnBalancerChannelConnectivityChangedLocked(), and we need to be
@ -1220,17 +1307,30 @@ void GrpcLb::ResetBackoffLocked() {
if (lb_channel_ != nullptr) {
grpc_channel_reset_connect_backoff(lb_channel_);
}
if (rr_policy_ != nullptr) {
rr_policy_->ResetBackoffLocked();
if (child_policy_ != nullptr) {
child_policy_->ResetBackoffLocked();
}
if (pending_child_policy_ != nullptr) {
pending_child_policy_->ResetBackoffLocked();
}
}
void GrpcLb::FillChildRefsForChannelz(
channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) {
// delegate to the RoundRobin to fill the children subchannels.
if (rr_policy_ != nullptr) {
rr_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
{
// Delegate to the child policy to fill the child subchannels.
// This must be done holding child_policy_mu_, since this method
// does not run in the combiner.
MutexLock lock(&child_policy_mu_);
if (child_policy_ != nullptr) {
child_policy_->FillChildRefsForChannelz(child_subchannels,
child_channels);
}
if (pending_child_policy_ != nullptr) {
pending_child_policy_->FillChildRefsForChannelz(child_subchannels,
child_channels);
}
}
gpr_atm uuid = gpr_atm_no_barrier_load(&lb_channel_uuid_);
if (uuid != 0) {
@ -1238,6 +1338,46 @@ void GrpcLb::FillChildRefsForChannelz(
}
}
void GrpcLb::UpdateLocked(const grpc_channel_args& args,
RefCountedPtr<Config> lb_config) {
const bool is_initial_update = lb_channel_ == nullptr;
ParseLbConfig(lb_config.get());
ProcessChannelArgsLocked(args);
// Update the existing child policy.
if (child_policy_ != nullptr) CreateOrUpdateChildPolicyLocked();
// If this is the initial update, start the fallback timer.
if (is_initial_update) {
if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr &&
!fallback_timer_callback_pending_) {
grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_;
Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Ref for callback
GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimerLocked, this,
grpc_combiner_scheduler(combiner()));
fallback_timer_callback_pending_ = true;
grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
// Start watching the channel's connectivity state. If the channel
// goes into state TRANSIENT_FAILURE, we go into fallback mode even if
// the fallback timeout has not elapsed.
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
// Ref held by callback.
Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity").release();
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(interested_parties()),
&lb_channel_connectivity_, &lb_channel_on_connectivity_changed_,
nullptr);
}
StartBalancerCallLocked();
}
}
//
// helpers for UpdateLocked()
//
// Returns the backend addresses extracted from the given addresses.
UniquePtr<ServerAddressList> ExtractBackendAddresses(
const ServerAddressList& addresses) {
@ -1299,26 +1439,56 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
grpc_channel_args_destroy(lb_channel_args);
}
void GrpcLb::UpdateLocked(const grpc_channel_args& args,
RefCountedPtr<Config> lb_config) {
const bool is_initial_update = lb_channel_ == nullptr;
ProcessChannelArgsLocked(args);
// Update the existing RR policy.
if (rr_policy_ != nullptr) CreateOrUpdateRoundRobinPolicyLocked();
// If this is the initial update, start the fallback timer and the
// balancer call.
if (is_initial_update) {
if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr &&
!fallback_timer_callback_pending_) {
grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_;
Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Ref for callback
GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimerLocked, this,
grpc_combiner_scheduler(combiner()));
fallback_timer_callback_pending_ = true;
grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
void GrpcLb::ParseLbConfig(Config* grpclb_config) {
const grpc_json* child_policy = nullptr;
if (grpclb_config != nullptr) {
const grpc_json* grpclb_config_json = grpclb_config->config();
for (const grpc_json* field = grpclb_config_json; field != nullptr;
field = field->next) {
if (field->key == nullptr) return;
if (strcmp(field->key, "childPolicy") == 0) {
if (child_policy != nullptr) return; // Duplicate.
child_policy = ParseLoadBalancingConfig(field);
}
}
StartBalancerCallLocked();
}
if (child_policy != nullptr) {
child_policy_config_ =
MakeRefCounted<Config>(child_policy, grpclb_config->service_config());
} else {
child_policy_config_.reset();
}
}
void GrpcLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error) {
GrpcLb* self = static_cast<GrpcLb*>(arg);
if (!self->shutting_down_ && self->fallback_timer_callback_pending_) {
if (self->lb_channel_connectivity_ != GRPC_CHANNEL_TRANSIENT_FAILURE) {
// Not in TRANSIENT_FAILURE. Renew connectivity watch.
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(self->lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(
self->interested_parties()),
&self->lb_channel_connectivity_,
&self->lb_channel_on_connectivity_changed_, nullptr);
return; // Early out so we don't drop the ref below.
}
// In TRANSIENT_FAILURE. Cancel the fallback timer and go into
// fallback mode immediately.
gpr_log(GPR_INFO,
"[grpclb %p] balancer channel in state TRANSIENT_FAILURE; "
"entering fallback mode",
self);
grpc_timer_cancel(&self->lb_fallback_timer_);
self->CreateOrUpdateChildPolicyLocked();
}
// Done watching connectivity state, so drop ref.
self->Unref(DEBUG_LOCATION, "watch_lb_channel_connectivity");
}
//
@ -1346,13 +1516,21 @@ void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
// actually runs, don't fall back.
if (grpclb_policy->serverlist_ == nullptr && !grpclb_policy->shutting_down_ &&
error == GRPC_ERROR_NONE) {
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Falling back to use backends from resolver",
grpclb_policy);
}
gpr_log(GPR_INFO,
"[grpclb %p] No response from balancer after fallback timeout; "
"entering fallback mode",
grpclb_policy);
GPR_ASSERT(grpclb_policy->fallback_backend_addresses_ != nullptr);
grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked();
grpclb_policy->CreateOrUpdateChildPolicyLocked();
// Cancel connectivity watch, since we no longer need it.
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(grpclb_policy->lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(
grpclb_policy->interested_parties()),
nullptr, &grpclb_policy->lb_channel_on_connectivity_changed_, nullptr);
}
grpclb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer");
}
@ -1396,10 +1574,10 @@ void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) {
}
//
// code for interacting with the RR policy
// code for interacting with the child policy
//
grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
grpc_channel_args* GrpcLb::CreateChildPolicyArgsLocked() {
ServerAddressList tmp_addresses;
ServerAddressList* addresses = &tmp_addresses;
bool is_backend_from_grpclb_load_balancer = false;
@ -1408,7 +1586,7 @@ grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
lb_calld_ == nullptr ? nullptr : lb_calld_->client_stats());
is_backend_from_grpclb_load_balancer = true;
} else {
// If CreateOrUpdateRoundRobinPolicyLocked() is invoked when we haven't
// If CreateOrUpdateChildPolicyLocked() is invoked when we haven't
// received any serverlist from the balancer, we use the fallback backends
// returned by the resolver. Note that the fallback backend list may be
// empty, in which case the new round_robin policy will keep the requested
@ -1435,49 +1613,140 @@ grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
const_cast<char*>(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1);
++num_args_to_add;
}
grpc_channel_args* args = grpc_channel_args_copy_and_add_and_remove(
return grpc_channel_args_copy_and_add_and_remove(
args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add,
num_args_to_add);
return args;
}
void GrpcLb::CreateRoundRobinPolicyLocked(Args args) {
GPR_ASSERT(rr_policy_ == nullptr);
rr_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
"round_robin", std::move(args));
if (GPR_UNLIKELY(rr_policy_ == nullptr)) {
gpr_log(GPR_ERROR, "[grpclb %p] Failure creating a RoundRobin policy",
this);
return;
OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
const char* name, const grpc_channel_args* args) {
Helper* helper = New<Helper>(Ref());
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
UniquePtr<ChannelControlHelper>(helper);
OrphanablePtr<LoadBalancingPolicy> lb_policy =
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
name, std::move(lb_policy_args));
if (GPR_UNLIKELY(lb_policy == nullptr)) {
gpr_log(GPR_ERROR, "[grpclb %p] Failure creating child policy %s", this,
name);
return nullptr;
}
helper->set_child(lb_policy.get());
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Created new RR policy %p", this,
rr_policy_.get());
gpr_log(GPR_INFO, "[grpclb %p] Created new child policy %s (%p)", this,
name, lb_policy.get());
}
// Add the gRPC LB's interested_parties pollset_set to that of the newly
// created RR policy. This will make the RR policy progress upon activity on
// gRPC LB, which in turn is tied to the application's call.
grpc_pollset_set_add_pollset_set(rr_policy_->interested_parties(),
// created child policy. This will make the child policy progress upon
// activity on gRPC LB, which in turn is tied to the application's call.
grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
interested_parties());
return lb_policy;
}
void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
void GrpcLb::CreateOrUpdateChildPolicyLocked() {
if (shutting_down_) return;
grpc_channel_args* args = CreateRoundRobinPolicyArgsLocked();
grpc_channel_args* args = CreateChildPolicyArgsLocked();
GPR_ASSERT(args != nullptr);
if (rr_policy_ == nullptr) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
UniquePtr<ChannelControlHelper>(New<Helper>(Ref()));
CreateRoundRobinPolicyLocked(std::move(lb_policy_args));
// If the child policy name changes, we need to create a new child
// policy. When this happens, we leave child_policy_ as-is and store
// the new child policy in pending_child_policy_. Once the new child
// policy transitions into state READY, we swap it into child_policy_,
// replacing the original child policy. So pending_child_policy_ is
// non-null only between when we apply an update that changes the child
// policy name and when the new child reports state READY.
//
// Updates can arrive at any point during this transition. We always
// apply updates relative to the most recently created child policy,
// even if the most recent one is still in pending_child_policy_. This
// is true both when applying the updates to an existing child policy
// and when determining whether we need to create a new policy.
//
// As a result of this, there are several cases to consider here:
//
// 1. We have no existing child policy (i.e., we have started up but
// have not yet received a serverlist from the balancer or gone
// into fallback mode; in this case, both child_policy_ and
// pending_child_policy_ are null). In this case, we create a
// new child policy and store it in child_policy_.
//
// 2. We have an existing child policy and have no pending child policy
// from a previous update (i.e., either there has not been a
// previous update that changed the policy name, or we have already
// finished swapping in the new policy; in this case, child_policy_
// is non-null but pending_child_policy_ is null). In this case:
// a. If child_policy_->name() equals child_policy_name, then we
// update the existing child policy.
// b. If child_policy_->name() does not equal child_policy_name,
// we create a new policy. The policy will be stored in
// pending_child_policy_ and will later be swapped into
// child_policy_ by the helper when the new child transitions
// into state READY.
//
// 3. We have an existing child policy and have a pending child policy
// from a previous update (i.e., a previous update set
// pending_child_policy_ as per case 2b above and that policy has
// not yet transitioned into state READY and been swapped into
// child_policy_; in this case, both child_policy_ and
// pending_child_policy_ are non-null). In this case:
// a. If pending_child_policy_->name() equals child_policy_name,
// then we update the existing pending child policy.
// b. If pending_child_policy_->name() does not equal
// child_policy_name, then we create a new policy. The new
// policy is stored in pending_child_policy_ (replacing the one
// that was there before, which will be immediately shut down)
// and will later be swapped into child_policy_ by the helper
// when the new child transitions into state READY.
const char* child_policy_name = child_policy_config_ == nullptr
? "round_robin"
: child_policy_config_->name();
const bool create_policy =
// case 1
child_policy_ == nullptr ||
// case 2b
(pending_child_policy_ == nullptr &&
strcmp(child_policy_->name(), child_policy_name) != 0) ||
// case 3b
(pending_child_policy_ != nullptr &&
strcmp(pending_child_policy_->name(), child_policy_name) != 0);
LoadBalancingPolicy* policy_to_update = nullptr;
if (create_policy) {
// Cases 1, 2b, and 3b: create a new child policy.
// If child_policy_ is null, we set it (case 1), else we set
// pending_child_policy_ (cases 2b and 3b).
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Creating new %schild policy %s", this,
child_policy_ == nullptr ? "" : "pending ", child_policy_name);
}
auto new_policy = CreateChildPolicyLocked(child_policy_name, args);
// Swap the policy into place.
auto& lb_policy =
child_policy_ == nullptr ? child_policy_ : pending_child_policy_;
{
MutexLock lock(&child_policy_mu_);
lb_policy = std::move(new_policy);
}
policy_to_update = lb_policy.get();
} else {
// Cases 2a and 3a: update an existing policy.
// If we have a pending child policy, send the update to the pending
// policy (case 3a), else send it to the current policy (case 2a).
policy_to_update = pending_child_policy_ != nullptr
? pending_child_policy_.get()
: child_policy_.get();
}
GPR_ASSERT(policy_to_update != nullptr);
// Update the policy.
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Updating RR policy %p", this,
rr_policy_.get());
gpr_log(GPR_INFO, "[grpclb %p] Updating %schild policy %p", this,
policy_to_update == pending_child_policy_.get() ? "pending " : "",
policy_to_update);
}
rr_policy_->UpdateLocked(*args, nullptr);
policy_to_update->UpdateLocked(*args, child_policy_config_);
// Clean up.
grpc_channel_args_destroy(args);
}
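The case analysis in the long comment above boils down to a small amount of state handling. Below is a rough standalone sketch, using a hypothetical Policy type rather than the real LoadBalancingPolicy API, of how updates always target the newest policy and how a pending policy is swapped in only after it reports READY:

#include <memory>
#include <string>
#include <utility>

// Hypothetical stand-in for a child LB policy; illustration only.
struct Policy {
  std::string name;
  void Update(const std::string& /*config*/) {}
};

class ChildPolicyManager {
 public:
  // Roughly mirrors CreateOrUpdateChildPolicyLocked(): create a new policy
  // when the requested name differs from the newest policy's name
  // (cases 1, 2b, 3b), otherwise update the newest policy in place
  // (cases 2a, 3a).
  void ApplyUpdate(const std::string& wanted_name, const std::string& config) {
    Policy* newest = pending_ != nullptr ? pending_.get() : current_.get();
    if (newest == nullptr || newest->name != wanted_name) {
      auto& slot = (current_ == nullptr) ? current_ : pending_;
      slot = std::make_unique<Policy>(Policy{wanted_name});
      newest = slot.get();
    }
    newest->Update(config);
  }

  // Roughly mirrors the helper's UpdateState() handling: the pending policy
  // replaces the current one only once it reports READY.
  void OnPolicyReady(Policy* reporting) {
    if (pending_ != nullptr && reporting == pending_.get()) {
      current_ = std::move(pending_);
    }
  }

 private:
  std::unique_ptr<Policy> current_;  // plays the role of child_policy_
  std::unique_ptr<Policy> pending_;  // plays the role of pending_child_policy_
};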
@ -1489,18 +1758,6 @@ class GrpcLbFactory : public LoadBalancingPolicyFactory {
public:
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const override {
/* Count the number of gRPC-LB addresses. There must be at least one. */
const ServerAddressList* addresses =
FindServerAddressListChannelArg(args.args);
if (addresses == nullptr) return nullptr;
bool found_balancer = false;
for (size_t i = 0; i < addresses->size(); ++i) {
if ((*addresses)[i].IsBalancer()) {
found_balancer = true;
break;
}
}
if (!found_balancer) return nullptr;
return OrphanablePtr<LoadBalancingPolicy>(New<GrpcLb>(std::move(args)));
}

@ -102,6 +102,14 @@ class PickFirst : public LoadBalancingPolicy {
PickFirst* p = static_cast<PickFirst*>(policy());
p->Unref(DEBUG_LOCATION, "subchannel_list");
}
bool in_transient_failure() const { return in_transient_failure_; }
void set_in_transient_failure(bool in_transient_failure) {
in_transient_failure_ = in_transient_failure;
}
private:
bool in_transient_failure_ = false;
};
class Picker : public SubchannelPicker {
@ -368,12 +376,21 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
p->selected_ = nullptr;
StopConnectivityWatchLocked();
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
grpc_error* new_error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"selected subchannel not ready; switching to pending update", &error,
1);
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(new_error),
UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(new_error)));
// Set our state to that of the pending subchannel list.
if (p->subchannel_list_->in_transient_failure()) {
grpc_error* new_error =
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"selected subchannel failed; switching to pending update",
&error, 1);
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(new_error),
UniquePtr<SubchannelPicker>(
New<TransientFailurePicker>(new_error)));
} else {
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
UniquePtr<SubchannelPicker>(New<QueuePicker>(p->Ref())));
}
} else {
if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// If the selected subchannel goes bad, request a re-resolution. We
@ -382,7 +399,6 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
// to connect to the re-resolved backends until we leave IDLE state.
p->idle_ = true;
p->channel_control_helper()->RequestReresolution();
// In transient failure. Rely on re-resolution to recover.
p->selected_ = nullptr;
StopConnectivityWatchLocked();
p->channel_control_helper()->UpdateState(
@ -418,6 +434,7 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
// for a subchannel in p->latest_pending_subchannel_list_. The
// goal here is to find a subchannel from the update that we can
// select in place of the current one.
subchannel_list()->set_in_transient_failure(false);
switch (connectivity_state) {
case GRPC_CHANNEL_READY: {
// Renew notification.
@ -431,17 +448,25 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
size_t next_index =
(sd->Index() + 1) % subchannel_list()->num_subchannels();
sd = subchannel_list()->subchannel(next_index);
// Case 1: Only set state to TRANSIENT_FAILURE if we've tried
// all subchannels.
if (sd->Index() == 0 && subchannel_list() == p->subchannel_list_.get()) {
p->channel_control_helper()->RequestReresolution();
grpc_error* new_error =
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"failed to connect to all addresses", &error, 1);
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(new_error),
UniquePtr<SubchannelPicker>(
New<TransientFailurePicker>(new_error)));
// If we've tried all subchannels, set state to TRANSIENT_FAILURE.
if (sd->Index() == 0) {
// Re-resolve if this is the most recent subchannel list.
if (subchannel_list() == (p->latest_pending_subchannel_list_ != nullptr
? p->latest_pending_subchannel_list_.get()
: p->subchannel_list_.get())) {
p->channel_control_helper()->RequestReresolution();
}
subchannel_list()->set_in_transient_failure(true);
// Only report new state in case 1.
if (subchannel_list() == p->subchannel_list_.get()) {
grpc_error* new_error =
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"failed to connect to all addresses", &error, 1);
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(new_error),
UniquePtr<SubchannelPicker>(
New<TransientFailurePicker>(new_error)));
}
}
sd->CheckConnectivityStateAndStartWatchingLocked();
break;
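For reference, the "tried all subchannels" condition in the hunk above is just an index wrap-around check; a tiny standalone illustration (not gRPC code) of the same arithmetic:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t num_subchannels = 3;
  // Suppose the subchannel currently reporting failure is the last one.
  size_t failing_index = 2;
  size_t next_index = (failing_index + 1) % num_subchannels;
  if (next_index == 0) {
    // Wrapping back to index 0 means every address in the list has been
    // attempted once; this is when pick_first marks the list as being in
    // TRANSIENT_FAILURE and, for the newest list, requests re-resolution.
    std::printf("all %zu subchannels attempted\n", num_subchannels);
  }
  return 0;
}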

File diff suppressed because it is too large

@ -33,55 +33,12 @@
#include "src/core/lib/security/transport/target_authority_table.h"
#include "src/core/lib/slice/slice_internal.h"
namespace grpc_core {
namespace {
int BalancerNameCmp(const grpc_core::UniquePtr<char>& a,
const grpc_core::UniquePtr<char>& b) {
return strcmp(a.get(), b.get());
}
RefCountedPtr<TargetAuthorityTable> CreateTargetAuthorityTable(
const ServerAddressList& addresses) {
TargetAuthorityTable::Entry* target_authority_entries =
static_cast<TargetAuthorityTable::Entry*>(
gpr_zalloc(sizeof(*target_authority_entries) * addresses.size()));
for (size_t i = 0; i < addresses.size(); ++i) {
char* addr_str;
GPR_ASSERT(
grpc_sockaddr_to_string(&addr_str, &addresses[i].address(), true) > 0);
target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str);
gpr_free(addr_str);
char* balancer_name = grpc_channel_arg_get_string(grpc_channel_args_find(
addresses[i].args(), GRPC_ARG_ADDRESS_BALANCER_NAME));
target_authority_entries[i].value.reset(gpr_strdup(balancer_name));
}
RefCountedPtr<TargetAuthorityTable> target_authority_table =
TargetAuthorityTable::Create(addresses.size(), target_authority_entries,
BalancerNameCmp);
gpr_free(target_authority_entries);
return target_authority_table;
}
} // namespace
} // namespace grpc_core
grpc_channel_args* grpc_lb_policy_xds_modify_lb_channel_args(
grpc_channel_args* args) {
const char* args_to_remove[1];
size_t num_args_to_remove = 0;
grpc_arg args_to_add[2];
size_t num_args_to_add = 0;
// Add arg for targets info table.
grpc_core::ServerAddressList* addresses =
grpc_core::FindServerAddressListChannelArg(args);
GPR_ASSERT(addresses != nullptr);
grpc_core::RefCountedPtr<grpc_core::TargetAuthorityTable>
target_authority_table =
grpc_core::CreateTargetAuthorityTable(*addresses);
args_to_add[num_args_to_add++] =
grpc_core::CreateTargetAuthorityTableChannelArg(
target_authority_table.get());
// Substitute the channel credentials with a version without call
// credentials: the load balancer is not necessarily trusted to handle
// bearer token credentials.

@ -67,9 +67,7 @@ struct grpc_ares_request {
/** number of ongoing queries */
size_t pending_queries;
/** is there at least one successful query, set in on_done_cb */
bool success;
/** the errors explaining the request failure, set in on_done_cb */
/** the errors explaining query failures, appended to in query callbacks */
grpc_error* error;
};
@ -145,6 +143,10 @@ void grpc_ares_complete_request_locked(grpc_ares_request* r) {
ServerAddressList* addresses = r->addresses_out->get();
if (addresses != nullptr) {
grpc_cares_wrapper_address_sorting_sort(addresses);
GRPC_ERROR_UNREF(r->error);
r->error = GRPC_ERROR_NONE;
// TODO(apolcyn): allow c-ares to return a service config
// with no addresses alongside it
}
GRPC_CLOSURE_SCHED(r->on_done, r->error);
}
@ -175,9 +177,9 @@ static void on_hostbyname_done_locked(void* arg, int status, int timeouts,
static_cast<grpc_ares_hostbyname_request*>(arg);
grpc_ares_request* r = hr->parent_request;
if (status == ARES_SUCCESS) {
GRPC_ERROR_UNREF(r->error);
r->error = GRPC_ERROR_NONE;
r->success = true;
GRPC_CARES_TRACE_LOG(
"request:%p on_hostbyname_done_locked host=%s ARES_SUCCESS", r,
hr->host);
if (*r->addresses_out == nullptr) {
*r->addresses_out = grpc_core::MakeUnique<ServerAddressList>();
}
@ -229,17 +231,15 @@ static void on_hostbyname_done_locked(void* arg, int status, int timeouts,
}
}
}
} else if (!r->success) {
} else {
char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_hostbyname_done_locked host=%s %s", r,
hr->host, error_msg);
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
} else {
r->error = grpc_error_add_child(error, r->error);
}
r->error = grpc_error_add_child(error, r->error);
}
destroy_hostbyname_request_locked(hr);
}
@ -247,9 +247,8 @@ static void on_hostbyname_done_locked(void* arg, int status, int timeouts,
static void on_srv_query_done_locked(void* arg, int status, int timeouts,
unsigned char* abuf, int alen) {
grpc_ares_request* r = static_cast<grpc_ares_request*>(arg);
GRPC_CARES_TRACE_LOG("request:%p on_query_srv_done_locked", r);
if (status == ARES_SUCCESS) {
GRPC_CARES_TRACE_LOG("request:%p on_query_srv_done_locked ARES_SUCCESS", r);
GRPC_CARES_TRACE_LOG("request:%p on_srv_query_done_locked ARES_SUCCESS", r);
struct ares_srv_reply* reply;
const int parse_status = ares_parse_srv_reply(abuf, alen, &reply);
if (parse_status == ARES_SUCCESS) {
@ -273,17 +272,15 @@ static void on_srv_query_done_locked(void* arg, int status, int timeouts,
if (reply != nullptr) {
ares_free_data(reply);
}
} else if (!r->success) {
} else {
char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_srv_query_done_locked %s", r,
error_msg);
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
} else {
r->error = grpc_error_add_child(error, r->error);
}
r->error = grpc_error_add_child(error, r->error);
}
grpc_ares_request_unref_locked(r);
}
@ -294,12 +291,12 @@ static void on_txt_done_locked(void* arg, int status, int timeouts,
unsigned char* buf, int len) {
char* error_msg;
grpc_ares_request* r = static_cast<grpc_ares_request*>(arg);
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked", r);
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext* result = nullptr;
struct ares_txt_ext* reply = nullptr;
grpc_error* error = GRPC_ERROR_NONE;
if (status != ARES_SUCCESS) goto fail;
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked ARES_SUCCESS", r);
status = ares_parse_txt_reply_ext(buf, len, &reply);
if (status != ARES_SUCCESS) goto fail;
// Find service config in TXT record.
@ -337,12 +334,9 @@ fail:
gpr_asprintf(&error_msg, "C-ares TXT lookup status is not ARES_SUCCESS: %s",
ares_strerror(status));
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked %s", r, error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
} else {
r->error = grpc_error_add_child(error, r->error);
}
r->error = grpc_error_add_child(error, r->error);
done:
grpc_ares_request_unref_locked(r);
}
@ -534,7 +528,6 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
r->on_done = on_done;
r->addresses_out = addrs;
r->service_config_json_out = service_config_json;
r->success = false;
r->error = GRPC_ERROR_NONE;
r->pending_queries = 0;
GRPC_CARES_TRACE_LOG(

@ -86,7 +86,14 @@ FakeResolver::FakeResolver(const ResolverArgs& args) : Resolver(args.combiner) {
channel_args_ = grpc_channel_args_copy(args.args);
FakeResolverResponseGenerator* response_generator =
FakeResolverResponseGenerator::GetFromArgs(args.args);
if (response_generator != nullptr) response_generator->resolver_ = this;
if (response_generator != nullptr) {
response_generator->resolver_ = this;
if (response_generator->response_ != nullptr) {
response_generator->SetResponse(response_generator->response_);
grpc_channel_args_destroy(response_generator->response_);
response_generator->response_ = nullptr;
}
}
}
FakeResolver::~FakeResolver() {
@ -114,6 +121,9 @@ void FakeResolver::RequestReresolutionLocked() {
void FakeResolver::MaybeFinishNextLocked() {
if (next_completion_ != nullptr &&
(next_results_ != nullptr || return_failure_)) {
// When both next_results_ and channel_args_ contain an arg with the same
// name, only the one in next_results_ will be kept since next_results_ is
// before channel_args_.
*target_result_ =
return_failure_ ? nullptr
: grpc_channel_args_union(next_results_, channel_args_);
@ -157,15 +167,19 @@ void FakeResolverResponseGenerator::SetResponseLocked(void* arg,
void FakeResolverResponseGenerator::SetResponse(grpc_channel_args* response) {
GPR_ASSERT(response != nullptr);
GPR_ASSERT(resolver_ != nullptr);
SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
closure_arg->generator = this;
closure_arg->response = grpc_channel_args_copy(response);
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked,
closure_arg,
grpc_combiner_scheduler(resolver_->combiner())),
GRPC_ERROR_NONE);
if (resolver_ != nullptr) {
SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
closure_arg->generator = this;
closure_arg->response = grpc_channel_args_copy(response);
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked,
closure_arg,
grpc_combiner_scheduler(resolver_->combiner())),
GRPC_ERROR_NONE);
} else {
GPR_ASSERT(response_ == nullptr);
response_ = grpc_channel_args_copy(response);
}
}
void FakeResolverResponseGenerator::SetReresolutionResponseLocked(

@ -44,7 +44,9 @@ class FakeResolverResponseGenerator
FakeResolverResponseGenerator() {}
// Instructs the fake resolver associated with the response generator
// instance to trigger a new resolution with the specified response.
// instance to trigger a new resolution with the specified response. If the
// resolver is not available yet, delays response setting until it is. This
// can be called at most once before the resolver is available.
void SetResponse(grpc_channel_args* next_response);
// Sets the re-resolution response, which is returned by the fake resolver
@ -79,6 +81,7 @@ class FakeResolverResponseGenerator
static void SetFailureLocked(void* arg, grpc_error* error);
FakeResolver* resolver_ = nullptr; // Do not own.
grpc_channel_args* response_ = nullptr;
};
} // namespace grpc_core
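The fake_resolver change above lets SetResponse() be called once before the resolver exists: the response is stashed in response_ and replayed when the FakeResolver constructor attaches itself to the generator. A minimal, self-contained sketch of that deferred-delivery pattern (hypothetical class and member names, with std::string standing in for grpc_channel_args) looks like:
#include <cassert>
#include <functional>
#include <string>
#include <utility>
class DeferredResponseGenerator {
 public:
  // May be called at most once before a consumer attaches; in that case the
  // response is stashed and delivered later, mirroring response_ above.
  void SetResponse(std::string response) {
    if (consumer_) {
      consumer_(std::move(response));  // resolver already present: deliver now
    } else {
      assert(pending_.empty());        // "at most once" precondition
      pending_ = std::move(response);  // stash for later
    }
  }
  // Called by the consumer (the resolver) once it comes up.
  void AttachConsumer(std::function<void(std::string)> consumer) {
    consumer_ = std::move(consumer);
    if (!pending_.empty()) {
      consumer_(std::move(pending_));  // replay the stashed response
      pending_.clear();
    }
  }
 private:
  std::function<void(std::string)> consumer_;
  std::string pending_;
};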

@ -148,8 +148,8 @@ void ProcessedResolverResult::ParseLbConfigFromServiceConfig(
LoadBalancingPolicy::ParseLoadBalancingConfig(field);
if (policy != nullptr) {
lb_policy_name_.reset(gpr_strdup(policy->key));
lb_policy_config_ = MakeRefCounted<LoadBalancingPolicy::Config>(
policy->child, service_config_);
lb_policy_config_ =
MakeRefCounted<LoadBalancingPolicy::Config>(policy, service_config_);
}
}

@ -47,6 +47,7 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
@ -77,12 +78,14 @@ class ResolvingLoadBalancingPolicy::ResolvingControlHelper
Subchannel* CreateSubchannel(const grpc_channel_args& args) override {
if (parent_->resolver_ == nullptr) return nullptr; // Shutting down.
if (!CalledByCurrentChild() && !CalledByPendingChild()) return nullptr;
return parent_->channel_control_helper()->CreateSubchannel(args);
}
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args& args) override {
if (parent_->resolver_ == nullptr) return nullptr; // Shutting down.
if (!CalledByCurrentChild() && !CalledByPendingChild()) return nullptr;
return parent_->channel_control_helper()->CreateChannel(target, args);
}
@ -93,11 +96,40 @@ class ResolvingLoadBalancingPolicy::ResolvingControlHelper
GRPC_ERROR_UNREF(state_error);
return;
}
// If this request is from the pending child policy, ignore it until
// it reports READY, at which point we swap it into place.
if (CalledByPendingChild()) {
if (parent_->tracer_->enabled()) {
gpr_log(GPR_INFO,
"resolving_lb=%p helper=%p: pending child policy %p reports "
"state=%s",
parent_.get(), this, child_,
grpc_connectivity_state_name(state));
}
if (state != GRPC_CHANNEL_READY) {
GRPC_ERROR_UNREF(state_error);
return;
}
grpc_pollset_set_del_pollset_set(
parent_->lb_policy_->interested_parties(),
parent_->interested_parties());
MutexLock lock(&parent_->lb_policy_mu_);
parent_->lb_policy_ = std::move(parent_->pending_lb_policy_);
} else if (!CalledByCurrentChild()) {
// This request is from an outdated child, so ignore it.
GRPC_ERROR_UNREF(state_error);
return;
}
parent_->channel_control_helper()->UpdateState(state, state_error,
std::move(picker));
}
void RequestReresolution() override {
// If there is a pending child policy, ignore re-resolution requests
// from the current child policy (or any outdated child).
if (parent_->pending_lb_policy_ != nullptr && !CalledByPendingChild()) {
return;
}
if (parent_->tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: started name re-resolving",
parent_.get());
@ -107,8 +139,21 @@ class ResolvingLoadBalancingPolicy::ResolvingControlHelper
}
}
void set_child(LoadBalancingPolicy* child) { child_ = child; }
private:
bool CalledByPendingChild() const {
GPR_ASSERT(child_ != nullptr);
return child_ == parent_->pending_lb_policy_.get();
}
bool CalledByCurrentChild() const {
GPR_ASSERT(child_ != nullptr);
return child_ == parent_->lb_policy_.get();
};
RefCountedPtr<ResolvingLoadBalancingPolicy> parent_;
LoadBalancingPolicy* child_ = nullptr;
};
//
@ -146,6 +191,7 @@ ResolvingLoadBalancingPolicy::ResolvingLoadBalancingPolicy(
process_resolver_result_(process_resolver_result),
process_resolver_result_user_data_(process_resolver_result_user_data) {
GPR_ASSERT(process_resolver_result != nullptr);
gpr_mu_init(&lb_policy_mu_);
*error = Init(*args.args);
}
@ -169,22 +215,38 @@ grpc_error* ResolvingLoadBalancingPolicy::Init(const grpc_channel_args& args) {
ResolvingLoadBalancingPolicy::~ResolvingLoadBalancingPolicy() {
GPR_ASSERT(resolver_ == nullptr);
GPR_ASSERT(lb_policy_ == nullptr);
gpr_mu_destroy(&lb_policy_mu_);
}
void ResolvingLoadBalancingPolicy::ShutdownLocked() {
if (resolver_ != nullptr) {
resolver_.reset();
MutexLock lock(&lb_policy_mu_);
if (lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties());
lb_policy_.reset();
}
if (pending_lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: shutting down pending lb_policy=%p",
this, pending_lb_policy_.get());
}
grpc_pollset_set_del_pollset_set(pending_lb_policy_->interested_parties(),
interested_parties());
pending_lb_policy_.reset();
}
}
}
void ResolvingLoadBalancingPolicy::ExitIdleLocked() {
if (lb_policy_ != nullptr) {
lb_policy_->ExitIdleLocked();
if (pending_lb_policy_ != nullptr) pending_lb_policy_->ExitIdleLocked();
} else {
if (!started_resolving_ && resolver_ != nullptr) {
StartResolvingLocked();
@ -197,17 +259,24 @@ void ResolvingLoadBalancingPolicy::ResetBackoffLocked() {
resolver_->ResetBackoffLocked();
resolver_->RequestReresolutionLocked();
}
if (lb_policy_ != nullptr) {
lb_policy_->ResetBackoffLocked();
}
if (lb_policy_ != nullptr) lb_policy_->ResetBackoffLocked();
if (pending_lb_policy_ != nullptr) pending_lb_policy_->ResetBackoffLocked();
}
void ResolvingLoadBalancingPolicy::FillChildRefsForChannelz(
channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) {
// Delegate to the lb_policy_ to fill the children subchannels.
// This must be done holding lb_policy_mu_, since this method does not
// run in the combiner.
MutexLock lock(&lb_policy_mu_);
if (lb_policy_ != nullptr) {
lb_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
}
if (pending_lb_policy_ != nullptr) {
pending_lb_policy_->FillChildRefsForChannelz(child_subchannels,
child_channels);
}
}
void ResolvingLoadBalancingPolicy::StartResolvingLocked() {
@ -229,14 +298,26 @@ void ResolvingLoadBalancingPolicy::OnResolverShutdownLocked(grpc_error* error) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: shutting down", this);
}
if (lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
{
MutexLock lock(&lb_policy_mu_);
if (lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties());
lb_policy_.reset();
}
if (pending_lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: shutting down pending lb_policy=%p",
this, pending_lb_policy_.get());
}
grpc_pollset_set_del_pollset_set(pending_lb_policy_->interested_parties(),
interested_parties());
pending_lb_policy_.reset();
}
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties());
lb_policy_.reset();
}
if (resolver_ != nullptr) {
// This should never happen; it can only be triggered by a resolver
@ -260,53 +341,142 @@ void ResolvingLoadBalancingPolicy::OnResolverShutdownLocked(grpc_error* error) {
Unref();
}
// Creates a new LB policy, replacing any previous one.
void ResolvingLoadBalancingPolicy::CreateOrUpdateLbPolicyLocked(
const char* lb_policy_name, RefCountedPtr<Config> lb_policy_config,
TraceStringVector* trace_strings) {
// If the child policy name changes, we need to create a new child
// policy. When this happens, we leave child_policy_ as-is and store
// the new child policy in pending_child_policy_. Once the new child
// policy transitions into state READY, we swap it into child_policy_,
// replacing the original child policy. So pending_child_policy_ is
// non-null only between when we apply an update that changes the child
// policy name and when the new child reports state READY.
//
// Updates can arrive at any point during this transition. We always
// apply updates relative to the most recently created child policy,
// even if the most recent one is still in pending_child_policy_. This
// is true both when applying the updates to an existing child policy
// and when determining whether we need to create a new policy.
//
// As a result of this, there are several cases to consider here:
//
// 1. We have no existing child policy (i.e., we have started up but
// have not yet received a serverlist from the balancer or gone
// into fallback mode; in this case, both child_policy_ and
// pending_child_policy_ are null). In this case, we create a
// new child policy and store it in child_policy_.
//
// 2. We have an existing child policy and have no pending child policy
// from a previous update (i.e., either there has not been a
// previous update that changed the policy name, or we have already
// finished swapping in the new policy; in this case, child_policy_
// is non-null but pending_child_policy_ is null). In this case:
// a. If child_policy_->name() equals child_policy_name, then we
// update the existing child policy.
// b. If child_policy_->name() does not equal child_policy_name,
// we create a new policy. The policy will be stored in
// pending_child_policy_ and will later be swapped into
// child_policy_ by the helper when the new child transitions
// into state READY.
//
// 3. We have an existing child policy and have a pending child policy
// from a previous update (i.e., a previous update set
// pending_child_policy_ as per case 2b above and that policy has
// not yet transitioned into state READY and been swapped into
// child_policy_; in this case, both child_policy_ and
// pending_child_policy_ are non-null). In this case:
// a. If pending_child_policy_->name() equals child_policy_name,
// then we update the existing pending child policy.
// b. If pending_child_policy->name() does not equal
// child_policy_name, then we create a new policy. The new
// policy is stored in pending_child_policy_ (replacing the one
// that was there before, which will be immediately shut down)
// and will later be swapped into child_policy_ by the helper
// when the new child transitions into state READY.
const bool create_policy =
// case 1
lb_policy_ == nullptr ||
// case 2b
(pending_lb_policy_ == nullptr &&
strcmp(lb_policy_->name(), lb_policy_name) != 0) ||
// case 3b
(pending_lb_policy_ != nullptr &&
strcmp(pending_lb_policy_->name(), lb_policy_name) != 0);
LoadBalancingPolicy* policy_to_update = nullptr;
if (create_policy) {
// Cases 1, 2b, and 3b: create a new child policy.
// If lb_policy_ is null, we set it (case 1), else we set
// pending_lb_policy_ (cases 2b and 3b).
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: Creating new %schild policy %s", this,
lb_policy_ == nullptr ? "" : "pending ", lb_policy_name);
}
auto new_policy = CreateLbPolicyLocked(lb_policy_name, trace_strings);
auto& lb_policy = lb_policy_ == nullptr ? lb_policy_ : pending_lb_policy_;
{
MutexLock lock(&lb_policy_mu_);
lb_policy = std::move(new_policy);
}
policy_to_update = lb_policy.get();
} else {
// Cases 2a and 3a: update an existing policy.
// If we have a pending child policy, send the update to the pending
// policy (case 3a), else send it to the current policy (case 2a).
policy_to_update = pending_lb_policy_ != nullptr ? pending_lb_policy_.get()
: lb_policy_.get();
}
GPR_ASSERT(policy_to_update != nullptr);
// Update the policy.
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: Updating %schild policy %p", this,
policy_to_update == pending_lb_policy_.get() ? "pending " : "",
policy_to_update);
}
policy_to_update->UpdateLocked(*resolver_result_,
std::move(lb_policy_config));
}
// Creates a new LB policy.
// Updates trace_strings to indicate what was done.
void ResolvingLoadBalancingPolicy::CreateNewLbPolicyLocked(
OrphanablePtr<LoadBalancingPolicy>
ResolvingLoadBalancingPolicy::CreateLbPolicyLocked(
const char* lb_policy_name, TraceStringVector* trace_strings) {
ResolvingControlHelper* helper = New<ResolvingControlHelper>(Ref());
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.channel_control_helper =
UniquePtr<ChannelControlHelper>(New<ResolvingControlHelper>(Ref()));
UniquePtr<ChannelControlHelper>(helper);
lb_policy_args.args = resolver_result_;
OrphanablePtr<LoadBalancingPolicy> new_lb_policy =
OrphanablePtr<LoadBalancingPolicy> lb_policy =
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
lb_policy_name, std::move(lb_policy_args));
if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
if (GPR_UNLIKELY(lb_policy == nullptr)) {
gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
if (channelz_node() != nullptr) {
char* str;
gpr_asprintf(&str, "Could not create LB policy \"%s\"", lb_policy_name);
trace_strings->push_back(str);
}
} else {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: created new LB policy \"%s\" (%p)",
this, lb_policy_name, new_lb_policy.get());
}
if (channelz_node() != nullptr) {
char* str;
gpr_asprintf(&str, "Created new LB policy \"%s\"", lb_policy_name);
trace_strings->push_back(str);
}
// Propagate channelz node.
auto* channelz = channelz_node();
if (channelz != nullptr) {
new_lb_policy->set_channelz_node(channelz->Ref());
}
// Swap out the LB policy and update the fds in interested_parties_.
if (lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties());
}
lb_policy_ = std::move(new_lb_policy);
grpc_pollset_set_add_pollset_set(lb_policy_->interested_parties(),
interested_parties());
return nullptr;
}
helper->set_child(lb_policy.get());
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: created new LB policy \"%s\" (%p)",
this, lb_policy_name, lb_policy.get());
}
if (channelz_node() != nullptr) {
char* str;
gpr_asprintf(&str, "Created new LB policy \"%s\"", lb_policy_name);
trace_strings->push_back(str);
}
// Propagate channelz node.
auto* channelz = channelz_node();
if (channelz != nullptr) {
lb_policy->set_channelz_node(channelz->Ref());
}
grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
interested_parties());
return lb_policy;
}
void ResolvingLoadBalancingPolicy::MaybeAddTraceMessagesForAddressChangesLocked(
@ -415,23 +585,8 @@ void ResolvingLoadBalancingPolicy::OnResolverResultChangedLocked(
lb_policy_config = self->child_lb_config_;
}
GPR_ASSERT(lb_policy_name != nullptr);
// If we're not already using the right LB policy name, instantiate
// a new one.
if (self->lb_policy_ == nullptr ||
strcmp(self->lb_policy_->name(), lb_policy_name) != 0) {
if (self->tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: creating new LB policy \"%s\"",
self, lb_policy_name);
}
self->CreateNewLbPolicyLocked(lb_policy_name, &trace_strings);
}
// Update the LB policy with the new addresses and config.
if (self->tracer_->enabled()) {
gpr_log(GPR_INFO, "resolving_lb=%p: updating LB policy \"%s\" (%p)", self,
lb_policy_name, self->lb_policy_.get());
}
self->lb_policy_->UpdateLocked(*self->resolver_result_,
std::move(lb_policy_config));
self->CreateOrUpdateLbPolicyLocked(
lb_policy_name, std::move(lb_policy_config), &trace_strings);
// Add channel trace event.
if (self->channelz_node() != nullptr) {
if (service_config_changed) {

@ -102,8 +102,11 @@ class ResolvingLoadBalancingPolicy : public LoadBalancingPolicy {
void StartResolvingLocked();
void OnResolverShutdownLocked(grpc_error* error);
void CreateNewLbPolicyLocked(const char* lb_policy_name,
TraceStringVector* trace_strings);
void CreateOrUpdateLbPolicyLocked(const char* lb_policy_name,
RefCountedPtr<Config>,
TraceStringVector* trace_strings);
OrphanablePtr<LoadBalancingPolicy> CreateLbPolicyLocked(
const char* lb_policy_name, TraceStringVector* trace_strings);
void MaybeAddTraceMessagesForAddressChangesLocked(
TraceStringVector* trace_strings);
void ConcatenateAndAddChannelTraceLocked(
@ -125,8 +128,12 @@ class ResolvingLoadBalancingPolicy : public LoadBalancingPolicy {
bool previous_resolution_contained_addresses_ = false;
grpc_closure on_resolver_result_changed_;
// Child LB policy and associated state.
// Child LB policy.
OrphanablePtr<LoadBalancingPolicy> lb_policy_;
OrphanablePtr<LoadBalancingPolicy> pending_lb_policy_;
// Lock held when modifying the value of child_policy_ or
// pending_child_policy_.
gpr_mu lb_policy_mu_;
};
} // namespace grpc_core
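For reference, the create-vs-update decision enumerated in the long comment inside CreateOrUpdateLbPolicyLocked() above reduces to a three-way name comparison. A standalone sketch of just that decision (ShouldCreateNewChild and its parameters are hypothetical names, not part of the patch):
#include <cstring>
// Returns true for cases 1, 2b and 3b from the comment (a new child policy
// must be created); false for cases 2a and 3a (the current or pending child
// is updated in place).
inline bool ShouldCreateNewChild(const char* current_name,   // nullptr in case 1
                                 const char* pending_name,   // nullptr if no pending child
                                 const char* requested_name) {
  if (current_name == nullptr) return true;                          // case 1
  if (pending_name == nullptr) {
    return strcmp(current_name, requested_name) != 0;                // 2a vs 2b
  }
  return strcmp(pending_name, requested_name) != 0;                  // 3a vs 3b
}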

@ -1136,8 +1136,10 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
}
t->goaway_error = grpc_error_set_str(
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("GOAWAY received"),
GRPC_ERROR_INT_HTTP2_ERROR, static_cast<intptr_t>(goaway_error)),
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("GOAWAY received"),
GRPC_ERROR_INT_HTTP2_ERROR, static_cast<intptr_t>(goaway_error)),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
GRPC_ERROR_STR_RAW_BYTES, goaway_text);
/* We want to log this irrespective of whether http tracing is enabled */
@ -2474,7 +2476,6 @@ static grpc_error* try_http_parsing(grpc_chttp2_transport* t) {
size_t i = 0;
grpc_error* error = GRPC_ERROR_NONE;
grpc_http_response response;
memset(&response, 0, sizeof(response));
grpc_http_parser_init(&parser, GRPC_HTTP_RESPONSE, &response);
@ -2577,7 +2578,8 @@ static void read_action_locked(void* tp, grpc_error* error) {
grpc_slice_buffer_reset_and_unref_internal(&t->read_buffer);
if (keep_reading) {
grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked);
const bool urgent = t->goaway_error != GRPC_ERROR_NONE;
grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked, urgent);
grpc_chttp2_act_on_flowctl_action(t->flow_control->MakeAction(), t,
nullptr);
GRPC_CHTTP2_UNREF_TRANSPORT(t, "keep_reading");

@ -190,7 +190,7 @@ TransportFlowControl::TransportFlowControl(const grpc_chttp2_transport* t,
uint32_t TransportFlowControl::MaybeSendUpdate(bool writing_anyway) {
FlowControlTrace trace("t updt sent", this, nullptr);
const uint32_t target_announced_window =
static_cast<const uint32_t>(target_window());
static_cast<uint32_t>(target_window());
if ((writing_anyway || announced_window_ <= target_announced_window / 2) &&
announced_window_ != target_announced_window) {
const uint32_t announce = static_cast<uint32_t> GPR_CLAMP(

@ -0,0 +1,27 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/any.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#include <stddef.h>
#include "upb/msg.h"
#include "google/protobuf/any.upb.h"
#include "upb/port_def.inc"
static const upb_msglayout_field google_protobuf_Any__fields[2] = {
{1, UPB_SIZE(0, 0), 0, 0, 9, 1},
{2, UPB_SIZE(8, 16), 0, 0, 12, 1},
};
const upb_msglayout google_protobuf_Any_msginit = {
NULL,
&google_protobuf_Any__fields[0],
UPB_SIZE(16, 32), 2, false,
};
#include "upb/port_undef.inc"

@ -0,0 +1,59 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/any.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#ifndef GOOGLE_PROTOBUF_ANY_PROTO_UPB_H_
#define GOOGLE_PROTOBUF_ANY_PROTO_UPB_H_
#include "upb/generated_util.h"
#include "upb/msg.h"
#include "upb/decode.h"
#include "upb/encode.h"
#include "upb/port_def.inc"
#ifdef __cplusplus
extern "C" {
#endif
struct google_protobuf_Any;
typedef struct google_protobuf_Any google_protobuf_Any;
extern const upb_msglayout google_protobuf_Any_msginit;
/* Enums */
/* google.protobuf.Any */
UPB_INLINE google_protobuf_Any *google_protobuf_Any_new(upb_arena *arena) {
return (google_protobuf_Any *)upb_msg_new(&google_protobuf_Any_msginit, arena);
}
UPB_INLINE google_protobuf_Any *google_protobuf_Any_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_Any *ret = google_protobuf_Any_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_Any_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_Any_serialize(const google_protobuf_Any *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_Any_msginit, arena, len);
}
UPB_INLINE upb_strview google_protobuf_Any_type_url(const google_protobuf_Any *msg) { return UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(0, 0)); }
UPB_INLINE upb_strview google_protobuf_Any_value(const google_protobuf_Any *msg) { return UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(8, 16)); }
UPB_INLINE void google_protobuf_Any_set_type_url(google_protobuf_Any *msg, upb_strview value) {
UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(0, 0)) = value;
}
UPB_INLINE void google_protobuf_Any_set_value(google_protobuf_Any *msg, upb_strview value) {
UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(8, 16)) = value;
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#include "upb/port_undef.inc"
#endif /* GOOGLE_PROTOBUF_ANY_PROTO_UPB_H_ */
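A short usage sketch of the generated Any accessors above (illustrative only; it assumes upb_arena_new()/upb_arena_free() from upb/upb.h, which the generated header does not itself include):
#include <string.h>
#include "upb/upb.h"
#include "google/protobuf/any.upb.h"
static void any_roundtrip(void) {
  upb_arena *arena = upb_arena_new();
  google_protobuf_Any *any = google_protobuf_Any_new(arena);
  const char *url = "type.googleapis.com/google.protobuf.Duration";
  google_protobuf_Any_set_type_url(any, upb_strview_make(url, strlen(url)));
  size_t len;
  char *wire = google_protobuf_Any_serialize(any, arena, &len);
  /* Parse the bytes back into a second message allocated from the same arena. */
  google_protobuf_Any *copy =
      google_protobuf_Any_parsenew(upb_strview_make(wire, len), arena);
  (void)copy;
  upb_arena_free(arena);  /* frees both messages and the serialized buffer */
}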

@ -0,0 +1,485 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/descriptor.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#include <stddef.h>
#include "upb/msg.h"
#include "google/protobuf/descriptor.upb.h"
#include "upb/port_def.inc"
static const upb_msglayout *const google_protobuf_FileDescriptorSet_submsgs[1] = {
&google_protobuf_FileDescriptorProto_msginit,
};
static const upb_msglayout_field google_protobuf_FileDescriptorSet__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_FileDescriptorSet_msginit = {
&google_protobuf_FileDescriptorSet_submsgs[0],
&google_protobuf_FileDescriptorSet__fields[0],
UPB_SIZE(4, 8), 1, false,
};
static const upb_msglayout *const google_protobuf_FileDescriptorProto_submsgs[6] = {
&google_protobuf_DescriptorProto_msginit,
&google_protobuf_EnumDescriptorProto_msginit,
&google_protobuf_FieldDescriptorProto_msginit,
&google_protobuf_FileOptions_msginit,
&google_protobuf_ServiceDescriptorProto_msginit,
&google_protobuf_SourceCodeInfo_msginit,
};
static const upb_msglayout_field google_protobuf_FileDescriptorProto__fields[12] = {
{1, UPB_SIZE(4, 8), 1, 0, 9, 1},
{2, UPB_SIZE(12, 24), 2, 0, 9, 1},
{3, UPB_SIZE(36, 72), 0, 0, 9, 3},
{4, UPB_SIZE(40, 80), 0, 0, 11, 3},
{5, UPB_SIZE(44, 88), 0, 1, 11, 3},
{6, UPB_SIZE(48, 96), 0, 4, 11, 3},
{7, UPB_SIZE(52, 104), 0, 2, 11, 3},
{8, UPB_SIZE(28, 56), 4, 3, 11, 1},
{9, UPB_SIZE(32, 64), 5, 5, 11, 1},
{10, UPB_SIZE(56, 112), 0, 0, 5, 3},
{11, UPB_SIZE(60, 120), 0, 0, 5, 3},
{12, UPB_SIZE(20, 40), 3, 0, 9, 1},
};
const upb_msglayout google_protobuf_FileDescriptorProto_msginit = {
&google_protobuf_FileDescriptorProto_submsgs[0],
&google_protobuf_FileDescriptorProto__fields[0],
UPB_SIZE(64, 128), 12, false,
};
static const upb_msglayout *const google_protobuf_DescriptorProto_submsgs[8] = {
&google_protobuf_DescriptorProto_msginit,
&google_protobuf_DescriptorProto_ExtensionRange_msginit,
&google_protobuf_DescriptorProto_ReservedRange_msginit,
&google_protobuf_EnumDescriptorProto_msginit,
&google_protobuf_FieldDescriptorProto_msginit,
&google_protobuf_MessageOptions_msginit,
&google_protobuf_OneofDescriptorProto_msginit,
};
static const upb_msglayout_field google_protobuf_DescriptorProto__fields[10] = {
{1, UPB_SIZE(4, 8), 1, 0, 9, 1},
{2, UPB_SIZE(16, 32), 0, 4, 11, 3},
{3, UPB_SIZE(20, 40), 0, 0, 11, 3},
{4, UPB_SIZE(24, 48), 0, 3, 11, 3},
{5, UPB_SIZE(28, 56), 0, 1, 11, 3},
{6, UPB_SIZE(32, 64), 0, 4, 11, 3},
{7, UPB_SIZE(12, 24), 2, 5, 11, 1},
{8, UPB_SIZE(36, 72), 0, 6, 11, 3},
{9, UPB_SIZE(40, 80), 0, 2, 11, 3},
{10, UPB_SIZE(44, 88), 0, 0, 9, 3},
};
const upb_msglayout google_protobuf_DescriptorProto_msginit = {
&google_protobuf_DescriptorProto_submsgs[0],
&google_protobuf_DescriptorProto__fields[0],
UPB_SIZE(48, 96), 10, false,
};
static const upb_msglayout *const google_protobuf_DescriptorProto_ExtensionRange_submsgs[1] = {
&google_protobuf_ExtensionRangeOptions_msginit,
};
static const upb_msglayout_field google_protobuf_DescriptorProto_ExtensionRange__fields[3] = {
{1, UPB_SIZE(4, 4), 1, 0, 5, 1},
{2, UPB_SIZE(8, 8), 2, 0, 5, 1},
{3, UPB_SIZE(12, 16), 3, 0, 11, 1},
};
const upb_msglayout google_protobuf_DescriptorProto_ExtensionRange_msginit = {
&google_protobuf_DescriptorProto_ExtensionRange_submsgs[0],
&google_protobuf_DescriptorProto_ExtensionRange__fields[0],
UPB_SIZE(16, 24), 3, false,
};
static const upb_msglayout_field google_protobuf_DescriptorProto_ReservedRange__fields[2] = {
{1, UPB_SIZE(4, 4), 1, 0, 5, 1},
{2, UPB_SIZE(8, 8), 2, 0, 5, 1},
};
const upb_msglayout google_protobuf_DescriptorProto_ReservedRange_msginit = {
NULL,
&google_protobuf_DescriptorProto_ReservedRange__fields[0],
UPB_SIZE(12, 12), 2, false,
};
static const upb_msglayout *const google_protobuf_ExtensionRangeOptions_submsgs[1] = {
&google_protobuf_UninterpretedOption_msginit,
};
static const upb_msglayout_field google_protobuf_ExtensionRangeOptions__fields[1] = {
{999, UPB_SIZE(0, 0), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_ExtensionRangeOptions_msginit = {
&google_protobuf_ExtensionRangeOptions_submsgs[0],
&google_protobuf_ExtensionRangeOptions__fields[0],
UPB_SIZE(4, 8), 1, false,
};
static const upb_msglayout *const google_protobuf_FieldDescriptorProto_submsgs[1] = {
&google_protobuf_FieldOptions_msginit,
};
static const upb_msglayout_field google_protobuf_FieldDescriptorProto__fields[10] = {
{1, UPB_SIZE(32, 32), 5, 0, 9, 1},
{2, UPB_SIZE(40, 48), 6, 0, 9, 1},
{3, UPB_SIZE(24, 24), 3, 0, 5, 1},
{4, UPB_SIZE(8, 8), 1, 0, 14, 1},
{5, UPB_SIZE(16, 16), 2, 0, 14, 1},
{6, UPB_SIZE(48, 64), 7, 0, 9, 1},
{7, UPB_SIZE(56, 80), 8, 0, 9, 1},
{8, UPB_SIZE(72, 112), 10, 0, 11, 1},
{9, UPB_SIZE(28, 28), 4, 0, 5, 1},
{10, UPB_SIZE(64, 96), 9, 0, 9, 1},
};
const upb_msglayout google_protobuf_FieldDescriptorProto_msginit = {
&google_protobuf_FieldDescriptorProto_submsgs[0],
&google_protobuf_FieldDescriptorProto__fields[0],
UPB_SIZE(80, 128), 10, false,
};
static const upb_msglayout *const google_protobuf_OneofDescriptorProto_submsgs[1] = {
&google_protobuf_OneofOptions_msginit,
};
static const upb_msglayout_field google_protobuf_OneofDescriptorProto__fields[2] = {
{1, UPB_SIZE(4, 8), 1, 0, 9, 1},
{2, UPB_SIZE(12, 24), 2, 0, 11, 1},
};
const upb_msglayout google_protobuf_OneofDescriptorProto_msginit = {
&google_protobuf_OneofDescriptorProto_submsgs[0],
&google_protobuf_OneofDescriptorProto__fields[0],
UPB_SIZE(16, 32), 2, false,
};
static const upb_msglayout *const google_protobuf_EnumDescriptorProto_submsgs[3] = {
&google_protobuf_EnumDescriptorProto_EnumReservedRange_msginit,
&google_protobuf_EnumOptions_msginit,
&google_protobuf_EnumValueDescriptorProto_msginit,
};
static const upb_msglayout_field google_protobuf_EnumDescriptorProto__fields[5] = {
{1, UPB_SIZE(4, 8), 1, 0, 9, 1},
{2, UPB_SIZE(16, 32), 0, 2, 11, 3},
{3, UPB_SIZE(12, 24), 2, 1, 11, 1},
{4, UPB_SIZE(20, 40), 0, 0, 11, 3},
{5, UPB_SIZE(24, 48), 0, 0, 9, 3},
};
const upb_msglayout google_protobuf_EnumDescriptorProto_msginit = {
&google_protobuf_EnumDescriptorProto_submsgs[0],
&google_protobuf_EnumDescriptorProto__fields[0],
UPB_SIZE(32, 64), 5, false,
};
static const upb_msglayout_field google_protobuf_EnumDescriptorProto_EnumReservedRange__fields[2] = {
{1, UPB_SIZE(4, 4), 1, 0, 5, 1},
{2, UPB_SIZE(8, 8), 2, 0, 5, 1},
};
const upb_msglayout google_protobuf_EnumDescriptorProto_EnumReservedRange_msginit = {
NULL,
&google_protobuf_EnumDescriptorProto_EnumReservedRange__fields[0],
UPB_SIZE(12, 12), 2, false,
};
static const upb_msglayout *const google_protobuf_EnumValueDescriptorProto_submsgs[1] = {
&google_protobuf_EnumValueOptions_msginit,
};
static const upb_msglayout_field google_protobuf_EnumValueDescriptorProto__fields[3] = {
{1, UPB_SIZE(8, 8), 2, 0, 9, 1},
{2, UPB_SIZE(4, 4), 1, 0, 5, 1},
{3, UPB_SIZE(16, 24), 3, 0, 11, 1},
};
const upb_msglayout google_protobuf_EnumValueDescriptorProto_msginit = {
&google_protobuf_EnumValueDescriptorProto_submsgs[0],
&google_protobuf_EnumValueDescriptorProto__fields[0],
UPB_SIZE(24, 32), 3, false,
};
static const upb_msglayout *const google_protobuf_ServiceDescriptorProto_submsgs[2] = {
&google_protobuf_MethodDescriptorProto_msginit,
&google_protobuf_ServiceOptions_msginit,
};
static const upb_msglayout_field google_protobuf_ServiceDescriptorProto__fields[3] = {
{1, UPB_SIZE(4, 8), 1, 0, 9, 1},
{2, UPB_SIZE(16, 32), 0, 0, 11, 3},
{3, UPB_SIZE(12, 24), 2, 1, 11, 1},
};
const upb_msglayout google_protobuf_ServiceDescriptorProto_msginit = {
&google_protobuf_ServiceDescriptorProto_submsgs[0],
&google_protobuf_ServiceDescriptorProto__fields[0],
UPB_SIZE(24, 48), 3, false,
};
static const upb_msglayout *const google_protobuf_MethodDescriptorProto_submsgs[1] = {
&google_protobuf_MethodOptions_msginit,
};
static const upb_msglayout_field google_protobuf_MethodDescriptorProto__fields[6] = {
{1, UPB_SIZE(4, 8), 3, 0, 9, 1},
{2, UPB_SIZE(12, 24), 4, 0, 9, 1},
{3, UPB_SIZE(20, 40), 5, 0, 9, 1},
{4, UPB_SIZE(28, 56), 6, 0, 11, 1},
{5, UPB_SIZE(1, 1), 1, 0, 8, 1},
{6, UPB_SIZE(2, 2), 2, 0, 8, 1},
};
const upb_msglayout google_protobuf_MethodDescriptorProto_msginit = {
&google_protobuf_MethodDescriptorProto_submsgs[0],
&google_protobuf_MethodDescriptorProto__fields[0],
UPB_SIZE(32, 64), 6, false,
};
static const upb_msglayout *const google_protobuf_FileOptions_submsgs[1] = {
&google_protobuf_UninterpretedOption_msginit,
};
static const upb_msglayout_field google_protobuf_FileOptions__fields[21] = {
{1, UPB_SIZE(28, 32), 11, 0, 9, 1},
{8, UPB_SIZE(36, 48), 12, 0, 9, 1},
{9, UPB_SIZE(8, 8), 1, 0, 14, 1},
{10, UPB_SIZE(16, 16), 2, 0, 8, 1},
{11, UPB_SIZE(44, 64), 13, 0, 9, 1},
{16, UPB_SIZE(17, 17), 3, 0, 8, 1},
{17, UPB_SIZE(18, 18), 4, 0, 8, 1},
{18, UPB_SIZE(19, 19), 5, 0, 8, 1},
{20, UPB_SIZE(20, 20), 6, 0, 8, 1},
{23, UPB_SIZE(21, 21), 7, 0, 8, 1},
{27, UPB_SIZE(22, 22), 8, 0, 8, 1},
{31, UPB_SIZE(23, 23), 9, 0, 8, 1},
{36, UPB_SIZE(52, 80), 14, 0, 9, 1},
{37, UPB_SIZE(60, 96), 15, 0, 9, 1},
{39, UPB_SIZE(68, 112), 16, 0, 9, 1},
{40, UPB_SIZE(76, 128), 17, 0, 9, 1},
{41, UPB_SIZE(84, 144), 18, 0, 9, 1},
{42, UPB_SIZE(24, 24), 10, 0, 8, 1},
{44, UPB_SIZE(92, 160), 19, 0, 9, 1},
{45, UPB_SIZE(100, 176), 20, 0, 9, 1},
{999, UPB_SIZE(108, 192), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_FileOptions_msginit = {
&google_protobuf_FileOptions_submsgs[0],
&google_protobuf_FileOptions__fields[0],
UPB_SIZE(112, 208), 21, false,
};
static const upb_msglayout *const google_protobuf_MessageOptions_submsgs[1] = {
&google_protobuf_UninterpretedOption_msginit,
};
static const upb_msglayout_field google_protobuf_MessageOptions__fields[5] = {
{1, UPB_SIZE(1, 1), 1, 0, 8, 1},
{2, UPB_SIZE(2, 2), 2, 0, 8, 1},
{3, UPB_SIZE(3, 3), 3, 0, 8, 1},
{7, UPB_SIZE(4, 4), 4, 0, 8, 1},
{999, UPB_SIZE(8, 8), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_MessageOptions_msginit = {
&google_protobuf_MessageOptions_submsgs[0],
&google_protobuf_MessageOptions__fields[0],
UPB_SIZE(12, 16), 5, false,
};
static const upb_msglayout *const google_protobuf_FieldOptions_submsgs[1] = {
&google_protobuf_UninterpretedOption_msginit,
};
static const upb_msglayout_field google_protobuf_FieldOptions__fields[7] = {
{1, UPB_SIZE(8, 8), 1, 0, 14, 1},
{2, UPB_SIZE(24, 24), 3, 0, 8, 1},
{3, UPB_SIZE(25, 25), 4, 0, 8, 1},
{5, UPB_SIZE(26, 26), 5, 0, 8, 1},
{6, UPB_SIZE(16, 16), 2, 0, 14, 1},
{10, UPB_SIZE(27, 27), 6, 0, 8, 1},
{999, UPB_SIZE(28, 32), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_FieldOptions_msginit = {
&google_protobuf_FieldOptions_submsgs[0],
&google_protobuf_FieldOptions__fields[0],
UPB_SIZE(32, 40), 7, false,
};
static const upb_msglayout *const google_protobuf_OneofOptions_submsgs[1] = {
&google_protobuf_UninterpretedOption_msginit,
};
static const upb_msglayout_field google_protobuf_OneofOptions__fields[1] = {
{999, UPB_SIZE(0, 0), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_OneofOptions_msginit = {
&google_protobuf_OneofOptions_submsgs[0],
&google_protobuf_OneofOptions__fields[0],
UPB_SIZE(4, 8), 1, false,
};
static const upb_msglayout *const google_protobuf_EnumOptions_submsgs[1] = {
&google_protobuf_UninterpretedOption_msginit,
};
static const upb_msglayout_field google_protobuf_EnumOptions__fields[3] = {
{2, UPB_SIZE(1, 1), 1, 0, 8, 1},
{3, UPB_SIZE(2, 2), 2, 0, 8, 1},
{999, UPB_SIZE(4, 8), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_EnumOptions_msginit = {
&google_protobuf_EnumOptions_submsgs[0],
&google_protobuf_EnumOptions__fields[0],
UPB_SIZE(8, 16), 3, false,
};
static const upb_msglayout *const google_protobuf_EnumValueOptions_submsgs[1] = {
&google_protobuf_UninterpretedOption_msginit,
};
static const upb_msglayout_field google_protobuf_EnumValueOptions__fields[2] = {
{1, UPB_SIZE(1, 1), 1, 0, 8, 1},
{999, UPB_SIZE(4, 8), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_EnumValueOptions_msginit = {
&google_protobuf_EnumValueOptions_submsgs[0],
&google_protobuf_EnumValueOptions__fields[0],
UPB_SIZE(8, 16), 2, false,
};
static const upb_msglayout *const google_protobuf_ServiceOptions_submsgs[1] = {
&google_protobuf_UninterpretedOption_msginit,
};
static const upb_msglayout_field google_protobuf_ServiceOptions__fields[2] = {
{33, UPB_SIZE(1, 1), 1, 0, 8, 1},
{999, UPB_SIZE(4, 8), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_ServiceOptions_msginit = {
&google_protobuf_ServiceOptions_submsgs[0],
&google_protobuf_ServiceOptions__fields[0],
UPB_SIZE(8, 16), 2, false,
};
static const upb_msglayout *const google_protobuf_MethodOptions_submsgs[1] = {
&google_protobuf_UninterpretedOption_msginit,
};
static const upb_msglayout_field google_protobuf_MethodOptions__fields[3] = {
{33, UPB_SIZE(16, 16), 2, 0, 8, 1},
{34, UPB_SIZE(8, 8), 1, 0, 14, 1},
{999, UPB_SIZE(20, 24), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_MethodOptions_msginit = {
&google_protobuf_MethodOptions_submsgs[0],
&google_protobuf_MethodOptions__fields[0],
UPB_SIZE(24, 32), 3, false,
};
static const upb_msglayout *const google_protobuf_UninterpretedOption_submsgs[1] = {
&google_protobuf_UninterpretedOption_NamePart_msginit,
};
static const upb_msglayout_field google_protobuf_UninterpretedOption__fields[7] = {
{2, UPB_SIZE(56, 80), 0, 0, 11, 3},
{3, UPB_SIZE(32, 32), 4, 0, 9, 1},
{4, UPB_SIZE(8, 8), 1, 0, 4, 1},
{5, UPB_SIZE(16, 16), 2, 0, 3, 1},
{6, UPB_SIZE(24, 24), 3, 0, 1, 1},
{7, UPB_SIZE(40, 48), 5, 0, 12, 1},
{8, UPB_SIZE(48, 64), 6, 0, 9, 1},
};
const upb_msglayout google_protobuf_UninterpretedOption_msginit = {
&google_protobuf_UninterpretedOption_submsgs[0],
&google_protobuf_UninterpretedOption__fields[0],
UPB_SIZE(64, 96), 7, false,
};
static const upb_msglayout_field google_protobuf_UninterpretedOption_NamePart__fields[2] = {
{1, UPB_SIZE(4, 8), 2, 0, 9, 2},
{2, UPB_SIZE(1, 1), 1, 0, 8, 2},
};
const upb_msglayout google_protobuf_UninterpretedOption_NamePart_msginit = {
NULL,
&google_protobuf_UninterpretedOption_NamePart__fields[0],
UPB_SIZE(16, 32), 2, false,
};
static const upb_msglayout *const google_protobuf_SourceCodeInfo_submsgs[1] = {
&google_protobuf_SourceCodeInfo_Location_msginit,
};
static const upb_msglayout_field google_protobuf_SourceCodeInfo__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_SourceCodeInfo_msginit = {
&google_protobuf_SourceCodeInfo_submsgs[0],
&google_protobuf_SourceCodeInfo__fields[0],
UPB_SIZE(4, 8), 1, false,
};
static const upb_msglayout_field google_protobuf_SourceCodeInfo_Location__fields[5] = {
{1, UPB_SIZE(20, 40), 0, 0, 5, 3},
{2, UPB_SIZE(24, 48), 0, 0, 5, 3},
{3, UPB_SIZE(4, 8), 1, 0, 9, 1},
{4, UPB_SIZE(12, 24), 2, 0, 9, 1},
{6, UPB_SIZE(28, 56), 0, 0, 9, 3},
};
const upb_msglayout google_protobuf_SourceCodeInfo_Location_msginit = {
NULL,
&google_protobuf_SourceCodeInfo_Location__fields[0],
UPB_SIZE(32, 64), 5, false,
};
static const upb_msglayout *const google_protobuf_GeneratedCodeInfo_submsgs[1] = {
&google_protobuf_GeneratedCodeInfo_Annotation_msginit,
};
static const upb_msglayout_field google_protobuf_GeneratedCodeInfo__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_GeneratedCodeInfo_msginit = {
&google_protobuf_GeneratedCodeInfo_submsgs[0],
&google_protobuf_GeneratedCodeInfo__fields[0],
UPB_SIZE(4, 8), 1, false,
};
static const upb_msglayout_field google_protobuf_GeneratedCodeInfo_Annotation__fields[4] = {
{1, UPB_SIZE(20, 32), 0, 0, 5, 3},
{2, UPB_SIZE(12, 16), 3, 0, 9, 1},
{3, UPB_SIZE(4, 4), 1, 0, 5, 1},
{4, UPB_SIZE(8, 8), 2, 0, 5, 1},
};
const upb_msglayout google_protobuf_GeneratedCodeInfo_Annotation_msginit = {
NULL,
&google_protobuf_GeneratedCodeInfo_Annotation__fields[0],
UPB_SIZE(24, 48), 4, false,
};
#include "upb/port_undef.inc"

File diff suppressed because it is too large

@ -0,0 +1,27 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/duration.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#include <stddef.h>
#include "upb/msg.h"
#include "google/protobuf/duration.upb.h"
#include "upb/port_def.inc"
static const upb_msglayout_field google_protobuf_Duration__fields[2] = {
{1, UPB_SIZE(0, 0), 0, 0, 3, 1},
{2, UPB_SIZE(8, 8), 0, 0, 5, 1},
};
const upb_msglayout google_protobuf_Duration_msginit = {
NULL,
&google_protobuf_Duration__fields[0],
UPB_SIZE(16, 16), 2, false,
};
#include "upb/port_undef.inc"

@ -0,0 +1,59 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/duration.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#ifndef GOOGLE_PROTOBUF_DURATION_PROTO_UPB_H_
#define GOOGLE_PROTOBUF_DURATION_PROTO_UPB_H_
#include "upb/generated_util.h"
#include "upb/msg.h"
#include "upb/decode.h"
#include "upb/encode.h"
#include "upb/port_def.inc"
#ifdef __cplusplus
extern "C" {
#endif
struct google_protobuf_Duration;
typedef struct google_protobuf_Duration google_protobuf_Duration;
extern const upb_msglayout google_protobuf_Duration_msginit;
/* Enums */
/* google.protobuf.Duration */
UPB_INLINE google_protobuf_Duration *google_protobuf_Duration_new(upb_arena *arena) {
return (google_protobuf_Duration *)upb_msg_new(&google_protobuf_Duration_msginit, arena);
}
UPB_INLINE google_protobuf_Duration *google_protobuf_Duration_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_Duration *ret = google_protobuf_Duration_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_Duration_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_Duration_serialize(const google_protobuf_Duration *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_Duration_msginit, arena, len);
}
UPB_INLINE int64_t google_protobuf_Duration_seconds(const google_protobuf_Duration *msg) { return UPB_FIELD_AT(msg, int64_t, UPB_SIZE(0, 0)); }
UPB_INLINE int32_t google_protobuf_Duration_nanos(const google_protobuf_Duration *msg) { return UPB_FIELD_AT(msg, int32_t, UPB_SIZE(8, 8)); }
UPB_INLINE void google_protobuf_Duration_set_seconds(google_protobuf_Duration *msg, int64_t value) {
UPB_FIELD_AT(msg, int64_t, UPB_SIZE(0, 0)) = value;
}
UPB_INLINE void google_protobuf_Duration_set_nanos(google_protobuf_Duration *msg, int32_t value) {
UPB_FIELD_AT(msg, int32_t, UPB_SIZE(8, 8)) = value;
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#include "upb/port_undef.inc"
#endif /* GOOGLE_PROTOBUF_DURATION_PROTO_UPB_H_ */

@ -0,0 +1,79 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/struct.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#include <stddef.h>
#include "upb/msg.h"
#include "google/protobuf/struct.upb.h"
#include "upb/port_def.inc"
static const upb_msglayout *const google_protobuf_Struct_submsgs[1] = {
&google_protobuf_Struct_FieldsEntry_msginit,
};
static const upb_msglayout_field google_protobuf_Struct__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_Struct_msginit = {
&google_protobuf_Struct_submsgs[0],
&google_protobuf_Struct__fields[0],
UPB_SIZE(4, 8), 1, false,
};
static const upb_msglayout *const google_protobuf_Struct_FieldsEntry_submsgs[1] = {
&google_protobuf_Value_msginit,
};
static const upb_msglayout_field google_protobuf_Struct_FieldsEntry__fields[2] = {
{1, UPB_SIZE(0, 0), 0, 0, 9, 1},
{2, UPB_SIZE(8, 16), 0, 0, 11, 1},
};
const upb_msglayout google_protobuf_Struct_FieldsEntry_msginit = {
&google_protobuf_Struct_FieldsEntry_submsgs[0],
&google_protobuf_Struct_FieldsEntry__fields[0],
UPB_SIZE(16, 32), 2, false,
};
static const upb_msglayout *const google_protobuf_Value_submsgs[2] = {
&google_protobuf_ListValue_msginit,
&google_protobuf_Struct_msginit,
};
static const upb_msglayout_field google_protobuf_Value__fields[6] = {
{1, UPB_SIZE(0, 0), UPB_SIZE(-9, -17), 0, 14, 1},
{2, UPB_SIZE(0, 0), UPB_SIZE(-9, -17), 0, 1, 1},
{3, UPB_SIZE(0, 0), UPB_SIZE(-9, -17), 0, 9, 1},
{4, UPB_SIZE(0, 0), UPB_SIZE(-9, -17), 0, 8, 1},
{5, UPB_SIZE(0, 0), UPB_SIZE(-9, -17), 1, 11, 1},
{6, UPB_SIZE(0, 0), UPB_SIZE(-9, -17), 0, 11, 1},
};
const upb_msglayout google_protobuf_Value_msginit = {
&google_protobuf_Value_submsgs[0],
&google_protobuf_Value__fields[0],
UPB_SIZE(16, 32), 6, false,
};
static const upb_msglayout *const google_protobuf_ListValue_submsgs[1] = {
&google_protobuf_Value_msginit,
};
static const upb_msglayout_field google_protobuf_ListValue__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 11, 3},
};
const upb_msglayout google_protobuf_ListValue_msginit = {
&google_protobuf_ListValue_submsgs[0],
&google_protobuf_ListValue__fields[0],
UPB_SIZE(4, 8), 1, false,
};
#include "upb/port_undef.inc"

@ -0,0 +1,216 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/struct.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#ifndef GOOGLE_PROTOBUF_STRUCT_PROTO_UPB_H_
#define GOOGLE_PROTOBUF_STRUCT_PROTO_UPB_H_
#include "upb/generated_util.h"
#include "upb/msg.h"
#include "upb/decode.h"
#include "upb/encode.h"
#include "upb/port_def.inc"
#ifdef __cplusplus
extern "C" {
#endif
struct google_protobuf_Struct;
struct google_protobuf_Struct_FieldsEntry;
struct google_protobuf_Value;
struct google_protobuf_ListValue;
typedef struct google_protobuf_Struct google_protobuf_Struct;
typedef struct google_protobuf_Struct_FieldsEntry google_protobuf_Struct_FieldsEntry;
typedef struct google_protobuf_Value google_protobuf_Value;
typedef struct google_protobuf_ListValue google_protobuf_ListValue;
extern const upb_msglayout google_protobuf_Struct_msginit;
extern const upb_msglayout google_protobuf_Struct_FieldsEntry_msginit;
extern const upb_msglayout google_protobuf_Value_msginit;
extern const upb_msglayout google_protobuf_ListValue_msginit;
/* Enums */
typedef enum {
google_protobuf_NULL_VALUE = 0
} google_protobuf_NullValue;
/* google.protobuf.Struct */
UPB_INLINE google_protobuf_Struct *google_protobuf_Struct_new(upb_arena *arena) {
return (google_protobuf_Struct *)upb_msg_new(&google_protobuf_Struct_msginit, arena);
}
UPB_INLINE google_protobuf_Struct *google_protobuf_Struct_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_Struct *ret = google_protobuf_Struct_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_Struct_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_Struct_serialize(const google_protobuf_Struct *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_Struct_msginit, arena, len);
}
UPB_INLINE const google_protobuf_Struct_FieldsEntry* const* google_protobuf_Struct_fields(const google_protobuf_Struct *msg, size_t *len) { return (const google_protobuf_Struct_FieldsEntry* const*)_upb_array_accessor(msg, UPB_SIZE(0, 0), len); }
UPB_INLINE google_protobuf_Struct_FieldsEntry** google_protobuf_Struct_mutable_fields(google_protobuf_Struct *msg, size_t *len) {
return (google_protobuf_Struct_FieldsEntry**)_upb_array_mutable_accessor(msg, UPB_SIZE(0, 0), len);
}
UPB_INLINE google_protobuf_Struct_FieldsEntry** google_protobuf_Struct_resize_fields(google_protobuf_Struct *msg, size_t len, upb_arena *arena) {
return (google_protobuf_Struct_FieldsEntry**)_upb_array_resize_accessor(msg, UPB_SIZE(0, 0), len, UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, arena);
}
UPB_INLINE struct google_protobuf_Struct_FieldsEntry* google_protobuf_Struct_add_fields(google_protobuf_Struct *msg, upb_arena *arena) {
struct google_protobuf_Struct_FieldsEntry* sub = (struct google_protobuf_Struct_FieldsEntry*)upb_msg_new(&google_protobuf_Struct_FieldsEntry_msginit, arena);
bool ok = _upb_array_append_accessor(
msg, UPB_SIZE(0, 0), UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, &sub, arena);
if (!ok) return NULL;
return sub;
}
/* google.protobuf.Struct.FieldsEntry */
UPB_INLINE google_protobuf_Struct_FieldsEntry *google_protobuf_Struct_FieldsEntry_new(upb_arena *arena) {
return (google_protobuf_Struct_FieldsEntry *)upb_msg_new(&google_protobuf_Struct_FieldsEntry_msginit, arena);
}
UPB_INLINE google_protobuf_Struct_FieldsEntry *google_protobuf_Struct_FieldsEntry_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_Struct_FieldsEntry *ret = google_protobuf_Struct_FieldsEntry_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_Struct_FieldsEntry_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_Struct_FieldsEntry_serialize(const google_protobuf_Struct_FieldsEntry *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_Struct_FieldsEntry_msginit, arena, len);
}
UPB_INLINE upb_strview google_protobuf_Struct_FieldsEntry_key(const google_protobuf_Struct_FieldsEntry *msg) { return UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(0, 0)); }
UPB_INLINE const google_protobuf_Value* google_protobuf_Struct_FieldsEntry_value(const google_protobuf_Struct_FieldsEntry *msg) { return UPB_FIELD_AT(msg, const google_protobuf_Value*, UPB_SIZE(8, 16)); }
UPB_INLINE void google_protobuf_Struct_FieldsEntry_set_key(google_protobuf_Struct_FieldsEntry *msg, upb_strview value) {
UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(0, 0)) = value;
}
UPB_INLINE void google_protobuf_Struct_FieldsEntry_set_value(google_protobuf_Struct_FieldsEntry *msg, google_protobuf_Value* value) {
UPB_FIELD_AT(msg, google_protobuf_Value*, UPB_SIZE(8, 16)) = value;
}
UPB_INLINE struct google_protobuf_Value* google_protobuf_Struct_FieldsEntry_mutable_value(google_protobuf_Struct_FieldsEntry *msg, upb_arena *arena) {
struct google_protobuf_Value* sub = (struct google_protobuf_Value*)google_protobuf_Struct_FieldsEntry_value(msg);
if (sub == NULL) {
sub = (struct google_protobuf_Value*)upb_msg_new(&google_protobuf_Value_msginit, arena);
if (!sub) return NULL;
google_protobuf_Struct_FieldsEntry_set_value(msg, sub);
}
return sub;
}
/* google.protobuf.Value */
UPB_INLINE google_protobuf_Value *google_protobuf_Value_new(upb_arena *arena) {
return (google_protobuf_Value *)upb_msg_new(&google_protobuf_Value_msginit, arena);
}
UPB_INLINE google_protobuf_Value *google_protobuf_Value_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_Value *ret = google_protobuf_Value_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_Value_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_Value_serialize(const google_protobuf_Value *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_Value_msginit, arena, len);
}
typedef enum {
google_protobuf_Value_kind_null_value = 1,
google_protobuf_Value_kind_number_value = 2,
google_protobuf_Value_kind_string_value = 3,
google_protobuf_Value_kind_bool_value = 4,
google_protobuf_Value_kind_struct_value = 5,
google_protobuf_Value_kind_list_value = 6,
google_protobuf_Value_kind_NOT_SET = 0,
} google_protobuf_Value_kind_oneofcases;
UPB_INLINE google_protobuf_Value_kind_oneofcases google_protobuf_Value_kind_case(const google_protobuf_Value* msg) { return UPB_FIELD_AT(msg, int, UPB_SIZE(8, 16)); }
UPB_INLINE bool google_protobuf_Value_has_null_value(const google_protobuf_Value *msg) { return _upb_has_oneof_field(msg, UPB_SIZE(8, 16), 1); }
UPB_INLINE google_protobuf_NullValue google_protobuf_Value_null_value(const google_protobuf_Value *msg) { return UPB_READ_ONEOF(msg, google_protobuf_NullValue, UPB_SIZE(0, 0), UPB_SIZE(8, 16), 1, google_protobuf_NULL_VALUE); }
UPB_INLINE bool google_protobuf_Value_has_number_value(const google_protobuf_Value *msg) { return _upb_has_oneof_field(msg, UPB_SIZE(8, 16), 2); }
UPB_INLINE double google_protobuf_Value_number_value(const google_protobuf_Value *msg) { return UPB_READ_ONEOF(msg, double, UPB_SIZE(0, 0), UPB_SIZE(8, 16), 2, 0); }
UPB_INLINE bool google_protobuf_Value_has_string_value(const google_protobuf_Value *msg) { return _upb_has_oneof_field(msg, UPB_SIZE(8, 16), 3); }
UPB_INLINE upb_strview google_protobuf_Value_string_value(const google_protobuf_Value *msg) { return UPB_READ_ONEOF(msg, upb_strview, UPB_SIZE(0, 0), UPB_SIZE(8, 16), 3, upb_strview_make("", strlen(""))); }
UPB_INLINE bool google_protobuf_Value_has_bool_value(const google_protobuf_Value *msg) { return _upb_has_oneof_field(msg, UPB_SIZE(8, 16), 4); }
UPB_INLINE bool google_protobuf_Value_bool_value(const google_protobuf_Value *msg) { return UPB_READ_ONEOF(msg, bool, UPB_SIZE(0, 0), UPB_SIZE(8, 16), 4, false); }
UPB_INLINE bool google_protobuf_Value_has_struct_value(const google_protobuf_Value *msg) { return _upb_has_oneof_field(msg, UPB_SIZE(8, 16), 5); }
UPB_INLINE const google_protobuf_Struct* google_protobuf_Value_struct_value(const google_protobuf_Value *msg) { return UPB_READ_ONEOF(msg, const google_protobuf_Struct*, UPB_SIZE(0, 0), UPB_SIZE(8, 16), 5, NULL); }
UPB_INLINE bool google_protobuf_Value_has_list_value(const google_protobuf_Value *msg) { return _upb_has_oneof_field(msg, UPB_SIZE(8, 16), 6); }
UPB_INLINE const google_protobuf_ListValue* google_protobuf_Value_list_value(const google_protobuf_Value *msg) { return UPB_READ_ONEOF(msg, const google_protobuf_ListValue*, UPB_SIZE(0, 0), UPB_SIZE(8, 16), 6, NULL); }
UPB_INLINE void google_protobuf_Value_set_null_value(google_protobuf_Value *msg, google_protobuf_NullValue value) {
UPB_WRITE_ONEOF(msg, google_protobuf_NullValue, UPB_SIZE(0, 0), value, UPB_SIZE(8, 16), 1);
}
UPB_INLINE void google_protobuf_Value_set_number_value(google_protobuf_Value *msg, double value) {
UPB_WRITE_ONEOF(msg, double, UPB_SIZE(0, 0), value, UPB_SIZE(8, 16), 2);
}
UPB_INLINE void google_protobuf_Value_set_string_value(google_protobuf_Value *msg, upb_strview value) {
UPB_WRITE_ONEOF(msg, upb_strview, UPB_SIZE(0, 0), value, UPB_SIZE(8, 16), 3);
}
UPB_INLINE void google_protobuf_Value_set_bool_value(google_protobuf_Value *msg, bool value) {
UPB_WRITE_ONEOF(msg, bool, UPB_SIZE(0, 0), value, UPB_SIZE(8, 16), 4);
}
UPB_INLINE void google_protobuf_Value_set_struct_value(google_protobuf_Value *msg, google_protobuf_Struct* value) {
UPB_WRITE_ONEOF(msg, google_protobuf_Struct*, UPB_SIZE(0, 0), value, UPB_SIZE(8, 16), 5);
}
UPB_INLINE struct google_protobuf_Struct* google_protobuf_Value_mutable_struct_value(google_protobuf_Value *msg, upb_arena *arena) {
struct google_protobuf_Struct* sub = (struct google_protobuf_Struct*)google_protobuf_Value_struct_value(msg);
if (sub == NULL) {
sub = (struct google_protobuf_Struct*)upb_msg_new(&google_protobuf_Struct_msginit, arena);
if (!sub) return NULL;
google_protobuf_Value_set_struct_value(msg, sub);
}
return sub;
}
UPB_INLINE void google_protobuf_Value_set_list_value(google_protobuf_Value *msg, google_protobuf_ListValue* value) {
UPB_WRITE_ONEOF(msg, google_protobuf_ListValue*, UPB_SIZE(0, 0), value, UPB_SIZE(8, 16), 6);
}
UPB_INLINE struct google_protobuf_ListValue* google_protobuf_Value_mutable_list_value(google_protobuf_Value *msg, upb_arena *arena) {
struct google_protobuf_ListValue* sub = (struct google_protobuf_ListValue*)google_protobuf_Value_list_value(msg);
if (sub == NULL) {
sub = (struct google_protobuf_ListValue*)upb_msg_new(&google_protobuf_ListValue_msginit, arena);
if (!sub) return NULL;
google_protobuf_Value_set_list_value(msg, sub);
}
return sub;
}
/* google.protobuf.ListValue */
UPB_INLINE google_protobuf_ListValue *google_protobuf_ListValue_new(upb_arena *arena) {
return (google_protobuf_ListValue *)upb_msg_new(&google_protobuf_ListValue_msginit, arena);
}
UPB_INLINE google_protobuf_ListValue *google_protobuf_ListValue_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_ListValue *ret = google_protobuf_ListValue_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_ListValue_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_ListValue_serialize(const google_protobuf_ListValue *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_ListValue_msginit, arena, len);
}
UPB_INLINE const google_protobuf_Value* const* google_protobuf_ListValue_values(const google_protobuf_ListValue *msg, size_t *len) { return (const google_protobuf_Value* const*)_upb_array_accessor(msg, UPB_SIZE(0, 0), len); }
UPB_INLINE google_protobuf_Value** google_protobuf_ListValue_mutable_values(google_protobuf_ListValue *msg, size_t *len) {
return (google_protobuf_Value**)_upb_array_mutable_accessor(msg, UPB_SIZE(0, 0), len);
}
UPB_INLINE google_protobuf_Value** google_protobuf_ListValue_resize_values(google_protobuf_ListValue *msg, size_t len, upb_arena *arena) {
return (google_protobuf_Value**)_upb_array_resize_accessor(msg, UPB_SIZE(0, 0), len, UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, arena);
}
UPB_INLINE struct google_protobuf_Value* google_protobuf_ListValue_add_values(google_protobuf_ListValue *msg, upb_arena *arena) {
struct google_protobuf_Value* sub = (struct google_protobuf_Value*)upb_msg_new(&google_protobuf_Value_msginit, arena);
bool ok = _upb_array_append_accessor(
msg, UPB_SIZE(0, 0), UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, &sub, arena);
if (!ok) return NULL;
return sub;
}
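/* Editor's sketch (not part of the generated header): one way the accessors
above might be combined to build a Value holding a two-element list. The
helper name is illustrative, the arena is assumed to come from
upb_arena_new(), and failures simply return false. */
UPB_INLINE bool google_protobuf_Value_example_fill_list(google_protobuf_Value *msg, upb_arena *arena) {
google_protobuf_ListValue *list = google_protobuf_Value_mutable_list_value(msg, arena);
if (list == NULL) return false;
google_protobuf_Value *first = google_protobuf_ListValue_add_values(list, arena);
google_protobuf_Value *second = google_protobuf_ListValue_add_values(list, arena);
if (first == NULL || second == NULL) return false;
google_protobuf_Value_set_number_value(first, 1.0);
google_protobuf_Value_set_string_value(second, upb_strview_make("two", 3));
return true;
}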
#ifdef __cplusplus
} /* extern "C" */
#endif
#include "upb/port_undef.inc"
#endif /* GOOGLE_PROTOBUF_STRUCT_PROTO_UPB_H_ */

@ -0,0 +1,27 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/timestamp.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#include <stddef.h>
#include "upb/msg.h"
#include "google/protobuf/timestamp.upb.h"
#include "upb/port_def.inc"
static const upb_msglayout_field google_protobuf_Timestamp__fields[2] = {
{1, UPB_SIZE(0, 0), 0, 0, 3, 1},
{2, UPB_SIZE(8, 8), 0, 0, 5, 1},
};
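/* Editor's note (assumption, not from upb documentation): each
upb_msglayout_field entry above appears to read {field number, offset,
presence, submessage index, descriptor type, label}; e.g. {1, UPB_SIZE(0, 0),
0, 0, 3, 1} would describe field 1 (seconds), an optional INT64 (descriptor
type 3) stored at offset 0. */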
const upb_msglayout google_protobuf_Timestamp_msginit = {
NULL,
&google_protobuf_Timestamp__fields[0],
UPB_SIZE(16, 16), 2, false,
};
#include "upb/port_undef.inc"

@ -0,0 +1,59 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/timestamp.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#ifndef GOOGLE_PROTOBUF_TIMESTAMP_PROTO_UPB_H_
#define GOOGLE_PROTOBUF_TIMESTAMP_PROTO_UPB_H_
#include "upb/generated_util.h"
#include "upb/msg.h"
#include "upb/decode.h"
#include "upb/encode.h"
#include "upb/port_def.inc"
#ifdef __cplusplus
extern "C" {
#endif
struct google_protobuf_Timestamp;
typedef struct google_protobuf_Timestamp google_protobuf_Timestamp;
extern const upb_msglayout google_protobuf_Timestamp_msginit;
/* Enums */
/* google.protobuf.Timestamp */
UPB_INLINE google_protobuf_Timestamp *google_protobuf_Timestamp_new(upb_arena *arena) {
return (google_protobuf_Timestamp *)upb_msg_new(&google_protobuf_Timestamp_msginit, arena);
}
UPB_INLINE google_protobuf_Timestamp *google_protobuf_Timestamp_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_Timestamp *ret = google_protobuf_Timestamp_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_Timestamp_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_Timestamp_serialize(const google_protobuf_Timestamp *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_Timestamp_msginit, arena, len);
}
UPB_INLINE int64_t google_protobuf_Timestamp_seconds(const google_protobuf_Timestamp *msg) { return UPB_FIELD_AT(msg, int64_t, UPB_SIZE(0, 0)); }
UPB_INLINE int32_t google_protobuf_Timestamp_nanos(const google_protobuf_Timestamp *msg) { return UPB_FIELD_AT(msg, int32_t, UPB_SIZE(8, 8)); }
UPB_INLINE void google_protobuf_Timestamp_set_seconds(google_protobuf_Timestamp *msg, int64_t value) {
UPB_FIELD_AT(msg, int64_t, UPB_SIZE(0, 0)) = value;
}
UPB_INLINE void google_protobuf_Timestamp_set_nanos(google_protobuf_Timestamp *msg, int32_t value) {
UPB_FIELD_AT(msg, int32_t, UPB_SIZE(8, 8)) = value;
}
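/* Editor's sketch (not part of the generated header): a decode/re-encode
round trip with the helpers above. The helper name is illustrative; the arena
is assumed to come from upb_arena_new(), and NULL signals a parse or encode
failure. */
UPB_INLINE char *google_protobuf_Timestamp_example_truncate(upb_strview buf, upb_arena *arena, size_t *len) {
google_protobuf_Timestamp *ts = google_protobuf_Timestamp_parsenew(buf, arena);
if (ts == NULL) return NULL;
google_protobuf_Timestamp_set_nanos(ts, 0); /* drop sub-second precision */
return google_protobuf_Timestamp_serialize(ts, arena, len);
}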
#ifdef __cplusplus
} /* extern "C" */
#endif
#include "upb/port_undef.inc"
#endif /* GOOGLE_PROTOBUF_TIMESTAMP_PROTO_UPB_H_ */

@ -0,0 +1,106 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/wrappers.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#include <stddef.h>
#include "upb/msg.h"
#include "google/protobuf/wrappers.upb.h"
#include "upb/port_def.inc"
static const upb_msglayout_field google_protobuf_DoubleValue__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 1, 1},
};
const upb_msglayout google_protobuf_DoubleValue_msginit = {
NULL,
&google_protobuf_DoubleValue__fields[0],
UPB_SIZE(8, 8), 1, false,
};
static const upb_msglayout_field google_protobuf_FloatValue__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 2, 1},
};
const upb_msglayout google_protobuf_FloatValue_msginit = {
NULL,
&google_protobuf_FloatValue__fields[0],
UPB_SIZE(4, 4), 1, false,
};
static const upb_msglayout_field google_protobuf_Int64Value__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 3, 1},
};
const upb_msglayout google_protobuf_Int64Value_msginit = {
NULL,
&google_protobuf_Int64Value__fields[0],
UPB_SIZE(8, 8), 1, false,
};
static const upb_msglayout_field google_protobuf_UInt64Value__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 4, 1},
};
const upb_msglayout google_protobuf_UInt64Value_msginit = {
NULL,
&google_protobuf_UInt64Value__fields[0],
UPB_SIZE(8, 8), 1, false,
};
static const upb_msglayout_field google_protobuf_Int32Value__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 5, 1},
};
const upb_msglayout google_protobuf_Int32Value_msginit = {
NULL,
&google_protobuf_Int32Value__fields[0],
UPB_SIZE(4, 4), 1, false,
};
static const upb_msglayout_field google_protobuf_UInt32Value__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 13, 1},
};
const upb_msglayout google_protobuf_UInt32Value_msginit = {
NULL,
&google_protobuf_UInt32Value__fields[0],
UPB_SIZE(4, 4), 1, false,
};
static const upb_msglayout_field google_protobuf_BoolValue__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 8, 1},
};
const upb_msglayout google_protobuf_BoolValue_msginit = {
NULL,
&google_protobuf_BoolValue__fields[0],
UPB_SIZE(1, 1), 1, false,
};
static const upb_msglayout_field google_protobuf_StringValue__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 9, 1},
};
const upb_msglayout google_protobuf_StringValue_msginit = {
NULL,
&google_protobuf_StringValue__fields[0],
UPB_SIZE(8, 16), 1, false,
};
static const upb_msglayout_field google_protobuf_BytesValue__fields[1] = {
{1, UPB_SIZE(0, 0), 0, 0, 12, 1},
};
const upb_msglayout google_protobuf_BytesValue_msginit = {
NULL,
&google_protobuf_BytesValue__fields[0],
UPB_SIZE(8, 16), 1, false,
};
#include "upb/port_undef.inc"

@ -0,0 +1,239 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* google/protobuf/wrappers.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#ifndef GOOGLE_PROTOBUF_WRAPPERS_PROTO_UPB_H_
#define GOOGLE_PROTOBUF_WRAPPERS_PROTO_UPB_H_
#include "upb/generated_util.h"
#include "upb/msg.h"
#include "upb/decode.h"
#include "upb/encode.h"
#include "upb/port_def.inc"
#ifdef __cplusplus
extern "C" {
#endif
struct google_protobuf_DoubleValue;
struct google_protobuf_FloatValue;
struct google_protobuf_Int64Value;
struct google_protobuf_UInt64Value;
struct google_protobuf_Int32Value;
struct google_protobuf_UInt32Value;
struct google_protobuf_BoolValue;
struct google_protobuf_StringValue;
struct google_protobuf_BytesValue;
typedef struct google_protobuf_DoubleValue google_protobuf_DoubleValue;
typedef struct google_protobuf_FloatValue google_protobuf_FloatValue;
typedef struct google_protobuf_Int64Value google_protobuf_Int64Value;
typedef struct google_protobuf_UInt64Value google_protobuf_UInt64Value;
typedef struct google_protobuf_Int32Value google_protobuf_Int32Value;
typedef struct google_protobuf_UInt32Value google_protobuf_UInt32Value;
typedef struct google_protobuf_BoolValue google_protobuf_BoolValue;
typedef struct google_protobuf_StringValue google_protobuf_StringValue;
typedef struct google_protobuf_BytesValue google_protobuf_BytesValue;
extern const upb_msglayout google_protobuf_DoubleValue_msginit;
extern const upb_msglayout google_protobuf_FloatValue_msginit;
extern const upb_msglayout google_protobuf_Int64Value_msginit;
extern const upb_msglayout google_protobuf_UInt64Value_msginit;
extern const upb_msglayout google_protobuf_Int32Value_msginit;
extern const upb_msglayout google_protobuf_UInt32Value_msginit;
extern const upb_msglayout google_protobuf_BoolValue_msginit;
extern const upb_msglayout google_protobuf_StringValue_msginit;
extern const upb_msglayout google_protobuf_BytesValue_msginit;
/* Enums */
/* google.protobuf.DoubleValue */
UPB_INLINE google_protobuf_DoubleValue *google_protobuf_DoubleValue_new(upb_arena *arena) {
return (google_protobuf_DoubleValue *)upb_msg_new(&google_protobuf_DoubleValue_msginit, arena);
}
UPB_INLINE google_protobuf_DoubleValue *google_protobuf_DoubleValue_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_DoubleValue *ret = google_protobuf_DoubleValue_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_DoubleValue_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_DoubleValue_serialize(const google_protobuf_DoubleValue *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_DoubleValue_msginit, arena, len);
}
UPB_INLINE double google_protobuf_DoubleValue_value(const google_protobuf_DoubleValue *msg) { return UPB_FIELD_AT(msg, double, UPB_SIZE(0, 0)); }
UPB_INLINE void google_protobuf_DoubleValue_set_value(google_protobuf_DoubleValue *msg, double value) {
UPB_FIELD_AT(msg, double, UPB_SIZE(0, 0)) = value;
}
/* google.protobuf.FloatValue */
UPB_INLINE google_protobuf_FloatValue *google_protobuf_FloatValue_new(upb_arena *arena) {
return (google_protobuf_FloatValue *)upb_msg_new(&google_protobuf_FloatValue_msginit, arena);
}
UPB_INLINE google_protobuf_FloatValue *google_protobuf_FloatValue_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_FloatValue *ret = google_protobuf_FloatValue_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_FloatValue_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_FloatValue_serialize(const google_protobuf_FloatValue *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_FloatValue_msginit, arena, len);
}
UPB_INLINE float google_protobuf_FloatValue_value(const google_protobuf_FloatValue *msg) { return UPB_FIELD_AT(msg, float, UPB_SIZE(0, 0)); }
UPB_INLINE void google_protobuf_FloatValue_set_value(google_protobuf_FloatValue *msg, float value) {
UPB_FIELD_AT(msg, float, UPB_SIZE(0, 0)) = value;
}
/* google.protobuf.Int64Value */
UPB_INLINE google_protobuf_Int64Value *google_protobuf_Int64Value_new(upb_arena *arena) {
return (google_protobuf_Int64Value *)upb_msg_new(&google_protobuf_Int64Value_msginit, arena);
}
UPB_INLINE google_protobuf_Int64Value *google_protobuf_Int64Value_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_Int64Value *ret = google_protobuf_Int64Value_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_Int64Value_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_Int64Value_serialize(const google_protobuf_Int64Value *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_Int64Value_msginit, arena, len);
}
UPB_INLINE int64_t google_protobuf_Int64Value_value(const google_protobuf_Int64Value *msg) { return UPB_FIELD_AT(msg, int64_t, UPB_SIZE(0, 0)); }
UPB_INLINE void google_protobuf_Int64Value_set_value(google_protobuf_Int64Value *msg, int64_t value) {
UPB_FIELD_AT(msg, int64_t, UPB_SIZE(0, 0)) = value;
}
/* google.protobuf.UInt64Value */
UPB_INLINE google_protobuf_UInt64Value *google_protobuf_UInt64Value_new(upb_arena *arena) {
return (google_protobuf_UInt64Value *)upb_msg_new(&google_protobuf_UInt64Value_msginit, arena);
}
UPB_INLINE google_protobuf_UInt64Value *google_protobuf_UInt64Value_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_UInt64Value *ret = google_protobuf_UInt64Value_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_UInt64Value_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_UInt64Value_serialize(const google_protobuf_UInt64Value *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_UInt64Value_msginit, arena, len);
}
UPB_INLINE uint64_t google_protobuf_UInt64Value_value(const google_protobuf_UInt64Value *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)); }
UPB_INLINE void google_protobuf_UInt64Value_set_value(google_protobuf_UInt64Value *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)) = value;
}
/* google.protobuf.Int32Value */
UPB_INLINE google_protobuf_Int32Value *google_protobuf_Int32Value_new(upb_arena *arena) {
return (google_protobuf_Int32Value *)upb_msg_new(&google_protobuf_Int32Value_msginit, arena);
}
UPB_INLINE google_protobuf_Int32Value *google_protobuf_Int32Value_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_Int32Value *ret = google_protobuf_Int32Value_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_Int32Value_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_Int32Value_serialize(const google_protobuf_Int32Value *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_Int32Value_msginit, arena, len);
}
UPB_INLINE int32_t google_protobuf_Int32Value_value(const google_protobuf_Int32Value *msg) { return UPB_FIELD_AT(msg, int32_t, UPB_SIZE(0, 0)); }
UPB_INLINE void google_protobuf_Int32Value_set_value(google_protobuf_Int32Value *msg, int32_t value) {
UPB_FIELD_AT(msg, int32_t, UPB_SIZE(0, 0)) = value;
}
/* google.protobuf.UInt32Value */
UPB_INLINE google_protobuf_UInt32Value *google_protobuf_UInt32Value_new(upb_arena *arena) {
return (google_protobuf_UInt32Value *)upb_msg_new(&google_protobuf_UInt32Value_msginit, arena);
}
UPB_INLINE google_protobuf_UInt32Value *google_protobuf_UInt32Value_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_UInt32Value *ret = google_protobuf_UInt32Value_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_UInt32Value_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_UInt32Value_serialize(const google_protobuf_UInt32Value *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_UInt32Value_msginit, arena, len);
}
UPB_INLINE uint32_t google_protobuf_UInt32Value_value(const google_protobuf_UInt32Value *msg) { return UPB_FIELD_AT(msg, uint32_t, UPB_SIZE(0, 0)); }
UPB_INLINE void google_protobuf_UInt32Value_set_value(google_protobuf_UInt32Value *msg, uint32_t value) {
UPB_FIELD_AT(msg, uint32_t, UPB_SIZE(0, 0)) = value;
}
/* google.protobuf.BoolValue */
UPB_INLINE google_protobuf_BoolValue *google_protobuf_BoolValue_new(upb_arena *arena) {
return (google_protobuf_BoolValue *)upb_msg_new(&google_protobuf_BoolValue_msginit, arena);
}
UPB_INLINE google_protobuf_BoolValue *google_protobuf_BoolValue_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_BoolValue *ret = google_protobuf_BoolValue_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_BoolValue_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_BoolValue_serialize(const google_protobuf_BoolValue *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_BoolValue_msginit, arena, len);
}
UPB_INLINE bool google_protobuf_BoolValue_value(const google_protobuf_BoolValue *msg) { return UPB_FIELD_AT(msg, bool, UPB_SIZE(0, 0)); }
UPB_INLINE void google_protobuf_BoolValue_set_value(google_protobuf_BoolValue *msg, bool value) {
UPB_FIELD_AT(msg, bool, UPB_SIZE(0, 0)) = value;
}
/* google.protobuf.StringValue */
UPB_INLINE google_protobuf_StringValue *google_protobuf_StringValue_new(upb_arena *arena) {
return (google_protobuf_StringValue *)upb_msg_new(&google_protobuf_StringValue_msginit, arena);
}
UPB_INLINE google_protobuf_StringValue *google_protobuf_StringValue_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_StringValue *ret = google_protobuf_StringValue_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_StringValue_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_StringValue_serialize(const google_protobuf_StringValue *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_StringValue_msginit, arena, len);
}
UPB_INLINE upb_strview google_protobuf_StringValue_value(const google_protobuf_StringValue *msg) { return UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(0, 0)); }
UPB_INLINE void google_protobuf_StringValue_set_value(google_protobuf_StringValue *msg, upb_strview value) {
UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(0, 0)) = value;
}
/* google.protobuf.BytesValue */
UPB_INLINE google_protobuf_BytesValue *google_protobuf_BytesValue_new(upb_arena *arena) {
return (google_protobuf_BytesValue *)upb_msg_new(&google_protobuf_BytesValue_msginit, arena);
}
UPB_INLINE google_protobuf_BytesValue *google_protobuf_BytesValue_parsenew(upb_strview buf, upb_arena *arena) {
google_protobuf_BytesValue *ret = google_protobuf_BytesValue_new(arena);
return (ret && upb_decode(buf, ret, &google_protobuf_BytesValue_msginit)) ? ret : NULL;
}
UPB_INLINE char *google_protobuf_BytesValue_serialize(const google_protobuf_BytesValue *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &google_protobuf_BytesValue_msginit, arena, len);
}
UPB_INLINE upb_strview google_protobuf_BytesValue_value(const google_protobuf_BytesValue *msg) { return UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(0, 0)); }
UPB_INLINE void google_protobuf_BytesValue_set_value(google_protobuf_BytesValue *msg, upb_strview value) {
UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(0, 0)) = value;
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#include "upb/port_undef.inc"
#endif /* GOOGLE_PROTOBUF_WRAPPERS_PROTO_UPB_H_ */

@ -60,7 +60,7 @@ static bool gzip_flate(grpc_stream_compression_context_gzip* ctx,
if (r < 0 && r != Z_BUF_ERROR) {
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(slice_out);
grpc_slice_unref_internal(slice);
return false;
} else if (r == Z_STREAM_END && ctx->flate == inflate) {
eoc = true;

@ -25,7 +25,6 @@
#include <string.h>
#include <unistd.h>
#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
@ -52,7 +51,7 @@ unsigned gpr_cpu_num_cores(void) {
static void delete_thread_id(void* value) {
if (value) {
gpr_free(value);
free(value);
}
}
@ -71,7 +70,10 @@ unsigned gpr_cpu_current_cpu(void) {
unsigned int* thread_id =
static_cast<unsigned int*>(pthread_getspecific(thread_id_key));
if (thread_id == nullptr) {
thread_id = static_cast<unsigned int*>(gpr_malloc(sizeof(unsigned int)));
// Note that we cannot use gpr_malloc here because this allocation can happen
// on the main thread and will only be freed when the main thread exits, which
// would cause our internal memory counters to believe it is a leak.
thread_id = static_cast<unsigned int*>(malloc(sizeof(unsigned int)));
pthread_setspecific(thread_id_key, thread_id);
}

@ -121,7 +121,7 @@ static void append_error(internal_request* req, grpc_error* error) {
}
static void do_read(internal_request* req) {
grpc_endpoint_read(req->ep, &req->incoming, &req->on_read);
grpc_endpoint_read(req->ep, &req->incoming, &req->on_read, /*urgent=*/true);
}
static void on_read(void* user_data, grpc_error* error) {

@ -23,8 +23,8 @@
grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb) {
ep->vtable->read(ep, slices, cb);
grpc_closure* cb, bool urgent) {
ep->vtable->read(ep, slices, cb, urgent);
}
void grpc_endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* slices,

@ -36,7 +36,8 @@ typedef struct grpc_endpoint_vtable grpc_endpoint_vtable;
class Timestamps;
struct grpc_endpoint_vtable {
void (*read)(grpc_endpoint* ep, grpc_slice_buffer* slices, grpc_closure* cb);
void (*read)(grpc_endpoint* ep, grpc_slice_buffer* slices, grpc_closure* cb,
bool urgent);
void (*write)(grpc_endpoint* ep, grpc_slice_buffer* slices, grpc_closure* cb,
void* arg);
void (*add_to_pollset)(grpc_endpoint* ep, grpc_pollset* pollset);
@ -56,7 +57,7 @@ struct grpc_endpoint_vtable {
Valid slices may be placed into \a slices even when the callback is
invoked with error != GRPC_ERROR_NONE. */
void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb);
grpc_closure* cb, bool urgent);
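/* Editor's note: judging from the tcp_read() and httpcli changes elsewhere in
this commit (the httpcli do_read() caller passes urgent=true), the new urgent
flag appears to ask for the read callback to be scheduled promptly even when
the transport believes no bytes are pending, while non-urgent reads simply
wait for POLLIN in that case. */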
char* grpc_endpoint_get_peer(grpc_endpoint* ep);

@ -251,7 +251,7 @@ static void CFStreamReadAllocationDone(void* arg, grpc_error* error) {
}
static void CFStreamRead(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb) {
grpc_closure* cb, bool urgent) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p read (%p, %p) length:%zu", ep_impl,

@ -43,7 +43,6 @@
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
@ -126,7 +125,7 @@ struct grpc_fd {
grpc_fork_fd_list* fork_fd_list;
};
/* True when GRPC_ENABLE_FORK_SUPPORT=1. We do not support fork with poll-cv */
/* True when GRPC_ENABLE_FORK_SUPPORT=1. */
static bool track_fds_for_fork = false;
/* Only used when GRPC_ENABLE_FORK_SUPPORT=1 */
@ -256,56 +255,6 @@ struct grpc_pollset_set {
grpc_fd** fds;
};
/*******************************************************************************
* condition variable polling definitions
*/
#define POLLCV_THREAD_GRACE_MS 1000
#define CV_POLL_PERIOD_MS 1000
#define CV_DEFAULT_TABLE_SIZE 16
typedef struct poll_result {
gpr_refcount refcount;
grpc_cv_node* watchers;
int watchcount;
struct pollfd* fds;
nfds_t nfds;
int retval;
int err;
int completed;
} poll_result;
typedef struct poll_args {
grpc_core::Thread poller_thd;
gpr_cv trigger;
int trigger_set;
bool harvestable;
gpr_cv harvest;
bool joinable;
gpr_cv join;
struct pollfd* fds;
nfds_t nfds;
poll_result* result;
struct poll_args* next;
struct poll_args* prev;
} poll_args;
// This is a 2-tiered cache: we maintain a hash table
// of active poll calls, so we can wait on the result
// of that call. We also maintain freelists of inactive
// poll args and of dead poller threads.
typedef struct poll_hash_table {
poll_args* free_pollers;
poll_args** active_pollers;
poll_args* dead_pollers;
unsigned int size;
unsigned int count;
} poll_hash_table;
// TODO(kpayson64): Eliminate use of global non-POD variables
poll_hash_table poll_cache;
grpc_cv_fd_table g_cvfds;
/*******************************************************************************
* functions to track opened fds. No-ops unless track_fds_for_fork is true.
*/
@ -1363,425 +1312,6 @@ static void pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
gpr_mu_unlock(&pollset_set->mu);
}
/*******************************************************************************
* Condition Variable polling extensions
*/
static void run_poll(void* args);
static void cache_poller_locked(poll_args* args);
static void cache_harvest_locked();
static void cache_insert_locked(poll_args* args) {
uint32_t key = gpr_murmur_hash3(args->fds, args->nfds * sizeof(struct pollfd),
0xDEADBEEF);
key = key % poll_cache.size;
if (poll_cache.active_pollers[key]) {
poll_cache.active_pollers[key]->prev = args;
}
args->next = poll_cache.active_pollers[key];
args->prev = nullptr;
poll_cache.active_pollers[key] = args;
poll_cache.count++;
}
static void init_result(poll_args* pargs) {
pargs->result = static_cast<poll_result*>(gpr_malloc(sizeof(poll_result)));
gpr_ref_init(&pargs->result->refcount, 1);
pargs->result->watchers = nullptr;
pargs->result->watchcount = 0;
pargs->result->fds = static_cast<struct pollfd*>(
gpr_malloc(sizeof(struct pollfd) * pargs->nfds));
memcpy(pargs->result->fds, pargs->fds, sizeof(struct pollfd) * pargs->nfds);
pargs->result->nfds = pargs->nfds;
pargs->result->retval = 0;
pargs->result->err = 0;
pargs->result->completed = 0;
}
// Creates a poll_args object for the given arguments to poll().
// This may return an existing poll_args object from the cache.
static poll_args* get_poller_locked(struct pollfd* fds, nfds_t count) {
uint32_t key =
gpr_murmur_hash3(fds, count * sizeof(struct pollfd), 0xDEADBEEF);
key = key % poll_cache.size;
poll_args* curr = poll_cache.active_pollers[key];
while (curr) {
if (curr->nfds == count &&
memcmp(curr->fds, fds, count * sizeof(struct pollfd)) == 0) {
gpr_free(fds);
return curr;
}
curr = curr->next;
}
if (poll_cache.free_pollers) {
poll_args* pargs = poll_cache.free_pollers;
poll_cache.free_pollers = pargs->next;
if (poll_cache.free_pollers) {
poll_cache.free_pollers->prev = nullptr;
}
pargs->fds = fds;
pargs->nfds = count;
pargs->next = nullptr;
pargs->prev = nullptr;
init_result(pargs);
cache_poller_locked(pargs);
return pargs;
}
poll_args* pargs =
static_cast<poll_args*>(gpr_malloc(sizeof(struct poll_args)));
gpr_cv_init(&pargs->trigger);
gpr_cv_init(&pargs->harvest);
gpr_cv_init(&pargs->join);
pargs->harvestable = false;
pargs->joinable = false;
pargs->fds = fds;
pargs->nfds = count;
pargs->next = nullptr;
pargs->prev = nullptr;
pargs->trigger_set = 0;
init_result(pargs);
cache_poller_locked(pargs);
gpr_ref(&g_cvfds.pollcount);
pargs->poller_thd = grpc_core::Thread("grpc_poller", &run_poll, pargs);
pargs->poller_thd.Start();
return pargs;
}
static void cache_delete_locked(poll_args* args) {
if (!args->prev) {
uint32_t key = gpr_murmur_hash3(
args->fds, args->nfds * sizeof(struct pollfd), 0xDEADBEEF);
key = key % poll_cache.size;
GPR_ASSERT(poll_cache.active_pollers[key] == args);
poll_cache.active_pollers[key] = args->next;
} else {
args->prev->next = args->next;
}
if (args->next) {
args->next->prev = args->prev;
}
poll_cache.count--;
if (poll_cache.free_pollers) {
poll_cache.free_pollers->prev = args;
}
args->prev = nullptr;
args->next = poll_cache.free_pollers;
gpr_free(args->fds);
poll_cache.free_pollers = args;
}
static void cache_poller_locked(poll_args* args) {
if (poll_cache.count + 1 > poll_cache.size / 2) {
poll_args** old_active_pollers = poll_cache.active_pollers;
poll_cache.size = poll_cache.size * 2;
poll_cache.count = 0;
poll_cache.active_pollers =
static_cast<poll_args**>(gpr_malloc(sizeof(void*) * poll_cache.size));
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = nullptr;
}
for (unsigned int i = 0; i < poll_cache.size / 2; i++) {
poll_args* curr = old_active_pollers[i];
poll_args* next = nullptr;
while (curr) {
next = curr->next;
cache_insert_locked(curr);
curr = next;
}
}
gpr_free(old_active_pollers);
}
cache_insert_locked(args);
}
static void cache_destroy_locked(poll_args* args) {
if (args->next) {
args->next->prev = args->prev;
}
if (args->prev) {
args->prev->next = args->next;
} else {
poll_cache.free_pollers = args->next;
}
// Now move this args to the dead poller list for later join
if (poll_cache.dead_pollers != nullptr) {
poll_cache.dead_pollers->prev = args;
}
args->prev = nullptr;
args->next = poll_cache.dead_pollers;
poll_cache.dead_pollers = args;
}
static void cache_harvest_locked() {
while (poll_cache.dead_pollers) {
poll_args* args = poll_cache.dead_pollers;
poll_cache.dead_pollers = poll_cache.dead_pollers->next;
// Keep the list consistent in case new dead pollers get added when we
// release the lock below to wait on joining
if (poll_cache.dead_pollers) {
poll_cache.dead_pollers->prev = nullptr;
}
args->harvestable = true;
gpr_cv_signal(&args->harvest);
while (!args->joinable) {
gpr_cv_wait(&args->join, &g_cvfds.mu,
gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
args->poller_thd.Join();
gpr_cv_destroy(&args->trigger);
gpr_cv_destroy(&args->harvest);
gpr_cv_destroy(&args->join);
gpr_free(args);
}
}
static void decref_poll_result(poll_result* res) {
if (gpr_unref(&res->refcount)) {
GPR_ASSERT(!res->watchers);
gpr_free(res->fds);
gpr_free(res);
}
}
void remove_cvn(grpc_cv_node** head, grpc_cv_node* target) {
if (target->next) {
target->next->prev = target->prev;
}
if (target->prev) {
target->prev->next = target->next;
} else {
*head = target->next;
}
}
gpr_timespec thread_grace;
// Poll in a background thread
static void run_poll(void* args) {
poll_args* pargs = static_cast<poll_args*>(args);
while (1) {
poll_result* result = pargs->result;
int retval = g_cvfds.poll(result->fds, result->nfds, CV_POLL_PERIOD_MS);
gpr_mu_lock(&g_cvfds.mu);
cache_harvest_locked();
if (retval != 0) {
result->completed = 1;
result->retval = retval;
result->err = errno;
grpc_cv_node* watcher = result->watchers;
while (watcher) {
gpr_cv_signal(watcher->cv);
watcher = watcher->next;
}
}
if (result->watchcount == 0 || result->completed) {
cache_delete_locked(pargs);
decref_poll_result(result);
// Leave this polling thread alive for a grace period to do another poll()
// op
gpr_timespec deadline = gpr_now(GPR_CLOCK_MONOTONIC);
deadline = gpr_time_add(deadline, thread_grace);
pargs->trigger_set = 0;
gpr_cv_wait(&pargs->trigger, &g_cvfds.mu, deadline);
cache_harvest_locked();
if (!pargs->trigger_set) {
cache_destroy_locked(pargs);
break;
}
}
gpr_mu_unlock(&g_cvfds.mu);
}
if (gpr_unref(&g_cvfds.pollcount)) {
gpr_cv_signal(&g_cvfds.shutdown_cv);
}
while (!pargs->harvestable) {
gpr_cv_wait(&pargs->harvest, &g_cvfds.mu,
gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
pargs->joinable = true;
gpr_cv_signal(&pargs->join);
gpr_mu_unlock(&g_cvfds.mu);
}
// This function overrides poll() to handle condition variable wakeup fds
static int cvfd_poll(struct pollfd* fds, nfds_t nfds, int timeout) {
if (timeout == 0) {
// Don't bother using background threads for polling if timeout is 0;
// otherwise poll-cv might not wait for a poll to return.
// https://github.com/grpc/grpc/issues/13298
return poll(fds, nfds, 0);
}
unsigned int i;
int res, idx;
grpc_cv_node* pollcv;
int skip_poll = 0;
nfds_t nsockfds = 0;
poll_result* result = nullptr;
gpr_mu_lock(&g_cvfds.mu);
cache_harvest_locked();
pollcv = static_cast<grpc_cv_node*>(gpr_malloc(sizeof(grpc_cv_node)));
pollcv->next = nullptr;
gpr_cv pollcv_cv;
gpr_cv_init(&pollcv_cv);
pollcv->cv = &pollcv_cv;
grpc_cv_node* fd_cvs =
static_cast<grpc_cv_node*>(gpr_malloc(nfds * sizeof(grpc_cv_node)));
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
idx = GRPC_FD_TO_IDX(fds[i].fd);
fd_cvs[i].cv = &pollcv_cv;
fd_cvs[i].prev = nullptr;
fd_cvs[i].next = g_cvfds.cvfds[idx].cvs;
if (g_cvfds.cvfds[idx].cvs) {
g_cvfds.cvfds[idx].cvs->prev = &(fd_cvs[i]);
}
g_cvfds.cvfds[idx].cvs = &(fd_cvs[i]);
// Don't bother polling if a wakeup fd is ready
if (g_cvfds.cvfds[idx].is_set) {
skip_poll = 1;
}
} else if (fds[i].fd >= 0) {
nsockfds++;
}
}
gpr_timespec deadline = gpr_now(GPR_CLOCK_MONOTONIC);
if (timeout < 0) {
deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
} else {
deadline =
gpr_time_add(deadline, gpr_time_from_millis(timeout, GPR_TIMESPAN));
}
res = 0;
if (!skip_poll && nsockfds > 0) {
struct pollfd* pollfds = static_cast<struct pollfd*>(
gpr_malloc(sizeof(struct pollfd) * nsockfds));
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd >= 0) {
pollfds[idx].fd = fds[i].fd;
pollfds[idx].events = fds[i].events;
pollfds[idx].revents = 0;
idx++;
}
}
poll_args* pargs = get_poller_locked(pollfds, nsockfds);
result = pargs->result;
pollcv->next = result->watchers;
pollcv->prev = nullptr;
if (result->watchers) {
result->watchers->prev = pollcv;
}
result->watchers = pollcv;
result->watchcount++;
gpr_ref(&result->refcount);
pargs->trigger_set = 1;
gpr_cv_signal(&pargs->trigger);
gpr_cv_wait(&pollcv_cv, &g_cvfds.mu, deadline);
cache_harvest_locked();
res = result->retval;
errno = result->err;
result->watchcount--;
remove_cvn(&result->watchers, pollcv);
} else if (!skip_poll) {
gpr_cv_wait(&pollcv_cv, &g_cvfds.mu, deadline);
cache_harvest_locked();
}
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
remove_cvn(&g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].cvs, &(fd_cvs[i]));
if (g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].is_set) {
fds[i].revents = POLLIN;
if (res >= 0) res++;
}
} else if (!skip_poll && fds[i].fd >= 0 && result->completed) {
fds[i].revents = result->fds[idx].revents;
idx++;
}
}
gpr_free(fd_cvs);
gpr_cv_destroy(pollcv->cv);
gpr_free(pollcv);
if (result) {
decref_poll_result(result);
}
gpr_mu_unlock(&g_cvfds.mu);
return res;
}
static void global_cv_fd_table_init() {
gpr_mu_init(&g_cvfds.mu);
gpr_mu_lock(&g_cvfds.mu);
gpr_cv_init(&g_cvfds.shutdown_cv);
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
g_cvfds.cvfds = static_cast<grpc_fd_node*>(
gpr_malloc(sizeof(grpc_fd_node) * CV_DEFAULT_TABLE_SIZE));
g_cvfds.free_fds = nullptr;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = nullptr;
g_cvfds.cvfds[i].next_free = g_cvfds.free_fds;
g_cvfds.free_fds = &g_cvfds.cvfds[i];
}
// Override the poll function with one that supports cvfds
g_cvfds.poll = grpc_poll_function;
grpc_poll_function = &cvfd_poll;
// Initialize the cache
poll_cache.size = 32;
poll_cache.count = 0;
poll_cache.free_pollers = nullptr;
poll_cache.active_pollers =
static_cast<poll_args**>(gpr_malloc(sizeof(void*) * 32));
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = nullptr;
}
poll_cache.dead_pollers = nullptr;
gpr_mu_unlock(&g_cvfds.mu);
}
static void global_cv_fd_table_shutdown() {
gpr_mu_lock(&g_cvfds.mu);
// Attempt to wait for all abandoned poll() threads to terminate
// Not doing so will result in reported memory leaks
if (!gpr_unref(&g_cvfds.pollcount)) {
int res = gpr_cv_wait(&g_cvfds.shutdown_cv, &g_cvfds.mu,
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_seconds(3, GPR_TIMESPAN)));
GPR_ASSERT(res == 0);
}
gpr_cv_destroy(&g_cvfds.shutdown_cv);
grpc_poll_function = g_cvfds.poll;
gpr_free(g_cvfds.cvfds);
cache_harvest_locked();
gpr_free(poll_cache.active_pollers);
gpr_mu_unlock(&g_cvfds.mu);
gpr_mu_destroy(&g_cvfds.mu);
}
/*******************************************************************************
* event engine binding
*/
@ -1792,9 +1322,6 @@ static void shutdown_background_closure(void) {}
static void shutdown_engine(void) {
pollset_global_shutdown();
if (grpc_cv_wakeup_fds_enabled()) {
global_cv_fd_table_shutdown();
}
if (track_fds_for_fork) {
gpr_mu_destroy(&fork_fd_list_mu);
grpc_core::Fork::SetResetChildPollingEngineFunc(nullptr);
@ -1876,15 +1403,4 @@ const grpc_event_engine_vtable* grpc_init_poll_posix(bool explicit_request) {
return &vtable;
}
const grpc_event_engine_vtable* grpc_init_poll_cv_posix(bool explicit_request) {
global_cv_fd_table_init();
grpc_enable_cv_wakeup_fds(1);
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
global_cv_fd_table_shutdown();
grpc_enable_cv_wakeup_fds(0);
return nullptr;
}
return &vtable;
}
#endif /* GRPC_POSIX_SOCKET_EV_POLL */

@ -126,10 +126,9 @@ static event_engine_factory g_factories[] = {
{ENGINE_HEAD_CUSTOM, nullptr}, {ENGINE_HEAD_CUSTOM, nullptr},
{ENGINE_HEAD_CUSTOM, nullptr}, {ENGINE_HEAD_CUSTOM, nullptr},
{"epollex", grpc_init_epollex_linux}, {"epoll1", grpc_init_epoll1_linux},
{"poll", grpc_init_poll_posix}, {"poll-cv", grpc_init_poll_cv_posix},
{"none", init_non_polling}, {ENGINE_TAIL_CUSTOM, nullptr},
{"poll", grpc_init_poll_posix}, {"none", init_non_polling},
{ENGINE_TAIL_CUSTOM, nullptr}, {ENGINE_TAIL_CUSTOM, nullptr},
{ENGINE_TAIL_CUSTOM, nullptr}, {ENGINE_TAIL_CUSTOM, nullptr},
{ENGINE_TAIL_CUSTOM, nullptr},
};
static void add(const char* beg, const char* end, char*** ss, size_t* ns) {

@ -38,8 +38,7 @@ bool kernel_supports_errqueue() { return errqueue_supported; }
void grpc_errqueue_init() {
/* Both compile-time and run-time Linux kernel versions should be at least 4.0.0
*/
#ifdef LINUX_VERSION_CODE
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
#ifdef GRPC_LINUX_ERRQUEUE
struct utsname buffer;
if (uname(&buffer) != 0) {
gpr_log(GPR_ERROR, "uname: %s", strerror(errno));
@ -55,8 +54,7 @@ void grpc_errqueue_init() {
} else {
gpr_log(GPR_DEBUG, "ERRQUEUE support not enabled");
}
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(4, 0, 0) */
#endif /* LINUX_VERSION_CODE */
#endif /* GRPC_LINUX_ERRQUEUE */
}
} /* namespace grpc_core */

@ -60,6 +60,9 @@
#define GRPC_HAVE_IP_PKTINFO 1
#define GRPC_HAVE_MSG_NOSIGNAL 1
#define GRPC_HAVE_UNIX_SOCKET 1
/* Linux has TCP_INQ support since 4.18, but it is safe to set
the socket option on older kernels. */
#define GRPC_HAVE_TCP_INQ 1
#ifdef LINUX_VERSION_CODE
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
#define GRPC_LINUX_ERRQUEUE 1

@ -192,7 +192,7 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
}
static void endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
grpc_closure* cb) {
grpc_closure* cb, bool urgent) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
GPR_ASSERT(tcp->read_cb == nullptr);

@ -27,6 +27,7 @@
#include <errno.h>
#include <limits.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
@ -34,6 +35,7 @@
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
@ -54,6 +56,15 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#ifndef SOL_TCP
#define SOL_TCP IPPROTO_TCP
#endif
#ifndef TCP_INQ
#define TCP_INQ 36
#define TCP_CM_INQ TCP_INQ
#endif
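/* Editor's note: TCP_INQ (available on Linux since 4.18) makes recvmsg()
attach a control message reporting how many bytes remain queued on the socket
after the read. tcp_do_read() below uses that hint to keep calling recvmsg()
while data remains; the fallback definitions here only matter when building
against older kernel headers. */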
#ifdef GRPC_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL
#else
@ -88,8 +99,11 @@ struct grpc_tcp {
grpc_slice_buffer last_read_buffer;
grpc_slice_buffer* incoming_buffer;
int inq; /* bytes pending on the socket from the last read. */
bool inq_capable; /* cache whether kernel supports inq */
grpc_slice_buffer* outgoing_buffer;
/** byte within outgoing_buffer->slices[0] to write next */
/* byte within outgoing_buffer->slices[0] to write next */
size_t outgoing_byte_idx;
grpc_closure* read_cb;
@ -429,69 +443,140 @@ static void tcp_do_read(grpc_tcp* tcp) {
GPR_TIMER_SCOPE("tcp_do_read", 0);
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
char cmsgbuf[24 /*CMSG_SPACE(sizeof(int))*/];
ssize_t read_bytes;
size_t i;
GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
size_t total_read_bytes = 0;
for (i = 0; i < tcp->incoming_buffer->count; i++) {
size_t iov_len =
std::min<size_t>(MAX_READ_IOVEC, tcp->incoming_buffer->count);
for (size_t i = 0; i < iov_len; i++) {
iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
}
msg.msg_name = nullptr;
msg.msg_namelen = 0;
msg.msg_iov = iov;
msg.msg_iovlen = static_cast<msg_iovlen_type>(tcp->incoming_buffer->count);
msg.msg_control = nullptr;
msg.msg_controllen = 0;
msg.msg_flags = 0;
GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length);
GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);
do {
GPR_TIMER_SCOPE("recvmsg", 0);
GRPC_STATS_INC_SYSCALL_READ();
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
* be running. */
if (errno == EAGAIN) {
finish_estimate(tcp);
/* We've consumed the edge, request a new one */
notify_on_read(tcp);
/* Assume there is something on the queue. If we receive TCP_INQ from the
* kernel, we will update this value; otherwise, we have to assume there is
* always something to read until we get EAGAIN. */
tcp->inq = 1;
msg.msg_name = nullptr;
msg.msg_namelen = 0;
msg.msg_iov = iov;
msg.msg_iovlen = static_cast<msg_iovlen_type>(iov_len);
if (tcp->inq_capable) {
msg.msg_control = cmsgbuf;
msg.msg_controllen = sizeof(cmsgbuf);
} else {
msg.msg_control = nullptr;
msg.msg_controllen = 0;
}
msg.msg_flags = 0;
GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length);
GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);
do {
GPR_TIMER_SCOPE("recvmsg", 0);
GRPC_STATS_INC_SYSCALL_READ();
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
/* We have read something in previous reads. We need to deliver those
* bytes to the upper layer. */
if (read_bytes <= 0 && total_read_bytes > 0) {
tcp->inq = 1;
break;
}
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
* be running. */
if (errno == EAGAIN) {
finish_estimate(tcp);
tcp->inq = 0;
/* We've consumed the edge, request a new one */
notify_on_read(tcp);
} else {
grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
call_read_cb(tcp,
tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
TCP_UNREF(tcp, "read");
}
return;
}
if (read_bytes == 0) {
/* 0 read size ==> end of stream
*
* We may have read something, i.e., total_read_bytes > 0, but
* since the connection is closed we will drop the data here, because we
* can't call the callback multiple times. */
grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
call_read_cb(tcp,
tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
call_read_cb(
tcp, tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
TCP_UNREF(tcp, "read");
return;
}
} else if (read_bytes == 0) {
/* 0 read size ==> end of stream */
grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
call_read_cb(
tcp, tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
TCP_UNREF(tcp, "read");
} else {
GRPC_STATS_INC_TCP_READ_SIZE(read_bytes);
add_to_estimate(tcp, static_cast<size_t>(read_bytes));
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
if (static_cast<size_t>(read_bytes) == tcp->incoming_buffer->length) {
finish_estimate(tcp);
} else if (static_cast<size_t>(read_bytes) < tcp->incoming_buffer->length) {
grpc_slice_buffer_trim_end(
tcp->incoming_buffer,
tcp->incoming_buffer->length - static_cast<size_t>(read_bytes),
&tcp->last_read_buffer);
GPR_DEBUG_ASSERT((size_t)read_bytes <=
tcp->incoming_buffer->length - total_read_bytes);
#ifdef GRPC_HAVE_TCP_INQ
if (tcp->inq_capable) {
GPR_DEBUG_ASSERT(!(msg.msg_flags & MSG_CTRUNC));
struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
for (; cmsg != nullptr; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
if (cmsg->cmsg_level == SOL_TCP && cmsg->cmsg_type == TCP_CM_INQ &&
cmsg->cmsg_len == CMSG_LEN(sizeof(int))) {
tcp->inq = *reinterpret_cast<int*>(CMSG_DATA(cmsg));
}
}
}
GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
call_read_cb(tcp, GRPC_ERROR_NONE);
TCP_UNREF(tcp, "read");
#endif /* GRPC_HAVE_TCP_INQ */
total_read_bytes += read_bytes;
if (tcp->inq == 0 || total_read_bytes == tcp->incoming_buffer->length) {
/* We have filled incoming_buffer, and we cannot read any more. */
break;
}
/* We had a partial read, and still have space to read more data.
* So, adjust IOVs and try to read more. */
size_t remaining = read_bytes;
size_t j = 0;
for (size_t i = 0; i < iov_len; i++) {
if (remaining >= iov[i].iov_len) {
remaining -= iov[i].iov_len;
continue;
}
if (remaining > 0) {
iov[j].iov_base = static_cast<char*>(iov[i].iov_base) + remaining;
iov[j].iov_len = iov[i].iov_len - remaining;
remaining = 0;
} else {
iov[j].iov_base = iov[i].iov_base;
iov[j].iov_len = iov[i].iov_len;
}
++j;
}
iov_len = j;
} while (true);
if (tcp->inq == 0) {
finish_estimate(tcp);
}
GPR_DEBUG_ASSERT(total_read_bytes > 0);
if (total_read_bytes < tcp->incoming_buffer->length) {
grpc_slice_buffer_trim_end(tcp->incoming_buffer,
tcp->incoming_buffer->length - total_read_bytes,
&tcp->last_read_buffer);
}
call_read_cb(tcp, GRPC_ERROR_NONE);
TCP_UNREF(tcp, "read");
}
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
@ -512,7 +597,8 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
static void tcp_continue_read(grpc_tcp* tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size / 2 &&
/* Wait for allocation only when there is no buffer left. */
if (tcp->incoming_buffer->length == 0 &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
@ -544,7 +630,7 @@ static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
}
static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
grpc_closure* cb) {
grpc_closure* cb, bool urgent) {
grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
GPR_ASSERT(tcp->read_cb == nullptr);
tcp->read_cb = cb;
@ -557,6 +643,11 @@ static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
* the polling engine */
tcp->is_first_read = false;
notify_on_read(tcp);
} else if (!urgent && tcp->inq == 0) {
/* The upper layer asked to read more, but we know from previous reads that
* there is no pending data on the socket. So, wait for POLLIN.
*/
notify_on_read(tcp);
} else {
/* Not the first time. We may or may not have more bytes available. In any
* case call tcp->read_done_closure (i.e tcp_handle_read()) which does the
@ -1157,6 +1248,19 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
tcp->tb_head = nullptr;
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
/* Always assume there is something on the queue to read. */
tcp->inq = 1;
#ifdef GRPC_HAVE_TCP_INQ
int one = 1;
if (setsockopt(tcp->fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0) {
tcp->inq_capable = true;
} else {
gpr_log(GPR_DEBUG, "cannot set inq fd=%d errno=%d", tcp->fd, errno);
tcp->inq_capable = false;
}
#else
tcp->inq_capable = false;
#endif /* GRPC_HAVE_TCP_INQ */
/* Start being notified on errors if event engine can track errors. */
if (grpc_event_engine_can_track_errors()) {
/* Grab a ref to tcp so that we can safely access the tcp struct when

@ -241,7 +241,7 @@ static void on_read(void* tcpp, grpc_error* error) {
#define DEFAULT_TARGET_READ_SIZE 8192
#define MAX_WSABUF_COUNT 16
static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
grpc_closure* cb) {
grpc_closure* cb, bool urgent) {
grpc_tcp* tcp = (grpc_tcp*)ep;
grpc_winsocket* handle = tcp->socket;
grpc_winsocket_callback_info* info = &handle->read_info;

@ -1,107 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/iomgr/port.h"
#ifdef GRPC_POSIX_WAKEUP_FD
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include <errno.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/thd.h"
#define MAX_TABLE_RESIZE 256
extern grpc_cv_fd_table g_cvfds;
static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
unsigned int i, newsize;
int idx;
gpr_mu_lock(&g_cvfds.mu);
if (!g_cvfds.free_fds) {
newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
g_cvfds.cvfds = static_cast<grpc_fd_node*>(
gpr_realloc(g_cvfds.cvfds, sizeof(grpc_fd_node) * newsize));
for (i = g_cvfds.size; i < newsize; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = nullptr;
g_cvfds.cvfds[i].next_free = g_cvfds.free_fds;
g_cvfds.free_fds = &g_cvfds.cvfds[i];
}
g_cvfds.size = newsize;
}
idx = static_cast<int>(g_cvfds.free_fds - g_cvfds.cvfds);
g_cvfds.free_fds = g_cvfds.free_fds->next_free;
g_cvfds.cvfds[idx].cvs = nullptr;
g_cvfds.cvfds[idx].is_set = 0;
fd_info->read_fd = GRPC_IDX_TO_FD(idx);
fd_info->write_fd = -1;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
grpc_cv_node* cvn;
gpr_mu_lock(&g_cvfds.mu);
g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 1;
cvn = g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs;
while (cvn) {
gpr_cv_signal(cvn->cv);
cvn = cvn->next;
}
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
static grpc_error* cv_fd_consume(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 0;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
static void cv_fd_destroy(grpc_wakeup_fd* fd_info) {
if (fd_info->read_fd == 0) {
return;
}
gpr_mu_lock(&g_cvfds.mu);
// Assert that there are no active pollers
GPR_ASSERT(!g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs);
g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
g_cvfds.free_fds = &g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)];
gpr_mu_unlock(&g_cvfds.mu);
}
static int cv_check_availability(void) { return 1; }
const grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable = {
cv_fd_init, cv_fd_consume, cv_fd_wakeup, cv_fd_destroy,
cv_check_availability};
#endif /* GRPC_POSIX_WAKEUP_FD */

@ -1,69 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* wakeup_fd_cv uses condition variables to implement wakeup fds.
*
* It is intended for use only in cases when eventfd() and pipe() are not
* available. It can only be used with the "poll" engine.
*
* Implementation:
* A global table of cv wakeup fds is maintained. A cv wakeup fd is a negative
* file descriptor. poll() is then run in a background thread with only the
* real socket fds while we wait on a condition variable triggered by either the
* poll() completion or a wakeup_fd() call.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H
#define GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
#include "src/core/lib/iomgr/ev_posix.h"
#define GRPC_FD_TO_IDX(fd) (-(fd)-1)
#define GRPC_IDX_TO_FD(idx) (-(idx)-1)
typedef struct grpc_cv_node {
gpr_cv* cv;
struct grpc_cv_node* next;
struct grpc_cv_node* prev;
} grpc_cv_node;
typedef struct grpc_fd_node {
int is_set;
grpc_cv_node* cvs;
struct grpc_fd_node* next_free;
} grpc_fd_node;
typedef struct grpc_cv_fd_table {
gpr_mu mu;
gpr_refcount pollcount;
gpr_cv shutdown_cv;
grpc_fd_node* cvfds;
grpc_fd_node* free_fds;
unsigned int size;
grpc_poll_function_type poll;
} grpc_cv_fd_table;
extern const grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable;
#endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H */

@ -23,7 +23,6 @@
#ifdef GRPC_POSIX_WAKEUP_FD
#include <stddef.h>
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_pipe.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
@ -51,37 +50,20 @@ void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = nullptr; }
int grpc_has_wakeup_fd(void) { return has_real_wakeup_fd; }
int grpc_cv_wakeup_fds_enabled(void) { return cv_wakeup_fds_enabled; }
void grpc_enable_cv_wakeup_fds(int enable) { cv_wakeup_fds_enabled = enable; }
grpc_error* grpc_wakeup_fd_init(grpc_wakeup_fd* fd_info) {
if (cv_wakeup_fds_enabled) {
return grpc_cv_wakeup_fd_vtable.init(fd_info);
}
return wakeup_fd_vtable->init(fd_info);
}
grpc_error* grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd* fd_info) {
if (cv_wakeup_fds_enabled) {
return grpc_cv_wakeup_fd_vtable.consume(fd_info);
}
return wakeup_fd_vtable->consume(fd_info);
}
grpc_error* grpc_wakeup_fd_wakeup(grpc_wakeup_fd* fd_info) {
if (cv_wakeup_fds_enabled) {
return grpc_cv_wakeup_fd_vtable.wakeup(fd_info);
}
return wakeup_fd_vtable->wakeup(fd_info);
}
void grpc_wakeup_fd_destroy(grpc_wakeup_fd* fd_info) {
if (cv_wakeup_fds_enabled) {
grpc_cv_wakeup_fd_vtable.destroy(fd_info);
} else {
wakeup_fd_vtable->destroy(fd_info);
}
wakeup_fd_vtable->destroy(fd_info);
}
#endif /* GRPC_POSIX_WAKEUP_FD */

@ -167,19 +167,16 @@ struct grpc_tls_credentials_options
grpc_ssl_client_certificate_request_type cert_request_type() const {
return cert_request_type_;
}
const grpc_tls_key_materials_config* key_materials_config() const {
grpc_tls_key_materials_config* key_materials_config() const {
return key_materials_config_.get();
}
const grpc_tls_credential_reload_config* credential_reload_config() const {
grpc_tls_credential_reload_config* credential_reload_config() const {
return credential_reload_config_.get();
}
const grpc_tls_server_authorization_check_config*
grpc_tls_server_authorization_check_config*
server_authorization_check_config() const {
return server_authorization_check_config_.get();
}
grpc_tls_key_materials_config* mutable_key_materials_config() {
return key_materials_config_.get();
}
/* Setters for member fields. */
void set_cert_request_type(

@ -0,0 +1,129 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/security/credentials/tls/spiffe_credentials.h"
#include <cstring>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/security/security_connector/tls/spiffe_security_connector.h"
#define GRPC_CREDENTIALS_TYPE_SPIFFE "Spiffe"
namespace {
bool CredentialOptionSanityCheck(const grpc_tls_credentials_options* options,
bool is_client) {
if (options == nullptr) {
gpr_log(GPR_ERROR, "SPIFFE TLS credentials options is nullptr.");
return false;
}
if (options->key_materials_config() == nullptr &&
options->credential_reload_config() == nullptr) {
gpr_log(
GPR_ERROR,
"SPIFFE TLS credentials options must specify either key materials or "
"credential reload config.");
return false;
}
if (!is_client && options->server_authorization_check_config() != nullptr) {
gpr_log(GPR_INFO,
"Server's credentials options should not contain server "
"authorization check config.");
}
return true;
}
} // namespace
SpiffeCredentials::SpiffeCredentials(
grpc_core::RefCountedPtr<grpc_tls_credentials_options> options)
: grpc_channel_credentials(GRPC_CREDENTIALS_TYPE_SPIFFE),
options_(std::move(options)) {}
SpiffeCredentials::~SpiffeCredentials() {}
grpc_core::RefCountedPtr<grpc_channel_security_connector>
SpiffeCredentials::create_security_connector(
grpc_core::RefCountedPtr<grpc_call_credentials> call_creds,
const char* target_name, const grpc_channel_args* args,
grpc_channel_args** new_args) {
const char* overridden_target_name = nullptr;
tsi_ssl_session_cache* ssl_session_cache = nullptr;
for (size_t i = 0; args != nullptr && i < args->num_args; i++) {
grpc_arg* arg = &args->args[i];
if (strcmp(arg->key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == 0 &&
arg->type == GRPC_ARG_STRING) {
overridden_target_name = arg->value.string;
}
if (strcmp(arg->key, GRPC_SSL_SESSION_CACHE_ARG) == 0 &&
arg->type == GRPC_ARG_POINTER) {
ssl_session_cache =
static_cast<tsi_ssl_session_cache*>(arg->value.pointer.p);
}
}
grpc_core::RefCountedPtr<grpc_channel_security_connector> sc =
SpiffeChannelSecurityConnector::CreateSpiffeChannelSecurityConnector(
this->Ref(), std::move(call_creds), target_name,
overridden_target_name, ssl_session_cache);
if (sc == nullptr) {
return nullptr;
}
grpc_arg new_arg = grpc_channel_arg_string_create(
(char*)GRPC_ARG_HTTP2_SCHEME, (char*)"https");
*new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
return sc;
}
SpiffeServerCredentials::SpiffeServerCredentials(
grpc_core::RefCountedPtr<grpc_tls_credentials_options> options)
: grpc_server_credentials(GRPC_CREDENTIALS_TYPE_SPIFFE),
options_(std::move(options)) {}
SpiffeServerCredentials::~SpiffeServerCredentials() {}
grpc_core::RefCountedPtr<grpc_server_security_connector>
SpiffeServerCredentials::create_security_connector() {
return SpiffeServerSecurityConnector::CreateSpiffeServerSecurityConnector(
this->Ref());
}
grpc_channel_credentials* grpc_tls_spiffe_credentials_create(
grpc_tls_credentials_options* options) {
if (!CredentialOptionSanityCheck(options, true /* is_client */)) {
return nullptr;
}
return grpc_core::New<SpiffeCredentials>(
grpc_core::RefCountedPtr<grpc_tls_credentials_options>(options));
}
grpc_server_credentials* grpc_tls_spiffe_server_credentials_create(
grpc_tls_credentials_options* options) {
if (!CredentialOptionSanityCheck(options, false /* is_client */)) {
return nullptr;
}
return grpc_core::New<SpiffeServerCredentials>(
grpc_core::RefCountedPtr<grpc_tls_credentials_options>(options));
}
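A minimal usage sketch of the factory functions above (not part of the patch): without either a key-materials config or a credential-reload config, CredentialOptionSanityCheck() rejects the options and nullptr is returned. The options constructor name is taken from the experimental TLS API in grpc_security.h and is an assumption here.
/* Sketch only: grpc_tls_credentials_options_create() is assumed from the
   experimental C API; the create function below is defined in this file. */
grpc_tls_credentials_options* opts = grpc_tls_credentials_options_create();
grpc_channel_credentials* creds = grpc_tls_spiffe_credentials_create(opts);
GPR_ASSERT(creds == nullptr); /* no key materials and no reload config */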

@ -0,0 +1,62 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_TLS_SPIFFE_CREDENTIALS_H
#define GRPC_CORE_LIB_SECURITY_CREDENTIALS_TLS_SPIFFE_CREDENTIALS_H
#include <grpc/support/port_platform.h>
#include <grpc/grpc_security.h>
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
class SpiffeCredentials final : public grpc_channel_credentials {
public:
explicit SpiffeCredentials(
grpc_core::RefCountedPtr<grpc_tls_credentials_options> options);
~SpiffeCredentials() override;
grpc_core::RefCountedPtr<grpc_channel_security_connector>
create_security_connector(
grpc_core::RefCountedPtr<grpc_call_credentials> call_creds,
const char* target_name, const grpc_channel_args* args,
grpc_channel_args** new_args) override;
const grpc_tls_credentials_options& options() const { return *options_; }
private:
grpc_core::RefCountedPtr<grpc_tls_credentials_options> options_;
};
class SpiffeServerCredentials final : public grpc_server_credentials {
public:
explicit SpiffeServerCredentials(
grpc_core::RefCountedPtr<grpc_tls_credentials_options> options);
~SpiffeServerCredentials() override;
grpc_core::RefCountedPtr<grpc_server_security_connector>
create_security_connector() override;
const grpc_tls_credentials_options& options() const { return *options_; }
private:
grpc_core::RefCountedPtr<grpc_tls_credentials_options> options_;
};
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_TLS_SPIFFE_CREDENTIALS_H */

@ -26,6 +26,8 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h"
#include "src/core/ext/transport/chttp2/alpn/alpn.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
@ -53,8 +55,11 @@ class grpc_fake_channel_security_connector final
target_(gpr_strdup(target)),
expected_targets_(
gpr_strdup(grpc_fake_transport_get_expected_targets(args))),
is_lb_channel_(grpc_core::FindTargetAuthorityTableInArgs(args) !=
nullptr) {
is_lb_channel_(
grpc_channel_args_find(
args, GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER) != nullptr ||
grpc_channel_args_find(
args, GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER) != nullptr) {
const grpc_arg* target_name_override_arg =
grpc_channel_args_find(args, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
if (target_name_override_arg != nullptr) {

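The balancer-channel detection above keys off ordinary channel args. A sketch (not part of the patch) of tagging a channel so that grpc_channel_args_find() sees the marker; the integer value used here is an assumption for illustration only.
/* Sketch: args is an existing const grpc_channel_args* for the channel. */
grpc_arg lb_arg = grpc_channel_arg_integer_create(
    const_cast<char*>(GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER), 1);
grpc_channel_args* tagged = grpc_channel_args_copy_and_add(args, &lb_arg, 1);
/* grpc_channel_args_find(tagged, GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER)
   now returns a non-null arg, so is_lb_channel_ evaluates to true. */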
@ -41,33 +41,6 @@
#include "src/core/tsi/transport_security.h"
namespace {
grpc_error* ssl_check_peer(
const char* peer_name, const tsi_peer* peer,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context) {
#if TSI_OPENSSL_ALPN_SUPPORT
/* Check the ALPN if ALPN is supported. */
const tsi_peer_property* p =
tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL);
if (p == nullptr) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Cannot check peer: missing selected ALPN property.");
}
if (!grpc_chttp2_is_alpn_version_supported(p->value.data, p->value.length)) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Cannot check peer: invalid ALPN value.");
}
#endif /* TSI_OPENSSL_ALPN_SUPPORT */
/* Check the peer name if specified. */
if (peer_name != nullptr && !grpc_ssl_host_matches_name(peer, peer_name)) {
char* msg;
gpr_asprintf(&msg, "Peer name %s is not in peer certificate", peer_name);
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
*auth_context = grpc_ssl_peer_to_auth_context(peer);
return GRPC_ERROR_NONE;
}
class grpc_ssl_channel_security_connector final
: public grpc_channel_security_connector {
@ -96,34 +69,10 @@ class grpc_ssl_channel_security_connector final
}
grpc_security_status InitializeHandshakerFactory(
const grpc_ssl_config* config, const char* pem_root_certs,
const tsi_ssl_root_certs_store* root_store,
tsi_ssl_session_cache* ssl_session_cache) {
bool has_key_cert_pair =
config->pem_key_cert_pair != nullptr &&
config->pem_key_cert_pair->private_key != nullptr &&
config->pem_key_cert_pair->cert_chain != nullptr;
tsi_ssl_client_handshaker_options options;
GPR_DEBUG_ASSERT(pem_root_certs != nullptr);
options.pem_root_certs = pem_root_certs;
options.root_store = root_store;
options.alpn_protocols =
grpc_fill_alpn_protocol_strings(&options.num_alpn_protocols);
if (has_key_cert_pair) {
options.pem_key_cert_pair = config->pem_key_cert_pair;
}
options.cipher_suites = grpc_get_ssl_cipher_suites();
options.session_cache = ssl_session_cache;
const tsi_result result =
tsi_create_ssl_client_handshaker_factory_with_options(
&options, &client_handshaker_factory_);
gpr_free((void*)options.alpn_protocols);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
return GRPC_SECURITY_ERROR;
}
return GRPC_SECURITY_OK;
const grpc_ssl_config* config, tsi_ssl_session_cache* ssl_session_cache) {
return grpc_ssl_tsi_client_handshaker_factory_init(
config->pem_key_cert_pair, config->pem_root_certs, ssl_session_cache,
&client_handshaker_factory_);
}
void add_handshakers(grpc_pollset_set* interested_parties,
@ -150,29 +99,35 @@ class grpc_ssl_channel_security_connector final
const char* target_name = overridden_target_name_ != nullptr
? overridden_target_name_
: target_name_;
grpc_error* error = ssl_check_peer(target_name, &peer, auth_context);
if (error == GRPC_ERROR_NONE &&
verify_options_->verify_peer_callback != nullptr) {
const tsi_peer_property* p =
tsi_peer_get_property_by_name(&peer, TSI_X509_PEM_CERT_PROPERTY);
if (p == nullptr) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Cannot check peer: missing pem cert property.");
} else {
char* peer_pem = static_cast<char*>(gpr_malloc(p->value.length + 1));
memcpy(peer_pem, p->value.data, p->value.length);
peer_pem[p->value.length] = '\0';
int callback_status = verify_options_->verify_peer_callback(
target_name, peer_pem,
verify_options_->verify_peer_callback_userdata);
gpr_free(peer_pem);
if (callback_status) {
char* msg;
gpr_asprintf(&msg, "Verify peer callback returned a failure (%d)",
callback_status);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
grpc_error* error = grpc_ssl_check_alpn(&peer);
if (error == GRPC_ERROR_NONE) {
error = grpc_ssl_check_peer_name(target_name, &peer);
if (error == GRPC_ERROR_NONE) {
if (verify_options_->verify_peer_callback != nullptr) {
const tsi_peer_property* p =
tsi_peer_get_property_by_name(&peer, TSI_X509_PEM_CERT_PROPERTY);
if (p == nullptr) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Cannot check peer: missing pem cert property.");
} else {
char* peer_pem =
static_cast<char*>(gpr_malloc(p->value.length + 1));
memcpy(peer_pem, p->value.data, p->value.length);
peer_pem[p->value.length] = '\0';
int callback_status = verify_options_->verify_peer_callback(
target_name, peer_pem,
verify_options_->verify_peer_callback_userdata);
gpr_free(peer_pem);
if (callback_status) {
char* msg;
gpr_asprintf(&msg, "Verify peer callback returned a failure (%d)",
callback_status);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
}
}
}
*auth_context = grpc_ssl_peer_to_auth_context(&peer);
}
}
GRPC_CLOSURE_SCHED(on_peer_checked, error);
@ -184,34 +139,16 @@ class grpc_ssl_channel_security_connector final
reinterpret_cast<const grpc_ssl_channel_security_connector*>(other_sc);
int c = channel_security_connector_cmp(other);
if (c != 0) return c;
c = strcmp(target_name_, other->target_name_);
if (c != 0) return c;
return (overridden_target_name_ == nullptr ||
other->overridden_target_name_ == nullptr)
? GPR_ICMP(overridden_target_name_,
other->overridden_target_name_)
: strcmp(overridden_target_name_,
other->overridden_target_name_);
return grpc_ssl_cmp_target_name(target_name_, other->target_name_,
overridden_target_name_,
other->overridden_target_name_);
}
bool check_call_host(const char* host, grpc_auth_context* auth_context,
grpc_closure* on_call_host_checked,
grpc_error** error) override {
grpc_security_status status = GRPC_SECURITY_ERROR;
tsi_peer peer = grpc_shallow_peer_from_ssl_auth_context(auth_context);
if (grpc_ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK;
/* If the target name was overridden, then the original target_name was
'checked' transitively during the previous peer check at the end of the
handshake. */
if (overridden_target_name_ != nullptr && strcmp(host, target_name_) == 0) {
status = GRPC_SECURITY_OK;
}
if (status != GRPC_SECURITY_OK) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"call host does not match SSL server name");
}
grpc_shallow_peer_destruct(&peer);
return true;
return grpc_ssl_check_call_host(host, target_name_, overridden_target_name_,
auth_context, on_call_host_checked, error);
}
void cancel_check_call_host(grpc_closure* on_call_host_checked,
@ -248,43 +185,25 @@ class grpc_ssl_server_security_connector
}
grpc_security_status InitializeHandshakerFactory() {
grpc_security_status retval = GRPC_SECURITY_OK;
if (has_cert_config_fetcher()) {
// Load initial credentials from certificate_config_fetcher:
if (!try_fetch_ssl_server_credentials()) {
gpr_log(GPR_ERROR,
"Failed loading SSL server credentials from fetcher.");
return GRPC_SECURITY_ERROR;
retval = GRPC_SECURITY_ERROR;
}
} else {
auto* server_credentials =
static_cast<const grpc_ssl_server_credentials*>(server_creds());
size_t num_alpn_protocols = 0;
const char** alpn_protocol_strings =
grpc_fill_alpn_protocol_strings(&num_alpn_protocols);
tsi_ssl_server_handshaker_options options;
options.pem_key_cert_pairs =
server_credentials->config().pem_key_cert_pairs;
options.num_key_cert_pairs =
server_credentials->config().num_key_cert_pairs;
options.pem_client_root_certs =
server_credentials->config().pem_root_certs;
options.client_certificate_request =
grpc_get_tsi_client_certificate_request_type(
server_credentials->config().client_certificate_request);
options.cipher_suites = grpc_get_ssl_cipher_suites();
options.alpn_protocols = alpn_protocol_strings;
options.num_alpn_protocols = static_cast<uint16_t>(num_alpn_protocols);
const tsi_result result =
tsi_create_ssl_server_handshaker_factory_with_options(
&options, &server_handshaker_factory_);
gpr_free((void*)alpn_protocol_strings);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
return GRPC_SECURITY_ERROR;
}
retval = grpc_ssl_tsi_server_handshaker_factory_init(
server_credentials->config().pem_key_cert_pairs,
server_credentials->config().num_key_cert_pairs,
server_credentials->config().pem_root_certs,
server_credentials->config().client_certificate_request,
&server_handshaker_factory_);
}
return GRPC_SECURITY_OK;
return retval;
}
void add_handshakers(grpc_pollset_set* interested_parties,
@ -306,7 +225,8 @@ class grpc_ssl_server_security_connector
void check_peer(tsi_peer peer, grpc_endpoint* ep,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) override {
grpc_error* error = ssl_check_peer(nullptr, &peer, auth_context);
grpc_error* error = grpc_ssl_check_alpn(&peer);
*auth_context = grpc_ssl_peer_to_auth_context(&peer);
tsi_peer_destruct(&peer);
GRPC_CLOSURE_SCHED(on_peer_checked, error);
}
@ -323,9 +243,7 @@ class grpc_ssl_server_security_connector
bool try_fetch_ssl_server_credentials() {
grpc_ssl_server_certificate_config* certificate_config = nullptr;
bool status;
if (!has_cert_config_fetcher()) return false;
grpc_ssl_server_credentials* server_creds =
static_cast<grpc_ssl_server_credentials*>(this->mutable_server_creds());
grpc_ssl_certificate_config_reload_status cb_result =
@ -342,7 +260,6 @@ class grpc_ssl_server_security_connector
"use previously-loaded credentials.");
status = false;
}
if (certificate_config != nullptr) {
grpc_ssl_server_certificate_config_destroy(certificate_config);
}
@ -361,34 +278,18 @@ class grpc_ssl_server_security_connector
"config.");
return false;
}
gpr_log(GPR_DEBUG, "Using new server certificate config (%p).", config);
size_t num_alpn_protocols = 0;
const char** alpn_protocol_strings =
grpc_fill_alpn_protocol_strings(&num_alpn_protocols);
tsi_ssl_server_handshaker_factory* new_handshaker_factory = nullptr;
const grpc_ssl_server_credentials* server_creds =
tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs =
grpc_convert_grpc_to_tsi_cert_pairs(config->pem_key_cert_pairs,
config->num_key_cert_pairs);
const grpc_ssl_server_credentials* server_credentials =
static_cast<const grpc_ssl_server_credentials*>(this->server_creds());
GPR_DEBUG_ASSERT(config->pem_root_certs != nullptr);
tsi_ssl_server_handshaker_options options;
options.pem_key_cert_pairs = grpc_convert_grpc_to_tsi_cert_pairs(
config->pem_key_cert_pairs, config->num_key_cert_pairs);
options.num_key_cert_pairs = config->num_key_cert_pairs;
options.pem_client_root_certs = config->pem_root_certs;
options.client_certificate_request =
grpc_get_tsi_client_certificate_request_type(
server_creds->config().client_certificate_request);
options.cipher_suites = grpc_get_ssl_cipher_suites();
options.alpn_protocols = alpn_protocol_strings;
options.num_alpn_protocols = static_cast<uint16_t>(num_alpn_protocols);
tsi_result result = tsi_create_ssl_server_handshaker_factory_with_options(
&options, &new_handshaker_factory);
gpr_free((void*)options.pem_key_cert_pairs);
gpr_free((void*)alpn_protocol_strings);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
tsi_ssl_server_handshaker_factory* new_handshaker_factory = nullptr;
grpc_security_status retval = grpc_ssl_tsi_server_handshaker_factory_init(
pem_key_cert_pairs, config->num_key_cert_pairs, config->pem_root_certs,
server_credentials->config().client_certificate_request,
&new_handshaker_factory);
gpr_free(pem_key_cert_pairs);
if (retval != GRPC_SECURITY_OK) {
return false;
}
set_server_handshaker_factory(new_handshaker_factory);
@ -418,28 +319,12 @@ grpc_ssl_channel_security_connector_create(
gpr_log(GPR_ERROR, "An ssl channel needs a config and a target name.");
return nullptr;
}
const char* pem_root_certs;
const tsi_ssl_root_certs_store* root_store;
if (config->pem_root_certs == nullptr) {
// Use default root certificates.
pem_root_certs = grpc_core::DefaultSslRootStore::GetPemRootCerts();
if (pem_root_certs == nullptr) {
gpr_log(GPR_ERROR, "Could not get default pem root certs.");
return nullptr;
}
root_store = grpc_core::DefaultSslRootStore::GetRootStore();
} else {
pem_root_certs = config->pem_root_certs;
root_store = nullptr;
}
grpc_core::RefCountedPtr<grpc_ssl_channel_security_connector> c =
grpc_core::MakeRefCounted<grpc_ssl_channel_security_connector>(
std::move(channel_creds), std::move(request_metadata_creds), config,
target_name, overridden_target_name);
const grpc_security_status result = c->InitializeHandshakerFactory(
config, pem_root_certs, root_store, ssl_session_cache);
const grpc_security_status result =
c->InitializeHandshakerFactory(config, ssl_session_cache);
if (result != GRPC_SECURITY_OK) {
return nullptr;
}

@ -112,6 +112,55 @@ grpc_get_tsi_client_certificate_request_type(
}
}
grpc_error* grpc_ssl_check_alpn(const tsi_peer* peer) {
#if TSI_OPENSSL_ALPN_SUPPORT
/* Check the ALPN if ALPN is supported. */
const tsi_peer_property* p =
tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL);
if (p == nullptr) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Cannot check peer: missing selected ALPN property.");
}
if (!grpc_chttp2_is_alpn_version_supported(p->value.data, p->value.length)) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Cannot check peer: invalid ALPN value.");
}
#endif /* TSI_OPENSSL_ALPN_SUPPORT */
return GRPC_ERROR_NONE;
}
grpc_error* grpc_ssl_check_peer_name(const char* peer_name,
const tsi_peer* peer) {
/* Check the peer name if specified. */
if (peer_name != nullptr && !grpc_ssl_host_matches_name(peer, peer_name)) {
char* msg;
gpr_asprintf(&msg, "Peer name %s is not in peer certificate", peer_name);
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
return GRPC_ERROR_NONE;
}
bool grpc_ssl_check_call_host(const char* host, const char* target_name,
const char* overridden_target_name,
grpc_auth_context* auth_context,
grpc_closure* on_call_host_checked,
grpc_error** error) {
grpc_security_status status = GRPC_SECURITY_ERROR;
tsi_peer peer = grpc_shallow_peer_from_ssl_auth_context(auth_context);
if (grpc_ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK;
if (overridden_target_name != nullptr && strcmp(host, target_name) == 0) {
status = GRPC_SECURITY_OK;
}
if (status != GRPC_SECURITY_OK) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"call host does not match SSL server name");
}
grpc_shallow_peer_destruct(&peer);
return true;
}
const char** grpc_fill_alpn_protocol_strings(size_t* num_alpn_protocols) {
GPR_ASSERT(num_alpn_protocols != nullptr);
*num_alpn_protocols = grpc_chttp2_num_alpn_versions();
@ -142,6 +191,18 @@ int grpc_ssl_host_matches_name(const tsi_peer* peer, const char* peer_name) {
return r;
}
int grpc_ssl_cmp_target_name(const char* target_name,
const char* other_target_name,
const char* overridden_target_name,
const char* other_overridden_target_name) {
int c = strcmp(target_name, other_target_name);
if (c != 0) return c;
return (overridden_target_name == nullptr ||
other_overridden_target_name == nullptr)
? GPR_ICMP(overridden_target_name, other_overridden_target_name)
: strcmp(overridden_target_name, other_overridden_target_name);
}
grpc_core::RefCountedPtr<grpc_auth_context> grpc_ssl_peer_to_auth_context(
const tsi_peer* peer) {
size_t i;
@ -230,6 +291,79 @@ void grpc_shallow_peer_destruct(tsi_peer* peer) {
if (peer->properties != nullptr) gpr_free(peer->properties);
}
grpc_security_status grpc_ssl_tsi_client_handshaker_factory_init(
tsi_ssl_pem_key_cert_pair* pem_key_cert_pair, const char* pem_root_certs,
tsi_ssl_session_cache* ssl_session_cache,
tsi_ssl_client_handshaker_factory** handshaker_factory) {
const char* root_certs;
const tsi_ssl_root_certs_store* root_store;
if (pem_root_certs == nullptr) {
// Use default root certificates.
root_certs = grpc_core::DefaultSslRootStore::GetPemRootCerts();
if (root_certs == nullptr) {
gpr_log(GPR_ERROR, "Could not get default pem root certs.");
return GRPC_SECURITY_ERROR;
}
root_store = grpc_core::DefaultSslRootStore::GetRootStore();
} else {
root_certs = pem_root_certs;
root_store = nullptr;
}
bool has_key_cert_pair = pem_key_cert_pair != nullptr &&
pem_key_cert_pair->private_key != nullptr &&
pem_key_cert_pair->cert_chain != nullptr;
tsi_ssl_client_handshaker_options options;
GPR_DEBUG_ASSERT(root_certs != nullptr);
options.pem_root_certs = root_certs;
options.root_store = root_store;
options.alpn_protocols =
grpc_fill_alpn_protocol_strings(&options.num_alpn_protocols);
if (has_key_cert_pair) {
options.pem_key_cert_pair = pem_key_cert_pair;
}
options.cipher_suites = grpc_get_ssl_cipher_suites();
options.session_cache = ssl_session_cache;
const tsi_result result =
tsi_create_ssl_client_handshaker_factory_with_options(&options,
handshaker_factory);
gpr_free((void*)options.alpn_protocols);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
return GRPC_SECURITY_ERROR;
}
return GRPC_SECURITY_OK;
}
grpc_security_status grpc_ssl_tsi_server_handshaker_factory_init(
tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs, size_t num_key_cert_pairs,
const char* pem_root_certs,
grpc_ssl_client_certificate_request_type client_certificate_request,
tsi_ssl_server_handshaker_factory** handshaker_factory) {
size_t num_alpn_protocols = 0;
const char** alpn_protocol_strings =
grpc_fill_alpn_protocol_strings(&num_alpn_protocols);
tsi_ssl_server_handshaker_options options;
options.pem_key_cert_pairs = pem_key_cert_pairs;
options.num_key_cert_pairs = num_key_cert_pairs;
options.pem_client_root_certs = pem_root_certs;
options.client_certificate_request =
grpc_get_tsi_client_certificate_request_type(client_certificate_request);
options.cipher_suites = grpc_get_ssl_cipher_suites();
options.alpn_protocols = alpn_protocol_strings;
options.num_alpn_protocols = static_cast<uint16_t>(num_alpn_protocols);
const tsi_result result =
tsi_create_ssl_server_handshaker_factory_with_options(&options,
handshaker_factory);
gpr_free((void*)alpn_protocol_strings);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
return GRPC_SECURITY_ERROR;
}
return GRPC_SECURITY_OK;
}
/* --- Ssl cache implementation. --- */
grpc_ssl_session_cache* grpc_ssl_session_cache_create_lru(size_t capacity) {

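A quick sketch (not part of the patch) of the three-way semantics of grpc_ssl_cmp_target_name(), which both SSL security connectors now delegate to from their cmp() methods:
/* Behaves like strcmp over (target_name, overridden_target_name). */
int c = grpc_ssl_cmp_target_name("foo.example.com", "foo.example.com",
                                 /*overridden_target_name=*/nullptr,
                                 /*other_overridden_target_name=*/nullptr);
GPR_ASSERT(c == 0); /* same target, both overrides unset */
c = grpc_ssl_cmp_target_name("a.example.com", "b.example.com", nullptr, nullptr);
GPR_ASSERT(c != 0); /* ordering comes straight from strcmp */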
@ -27,7 +27,10 @@
#include <grpc/slice_buffer.h>
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/security/security_connector/security_connector.h"
#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security.h"
#include "src/core/tsi/transport_security_interface.h"
/* --- Util. --- */
@ -35,6 +38,23 @@
/* --- URL schemes. --- */
#define GRPC_SSL_URL_SCHEME "https"
/* Check ALPN information returned from SSL handshakes. */
grpc_error* grpc_ssl_check_alpn(const tsi_peer* peer);
/* Check peer name information returned from SSL handshakes. */
grpc_error* grpc_ssl_check_peer_name(const char* peer_name,
const tsi_peer* peer);
/* Compare target_name information extracted from SSL security connectors. */
int grpc_ssl_cmp_target_name(const char* target_name,
const char* other_target_name,
const char* overridden_target_name,
const char* other_overridden_target_name);
/* Check that the host to be set for a call is acceptable. */
bool grpc_ssl_check_call_host(const char* host, const char* target_name,
const char* overridden_target_name,
grpc_auth_context* auth_context,
grpc_closure* on_call_host_checked,
grpc_error** error);
/* Return HTTP2-compliant cipher suites that gRPC accepts by default. */
const char* grpc_get_ssl_cipher_suites(void);
@ -47,6 +67,18 @@ grpc_get_tsi_client_certificate_request_type(
/* Return an array of strings containing alpn protocols. */
const char** grpc_fill_alpn_protocol_strings(size_t* num_alpn_protocols);
/* Initialize TSI SSL server/client handshaker factory. */
grpc_security_status grpc_ssl_tsi_client_handshaker_factory_init(
tsi_ssl_pem_key_cert_pair* key_cert_pair, const char* pem_root_certs,
tsi_ssl_session_cache* ssl_session_cache,
tsi_ssl_client_handshaker_factory** handshaker_factory);
grpc_security_status grpc_ssl_tsi_server_handshaker_factory_init(
tsi_ssl_pem_key_cert_pair* key_cert_pairs, size_t num_key_cert_pairs,
const char* pem_root_certs,
grpc_ssl_client_certificate_request_type client_certificate_request,
tsi_ssl_server_handshaker_factory** handshaker_factory);
/* Exposed for testing only. */
grpc_core::RefCountedPtr<grpc_auth_context> grpc_ssl_peer_to_auth_context(
const tsi_peer* peer);
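A short sketch (not part of the patch) of the client-side helper declared above; passing nullptr for pem_root_certs makes it fall back to the default root store, as in the implementation earlier in this change:
tsi_ssl_client_handshaker_factory* factory = nullptr;
grpc_security_status s = grpc_ssl_tsi_client_handshaker_factory_init(
    /*pem_key_cert_pair=*/nullptr, /*pem_root_certs=*/nullptr,
    /*ssl_session_cache=*/nullptr, &factory);
if (s == GRPC_SECURITY_OK) {
  /* ... create handshakers from the factory, then release the reference. */
  tsi_ssl_client_handshaker_factory_unref(factory);
}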

@ -0,0 +1,426 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/security/security_connector/tls/spiffe_security_connector.h"
#include <stdbool.h>
#include <string.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/security/credentials/ssl/ssl_credentials.h"
#include "src/core/lib/security/credentials/tls/spiffe_credentials.h"
#include "src/core/lib/security/security_connector/ssl_utils.h"
#include "src/core/lib/security/transport/security_handshaker.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security.h"
namespace {
tsi_ssl_pem_key_cert_pair* ConvertToTsiPemKeyCertPair(
const grpc_tls_key_materials_config::PemKeyCertPairList& cert_pair_list) {
tsi_ssl_pem_key_cert_pair* tsi_pairs = nullptr;
size_t num_key_cert_pairs = cert_pair_list.size();
if (num_key_cert_pairs > 0) {
GPR_ASSERT(cert_pair_list.data() != nullptr);
tsi_pairs = static_cast<tsi_ssl_pem_key_cert_pair*>(
gpr_zalloc(num_key_cert_pairs * sizeof(tsi_ssl_pem_key_cert_pair)));
}
for (size_t i = 0; i < num_key_cert_pairs; i++) {
GPR_ASSERT(cert_pair_list[i].private_key() != nullptr);
GPR_ASSERT(cert_pair_list[i].cert_chain() != nullptr);
tsi_pairs[i].cert_chain = gpr_strdup(cert_pair_list[i].cert_chain());
tsi_pairs[i].private_key = gpr_strdup(cert_pair_list[i].private_key());
}
return tsi_pairs;
}
/** -- Util function to populate SPIFFE server/channel credentials. -- */
grpc_core::RefCountedPtr<grpc_tls_key_materials_config>
PopulateSpiffeCredentials(const grpc_tls_credentials_options& options) {
GPR_ASSERT(options.credential_reload_config() != nullptr ||
options.key_materials_config() != nullptr);
grpc_core::RefCountedPtr<grpc_tls_key_materials_config> key_materials_config;
/* Use credential reload config to fetch credentials. */
if (options.credential_reload_config() != nullptr) {
grpc_tls_credential_reload_arg* arg =
grpc_core::New<grpc_tls_credential_reload_arg>();
key_materials_config = grpc_tls_key_materials_config_create()->Ref();
arg->key_materials_config = key_materials_config.get();
int result = options.credential_reload_config()->Schedule(arg);
if (result) {
/* Async credential reload is not supported. */
gpr_log(GPR_ERROR, "Async credential reload is unsupported now.");
} else {
grpc_ssl_certificate_config_reload_status status = arg->status;
if (status == GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED) {
gpr_log(GPR_DEBUG, "Credential does not change after reload.");
} else if (status == GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL) {
gpr_log(GPR_ERROR, "Credential reload failed with an error: %s",
arg->error_details);
}
}
gpr_free((void*)arg->error_details);
grpc_core::Delete(arg);
/* Use existing key materials config. */
} else {
key_materials_config = options.key_materials_config()->Ref();
}
return key_materials_config;
}
} // namespace
SpiffeChannelSecurityConnector::SpiffeChannelSecurityConnector(
grpc_core::RefCountedPtr<grpc_channel_credentials> channel_creds,
grpc_core::RefCountedPtr<grpc_call_credentials> request_metadata_creds,
const char* target_name, const char* overridden_target_name)
: grpc_channel_security_connector(GRPC_SSL_URL_SCHEME,
std::move(channel_creds),
std::move(request_metadata_creds)),
overridden_target_name_(overridden_target_name == nullptr
? nullptr
: gpr_strdup(overridden_target_name)) {
check_arg_ = ServerAuthorizationCheckArgCreate(this);
char* port;
gpr_split_host_port(target_name, &target_name_, &port);
gpr_free(port);
}
SpiffeChannelSecurityConnector::~SpiffeChannelSecurityConnector() {
if (target_name_ != nullptr) {
gpr_free(target_name_);
}
if (overridden_target_name_ != nullptr) {
gpr_free(overridden_target_name_);
}
if (client_handshaker_factory_ != nullptr) {
tsi_ssl_client_handshaker_factory_unref(client_handshaker_factory_);
}
ServerAuthorizationCheckArgDestroy(check_arg_);
}
void SpiffeChannelSecurityConnector::add_handshakers(
grpc_pollset_set* interested_parties,
grpc_core::HandshakeManager* handshake_mgr) {
// Instantiate TSI handshaker.
tsi_handshaker* tsi_hs = nullptr;
tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
client_handshaker_factory_,
overridden_target_name_ != nullptr ? overridden_target_name_
: target_name_,
&tsi_hs);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
return;
}
// Create handshakers.
handshake_mgr->Add(grpc_core::SecurityHandshakerCreate(tsi_hs, this));
}
void SpiffeChannelSecurityConnector::check_peer(
tsi_peer peer, grpc_endpoint* ep,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) {
const char* target_name = overridden_target_name_ != nullptr
? overridden_target_name_
: target_name_;
grpc_error* error = grpc_ssl_check_alpn(&peer);
if (error != GRPC_ERROR_NONE) {
GRPC_CLOSURE_SCHED(on_peer_checked, error);
tsi_peer_destruct(&peer);
return;
}
*auth_context = grpc_ssl_peer_to_auth_context(&peer);
const SpiffeCredentials* creds =
static_cast<const SpiffeCredentials*>(channel_creds());
const grpc_tls_server_authorization_check_config* config =
creds->options().server_authorization_check_config();
/* If server authorization config is not null, use it to perform
* server authorization check. */
if (config != nullptr) {
const tsi_peer_property* p =
tsi_peer_get_property_by_name(&peer, TSI_X509_PEM_CERT_PROPERTY);
if (p == nullptr) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Cannot check peer: missing pem cert property.");
} else {
char* peer_pem = static_cast<char*>(gpr_malloc(p->value.length + 1));
memcpy(peer_pem, p->value.data, p->value.length);
peer_pem[p->value.length] = '\0';
GPR_ASSERT(check_arg_ != nullptr);
check_arg_->peer_cert = check_arg_->peer_cert == nullptr
? gpr_strdup(peer_pem)
: check_arg_->peer_cert;
check_arg_->target_name = check_arg_->target_name == nullptr
? gpr_strdup(target_name)
: check_arg_->target_name;
on_peer_checked_ = on_peer_checked;
gpr_free(peer_pem);
int callback_status = config->Schedule(check_arg_);
/* Server authorization check is handled asynchronously. */
if (callback_status) {
tsi_peer_destruct(&peer);
return;
}
/* Server authorization check is handled synchronously. */
error = ProcessServerAuthorizationCheckResult(check_arg_);
}
}
GRPC_CLOSURE_SCHED(on_peer_checked, error);
tsi_peer_destruct(&peer);
}
int SpiffeChannelSecurityConnector::cmp(
const grpc_security_connector* other_sc) const {
auto* other =
reinterpret_cast<const SpiffeChannelSecurityConnector*>(other_sc);
int c = channel_security_connector_cmp(other);
if (c != 0) {
return c;
}
return grpc_ssl_cmp_target_name(target_name_, other->target_name_,
overridden_target_name_,
other->overridden_target_name_);
}
bool SpiffeChannelSecurityConnector::check_call_host(
const char* host, grpc_auth_context* auth_context,
grpc_closure* on_call_host_checked, grpc_error** error) {
return grpc_ssl_check_call_host(host, target_name_, overridden_target_name_,
auth_context, on_call_host_checked, error);
}
void SpiffeChannelSecurityConnector::cancel_check_call_host(
grpc_closure* on_call_host_checked, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
grpc_core::RefCountedPtr<grpc_channel_security_connector>
SpiffeChannelSecurityConnector::CreateSpiffeChannelSecurityConnector(
grpc_core::RefCountedPtr<grpc_channel_credentials> channel_creds,
grpc_core::RefCountedPtr<grpc_call_credentials> request_metadata_creds,
const char* target_name, const char* overridden_target_name,
tsi_ssl_session_cache* ssl_session_cache) {
if (channel_creds == nullptr) {
gpr_log(GPR_ERROR,
"channel_creds is nullptr in "
"SpiffeChannelSecurityConnectorCreate()");
return nullptr;
}
if (target_name == nullptr) {
gpr_log(GPR_ERROR,
"target_name is nullptr in "
"SpiffeChannelSecurityConnectorCreate()");
return nullptr;
}
grpc_core::RefCountedPtr<SpiffeChannelSecurityConnector> c =
grpc_core::MakeRefCounted<SpiffeChannelSecurityConnector>(
std::move(channel_creds), std::move(request_metadata_creds),
target_name, overridden_target_name);
if (c->InitializeHandshakerFactory(ssl_session_cache) != GRPC_SECURITY_OK) {
return nullptr;
}
return c;
}
grpc_security_status
SpiffeChannelSecurityConnector::InitializeHandshakerFactory(
tsi_ssl_session_cache* ssl_session_cache) {
const SpiffeCredentials* creds =
static_cast<const SpiffeCredentials*>(channel_creds());
auto key_materials_config = PopulateSpiffeCredentials(creds->options());
if (key_materials_config->pem_key_cert_pair_list().empty()) {
key_materials_config->Unref();
return GRPC_SECURITY_ERROR;
}
tsi_ssl_pem_key_cert_pair* pem_key_cert_pair = ConvertToTsiPemKeyCertPair(
key_materials_config->pem_key_cert_pair_list());
grpc_security_status status = grpc_ssl_tsi_client_handshaker_factory_init(
pem_key_cert_pair, key_materials_config->pem_root_certs(),
ssl_session_cache, &client_handshaker_factory_);
// Free memory.
key_materials_config->Unref();
grpc_tsi_ssl_pem_key_cert_pairs_destroy(pem_key_cert_pair, 1);
return status;
}
void SpiffeChannelSecurityConnector::ServerAuthorizationCheckDone(
grpc_tls_server_authorization_check_arg* arg) {
GPR_ASSERT(arg != nullptr);
grpc_core::ExecCtx exec_ctx;
grpc_error* error = ProcessServerAuthorizationCheckResult(arg);
SpiffeChannelSecurityConnector* connector =
static_cast<SpiffeChannelSecurityConnector*>(arg->cb_user_data);
GRPC_CLOSURE_SCHED(connector->on_peer_checked_, error);
}
grpc_error*
SpiffeChannelSecurityConnector::ProcessServerAuthorizationCheckResult(
grpc_tls_server_authorization_check_arg* arg) {
grpc_error* error = GRPC_ERROR_NONE;
char* msg = nullptr;
/* Server authorization check is cancelled by caller. */
if (arg->status == GRPC_STATUS_CANCELLED) {
gpr_asprintf(&msg,
"Server authorization check is cancelled by the caller with "
"error: %s",
arg->error_details);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
} else if (arg->status == GRPC_STATUS_OK) {
/* Server authorization check completed successfully but returned check
* failure. */
if (!arg->success) {
gpr_asprintf(&msg, "Server authorization check failed with error: %s",
arg->error_details);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
}
/* Server authorization check did not complete correctly. */
} else {
gpr_asprintf(
&msg,
"Server authorization check did not finish correctly with error: %s",
arg->error_details);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
}
gpr_free(msg);
return error;
}
grpc_tls_server_authorization_check_arg*
SpiffeChannelSecurityConnector::ServerAuthorizationCheckArgCreate(
void* user_data) {
grpc_tls_server_authorization_check_arg* arg =
grpc_core::New<grpc_tls_server_authorization_check_arg>();
arg->cb = ServerAuthorizationCheckDone;
arg->cb_user_data = user_data;
arg->status = GRPC_STATUS_OK;
return arg;
}
void SpiffeChannelSecurityConnector::ServerAuthorizationCheckArgDestroy(
grpc_tls_server_authorization_check_arg* arg) {
if (arg == nullptr) {
return;
}
gpr_free((void*)arg->target_name);
gpr_free((void*)arg->peer_cert);
gpr_free((void*)arg->error_details);
grpc_core::Delete(arg);
}
SpiffeServerSecurityConnector::SpiffeServerSecurityConnector(
grpc_core::RefCountedPtr<grpc_server_credentials> server_creds)
: grpc_server_security_connector(GRPC_SSL_URL_SCHEME,
std::move(server_creds)) {}
SpiffeServerSecurityConnector::~SpiffeServerSecurityConnector() {
if (server_handshaker_factory_ != nullptr) {
tsi_ssl_server_handshaker_factory_unref(server_handshaker_factory_);
}
}
void SpiffeServerSecurityConnector::add_handshakers(
grpc_pollset_set* interested_parties,
grpc_core::HandshakeManager* handshake_mgr) {
/* Create a TLS SPIFFE TSI handshaker for server. */
RefreshServerHandshakerFactory();
tsi_handshaker* tsi_hs = nullptr;
tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
server_handshaker_factory_, &tsi_hs);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
return;
}
handshake_mgr->Add(grpc_core::SecurityHandshakerCreate(tsi_hs, this));
}
void SpiffeServerSecurityConnector::check_peer(
tsi_peer peer, grpc_endpoint* ep,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) {
grpc_error* error = grpc_ssl_check_alpn(&peer);
*auth_context = grpc_ssl_peer_to_auth_context(&peer);
tsi_peer_destruct(&peer);
GRPC_CLOSURE_SCHED(on_peer_checked, error);
}
int SpiffeServerSecurityConnector::cmp(
const grpc_security_connector* other) const {
return server_security_connector_cmp(
static_cast<const grpc_server_security_connector*>(other));
}
grpc_core::RefCountedPtr<grpc_server_security_connector>
SpiffeServerSecurityConnector::CreateSpiffeServerSecurityConnector(
grpc_core::RefCountedPtr<grpc_server_credentials> server_creds) {
if (server_creds == nullptr) {
gpr_log(GPR_ERROR,
"server_creds is nullptr in "
"SpiffeServerSecurityConnectorCreate()");
return nullptr;
}
grpc_core::RefCountedPtr<SpiffeServerSecurityConnector> c =
grpc_core::MakeRefCounted<SpiffeServerSecurityConnector>(
std::move(server_creds));
if (c->RefreshServerHandshakerFactory() != GRPC_SECURITY_OK) {
return nullptr;
}
return c;
}
grpc_security_status
SpiffeServerSecurityConnector::RefreshServerHandshakerFactory() {
const SpiffeServerCredentials* creds =
static_cast<const SpiffeServerCredentials*>(server_creds());
auto key_materials_config = PopulateSpiffeCredentials(creds->options());
/* Credential reload does NOT take effect and we need to keep using
* the existing handshaker factory. */
if (key_materials_config->pem_key_cert_pair_list().empty()) {
key_materials_config->Unref();
return GRPC_SECURITY_ERROR;
}
/* Credential reload takes effect and we need to free the existing
* handshaker factory. */
if (server_handshaker_factory_) {
tsi_ssl_server_handshaker_factory_unref(server_handshaker_factory_);
}
tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs = ConvertToTsiPemKeyCertPair(
key_materials_config->pem_key_cert_pair_list());
size_t num_key_cert_pairs =
key_materials_config->pem_key_cert_pair_list().size();
grpc_security_status status = grpc_ssl_tsi_server_handshaker_factory_init(
pem_key_cert_pairs, num_key_cert_pairs,
key_materials_config->pem_root_certs(),
creds->options().cert_request_type(), &server_handshaker_factory_);
// Free memory.
key_materials_config->Unref();
grpc_tsi_ssl_pem_key_cert_pairs_destroy(pem_key_cert_pairs,
num_key_cert_pairs);
return status;
}
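For context (a sketch, not part of the patch): when a server authorization check config is installed, Schedule() returning 0 means the check completed synchronously and ProcessServerAuthorizationCheckResult() is consulted immediately, while a non-zero return means the result arrives later via ServerAuthorizationCheckDone(). A synchronous check could look roughly like this; how it is registered through the experimental C API is not shown, and the callback signature here is an assumption.
/* Accept every peer; a real check would inspect arg->peer_cert and
   arg->target_name before deciding. */
static int SyncServerAuthzCheck(grpc_tls_server_authorization_check_arg* arg) {
  arg->success = 1;
  arg->status = GRPC_STATUS_OK;
  return 0; /* 0 => handled synchronously */
}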

@ -0,0 +1,122 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_TLS_SPIFFE_SECURITY_CONNECTOR_H
#define GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_TLS_SPIFFE_SECURITY_CONNECTOR_H
#include <grpc/support/port_platform.h>
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
#define GRPC_TLS_SPIFFE_TRANSPORT_SECURITY_TYPE "spiffe"
// Spiffe channel security connector.
class SpiffeChannelSecurityConnector final
: public grpc_channel_security_connector {
public:
// static factory method to create a SPIFFE channel security connector.
static grpc_core::RefCountedPtr<grpc_channel_security_connector>
CreateSpiffeChannelSecurityConnector(
grpc_core::RefCountedPtr<grpc_channel_credentials> channel_creds,
grpc_core::RefCountedPtr<grpc_call_credentials> request_metadata_creds,
const char* target_name, const char* overridden_target_name,
tsi_ssl_session_cache* ssl_session_cache);
SpiffeChannelSecurityConnector(
grpc_core::RefCountedPtr<grpc_channel_credentials> channel_creds,
grpc_core::RefCountedPtr<grpc_call_credentials> request_metadata_creds,
const char* target_name, const char* overridden_target_name);
~SpiffeChannelSecurityConnector() override;
void add_handshakers(grpc_pollset_set* interested_parties,
grpc_core::HandshakeManager* handshake_mgr) override;
void check_peer(tsi_peer peer, grpc_endpoint* ep,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) override;
int cmp(const grpc_security_connector* other_sc) const override;
bool check_call_host(const char* host, grpc_auth_context* auth_context,
grpc_closure* on_call_host_checked,
grpc_error** error) override;
void cancel_check_call_host(grpc_closure* on_call_host_checked,
grpc_error* error) override;
private:
// Initialize SSL TSI client handshaker factory.
grpc_security_status InitializeHandshakerFactory(
tsi_ssl_session_cache* ssl_session_cache);
// gRPC-provided callback executed by the application, which serves to bring
// control back to gRPC core.
static void ServerAuthorizationCheckDone(
grpc_tls_server_authorization_check_arg* arg);
// A util function to process server authorization check result.
static grpc_error* ProcessServerAuthorizationCheckResult(
grpc_tls_server_authorization_check_arg* arg);
// A util function to create a server authorization check arg instance.
static grpc_tls_server_authorization_check_arg*
ServerAuthorizationCheckArgCreate(void* user_data);
// A util function to destroy a server authorization check arg instance.
static void ServerAuthorizationCheckArgDestroy(
grpc_tls_server_authorization_check_arg* arg);
grpc_closure* on_peer_checked_;
char* target_name_;
char* overridden_target_name_;
tsi_ssl_client_handshaker_factory* client_handshaker_factory_ = nullptr;
grpc_tls_server_authorization_check_arg* check_arg_;
};
// Spiffe server security connector.
class SpiffeServerSecurityConnector final
: public grpc_server_security_connector {
public:
// static factory method to create a SPIFFE server security connector.
static grpc_core::RefCountedPtr<grpc_server_security_connector>
CreateSpiffeServerSecurityConnector(
grpc_core::RefCountedPtr<grpc_server_credentials> server_creds);
explicit SpiffeServerSecurityConnector(
grpc_core::RefCountedPtr<grpc_server_credentials> server_creds);
~SpiffeServerSecurityConnector() override;
void add_handshakers(grpc_pollset_set* interested_parties,
grpc_core::HandshakeManager* handshake_mgr) override;
void check_peer(tsi_peer peer, grpc_endpoint* ep,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) override;
int cmp(const grpc_security_connector* other) const override;
private:
// A util function to refresh SSL TSI server handshaker factory with a valid
// credential.
grpc_security_status RefreshServerHandshakerFactory();
tsi_ssl_server_handshaker_factory* server_handshaker_factory_ = nullptr;
};
#endif /* GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_TLS_SPIFFE_SECURITY_CONNECTOR_H \
*/

@ -255,7 +255,7 @@ static void on_read(void* user_data, grpc_error* error) {
}
static void endpoint_read(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
grpc_closure* cb) {
grpc_closure* cb, bool urgent) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
ep->read_cb = cb;
ep->read_buffer = slices;
@ -269,7 +269,7 @@ static void endpoint_read(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
return;
}
grpc_endpoint_read(ep->wrapped_ep, &ep->source_buffer, &ep->on_read);
grpc_endpoint_read(ep->wrapped_ep, &ep->source_buffer, &ep->on_read, urgent);
}
static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,

@ -283,7 +283,7 @@ grpc_error* SecurityHandshaker::OnHandshakeNextDoneLocked(
if (result == TSI_INCOMPLETE_DATA) {
GPR_ASSERT(bytes_to_send_size == 0);
grpc_endpoint_read(args_->endpoint, args_->read_buffer,
&on_handshake_data_received_from_peer_);
&on_handshake_data_received_from_peer_, /*urgent=*/true);
return error;
}
if (result != TSI_OK) {
@ -306,7 +306,7 @@ grpc_error* SecurityHandshaker::OnHandshakeNextDoneLocked(
} else if (handshaker_result == nullptr) {
// There is nothing to send, but need to read from peer.
grpc_endpoint_read(args_->endpoint, args_->read_buffer,
&on_handshake_data_received_from_peer_);
&on_handshake_data_received_from_peer_, /*urgent=*/true);
} else {
// Handshake has finished, check peer and so on.
error = CheckPeerLocked();
@ -382,7 +382,8 @@ void SecurityHandshaker::OnHandshakeDataSentToPeerFn(void* arg,
// We may be done.
if (h->handshaker_result_ == nullptr) {
grpc_endpoint_read(h->args_->endpoint, h->args_->read_buffer,
&h->on_handshake_data_received_from_peer_);
&h->on_handshake_data_received_from_peer_,
/*urgent=*/true);
} else {
error = h->CheckPeerLocked();
if (error != GRPC_ERROR_NONE) {

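The hunks above thread a new urgent flag through grpc_endpoint_read(): handshake-driven reads pass /*urgent=*/true, and the secure endpoint forwards whatever its caller requested. A regular data-path read would therefore look like the following sketch (variable names hypothetical):
grpc_endpoint_read(endpoint, &incoming_buffer, &on_read_done,
                   /*urgent=*/false);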
@ -91,23 +91,6 @@ void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader) {
}
}
int grpc_byte_buffer_reader_peek(grpc_byte_buffer_reader* reader,
grpc_slice** slice) {
switch (reader->buffer_in->type) {
case GRPC_BB_RAW: {
grpc_slice_buffer* slice_buffer;
slice_buffer = &reader->buffer_out->data.raw.slice_buffer;
if (reader->current.index < slice_buffer->count) {
*slice = &slice_buffer->slices[reader->current.index];
reader->current.index += 1;
return 1;
}
break;
}
}
return 0;
}
int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
grpc_slice* slice) {
switch (reader->buffer_in->type) {

@ -344,18 +344,24 @@ static tsi_result add_subject_alt_names_properties_to_peer(
size_t subject_alt_name_count) {
size_t i;
tsi_result result = TSI_OK;
/* Reset the property count before re-adding only the SAN entries kept below. */
peer->property_count -= subject_alt_name_count;
for (i = 0; i < subject_alt_name_count; i++) {
GENERAL_NAME* subject_alt_name =
sk_GENERAL_NAME_value(subject_alt_names, TSI_SIZE_AS_SIZE(i));
/* Keep DNS, email, and URI entries; IP addresses are handled separately below. */
if (subject_alt_name->type == GEN_DNS) {
if (subject_alt_name->type == GEN_DNS ||
subject_alt_name->type == GEN_EMAIL ||
subject_alt_name->type == GEN_URI) {
unsigned char* name = nullptr;
int name_size;
name_size = ASN1_STRING_to_UTF8(&name, subject_alt_name->d.dNSName);
if (subject_alt_name->type == GEN_DNS) {
name_size = ASN1_STRING_to_UTF8(&name, subject_alt_name->d.dNSName);
} else if (subject_alt_name->type == GEN_EMAIL) {
name_size = ASN1_STRING_to_UTF8(&name, subject_alt_name->d.rfc822Name);
} else {
name_size = ASN1_STRING_to_UTF8(
&name, subject_alt_name->d.uniformResourceIdentifier);
}
if (name_size < 0) {
gpr_log(GPR_ERROR, "Could not get utf8 from asn1 string.");
result = TSI_INTERNAL_ERROR;
@ -369,7 +375,6 @@ static tsi_result add_subject_alt_names_properties_to_peer(
} else if (subject_alt_name->type == GEN_IPADD) {
char ntop_buf[INET6_ADDRSTRLEN];
int af;
if (subject_alt_name->d.iPAddress->length == 4) {
af = AF_INET;
} else if (subject_alt_name->d.iPAddress->length == 16) {
@ -386,7 +391,6 @@ static tsi_result add_subject_alt_names_properties_to_peer(
result = TSI_INTERNAL_ERROR;
break;
}
result = tsi_construct_string_peer_property_from_cstring(
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, name,
&peer->properties[peer->property_count++]);
@ -1017,7 +1021,6 @@ static void tsi_ssl_handshaker_factory_init(
}
/* --- tsi_handshaker_result methods implementation. ---*/
static tsi_result ssl_handshaker_result_extract_peer(
const tsi_handshaker_result* self, tsi_peer* peer) {
tsi_result result = TSI_OK;
@ -1025,6 +1028,7 @@ static tsi_result ssl_handshaker_result_extract_peer(
unsigned int alpn_selected_len;
const tsi_ssl_handshaker_result* impl =
reinterpret_cast<const tsi_ssl_handshaker_result*>(self);
// TODO(yihuazhang): Return a full certificate chain as a peer property.
X509* peer_cert = SSL_get_peer_certificate(impl->ssl);
if (peer_cert != nullptr) {
result = peer_from_x509(peer_cert, 1, peer);
@ -1066,7 +1070,6 @@ static tsi_result ssl_handshaker_result_extract_peer(
&peer->properties[peer->property_count]);
if (result != TSI_OK) return result;
peer->property_count++;
return result;
}
@ -1400,7 +1403,6 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX* ctx, int is_client,
static_cast<unsigned char*>(gpr_zalloc(impl->outgoing_bytes_buffer_size));
impl->base.vtable = &handshaker_vtable;
impl->factory_ref = tsi_ssl_handshaker_factory_ref(factory);
*handshaker = &impl->base;
return TSI_OK;
}
@ -1634,7 +1636,6 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
const char** alpn_protocols, uint16_t num_alpn_protocols,
tsi_ssl_client_handshaker_factory** factory) {
tsi_ssl_client_handshaker_options options;
memset(&options, 0, sizeof(options));
options.pem_key_cert_pair = pem_key_cert_pair;
options.pem_root_certs = pem_root_certs;
options.cipher_suites = cipher_suites;
@ -1764,7 +1765,6 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
const char* cipher_suites, const char** alpn_protocols,
uint16_t num_alpn_protocols, tsi_ssl_server_handshaker_factory** factory) {
tsi_ssl_server_handshaker_options options;
memset(&options, 0, sizeof(options));
options.pem_key_cert_pairs = pem_key_cert_pairs;
options.num_key_cert_pairs = num_key_cert_pairs;
options.pem_client_root_certs = pem_client_root_certs;

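The SAN handling above now surfaces email and URI entries in addition to DNS names, which is what makes SPIFFE identities (URI SANs) visible to the security layer. A sketch (not part of the patch) of reading them back from a tsi_peer obtained via tsi_handshaker_result_extract_peer():
/* Sketch: every kept SAN shares the same peer property name. */
for (size_t i = 0; i < peer.property_count; i++) {
  const tsi_peer_property* prop = &peer.properties[i];
  if (strcmp(prop->name, TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
    gpr_log(GPR_INFO, "SAN: %.*s", static_cast<int>(prop->value.length),
            prop->value.data);
  }
}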
@ -139,11 +139,6 @@ int CoreCodegen::grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
return ::grpc_byte_buffer_reader_next(reader, slice);
}
int CoreCodegen::grpc_byte_buffer_reader_peek(grpc_byte_buffer_reader* reader,
grpc_slice** slice) {
return ::grpc_byte_buffer_reader_peek(reader, slice);
}
grpc_byte_buffer* CoreCodegen::grpc_raw_byte_buffer_create(grpc_slice* slice,
size_t nslices) {
return ::grpc_raw_byte_buffer_create(slice, nslices);

@ -44,8 +44,7 @@ ServerBuilder::ServerBuilder()
: max_receive_message_size_(INT_MIN),
max_send_message_size_(INT_MIN),
sync_server_settings_(SyncServerSettings()),
resource_quota_(nullptr),
generic_service_(nullptr) {
resource_quota_(nullptr) {
gpr_once_init(&once_init_plugin_list, do_plugin_list_init);
for (auto it = g_plugin_factory_list->begin();
it != g_plugin_factory_list->end(); it++) {
@ -91,9 +90,9 @@ ServerBuilder& ServerBuilder::RegisterService(const grpc::string& addr,
ServerBuilder& ServerBuilder::RegisterAsyncGenericService(
grpc::AsyncGenericService* service) {
if (generic_service_) {
if (generic_service_ || callback_generic_service_) {
gpr_log(GPR_ERROR,
"Adding multiple AsyncGenericService is unsupported for now. "
"Adding multiple generic services is unsupported for now. "
"Dropping the service %p",
(void*)service);
} else {
@ -102,6 +101,19 @@ ServerBuilder& ServerBuilder::RegisterAsyncGenericService(
return *this;
}
ServerBuilder& ServerBuilder::experimental_type::RegisterCallbackGenericService(
grpc::experimental::CallbackGenericService* service) {
if (builder_->generic_service_ || builder_->callback_generic_service_) {
gpr_log(GPR_ERROR,
"Adding multiple generic services is unsupported for now. "
"Dropping the service %p",
(void*)service);
} else {
builder_->callback_generic_service_ = service;
}
return *builder_;
}
ServerBuilder& ServerBuilder::SetOption(
std::unique_ptr<grpc::ServerBuilderOption> option) {
options_.push_back(std::move(option));
@ -311,7 +323,7 @@ std::unique_ptr<grpc::Server> ServerBuilder::BuildAndStart() {
has_frequently_polled_cqs = true;
}
if (has_callback_methods) {
if (has_callback_methods || callback_generic_service_ != nullptr) {
auto* cq = server->CallbackCQ();
grpc_server_register_completion_queue(server->server_, cq->cq(), nullptr);
}
@ -345,6 +357,8 @@ std::unique_ptr<grpc::Server> ServerBuilder::BuildAndStart() {
if (generic_service_) {
server->RegisterAsyncGenericService(generic_service_);
} else if (callback_generic_service_) {
server->RegisterCallbackGenericService(callback_generic_service_);
} else {
for (auto it = services_.begin(); it != services_.end(); ++it) {
if ((*it)->service->has_generic_methods()) {

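A sketch (not part of the patch) of wiring the new callback generic service into a server. MyGenericService is a hypothetical subclass of grpc::experimental::CallbackGenericService (declared in async_generic_service.h, also touched by this change); only the registration call is taken from the hunks above.
grpc::ServerBuilder builder;
MyGenericService generic_service; // hypothetical CallbackGenericService subclass
builder.experimental().RegisterCallbackGenericService(&generic_service);
builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
std::unique_ptr<grpc::Server> server = builder.BuildAndStart();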
@ -19,6 +19,7 @@
#include <cstdlib>
#include <sstream>
#include <type_traits>
#include <utility>
#include <grpc/grpc.h>
@ -348,8 +349,24 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
grpc_completion_queue* cq_;
};
class Server::CallbackRequest final : public internal::CompletionQueueTag {
class Server::CallbackRequestBase : public internal::CompletionQueueTag {
public:
virtual ~CallbackRequestBase() {}
virtual bool Request() = 0;
};
template <class ServerContextType>
class Server::CallbackRequest final : public Server::CallbackRequestBase {
public:
static_assert(std::is_base_of<ServerContext, ServerContextType>::value,
"ServerContextType must be derived from ServerContext");
// The constructor needs to know the server for this callback request and its
// index in the server's request count array to allow for proper dynamic
// requesting of incoming RPCs. For codegen services, the values of method and
// method_tag represent the defined characteristics of the method being
// requested. For generic services, method and method_tag are nullptr since
// these services don't have pre-defined methods or method registration tags.
CallbackRequest(Server* server, size_t method_idx,
internal::RpcServiceMethod* method, void* method_tag)
: server_(server),
@ -357,8 +374,9 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
method_(method),
method_tag_(method_tag),
has_request_payload_(
method->method_type() == internal::RpcMethod::NORMAL_RPC ||
method->method_type() == internal::RpcMethod::SERVER_STREAMING),
method_ != nullptr &&
(method->method_type() == internal::RpcMethod::NORMAL_RPC ||
method->method_type() == internal::RpcMethod::SERVER_STREAMING)),
cq_(server->CallbackCQ()),
tag_(this) {
server_->callback_reqs_outstanding_++;
@ -376,7 +394,7 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
}
}
bool Request() {
bool Request() override {
if (method_tag_) {
if (GRPC_CALL_OK !=
grpc_server_request_registered_call(
@ -400,12 +418,18 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
return true;
}
bool FinalizeResult(void** tag, bool* status) override { return false; }
// Needs specialization to account for different processing of metadata
// in generic API
bool FinalizeResult(void** tag, bool* status) override;
private:
// method_name() needs separate specializations for named methods and the generic case
const char* method_name() const;
class CallbackCallTag : public grpc_experimental_completion_queue_functor {
public:
CallbackCallTag(Server::CallbackRequest* req) : req_(req) {
CallbackCallTag(Server::CallbackRequest<ServerContextType>* req)
: req_(req) {
functor_run = &CallbackCallTag::StaticRun;
}
@ -415,7 +439,7 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
void force_run(bool ok) { Run(ok); }
private:
Server::CallbackRequest* req_;
Server::CallbackRequest<ServerContextType>* req_;
internal::Call* call_;
static void StaticRun(grpc_experimental_completion_queue_functor* cb,
@@ -446,8 +470,9 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
if (count == 0 || (count < SOFT_MINIMUM_SPARE_CALLBACK_REQS_PER_METHOD &&
req_->server_->callback_reqs_outstanding_ <
SOFT_MAXIMUM_CALLBACK_REQS_OUTSTANDING)) {
auto* new_req = new CallbackRequest(req_->server_, req_->method_index_,
req_->method_, req_->method_tag_);
auto* new_req = new CallbackRequest<ServerContextType>(
req_->server_, req_->method_index_, req_->method_,
req_->method_tag_);
if (!new_req->Request()) {
// The server must have just decided to shutdown.
gpr_atm_no_barrier_fetch_add(
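The hunk above is the refill heuristic for callback requests: when a request is matched to an incoming call, a replacement CallbackRequest is allocated only if the method has run out of spare requests, or if it is below a soft per-method minimum while the server-wide outstanding count is still under a soft maximum. A self-contained restatement of that condition (names hypothetical; the thresholds correspond to SOFT_MINIMUM_SPARE_CALLBACK_REQS_PER_METHOD and SOFT_MAXIMUM_CALLBACK_REQS_OUTSTANDING in the patch):

// Decide whether to allocate a replacement callback request after a match.
bool ShouldAllocateReplacement(int spare_reqs_for_method,
                               int reqs_outstanding_server_wide,
                               int soft_min_spare_per_method,
                               int soft_max_outstanding) {
  return spare_reqs_for_method == 0 ||
         (spare_reqs_for_method < soft_min_spare_per_method &&
          reqs_outstanding_server_wide < soft_max_outstanding);
}
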
@@ -467,12 +492,14 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
// Create a C++ Call to control the underlying core call
call_ = new (grpc_call_arena_alloc(req_->call_, sizeof(internal::Call)))
internal::Call(
req_->call_, req_->server_, req_->cq_,
req_->server_->max_receive_message_size(),
req_->ctx_.set_server_rpc_info(
req_->method_->name(), req_->method_->method_type(),
req_->server_->interceptor_creators_));
internal::Call(req_->call_, req_->server_, req_->cq_,
req_->server_->max_receive_message_size(),
req_->ctx_.set_server_rpc_info(
req_->method_name(),
(req_->method_ != nullptr)
? req_->method_->method_type()
: internal::RpcMethod::BIDI_STREAMING,
req_->server_->interceptor_creators_));
req_->interceptor_methods_.SetCall(call_);
req_->interceptor_methods_.SetReverse();
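The hunk above builds the internal::Call and its ServerRpcInfo using the new method_name() accessor and, because a generic request has no registered RpcServiceMethod, falls back to describing the RPC as BIDI_STREAMING, the most general shape. A tiny stand-alone restatement of that fallback (the types are hypothetical stand-ins for internal::RpcServiceMethod / internal::RpcMethod):

enum class RpcType { NORMAL_RPC, CLIENT_STREAMING, SERVER_STREAMING, BIDI_STREAMING };

struct RegisteredMethod { RpcType type; };

// With no registered method (the generic case), report the most general type.
RpcType EffectiveRpcType(const RegisteredMethod* method) {
  return method != nullptr ? method->type : RpcType::BIDI_STREAMING;
}
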
@@ -501,31 +528,32 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
}
}
void ContinueRunAfterInterception() {
req_->method_->handler()->RunHandler(
internal::MethodHandler::HandlerParameter(
call_, &req_->ctx_, req_->request_, req_->request_status_,
[this] {
// Recycle this request if there aren't too many outstanding.
// Note that we don't have to worry about a case where there
// are no requests waiting to match for this method since that
// is already taken care of when binding a request to a call.
// TODO(vjpai): Also don't recycle this request if the dynamic
// load no longer justifies it. Consider measuring
// dynamic load and setting a target accordingly.
if (req_->server_->callback_reqs_outstanding_ <
SOFT_MAXIMUM_CALLBACK_REQS_OUTSTANDING) {
req_->Clear();
req_->Setup();
} else {
// We can free up this request because there are too many
delete req_;
return;
}
if (!req_->Request()) {
// The server must have just decided to shutdown.
delete req_;
}
}));
auto* handler = (req_->method_ != nullptr)
? req_->method_->handler()
: req_->server_->generic_handler_.get();
handler->RunHandler(internal::MethodHandler::HandlerParameter(
call_, &req_->ctx_, req_->request_, req_->request_status_, [this] {
// Recycle this request if there aren't too many outstanding.
// Note that we don't have to worry about a case where there
// are no requests waiting to match for this method since that
// is already taken care of when binding a request to a call.
// TODO(vjpai): Also don't recycle this request if the dynamic
// load no longer justifies it. Consider measuring
// dynamic load and setting a target accordingly.
if (req_->server_->callback_reqs_outstanding_ <
SOFT_MAXIMUM_CALLBACK_REQS_OUTSTANDING) {
req_->Clear();
req_->Setup();
} else {
// We can free up this request because there are too many
delete req_;
return;
}
if (!req_->Request()) {
// The server must have just decided to shutdown.
delete req_;
}
}));
}
};
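The completion lambda above decides, once a handler finishes, whether the request object is recycled (Clear(), Setup(), then re-Request()ed) or deleted because the server already has enough callback requests outstanding; a failed re-Request() signals shutdown and also deletes it. A compact lifecycle sketch with hypothetical types:

// Hypothetical request object mirroring the recycle-or-delete decision above.
struct CallbackReq {
  void Clear() {}                  // drop per-RPC state (payload, metadata, context)
  void Setup() {}                  // re-initialize for the next incoming RPC
  bool Request() { return true; }  // ask the core server for another call to match
};

void OnRpcDone(CallbackReq* req, int outstanding, int soft_max_outstanding) {
  if (outstanding >= soft_max_outstanding) {
    delete req;  // enough requests already outstanding; free this one
    return;
  }
  req->Clear();
  req->Setup();
  if (!req->Request()) {
    delete req;  // server is shutting down; nothing left to request
  }
}
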
@@ -553,7 +581,7 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
}
Server* const server_;
size_t method_index_;
const size_t method_index_;
internal::RpcServiceMethod* const method_;
void* const method_tag_;
const bool has_request_payload_;
@@ -566,10 +594,39 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
grpc_metadata_array request_metadata_;
CompletionQueue* cq_;
CallbackCallTag tag_;
ServerContext ctx_;
ServerContextType ctx_;
internal::InterceptorBatchMethodsImpl interceptor_methods_;
};
template <>
bool Server::CallbackRequest<ServerContext>::FinalizeResult(void** tag,
bool* status) {
return false;
}
template <>
bool Server::CallbackRequest<GenericServerContext>::FinalizeResult(
void** tag, bool* status) {
if (*status) {
// TODO(yangg) remove the copy here
ctx_.method_ = StringFromCopiedSlice(call_details_->method);
ctx_.host_ = StringFromCopiedSlice(call_details_->host);
}
grpc_slice_unref(call_details_->method);
grpc_slice_unref(call_details_->host);
return false;
}
template <>
const char* Server::CallbackRequest<ServerContext>::method_name() const {
return method_->name();
}
template <>
const char* Server::CallbackRequest<GenericServerContext>::method_name() const {
return ctx_.method().c_str();
}
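The two pairs of specializations above are the only places where the named and generic flavors of CallbackRequest diverge: a codegen method already knows its name and needs no FinalizeResult work, while a generic request must copy the method and host out of grpc_call_details into the GenericServerContext and read the name back from there. The same specialization pattern, reduced to a self-contained example with hypothetical types:

#include <string>

struct NamedContext { std::string registered_name{"/pkg.Service/Method"}; };
struct GenericContext { std::string method_from_wire{"/unknown.Service/Call"}; };

template <class Context>
struct Request {
  const char* method_name() const;  // specialized per context type below
  Context ctx;
};

// Codegen case: the name is a property of the registered method.
template <>
const char* Request<NamedContext>::method_name() const {
  return ctx.registered_name.c_str();
}

// Generic case: the name was captured from the wire into the context.
template <>
const char* Request<GenericContext>::method_name() const {
  return ctx.method_from_wire.c_str();
}
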
// Implementation of ThreadManager. Each instance of SyncRequestThreadManager
// manages a pool of threads that poll for incoming Sync RPCs and call the
// appropriate RPC handlers
@@ -708,7 +765,6 @@ Server::Server(
started_(false),
shutdown_(false),
shutdown_notified_(false),
has_generic_service_(false),
server_(nullptr),
server_initializer_(new ServerInitializer(this)),
health_check_service_disabled_(false) {
@@ -865,7 +921,7 @@ bool Server::RegisterService(const grpc::string* host, Service* service) {
auto method_index = callback_unmatched_reqs_count_.size() - 1;
// TODO(vjpai): Register these dynamically based on need
for (int i = 0; i < DEFAULT_CALLBACK_REQS_PER_METHOD; i++) {
callback_reqs_to_start_.push_back(new CallbackRequest(
callback_reqs_to_start_.push_back(new CallbackRequest<ServerContext>(
this, method_index, method, method_registration_tag));
}
// Enqueue it so that it will be Request'ed later after all request
@@ -891,7 +947,25 @@ void Server::RegisterAsyncGenericService(AsyncGenericService* service) {
GPR_ASSERT(service->server_ == nullptr &&
"Can only register an async generic service against one server.");
service->server_ = this;
has_generic_service_ = true;
has_async_generic_service_ = true;
}
void Server::RegisterCallbackGenericService(
experimental::CallbackGenericService* service) {
GPR_ASSERT(
service->server_ == nullptr &&
"Can only register a callback generic service against one server.");
service->server_ = this;
has_callback_generic_service_ = true;
generic_handler_.reset(service->Handler());
callback_unmatched_reqs_count_.push_back(0);
auto method_index = callback_unmatched_reqs_count_.size() - 1;
// TODO(vjpai): Register these dynamically based on need
for (int i = 0; i < DEFAULT_CALLBACK_REQS_PER_METHOD; i++) {
callback_reqs_to_start_.push_back(new CallbackRequest<GenericServerContext>(
this, method_index, nullptr, nullptr));
}
}
int Server::AddListeningPort(const grpc::string& addr,
@@ -930,9 +1004,17 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
RegisterService(nullptr, default_health_check_service_impl);
}
// If this server uses callback methods, then create a callback generic
// service to handle any unimplemented methods using the default reactor
// creator
if (!callback_reqs_to_start_.empty() && !has_callback_generic_service_) {
unimplemented_service_.reset(new experimental::CallbackGenericService);
RegisterCallbackGenericService(unimplemented_service_.get());
}
grpc_server_start(server_);
if (!has_generic_service_) {
if (!has_async_generic_service_ && !has_callback_generic_service_) {
for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
(*it)->AddUnknownSyncMethod();
}

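The Start() hunk above adds a safety net for callback-only servers: if any callback requests were created but no callback generic service was registered, a default experimental::CallbackGenericService is installed so unknown methods are still answered (ordinarily with UNIMPLEMENTED) rather than left unmatched, and the sync managers' unknown-method handler is only added when neither kind of generic service exists. A self-contained restatement of that decision (names hypothetical):

struct StartState {
  bool has_callback_reqs = false;
  bool has_callback_generic_service = false;
  bool installed_default_generic = false;
};

// Mirror of the fallback in Start(): install a default generic service only
// when callback methods exist and the user did not supply a generic service.
void MaybeInstallUnimplementedFallback(StartState& s) {
  if (s.has_callback_reqs && !s.has_callback_generic_service) {
    s.installed_default_generic = true;  // RegisterCallbackGenericService(default)
    s.has_callback_generic_service = true;
  }
}
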
@@ -32,6 +32,7 @@
#include <grpcpp/impl/call.h>
#include <grpcpp/support/time.h>
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/surface/call.h"
namespace grpc {
@@ -116,13 +117,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
done_intercepting_ = true;
if (!has_tag_) {
/* We don't have a tag to return. */
std::unique_lock<std::mutex> lock(mu_);
if (--refs_ == 0) {
lock.unlock();
grpc_call* call = call_.call();
delete this;
grpc_call_unref(call);
}
Unref();
return;
}
/* Start a dummy op so that we can return the tag */
@@ -142,8 +137,8 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
bool has_tag_;
void* tag_;
void* core_cq_tag_;
grpc_core::RefCount refs_;
std::mutex mu_;
int refs_;
bool finalized_;
int cancelled_; // This is an int (not bool) because it is passed to core
bool done_intercepting_;
@@ -151,9 +146,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
};
void ServerContext::CompletionOp::Unref() {
std::unique_lock<std::mutex> lock(mu_);
if (--refs_ == 0) {
lock.unlock();
if (refs_.Unref()) {
grpc_call* call = call_.call();
delete this;
grpc_call_unref(call);
@@ -183,12 +176,7 @@ bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
*tag = tag_;
ret = true;
}
if (--refs_ == 0) {
lock.unlock();
grpc_call* call = call_.call();
delete this;
grpc_call_unref(call);
}
Unref();
return ret;
}
finalized_ = true;
@@ -220,13 +208,7 @@ bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
*tag = tag_;
ret = true;
}
lock.lock();
if (--refs_ == 0) {
lock.unlock();
grpc_call* call = call_.call();
delete this;
grpc_call_unref(call);
}
Unref();
return ret;
}
/* There are interceptors to be run. Return false for now */

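The server_context.cc hunks above replace CompletionOp's mutex-guarded integer reference count with grpc_core::RefCount and funnel all three release sites through a single Unref(), which deletes the op and drops the core call reference only on the final release. A generic illustration of that pattern using std::atomic (a hypothetical class, not the grpc_core implementation):

#include <atomic>

class SimpleRefCount {
 public:
  void Ref() { refs_.fetch_add(1, std::memory_order_relaxed); }
  // Returns true when the caller released the last reference and must clean up.
  bool Unref() { return refs_.fetch_sub(1, std::memory_order_acq_rel) == 1; }

 private:
  std::atomic<int> refs_{1};
};

// Usage mirroring CompletionOp::Unref() above:
//   if (refs_.Unref()) {
//     grpc_call* call = call_.call();
//     delete this;
//     grpc_call_unref(call);
//   }
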