diff --git a/.gitignore b/.gitignore index f37989176ea..09223fa1ead 100644 --- a/.gitignore +++ b/.gitignore @@ -96,3 +96,6 @@ Pods/ # Artifacts directory artifacts/ + +# Git-generated files from merge conflicts +*.orig diff --git a/.travis.yml b/.travis.yml index 7576e076a0b..94bf382b25e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,5 @@ +git: + depth: 1 language: objective-c osx_image: xcode7.3 env: @@ -6,43 +8,47 @@ env: - TEST=objc - JOBS=1 matrix: - - SCHEME="RxLibraryUnitTests" WORKSPACE="Tests.xcworkspace" - TEST_PATH="src/objective-c/tests" BUILD_ONLY="false" + - SCHEME="RxLibraryUnitTests" + WORKSPACE="Tests.xcworkspace" TEST_PATH="src/objective-c/tests" BUILD_ONLY="false" INTEROP_SERVER="false" - - SCHEME="InteropTestsLocalSSL" WORKSPACE="Tests.xcworkspace" - TEST_PATH="src/objective-c/tests" BUILD_ONLY="false" INTEROP_SERVER="true" - - SCHEME="InteropTestsLocalCleartext" WORKSPACE="Tests.xcworkspace" - TEST_PATH="src/objective-c/tests" BUILD_ONLY="false" + - SCHEME="InteropTestsLocalSSL" + WORKSPACE="Tests.xcworkspace" TEST_PATH="src/objective-c/tests" BUILD_ONLY="false" INTEROP_SERVER="true" - # TODO(jcanizales): Investigate why they time out: - # - SCHEME="InteropTestsRemote" WORKSPACE="Tests.xcworkspace" - # TEST_PATH="src/objective-c/tests" BUILD_ONLY="false" + - SCHEME="InteropTestsLocalCleartext" + WORKSPACE="Tests.xcworkspace" TEST_PATH="src/objective-c/tests" BUILD_ONLY="false" + INTEROP_SERVER="true" + # TODO(jcanizales): Make tests an app project (instead of library), so the following will work. + # - SCHEME="InteropTestsRemote" + # WORKSPACE="Tests.xcworkspace" TEST_PATH="src/objective-c/tests" BUILD_ONLY="false" # INTEROP_SERVER="true" - - SCHEME="HelloWorld" WORKSPACE="HelloWorld.xcworkspace" - TEST_PATH="examples/objective-c/helloworld" BUILD_ONLY="true" - INTEROP_SERVER="false" - - SCHEME="RouteGuideClient" WORKSPACE="RouteGuideClient.xcworkspace" - TEST_PATH="examples/objective-c/route_guide" BUILD_ONLY="true" - INTEROP_SERVER="false" - - SCHEME="AuthSample" WORKSPACE="AuthSample.xcworkspace" - TEST_PATH="examples/objective-c/auth_sample" BUILD_ONLY="true" - INTEROP_SERVER="false" - - SCHEME="Sample" WORKSPACE="Sample.xcworkspace" - TEST_PATH="src/objective-c/examples/Sample" BUILD_ONLY="true" - INTEROP_SERVER="false" - - SCHEME="SwiftSample" WORKSPACE="SwiftSample.xcworkspace" - TEST_PATH="src/objective-c/examples/SwiftSample" BUILD_ONLY="true" + - SCHEME="HelloWorld" + WORKSPACE="HelloWorld.xcworkspace" TEST_PATH="examples/objective-c/helloworld" + BUILD_ONLY="true" INTEROP_SERVER="false" + - SCHEME="RouteGuideClient" + WORKSPACE="RouteGuideClient.xcworkspace" TEST_PATH="examples/objective-c/route_guide" + BUILD_ONLY="true" INTEROP_SERVER="false" + - SCHEME="AuthSample" + WORKSPACE="AuthSample.xcworkspace" TEST_PATH="examples/objective-c/auth_sample" + BUILD_ONLY="true" INTEROP_SERVER="false" + - SCHEME="Sample" + WORKSPACE="Sample.xcworkspace" TEST_PATH="src/objective-c/examples/Sample" BUILD_ONLY="true" INTEROP_SERVER="false" + - SCHEME="Sample" + WORKSPACE="Sample.xcworkspace" TEST_PATH="src/objective-c/examples/Sample" BUILD_ONLY="true" + INTEROP_SERVER="false" FRAMEWORKS="YES" + - SCHEME="SwiftSample" + WORKSPACE="SwiftSample.xcworkspace" TEST_PATH="src/objective-c/examples/SwiftSample" + BUILD_ONLY="true" INTEROP_SERVER="false" before_install: + # Until Travis upgrades from Cocoapods 0.39, we need to do it here.
- pod --version - gem uninstall cocoapods -a - - gem install cocoapods -v '1.0.0' + - gem install cocoapods -v '1.0.1' - pod --version + # Recent pods aren't found if we don't explicitly update Cocoapods' repo. + - pod repo update - brew install gflags install: - - make grpc_objective_c_plugin - - install bins/opt/grpc_objective_c_plugin /usr/local/bin/protoc-gen-objcgrpc - - install bins/opt/protobuf/protoc /usr/local/bin/protoc - pushd $TEST_PATH - pod install - popd diff --git a/BUILD b/BUILD index 33323be229d..8a6d7fdc0a6 100644 --- a/BUILD +++ b/BUILD @@ -165,6 +165,7 @@ cc_library( "src/core/lib/channel/compress_filter.h", "src/core/lib/channel/connected_channel.h", "src/core/lib/channel/context.h", + "src/core/lib/channel/handshaker.h", "src/core/lib/channel/http_client_filter.h", "src/core/lib/channel/http_server_filter.h", "src/core/lib/compression/algorithm_metadata.h", @@ -316,6 +317,7 @@ cc_library( "src/core/lib/channel/channel_stack_builder.c", "src/core/lib/channel/compress_filter.c", "src/core/lib/channel/connected_channel.c", + "src/core/lib/channel/handshaker.c", "src/core/lib/channel/http_client_filter.c", "src/core/lib/channel/http_server_filter.c", "src/core/lib/compression/compression.c", @@ -552,6 +554,7 @@ cc_library( "src/core/lib/channel/compress_filter.h", "src/core/lib/channel/connected_channel.h", "src/core/lib/channel/context.h", + "src/core/lib/channel/handshaker.h", "src/core/lib/channel/http_client_filter.h", "src/core/lib/channel/http_server_filter.h", "src/core/lib/compression/algorithm_metadata.h", @@ -693,6 +696,7 @@ cc_library( "src/core/lib/channel/channel_stack_builder.c", "src/core/lib/channel/compress_filter.c", "src/core/lib/channel/connected_channel.c", + "src/core/lib/channel/handshaker.c", "src/core/lib/channel/http_client_filter.c", "src/core/lib/channel/http_server_filter.c", "src/core/lib/compression/compression.c", @@ -904,6 +908,7 @@ cc_library( "src/core/lib/channel/compress_filter.h", "src/core/lib/channel/connected_channel.h", "src/core/lib/channel/context.h", + "src/core/lib/channel/handshaker.h", "src/core/lib/channel/http_client_filter.h", "src/core/lib/channel/http_server_filter.h", "src/core/lib/compression/algorithm_metadata.h", @@ -1032,6 +1037,7 @@ cc_library( "src/core/lib/channel/channel_stack_builder.c", "src/core/lib/channel/compress_filter.c", "src/core/lib/channel/connected_channel.c", + "src/core/lib/channel/handshaker.c", "src/core/lib/channel/http_client_filter.c", "src/core/lib/channel/http_server_filter.c", "src/core/lib/compression/compression.c", @@ -1791,6 +1797,7 @@ objc_library( "src/core/lib/channel/channel_stack_builder.c", "src/core/lib/channel/compress_filter.c", "src/core/lib/channel/connected_channel.c", + "src/core/lib/channel/handshaker.c", "src/core/lib/channel/http_client_filter.c", "src/core/lib/channel/http_server_filter.c", "src/core/lib/compression/compression.c", @@ -2006,6 +2013,7 @@ objc_library( "src/core/lib/channel/compress_filter.h", "src/core/lib/channel/connected_channel.h", "src/core/lib/channel/context.h", + "src/core/lib/channel/handshaker.h", "src/core/lib/channel/http_client_filter.h", "src/core/lib/channel/http_server_filter.h", "src/core/lib/compression/algorithm_metadata.h", diff --git a/CMakeLists.txt b/CMakeLists.txt index 8ac48183127..c55825b37d0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -42,35 +42,121 @@ cmake_minimum_required(VERSION 2.8) set(PACKAGE_NAME "grpc") -set(PACKAGE_VERSION "0.16.0-dev") +set(PACKAGE_VERSION "1.1.0-dev") set(PACKAGE_STRING "${PACKAGE_NAME} 
${PACKAGE_VERSION}") set(PACKAGE_TARNAME "${PACKAGE_NAME}-${PACKAGE_VERSION}") set(PACKAGE_BUGREPORT "https://github.com/grpc/grpc/issues/") project(${PACKAGE_NAME} C CXX) -if(NOT BORINGSSL_ROOT_DIR) - set(BORINGSSL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/boringssl) +set(gRPC_ZLIB_PROVIDER "module" CACHE STRING "Provider of zlib library") +set_property(CACHE gRPC_ZLIB_PROVIDER PROPERTY STRINGS "module" "package") + +set(gRPC_SSL_PROVIDER "module" CACHE STRING "Provider of ssl library") +set_property(CACHE gRPC_SSL_PROVIDER PROPERTY STRINGS "module" "package") + +set(gRPC_PROTOBUF_PROVIDER "module" CACHE STRING "Provider of protobuf library") +set_property(CACHE gRPC_PROTOBUF_PROVIDER PROPERTY STRINGS "module" "package") + +set(gRPC_USE_PROTO_LITE OFF CACHE BOOL "Use the protobuf-lite library") + +if (gRPC_USE_PROTO_LITE) + set(_gRPC_PROTOBUF_LIBRARY_NAME "libprotobuf-lite") + add_definitions("-DGRPC_USE_PROTO_LITE") +else() + set(_gRPC_PROTOBUF_LIBRARY_NAME "libprotobuf") endif() -if(NOT PROTOBUF_ROOT_DIR) - set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf) + +if("${gRPC_ZLIB_PROVIDER}" STREQUAL "module") + if(NOT ZLIB_ROOT_DIR) + set(ZLIB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib) + endif() + set(ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}") + if(EXISTS "${ZLIB_ROOT_DIR}/CMakeLists.txt") + add_subdirectory(${ZLIB_ROOT_DIR} third_party/zlib) + if(TARGET zlibstatic) + set(_gRPC_ZLIB_LIBRARIES zlibstatic) + endif() + else() + message(WARNING "gRPC_ZLIB_PROVIDER is \"module\" but ZLIB_ROOT_DIR is wrong") + endif() +elseif("${gRPC_ZLIB_PROVIDER}" STREQUAL "package") + find_package(ZLIB) + if(TARGET ZLIB::ZLIB) + set(_gRPC_ZLIB_LIBRARIES ZLIB::ZLIB) + endif() + set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n find_package(ZLIB)\nendif()") endif() -if(NOT ZLIB_ROOT_DIR) - set(ZLIB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib) + +if("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "module") + # Building the protobuf tests require gmock what is not part of a standard protobuf checkout. + # Disable them unless they are explicitly requested from the cmake command line (when we assume + # gmock is downloaded to the right location inside protobuf). + if(NOT protobuf_BUILD_TESTS) + set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") + endif() + if(NOT PROTOBUF_ROOT_DIR) + set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf) + endif() + if(EXISTS "${PROTOBUF_ROOT_DIR}/cmake/CMakeLists.txt") + add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf) + if(TARGET ${_gRPC_PROTOBUF_LIBRARY_NAME}) + set(_gRPC_PROTOBUF_LIBRARIES ${_gRPC_PROTOBUF_LIBRARY_NAME}) + endif() + if(TARGET libprotoc) + set(_gRPC_PROTOBUF_PROTOC_LIBRARIES libprotoc) + endif() + else() + message(WARNING "gRPC_PROTOBUF_PROVIDER is \"module\" but PROTOBUF_ROOT_DIR is wrong") + endif() +elseif("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package") + find_package(protobuf CONFIG) + if(protobuf_FOUND) + if(TARGET protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME}) + set(_gRPC_PROTOBUF_LIBRARIES protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME}) + endif() + if(TARGET protobuf::libprotoc) + set(_gRPC_PROTOBUF_PROTOC_LIBRARIES protobuf::libprotoc) + endif() + set(_gRPC_FIND_PROTOBUF "if(NOT protobuf_FOUND)\n find_package(protobuf CONFIG)\nendif()") + else() + find_package(Protobuf MODULE) + set(_gRPC_FIND_PROTOBUF "if(NOT Protobuf_FOUND)\n find_package(Protobuf)\nendif()") + endif() endif() -# Building the protobuf tests require gmock what is not part of a standard protobuf checkout. 
-# Disable them unless they are explicitly requested from the cmake command line (when we assume -# gmock is downloaded to the right location inside protobuf). -if(NOT protobuf_BUILD_TESTS) - set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") +if("${gRPC_SSL_PROVIDER}" STREQUAL "module") + if(NOT BORINGSSL_ROOT_DIR) + set(BORINGSSL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/boringssl) + endif() + if(EXISTS "${BORINGSSL_ROOT_DIR}/CMakeLists.txt") + add_subdirectory(${BORINGSSL_ROOT_DIR} third_party/boringssl) + if(TARGET ssl) + set(_gRPC_SSL_LIBRARIES ssl) + endif() + else() + message(WARNING "gRPC_SSL_PROVIDER is \"module\" but BORINGSSL_ROOT_DIR is wrong") + endif() +elseif("${gRPC_SSL_PROVIDER}" STREQUAL "package") + find_package(OpenSSL) + if(TARGET OpenSSL::SSL) + set(_gRPC_SSL_LIBRARIES OpenSSL::SSL) + endif() + set(_gRPC_FIND_SSL "if(NOT OpenSSL_FOUND)\n find_package(OpenSSL)\nendif()") endif() -add_subdirectory(${BORINGSSL_ROOT_DIR} third_party/boringssl) -add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf) -add_subdirectory(${ZLIB_ROOT_DIR} third_party/zlib) +if(NOT MSVC) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +endif() -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +if(WIN32 AND MSVC) + set(_gRPC_BASELIB_LIBRARIES wsock32 ws2_32) +endif() + +include(GNUInstallDirs) +if(NOT DEFINED CMAKE_INSTALL_CMAKEDIR) + set(CMAKE_INSTALL_CMAKEDIR "${CMAKE_INSTALL_LIBDIR}/cmake/gRPC") +endif() add_library(gpr @@ -126,11 +212,69 @@ target_include_directories(gpr PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${BORINGSSL_ROOT_DIR}/include PRIVATE ${PROTOBUF_ROOT_DIR}/src - PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${ZLIB_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) +foreach(_hdr + include/grpc/support/alloc.h + include/grpc/support/atm.h + include/grpc/support/atm_gcc_atomic.h + include/grpc/support/atm_gcc_sync.h + include/grpc/support/atm_windows.h + include/grpc/support/avl.h + include/grpc/support/cmdline.h + include/grpc/support/cpu.h + include/grpc/support/histogram.h + include/grpc/support/host_port.h + include/grpc/support/log.h + include/grpc/support/log_windows.h + include/grpc/support/port_platform.h + include/grpc/support/slice.h + include/grpc/support/slice_buffer.h + include/grpc/support/string_util.h + include/grpc/support/subprocess.h + include/grpc/support/sync.h + include/grpc/support/sync_generic.h + include/grpc/support/sync_posix.h + include/grpc/support/sync_windows.h + include/grpc/support/thd.h + include/grpc/support/time.h + include/grpc/support/tls.h + include/grpc/support/tls_gcc.h + include/grpc/support/tls_msvc.h + include/grpc/support/tls_pthread.h + include/grpc/support/useful.h + include/grpc/impl/codegen/alloc.h + include/grpc/impl/codegen/atm.h + include/grpc/impl/codegen/atm_gcc_atomic.h + include/grpc/impl/codegen/atm_gcc_sync.h + include/grpc/impl/codegen/atm_windows.h + include/grpc/impl/codegen/log.h + include/grpc/impl/codegen/port_platform.h + include/grpc/impl/codegen/slice.h + include/grpc/impl/codegen/slice_buffer.h + include/grpc/impl/codegen/sync.h + include/grpc/impl/codegen/sync_generic.h + include/grpc/impl/codegen/sync_posix.h + include/grpc/impl/codegen/sync_windows.h + include/grpc/impl/codegen/time.h +) + string(REPLACE "include/" "" _path ${_hdr}) + get_filename_component(_path ${_path} PATH) + install(FILES ${_hdr} + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${_path}" + ) 
+endforeach() + + +install(TARGETS gpr EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + add_library(grpc src/core/lib/surface/init.c @@ -139,6 +283,7 @@ add_library(grpc src/core/lib/channel/channel_stack_builder.c src/core/lib/channel/compress_filter.c src/core/lib/channel/connected_channel.c + src/core/lib/channel/handshaker.c src/core/lib/channel/http_client_filter.c src/core/lib/channel/http_server_filter.c src/core/lib/compression/compression.c @@ -326,16 +471,63 @@ target_include_directories(grpc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${BORINGSSL_ROOT_DIR}/include PRIVATE ${PROTOBUF_ROOT_DIR}/src - PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${ZLIB_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) target_link_libraries(grpc - ssl - zlibstatic + ${_gRPC_BASELIB_LIBRARIES} + ${_gRPC_SSL_LIBRARIES} + ${_gRPC_ZLIB_LIBRARIES} gpr ) +foreach(_hdr + include/grpc/byte_buffer.h + include/grpc/byte_buffer_reader.h + include/grpc/compression.h + include/grpc/grpc.h + include/grpc/grpc_posix.h + include/grpc/status.h + include/grpc/impl/codegen/byte_buffer.h + include/grpc/impl/codegen/byte_buffer_reader.h + include/grpc/impl/codegen/compression_types.h + include/grpc/impl/codegen/connectivity_state.h + include/grpc/impl/codegen/grpc_types.h + include/grpc/impl/codegen/propagation_bits.h + include/grpc/impl/codegen/status.h + include/grpc/impl/codegen/alloc.h + include/grpc/impl/codegen/atm.h + include/grpc/impl/codegen/atm_gcc_atomic.h + include/grpc/impl/codegen/atm_gcc_sync.h + include/grpc/impl/codegen/atm_windows.h + include/grpc/impl/codegen/log.h + include/grpc/impl/codegen/port_platform.h + include/grpc/impl/codegen/slice.h + include/grpc/impl/codegen/slice_buffer.h + include/grpc/impl/codegen/sync.h + include/grpc/impl/codegen/sync_generic.h + include/grpc/impl/codegen/sync_posix.h + include/grpc/impl/codegen/sync_windows.h + include/grpc/impl/codegen/time.h + include/grpc/grpc_security.h + include/grpc/grpc_security_constants.h + include/grpc/census.h +) + string(REPLACE "include/" "" _path ${_hdr}) + get_filename_component(_path ${_path} PATH) + install(FILES ${_hdr} + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${_path}" + ) +endforeach() + + +install(TARGETS grpc EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + add_library(grpc_cronet src/core/lib/surface/init.c @@ -344,6 +536,7 @@ add_library(grpc_cronet src/core/lib/channel/channel_stack_builder.c src/core/lib/channel/compress_filter.c src/core/lib/channel/connected_channel.c + src/core/lib/channel/handshaker.c src/core/lib/channel/http_client_filter.c src/core/lib/channel/http_server_filter.c src/core/lib/compression/compression.c @@ -508,15 +701,62 @@ target_include_directories(grpc_cronet PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${BORINGSSL_ROOT_DIR}/include PRIVATE ${PROTOBUF_ROOT_DIR}/src - PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${ZLIB_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) target_link_libraries(grpc_cronet - ssl + ${_gRPC_BASELIB_LIBRARIES} + ${_gRPC_SSL_LIBRARIES} gpr ) +foreach(_hdr + include/grpc/byte_buffer.h + include/grpc/byte_buffer_reader.h + include/grpc/compression.h + include/grpc/grpc.h + include/grpc/grpc_posix.h + include/grpc/status.h + include/grpc/impl/codegen/byte_buffer.h + include/grpc/impl/codegen/byte_buffer_reader.h + 
include/grpc/impl/codegen/compression_types.h + include/grpc/impl/codegen/connectivity_state.h + include/grpc/impl/codegen/grpc_types.h + include/grpc/impl/codegen/propagation_bits.h + include/grpc/impl/codegen/status.h + include/grpc/impl/codegen/alloc.h + include/grpc/impl/codegen/atm.h + include/grpc/impl/codegen/atm_gcc_atomic.h + include/grpc/impl/codegen/atm_gcc_sync.h + include/grpc/impl/codegen/atm_windows.h + include/grpc/impl/codegen/log.h + include/grpc/impl/codegen/port_platform.h + include/grpc/impl/codegen/slice.h + include/grpc/impl/codegen/slice_buffer.h + include/grpc/impl/codegen/sync.h + include/grpc/impl/codegen/sync_generic.h + include/grpc/impl/codegen/sync_posix.h + include/grpc/impl/codegen/sync_windows.h + include/grpc/impl/codegen/time.h + include/grpc/grpc_cronet.h + include/grpc/grpc_security.h + include/grpc/grpc_security_constants.h +) + string(REPLACE "include/" "" _path ${_hdr}) + get_filename_component(_path ${_path} PATH) + install(FILES ${_hdr} + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${_path}" + ) +endforeach() + + +install(TARGETS grpc_cronet EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + add_library(grpc_unsecure src/core/lib/surface/init.c @@ -526,6 +766,7 @@ add_library(grpc_unsecure src/core/lib/channel/channel_stack_builder.c src/core/lib/channel/compress_filter.c src/core/lib/channel/connected_channel.c + src/core/lib/channel/handshaker.c src/core/lib/channel/http_client_filter.c src/core/lib/channel/http_server_filter.c src/core/lib/compression/compression.c @@ -683,14 +924,59 @@ target_include_directories(grpc_unsecure PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${BORINGSSL_ROOT_DIR}/include PRIVATE ${PROTOBUF_ROOT_DIR}/src - PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${ZLIB_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) target_link_libraries(grpc_unsecure + ${_gRPC_BASELIB_LIBRARIES} gpr ) +foreach(_hdr + include/grpc/byte_buffer.h + include/grpc/byte_buffer_reader.h + include/grpc/compression.h + include/grpc/grpc.h + include/grpc/grpc_posix.h + include/grpc/status.h + include/grpc/impl/codegen/byte_buffer.h + include/grpc/impl/codegen/byte_buffer_reader.h + include/grpc/impl/codegen/compression_types.h + include/grpc/impl/codegen/connectivity_state.h + include/grpc/impl/codegen/grpc_types.h + include/grpc/impl/codegen/propagation_bits.h + include/grpc/impl/codegen/status.h + include/grpc/impl/codegen/alloc.h + include/grpc/impl/codegen/atm.h + include/grpc/impl/codegen/atm_gcc_atomic.h + include/grpc/impl/codegen/atm_gcc_sync.h + include/grpc/impl/codegen/atm_windows.h + include/grpc/impl/codegen/log.h + include/grpc/impl/codegen/port_platform.h + include/grpc/impl/codegen/slice.h + include/grpc/impl/codegen/slice_buffer.h + include/grpc/impl/codegen/sync.h + include/grpc/impl/codegen/sync_generic.h + include/grpc/impl/codegen/sync_posix.h + include/grpc/impl/codegen/sync_windows.h + include/grpc/impl/codegen/time.h + include/grpc/census.h +) + string(REPLACE "include/" "" _path ${_hdr}) + get_filename_component(_path ${_path} PATH) + install(FILES ${_hdr} + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${_path}" + ) +endforeach() + + +install(TARGETS grpc_unsecure EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + add_library(grpc++ src/cpp/client/secure_credentials.cc @@ -733,16 +1019,131 @@ 
target_include_directories(grpc++ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${BORINGSSL_ROOT_DIR}/include PRIVATE ${PROTOBUF_ROOT_DIR}/src - PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${ZLIB_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) target_link_libraries(grpc++ - ssl - libprotobuf + ${_gRPC_BASELIB_LIBRARIES} + ${_gRPC_SSL_LIBRARIES} + ${_gRPC_PROTOBUF_LIBRARIES} grpc ) +foreach(_hdr + include/grpc++/alarm.h + include/grpc++/channel.h + include/grpc++/client_context.h + include/grpc++/completion_queue.h + include/grpc++/create_channel.h + include/grpc++/create_channel_posix.h + include/grpc++/generic/async_generic_service.h + include/grpc++/generic/generic_stub.h + include/grpc++/grpc++.h + include/grpc++/impl/call.h + include/grpc++/impl/client_unary_call.h + include/grpc++/impl/codegen/core_codegen.h + include/grpc++/impl/grpc_library.h + include/grpc++/impl/method_handler_impl.h + include/grpc++/impl/rpc_method.h + include/grpc++/impl/rpc_service_method.h + include/grpc++/impl/serialization_traits.h + include/grpc++/impl/server_builder_option.h + include/grpc++/impl/server_builder_plugin.h + include/grpc++/impl/server_initializer.h + include/grpc++/impl/service_type.h + include/grpc++/impl/sync.h + include/grpc++/impl/sync_cxx11.h + include/grpc++/impl/sync_no_cxx11.h + include/grpc++/impl/thd.h + include/grpc++/impl/thd_cxx11.h + include/grpc++/impl/thd_no_cxx11.h + include/grpc++/security/auth_context.h + include/grpc++/security/auth_metadata_processor.h + include/grpc++/security/credentials.h + include/grpc++/security/server_credentials.h + include/grpc++/server.h + include/grpc++/server_builder.h + include/grpc++/server_context.h + include/grpc++/server_posix.h + include/grpc++/support/async_stream.h + include/grpc++/support/async_unary_call.h + include/grpc++/support/byte_buffer.h + include/grpc++/support/channel_arguments.h + include/grpc++/support/config.h + include/grpc++/support/slice.h + include/grpc++/support/status.h + include/grpc++/support/status_code_enum.h + include/grpc++/support/string_ref.h + include/grpc++/support/stub_options.h + include/grpc++/support/sync_stream.h + include/grpc++/support/time.h + include/grpc++/impl/codegen/async_stream.h + include/grpc++/impl/codegen/async_unary_call.h + include/grpc++/impl/codegen/call.h + include/grpc++/impl/codegen/call_hook.h + include/grpc++/impl/codegen/channel_interface.h + include/grpc++/impl/codegen/client_context.h + include/grpc++/impl/codegen/client_unary_call.h + include/grpc++/impl/codegen/completion_queue.h + include/grpc++/impl/codegen/completion_queue_tag.h + include/grpc++/impl/codegen/config.h + include/grpc++/impl/codegen/core_codegen_interface.h + include/grpc++/impl/codegen/create_auth_context.h + include/grpc++/impl/codegen/grpc_library.h + include/grpc++/impl/codegen/method_handler_impl.h + include/grpc++/impl/codegen/rpc_method.h + include/grpc++/impl/codegen/rpc_service_method.h + include/grpc++/impl/codegen/security/auth_context.h + include/grpc++/impl/codegen/serialization_traits.h + include/grpc++/impl/codegen/server_context.h + include/grpc++/impl/codegen/server_interface.h + include/grpc++/impl/codegen/service_type.h + include/grpc++/impl/codegen/status.h + include/grpc++/impl/codegen/status_code_enum.h + include/grpc++/impl/codegen/string_ref.h + include/grpc++/impl/codegen/stub_options.h + include/grpc++/impl/codegen/sync.h + include/grpc++/impl/codegen/sync_cxx11.h + include/grpc++/impl/codegen/sync_no_cxx11.h + include/grpc++/impl/codegen/sync_stream.h + 
include/grpc++/impl/codegen/time.h + include/grpc/impl/codegen/byte_buffer.h + include/grpc/impl/codegen/byte_buffer_reader.h + include/grpc/impl/codegen/compression_types.h + include/grpc/impl/codegen/connectivity_state.h + include/grpc/impl/codegen/grpc_types.h + include/grpc/impl/codegen/propagation_bits.h + include/grpc/impl/codegen/status.h + include/grpc/impl/codegen/alloc.h + include/grpc/impl/codegen/atm.h + include/grpc/impl/codegen/atm_gcc_atomic.h + include/grpc/impl/codegen/atm_gcc_sync.h + include/grpc/impl/codegen/atm_windows.h + include/grpc/impl/codegen/log.h + include/grpc/impl/codegen/port_platform.h + include/grpc/impl/codegen/slice.h + include/grpc/impl/codegen/slice_buffer.h + include/grpc/impl/codegen/sync.h + include/grpc/impl/codegen/sync_generic.h + include/grpc/impl/codegen/sync_posix.h + include/grpc/impl/codegen/sync_windows.h + include/grpc/impl/codegen/time.h +) + string(REPLACE "include/" "" _path ${_hdr}) + get_filename_component(_path ${_path} PATH) + install(FILES ${_hdr} + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${_path}" + ) +endforeach() + + +install(TARGETS grpc++ EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + add_library(grpc++_reflection src/cpp/ext/proto_server_reflection.cc @@ -756,7 +1157,7 @@ target_include_directories(grpc++_reflection PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${BORINGSSL_ROOT_DIR}/include PRIVATE ${PROTOBUF_ROOT_DIR}/src - PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${ZLIB_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) @@ -764,6 +1165,78 @@ target_link_libraries(grpc++_reflection grpc++ ) +foreach(_hdr + include/grpc++/ext/proto_server_reflection_plugin.h + include/grpc++/ext/reflection.grpc.pb.h + include/grpc++/ext/reflection.pb.h + include/grpc++/impl/codegen/proto_utils.h + include/grpc++/impl/codegen/async_stream.h + include/grpc++/impl/codegen/async_unary_call.h + include/grpc++/impl/codegen/call.h + include/grpc++/impl/codegen/call_hook.h + include/grpc++/impl/codegen/channel_interface.h + include/grpc++/impl/codegen/client_context.h + include/grpc++/impl/codegen/client_unary_call.h + include/grpc++/impl/codegen/completion_queue.h + include/grpc++/impl/codegen/completion_queue_tag.h + include/grpc++/impl/codegen/config.h + include/grpc++/impl/codegen/core_codegen_interface.h + include/grpc++/impl/codegen/create_auth_context.h + include/grpc++/impl/codegen/grpc_library.h + include/grpc++/impl/codegen/method_handler_impl.h + include/grpc++/impl/codegen/rpc_method.h + include/grpc++/impl/codegen/rpc_service_method.h + include/grpc++/impl/codegen/security/auth_context.h + include/grpc++/impl/codegen/serialization_traits.h + include/grpc++/impl/codegen/server_context.h + include/grpc++/impl/codegen/server_interface.h + include/grpc++/impl/codegen/service_type.h + include/grpc++/impl/codegen/status.h + include/grpc++/impl/codegen/status_code_enum.h + include/grpc++/impl/codegen/string_ref.h + include/grpc++/impl/codegen/stub_options.h + include/grpc++/impl/codegen/sync.h + include/grpc++/impl/codegen/sync_cxx11.h + include/grpc++/impl/codegen/sync_no_cxx11.h + include/grpc++/impl/codegen/sync_stream.h + include/grpc++/impl/codegen/time.h + include/grpc/impl/codegen/byte_buffer.h + include/grpc/impl/codegen/byte_buffer_reader.h + include/grpc/impl/codegen/compression_types.h + include/grpc/impl/codegen/connectivity_state.h + include/grpc/impl/codegen/grpc_types.h + 
include/grpc/impl/codegen/propagation_bits.h + include/grpc/impl/codegen/status.h + include/grpc/impl/codegen/alloc.h + include/grpc/impl/codegen/atm.h + include/grpc/impl/codegen/atm_gcc_atomic.h + include/grpc/impl/codegen/atm_gcc_sync.h + include/grpc/impl/codegen/atm_windows.h + include/grpc/impl/codegen/log.h + include/grpc/impl/codegen/port_platform.h + include/grpc/impl/codegen/slice.h + include/grpc/impl/codegen/slice_buffer.h + include/grpc/impl/codegen/sync.h + include/grpc/impl/codegen/sync_generic.h + include/grpc/impl/codegen/sync_posix.h + include/grpc/impl/codegen/sync_windows.h + include/grpc/impl/codegen/time.h + include/grpc++/impl/codegen/config_protobuf.h +) + string(REPLACE "include/" "" _path ${_hdr}) + get_filename_component(_path ${_path} PATH) + install(FILES ${_hdr} + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${_path}" + ) +endforeach() + + +install(TARGETS grpc++_reflection EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + add_library(grpc++_unsecure src/cpp/common/insecure_create_auth_context.cc @@ -801,17 +1274,132 @@ target_include_directories(grpc++_unsecure PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${BORINGSSL_ROOT_DIR}/include PRIVATE ${PROTOBUF_ROOT_DIR}/src - PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${ZLIB_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) target_link_libraries(grpc++_unsecure - libprotobuf + ${_gRPC_BASELIB_LIBRARIES} + ${_gRPC_PROTOBUF_LIBRARIES} gpr grpc_unsecure grpc ) +foreach(_hdr + include/grpc++/alarm.h + include/grpc++/channel.h + include/grpc++/client_context.h + include/grpc++/completion_queue.h + include/grpc++/create_channel.h + include/grpc++/create_channel_posix.h + include/grpc++/generic/async_generic_service.h + include/grpc++/generic/generic_stub.h + include/grpc++/grpc++.h + include/grpc++/impl/call.h + include/grpc++/impl/client_unary_call.h + include/grpc++/impl/codegen/core_codegen.h + include/grpc++/impl/grpc_library.h + include/grpc++/impl/method_handler_impl.h + include/grpc++/impl/rpc_method.h + include/grpc++/impl/rpc_service_method.h + include/grpc++/impl/serialization_traits.h + include/grpc++/impl/server_builder_option.h + include/grpc++/impl/server_builder_plugin.h + include/grpc++/impl/server_initializer.h + include/grpc++/impl/service_type.h + include/grpc++/impl/sync.h + include/grpc++/impl/sync_cxx11.h + include/grpc++/impl/sync_no_cxx11.h + include/grpc++/impl/thd.h + include/grpc++/impl/thd_cxx11.h + include/grpc++/impl/thd_no_cxx11.h + include/grpc++/security/auth_context.h + include/grpc++/security/auth_metadata_processor.h + include/grpc++/security/credentials.h + include/grpc++/security/server_credentials.h + include/grpc++/server.h + include/grpc++/server_builder.h + include/grpc++/server_context.h + include/grpc++/server_posix.h + include/grpc++/support/async_stream.h + include/grpc++/support/async_unary_call.h + include/grpc++/support/byte_buffer.h + include/grpc++/support/channel_arguments.h + include/grpc++/support/config.h + include/grpc++/support/slice.h + include/grpc++/support/status.h + include/grpc++/support/status_code_enum.h + include/grpc++/support/string_ref.h + include/grpc++/support/stub_options.h + include/grpc++/support/sync_stream.h + include/grpc++/support/time.h + include/grpc++/impl/codegen/async_stream.h + include/grpc++/impl/codegen/async_unary_call.h + include/grpc++/impl/codegen/call.h + include/grpc++/impl/codegen/call_hook.h + 
include/grpc++/impl/codegen/channel_interface.h + include/grpc++/impl/codegen/client_context.h + include/grpc++/impl/codegen/client_unary_call.h + include/grpc++/impl/codegen/completion_queue.h + include/grpc++/impl/codegen/completion_queue_tag.h + include/grpc++/impl/codegen/config.h + include/grpc++/impl/codegen/core_codegen_interface.h + include/grpc++/impl/codegen/create_auth_context.h + include/grpc++/impl/codegen/grpc_library.h + include/grpc++/impl/codegen/method_handler_impl.h + include/grpc++/impl/codegen/rpc_method.h + include/grpc++/impl/codegen/rpc_service_method.h + include/grpc++/impl/codegen/security/auth_context.h + include/grpc++/impl/codegen/serialization_traits.h + include/grpc++/impl/codegen/server_context.h + include/grpc++/impl/codegen/server_interface.h + include/grpc++/impl/codegen/service_type.h + include/grpc++/impl/codegen/status.h + include/grpc++/impl/codegen/status_code_enum.h + include/grpc++/impl/codegen/string_ref.h + include/grpc++/impl/codegen/stub_options.h + include/grpc++/impl/codegen/sync.h + include/grpc++/impl/codegen/sync_cxx11.h + include/grpc++/impl/codegen/sync_no_cxx11.h + include/grpc++/impl/codegen/sync_stream.h + include/grpc++/impl/codegen/time.h + include/grpc/impl/codegen/byte_buffer.h + include/grpc/impl/codegen/byte_buffer_reader.h + include/grpc/impl/codegen/compression_types.h + include/grpc/impl/codegen/connectivity_state.h + include/grpc/impl/codegen/grpc_types.h + include/grpc/impl/codegen/propagation_bits.h + include/grpc/impl/codegen/status.h + include/grpc/impl/codegen/alloc.h + include/grpc/impl/codegen/atm.h + include/grpc/impl/codegen/atm_gcc_atomic.h + include/grpc/impl/codegen/atm_gcc_sync.h + include/grpc/impl/codegen/atm_windows.h + include/grpc/impl/codegen/log.h + include/grpc/impl/codegen/port_platform.h + include/grpc/impl/codegen/slice.h + include/grpc/impl/codegen/slice_buffer.h + include/grpc/impl/codegen/sync.h + include/grpc/impl/codegen/sync_generic.h + include/grpc/impl/codegen/sync_posix.h + include/grpc/impl/codegen/sync_windows.h + include/grpc/impl/codegen/time.h +) + string(REPLACE "include/" "" _path ${_hdr}) + get_filename_component(_path ${_path} PATH) + install(FILES ${_hdr} + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${_path}" + ) +endforeach() + + +install(TARGETS grpc++_unsecure EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + add_library(grpc_plugin_support src/compiler/cpp_generator.cc @@ -827,12 +1415,29 @@ target_include_directories(grpc_plugin_support PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${BORINGSSL_ROOT_DIR}/include PRIVATE ${PROTOBUF_ROOT_DIR}/src - PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${ZLIB_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) target_link_libraries(grpc_plugin_support - libprotoc + ${_gRPC_PROTOBUF_PROTOC_LIBRARIES} +) + +foreach(_hdr + include/grpc++/impl/codegen/config_protobuf.h +) + string(REPLACE "include/" "" _path ${_hdr}) + get_filename_component(_path ${_path} PATH) + install(FILES ${_hdr} + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${_path}" + ) +endforeach() + + +install(TARGETS grpc_plugin_support EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} ) @@ -845,7 +1450,7 @@ target_include_directories(grpc_csharp_ext PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${BORINGSSL_ROOT_DIR}/include PRIVATE 
${PROTOBUF_ROOT_DIR}/src - PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${ZLIB_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) @@ -855,6 +1460,14 @@ target_link_libraries(grpc_csharp_ext ) + +install(TARGETS grpc_csharp_ext EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(gen_hpack_tables tools/codegen/core/gen_hpack_tables.c @@ -875,6 +1488,13 @@ target_link_libraries(gen_hpack_tables ) +install(TARGETS gen_hpack_tables EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(gen_legal_metadata_characters tools/codegen/core/gen_legal_metadata_characters.c ) @@ -890,6 +1510,13 @@ target_include_directories(gen_legal_metadata_characters +install(TARGETS gen_legal_metadata_characters EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(grpc_create_jwt test/core/security/create_jwt.c ) @@ -904,12 +1531,19 @@ target_include_directories(grpc_create_jwt ) target_link_libraries(grpc_create_jwt - ssl + ${_gRPC_SSL_LIBRARIES} grpc gpr ) +install(TARGETS grpc_create_jwt EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(grpc_print_google_default_creds_token test/core/security/print_google_default_creds_token.c ) @@ -929,6 +1563,13 @@ target_link_libraries(grpc_print_google_default_creds_token ) +install(TARGETS grpc_print_google_default_creds_token EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(grpc_verify_jwt test/core/security/verify_jwt.c ) @@ -948,6 +1589,13 @@ target_link_libraries(grpc_verify_jwt ) +install(TARGETS grpc_verify_jwt EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(grpc_cpp_plugin src/compiler/cpp_plugin.cc ) @@ -962,11 +1610,18 @@ target_include_directories(grpc_cpp_plugin ) target_link_libraries(grpc_cpp_plugin - libprotoc + ${_gRPC_PROTOBUF_PROTOC_LIBRARIES} grpc_plugin_support ) +install(TARGETS grpc_cpp_plugin EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(grpc_csharp_plugin src/compiler/csharp_plugin.cc ) @@ -981,11 +1636,18 @@ target_include_directories(grpc_csharp_plugin ) target_link_libraries(grpc_csharp_plugin - libprotoc + ${_gRPC_PROTOBUF_PROTOC_LIBRARIES} grpc_plugin_support ) +install(TARGETS grpc_csharp_plugin EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(grpc_node_plugin src/compiler/node_plugin.cc ) @@ -1000,11 +1662,18 @@ target_include_directories(grpc_node_plugin ) target_link_libraries(grpc_node_plugin - libprotoc + ${_gRPC_PROTOBUF_PROTOC_LIBRARIES} grpc_plugin_support ) +install(TARGETS grpc_node_plugin EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION 
${CMAKE_INSTALL_LIBDIR} +) + + add_executable(grpc_objective_c_plugin src/compiler/objective_c_plugin.cc ) @@ -1019,11 +1688,18 @@ target_include_directories(grpc_objective_c_plugin ) target_link_libraries(grpc_objective_c_plugin - libprotoc + ${_gRPC_PROTOBUF_PROTOC_LIBRARIES} grpc_plugin_support ) +install(TARGETS grpc_objective_c_plugin EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(grpc_python_plugin src/compiler/python_plugin.cc ) @@ -1038,11 +1714,18 @@ target_include_directories(grpc_python_plugin ) target_link_libraries(grpc_python_plugin - libprotoc + ${_gRPC_PROTOBUF_PROTOC_LIBRARIES} grpc_plugin_support ) +install(TARGETS grpc_python_plugin EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + add_executable(grpc_ruby_plugin src/compiler/ruby_plugin.cc ) @@ -1057,11 +1740,33 @@ target_include_directories(grpc_ruby_plugin ) target_link_libraries(grpc_ruby_plugin - libprotoc + ${_gRPC_PROTOBUF_PROTOC_LIBRARIES} grpc_plugin_support ) +install(TARGETS grpc_ruby_plugin EXPORT gRPCTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} +) + + + + + +install(EXPORT gRPCTargets + DESTINATION ${CMAKE_INSTALL_CMAKEDIR} + NAMESPACE gRPC:: +) + +foreach(_config gRPCConfig gRPCConfigVersion) + configure_file(tools/cmake/${_config}.cmake.in + ${_config}.cmake @ONLY) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${_config}.cmake + DESTINATION ${CMAKE_INSTALL_CMAKEDIR} + ) +endforeach() diff --git a/Makefile b/Makefile index b357414c52b..e0be2ffbac9 100644 --- a/Makefile +++ b/Makefile @@ -405,6 +405,29 @@ LIBS = m pthread ws2_32 LDFLAGS += -pthread endif +# +# The steps for cross-compiling are as follows: +# First, clone grpc and run make install using the native compilers for the host. +# Also install protoc (e.g., from a package manager such as apt-get) +# Then clone a fresh grpc for the actual cross-compiled build +# Set the environment variable GRPC_CROSS_COMPILE to true +# Set CC, CXX, LD, LDXX, AR, and STRIP to the cross-compiling binaries +# Also set PROTOBUF_CONFIG_OPTS to indicate cross-compilation to protobuf (e.g., +# PROTOBUF_CONFIG_OPTS="--host=arm-linux --with-protoc=/usr/local/bin/protoc" ) +# Set HAS_PKG_CONFIG=false +# To build tests, go to third_party/gflags and follow its ccmake instructions +# Make sure that you enable building shared libraries and set your prefix to +# something useful like /usr/local/cross +# You will also need to set GRPC_CROSS_LDOPTS and GRPC_CROSS_AROPTS to hold +# additional required arguments for LD and AR (examples below) +# Then you can run make from the fresh cross-compiling clone (see the example invocation below) +# +ifeq ($(GRPC_CROSS_COMPILE),true) +LDFLAGS += $(GRPC_CROSS_LDOPTS) # e.g.
-L/usr/local/lib -L/usr/local/cross/lib +AROPTS = $(GRPC_CROSS_AROPTS) # e.g., rc --target=elf32-little +USE_BUILT_PROTOC = false +endif + GTEST_LIB = -Ithird_party/googletest/include -Ithird_party/googletest third_party/googletest/src/gtest-all.cc GTEST_LIB += -lgflags ifeq ($(V),1) @@ -415,7 +438,7 @@ E = @echo Q = @ endif -VERSION = 0.16.0-dev +VERSION = 1.1.0-dev CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES)) CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS) @@ -448,7 +471,7 @@ PC_TEMPLATE = prefix=$(prefix),exec_prefix=\$${prefix},includedir=\$${prefix}/in ifeq ($(SYSTEM),MINGW32) SHARED_EXT = dll SHARED_PREFIX = -SHARED_VERSION = -0 +SHARED_VERSION = -1 else ifeq ($(SYSTEM),Darwin) SHARED_EXT = dylib SHARED_PREFIX = lib @@ -709,6 +732,9 @@ PC_LIBS_GRPCXX = CPPFLAGS := -Ithird_party/googletest/include $(CPPFLAGS) +PROTOC_PLUGINS_ALL = $(BINDIR)/$(CONFIG)/grpc_cpp_plugin $(BINDIR)/$(CONFIG)/grpc_csharp_plugin $(BINDIR)/$(CONFIG)/grpc_node_plugin $(BINDIR)/$(CONFIG)/grpc_objective_c_plugin $(BINDIR)/$(CONFIG)/grpc_python_plugin $(BINDIR)/$(CONFIG)/grpc_ruby_plugin +PROTOC_PLUGINS_DIR = $(BINDIR)/$(CONFIG) + ifeq ($(HAS_SYSTEM_PROTOBUF),true) ifeq ($(HAS_PKG_CONFIG),true) PROTOBUF_PKG_CONFIG = true @@ -723,12 +749,19 @@ endif else PC_LIBS_GRPCXX = -lprotobuf endif +PROTOC_PLUGINS = $(PROTOC_PLUGINS_ALL) else ifeq ($(HAS_EMBEDDED_PROTOBUF),true) PROTOBUF_DEP = $(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a CPPFLAGS := -Ithird_party/protobuf/src $(CPPFLAGS) LDFLAGS := -L$(LIBDIR)/$(CONFIG)/protobuf $(LDFLAGS) +ifneq ($(USE_BUILT_PROTOC),false) PROTOC = $(BINDIR)/$(CONFIG)/protobuf/protoc +PROTOC_PLUGINS = $(PROTOC_PLUGINS_ALL) +else +PROTOC_PLUGINS = +PROTOC_PLUGINS_DIR = $(prefix)/bin +endif else NO_PROTOBUF = true endif @@ -776,7 +809,6 @@ endif .SECONDARY = %.pb.h %.pb.cc -PROTOC_PLUGINS = $(BINDIR)/$(CONFIG)/grpc_cpp_plugin $(BINDIR)/$(CONFIG)/grpc_csharp_plugin $(BINDIR)/$(CONFIG)/grpc_node_plugin $(BINDIR)/$(CONFIG)/grpc_objective_c_plugin $(BINDIR)/$(CONFIG)/grpc_python_plugin $(BINDIR)/$(CONFIG)/grpc_ruby_plugin ifeq ($(DEP_MISSING),) all: static shared plugins dep_error: @@ -991,7 +1023,6 @@ transport_security_test: $(BINDIR)/$(CONFIG)/transport_security_test udp_server_test: $(BINDIR)/$(CONFIG)/udp_server_test uri_fuzzer_test: $(BINDIR)/$(CONFIG)/uri_fuzzer_test uri_parser_test: $(BINDIR)/$(CONFIG)/uri_parser_test -workqueue_test: $(BINDIR)/$(CONFIG)/workqueue_test alarm_cpp_test: $(BINDIR)/$(CONFIG)/alarm_cpp_test async_end2end_test: $(BINDIR)/$(CONFIG)/async_end2end_test auth_property_iterator_test: $(BINDIR)/$(CONFIG)/auth_property_iterator_test @@ -1148,7 +1179,7 @@ third_party/protobuf/configure: $(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a: third_party/protobuf/configure $(E) "[MAKE] Building protobuf" - $(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static) + $(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static $(PROTOBUF_CONFIG_OPTS)) $(Q)$(MAKE) -C third_party/protobuf clean $(Q)$(MAKE) -C third_party/protobuf $(Q)mkdir -p $(LIBDIR)/$(CONFIG)/protobuf @@ -1185,9 +1216,9 @@ pc_cxx: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc pc_cxx_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc 
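A minimal sketch of the cross-compile invocation the Makefile comments above describe; the variables (GRPC_CROSS_COMPILE, CC, CXX, LD, LDXX, AR, STRIP, PROTOBUF_CONFIG_OPTS, HAS_PKG_CONFIG, GRPC_CROSS_LDOPTS, GRPC_CROSS_AROPTS) are the ones the Makefile reads, while the arm-linux-gnueabihf toolchain prefix and the /usr/local/cross install prefix are illustrative placeholders:

    # In a fresh grpc clone, after a native build/install of grpc and protoc on the host:
    export GRPC_CROSS_COMPILE=true
    export CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++
    export LD=arm-linux-gnueabihf-gcc LDXX=arm-linux-gnueabihf-g++
    export AR=arm-linux-gnueabihf-ar STRIP=arm-linux-gnueabihf-strip
    export PROTOBUF_CONFIG_OPTS="--host=arm-linux --with-protoc=/usr/local/bin/protoc"
    export HAS_PKG_CONFIG=false
    export GRPC_CROSS_LDOPTS="-L/usr/local/lib -L/usr/local/cross/lib"
    export GRPC_CROSS_AROPTS="rc --target=elf32-little"
    make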
ifeq ($(EMBED_OPENSSL),true) -privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a +privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_reflection_codegen.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a else -privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a 
$(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a +privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_reflection_codegen.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a endif @@ -1295,7 +1326,6 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/transport_security_test \ $(BINDIR)/$(CONFIG)/udp_server_test \ $(BINDIR)/$(CONFIG)/uri_parser_test \ - $(BINDIR)/$(CONFIG)/workqueue_test \ $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 \ $(BINDIR)/$(CONFIG)/badreq_bad_client_test \ $(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test \ @@ -1674,8 +1704,6 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/udp_server_test || ( echo test udp_server_test failed ; exit 1 ) $(E) "[RUN] Testing uri_parser_test" $(Q) $(BINDIR)/$(CONFIG)/uri_parser_test || ( echo test uri_parser_test failed ; exit 1 ) - $(E) "[RUN] Testing workqueue_test" - $(Q) $(BINDIR)/$(CONFIG)/workqueue_test || ( echo test workqueue_test failed ; exit 1 ) $(E) "[RUN] Testing public_headers_must_be_c89" $(Q) $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 || ( echo test public_headers_must_be_c89 failed ; exit 1 ) $(E) "[RUN] Testing badreq_bad_client_test" @@ -1890,7 +1918,22 @@ $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc: src/proto/grpc/lb/v1/load_ba $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc: src/proto/grpc/lb/v1/load_balancer.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< +endif + +ifeq ($(NO_PROTOC),true) +$(GENDIR)/src/proto/grpc/reflection/v1alpha/reflection.pb.cc: protoc_dep_error +$(GENDIR)/src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.cc: protoc_dep_error +else +$(GENDIR)/src/proto/grpc/reflection/v1alpha/reflection.pb.cc: src/proto/grpc/reflection/v1alpha/reflection.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) + $(E) "[PROTOC] Generating protobuf CC file from $<" + $(Q) mkdir -p `dirname $@` + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --cpp_out=$(GENDIR) $< + +$(GENDIR)/src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.cc: src/proto/grpc/reflection/v1alpha/reflection.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) + $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" + $(Q) mkdir -p `dirname $@` + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. 
--grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -1905,7 +1948,7 @@ $(GENDIR)/src/proto/grpc/testing/compiler_test.pb.cc: src/proto/grpc/testing/com $(GENDIR)/src/proto/grpc/testing/compiler_test.grpc.pb.cc: src/proto/grpc/testing/compiler_test.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -1920,7 +1963,7 @@ $(GENDIR)/src/proto/grpc/testing/control.pb.cc: src/proto/grpc/testing/control.p $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc: src/proto/grpc/testing/control.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -1935,7 +1978,7 @@ $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc: src/proto/grpc/ $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc: src/proto/grpc/testing/duplicate/echo_duplicate.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -1950,7 +1993,7 @@ $(GENDIR)/src/proto/grpc/testing/echo.pb.cc: src/proto/grpc/testing/echo.proto $ $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc: src/proto/grpc/testing/echo.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -1965,7 +2008,7 @@ $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc: src/proto/grpc/testing/ech $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc: src/proto/grpc/testing/echo_messages.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. 
--grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -1980,7 +2023,7 @@ $(GENDIR)/src/proto/grpc/testing/empty.pb.cc: src/proto/grpc/testing/empty.proto $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc: src/proto/grpc/testing/empty.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -1995,7 +2038,7 @@ $(GENDIR)/src/proto/grpc/testing/messages.pb.cc: src/proto/grpc/testing/messages $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc: src/proto/grpc/testing/messages.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -2010,7 +2053,7 @@ $(GENDIR)/src/proto/grpc/testing/metrics.pb.cc: src/proto/grpc/testing/metrics.p $(GENDIR)/src/proto/grpc/testing/metrics.grpc.pb.cc: src/proto/grpc/testing/metrics.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -2025,7 +2068,7 @@ $(GENDIR)/src/proto/grpc/testing/payloads.pb.cc: src/proto/grpc/testing/payloads $(GENDIR)/src/proto/grpc/testing/payloads.grpc.pb.cc: src/proto/grpc/testing/payloads.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -2040,7 +2083,7 @@ $(GENDIR)/src/proto/grpc/testing/services.pb.cc: src/proto/grpc/testing/services $(GENDIR)/src/proto/grpc/testing/services.grpc.pb.cc: src/proto/grpc/testing/services.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/control.pb.cc $(GENDIR)/src/proto/grpc/testing/control.grpc.pb.cc $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. 
--grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -2055,7 +2098,7 @@ $(GENDIR)/src/proto/grpc/testing/stats.pb.cc: src/proto/grpc/testing/stats.proto $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc: src/proto/grpc/testing/stats.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif ifeq ($(NO_PROTOC),true) @@ -2070,7 +2113,7 @@ $(GENDIR)/src/proto/grpc/testing/test.pb.cc: src/proto/grpc/testing/test.proto $ $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc: src/proto/grpc/testing/test.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif @@ -2164,7 +2207,7 @@ install-shared_c: shared_c strip-shared_c install-pkg-config_c ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgpr-imp.a $(prefix)/lib/libgpr-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so.0 + $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so.1 $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgpr.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT)" @@ -2173,7 +2216,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc-imp.a $(prefix)/lib/libgrpc-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT)" @@ -2182,7 +2225,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_cronet-imp.a $(prefix)/lib/libgrpc_cronet-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_cronet.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT)" @@ -2191,7 +2234,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure-imp.a $(prefix)/lib/libgrpc_unsecure-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so.0 + $(Q) ln -sf 
$(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_unsecure.so endif ifneq ($(SYSTEM),MINGW32) @@ -2208,7 +2251,7 @@ install-shared_cxx: shared_cxx strip-shared_cxx install-shared_c install-pkg-con ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++-imp.a $(prefix)/lib/libgrpc++-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT)" @@ -2217,7 +2260,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection-imp.a $(prefix)/lib/libgrpc++_reflection-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_reflection.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT)" @@ -2226,7 +2269,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure-imp.a $(prefix)/lib/libgrpc++_unsecure-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc++_unsecure.so endif ifneq ($(SYSTEM),MINGW32) @@ -2243,7 +2286,7 @@ install-shared_csharp: shared_csharp strip-shared_csharp ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext-imp.a $(prefix)/lib/libgrpc_csharp_ext-imp.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so.0 + $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(prefix)/lib/libgrpc_csharp_ext.so endif ifneq ($(SYSTEM),MINGW32) @@ -2417,7 +2460,7 @@ $(LIBDIR)/$(CONFIG)/libgpr.a: $(ZLIB_DEP) $(LIBGPR_OBJS) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgpr.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBGPR_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBGPR_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgpr.a endif @@ -2436,8 +2479,8 @@ $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT): $(LIBGPR_OBJS) $(ZLI ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.0 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) 
$(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.1 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION).so endif endif @@ -2459,7 +2502,7 @@ $(LIBDIR)/$(CONFIG)/libgpr_test_util.a: $(ZLIB_DEP) $(LIBGPR_TEST_UTIL_OBJS) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgpr_test_util.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBGPR_TEST_UTIL_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBGPR_TEST_UTIL_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgpr_test_util.a endif @@ -2479,6 +2522,7 @@ LIBGRPC_SRC = \ src/core/lib/channel/channel_stack_builder.c \ src/core/lib/channel/compress_filter.c \ src/core/lib/channel/connected_channel.c \ + src/core/lib/channel/handshaker.c \ src/core/lib/channel/http_client_filter.c \ src/core/lib/channel/http_server_filter.c \ src/core/lib/compression/compression.c \ @@ -2710,7 +2754,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC_OBJS) $(LIB $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBGRPC_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) $(OPENSSL_MERGE_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBGRPC_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) $(OPENSSL_MERGE_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc.a endif @@ -2729,8 +2773,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_OBJS) $(Z ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION).so endif endif @@ -2751,6 +2795,7 @@ LIBGRPC_CRONET_SRC = \ src/core/lib/channel/channel_stack_builder.c \ src/core/lib/channel/compress_filter.c \ src/core/lib/channel/connected_channel.c \ + src/core/lib/channel/handshaker.c \ src/core/lib/channel/http_client_filter.c \ src/core/lib/channel/http_server_filter.c \ src/core/lib/compression/compression.c \ 
@@ -2959,7 +3004,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc_cronet.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC_CRONE $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc_cronet.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc_cronet.a $(LIBGRPC_CRONET_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) $(OPENSSL_MERGE_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc_cronet.a $(LIBGRPC_CRONET_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) $(OPENSSL_MERGE_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc_cronet.a endif @@ -2978,8 +3023,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_CRO ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CRONET_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION).so endif endif @@ -3031,7 +3076,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC_TE $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBGRPC_TEST_UTIL_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBGRPC_TEST_UTIL_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a endif @@ -3071,7 +3116,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a: $(ZLIB_DEP) $(LIBGRPC_TEST_UT $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBGRPC_TEST_UTIL_UNSECURE_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBGRPC_TEST_UTIL_UNSECURE_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a endif @@ -3092,6 +3137,7 @@ LIBGRPC_UNSECURE_SRC = \ src/core/lib/channel/channel_stack_builder.c \ src/core/lib/channel/compress_filter.c \ src/core/lib/channel/connected_channel.c \ + src/core/lib/channel/handshaker.c \ src/core/lib/channel/http_client_filter.c \ src/core/lib/channel/http_server_filter.c \ src/core/lib/compression/compression.c \ @@ -3280,7 +3326,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a: $(ZLIB_DEP) $(LIBGRPC_UNSECURE_OBJS) $ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a - $(Q) $(AR) 
$(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBGRPC_UNSECURE_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBGRPC_UNSECURE_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a endif @@ -3299,8 +3345,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC_U ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION).so endif endif @@ -3332,7 +3378,7 @@ $(LIBDIR)/$(CONFIG)/libreconnect_server.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBRECON $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libreconnect_server.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libreconnect_server.a $(LIBRECONNECT_SERVER_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libreconnect_server.a $(LIBRECONNECT_SERVER_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libreconnect_server.a endif @@ -3371,7 +3417,7 @@ $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBTEST_T $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a $(LIBTEST_TCP_SERVER_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a $(LIBTEST_TCP_SERVER_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libtest_tcp_server.a endif @@ -3550,7 +3596,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc++.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTOBUF_DEP) $(LI $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc++.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBGRPC++_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBGRPC++_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc++.a endif @@ -3569,8 +3615,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC++_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc else - $(Q) $(LDXX) $(LDFLAGS) 
-L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc - $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so.0 + $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc + $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION).so endif endif @@ -3677,7 +3723,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_reflection.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTOBU $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc++_reflection.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection.a $(LIBGRPC++_REFLECTION_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection.a $(LIBGRPC++_REFLECTION_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc++_reflection.a endif @@ -3696,8 +3742,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT): $(LIBGR ifeq ($(SYSTEM),Darwin) $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++ else - $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++ - $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so.0 + $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_reflection.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_REFLECTION_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgrpc++ + $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION).so endif endif @@ -3713,6 +3759,55 @@ endif endif +LIBGRPC++_REFLECTION_CODEGEN_SRC = \ + $(GENDIR)/src/proto/grpc/reflection/v1alpha/reflection.pb.cc $(GENDIR)/src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.cc \ + +PUBLIC_HEADERS_CXX += \ + +LIBGRPC++_REFLECTION_CODEGEN_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_REFLECTION_CODEGEN_SRC)))) + + +ifeq ($(NO_SECURE),true) + +# You can't build secure libraries if you don't have OpenSSL. + +$(LIBDIR)/$(CONFIG)/libgrpc++_reflection_codegen.a: openssl_dep_error + + +else + +ifeq ($(NO_PROTOBUF),true) + +# You can't build a C++ library if you don't have protobuf - a bit overreached, but still okay. 
+ +$(LIBDIR)/$(CONFIG)/libgrpc++_reflection_codegen.a: protobuf_dep_error + + +else + +$(LIBDIR)/$(CONFIG)/libgrpc++_reflection_codegen.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTOBUF_DEP) $(LIBGRPC++_REFLECTION_CODEGEN_OBJS) + $(E) "[AR] Creating $@" + $(Q) mkdir -p `dirname $@` + $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc++_reflection_codegen.a + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection_codegen.a $(LIBGRPC++_REFLECTION_CODEGEN_OBJS) +ifeq ($(SYSTEM),Darwin) + $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc++_reflection_codegen.a +endif + + + + +endif + +endif + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(LIBGRPC++_REFLECTION_CODEGEN_OBJS:.o=.dep) +endif +endif + + LIBGRPC++_TEST_CONFIG_SRC = \ test/cpp/util/test_config.cc \ @@ -3743,7 +3838,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTOB $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBGRPC++_TEST_CONFIG_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBGRPC++_TEST_CONFIG_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a endif @@ -3854,7 +3949,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTOBUF $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBGRPC++_TEST_UTIL_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBGRPC++_TEST_UTIL_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a endif @@ -4027,7 +4122,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBGRPC $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBGRPC++_UNSECURE_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a $(LIBGRPC++_UNSECURE_OBJS) $(LIBGPR_OBJS) $(ZLIB_MERGE_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure.a endif @@ -4046,8 +4141,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC ifeq ($(SYSTEM),Darwin) $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc else - $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_unsecure.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc - $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so.0 + $(Q) $(LDXX) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc++_unsecure.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC++_UNSECURE_OBJS) $(LDLIBS) $(ZLIB_MERGE_LIBS) $(LDLIBSXX) $(LDLIBS_PROTOBUF) -lgpr -lgrpc_unsecure -lgrpc + $(Q) ln -sf 
$(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION).so endif endif @@ -4062,6 +4157,7 @@ endif LIBGRPC_CLI_LIBS_SRC = \ test/cpp/util/cli_call.cc \ test/cpp/util/proto_file_parser.cc \ + test/cpp/util/proto_reflection_descriptor_database.cc \ PUBLIC_HEADERS_CXX += \ @@ -4090,7 +4186,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTOBUF_DE $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBGRPC_CLI_LIBS_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBGRPC_CLI_LIBS_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a endif @@ -4136,7 +4232,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc_plugin_support.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIB $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc_plugin_support.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc_plugin_support.a $(LIBGRPC_PLUGIN_SUPPORT_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc_plugin_support.a $(LIBGRPC_PLUGIN_SUPPORT_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc_plugin_support.a endif @@ -4182,7 +4278,7 @@ $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PRO $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBINTEROP_CLIENT_HELPER_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBINTEROP_CLIENT_HELPER_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a endif @@ -4236,7 +4332,7 @@ $(LIBDIR)/$(CONFIG)/libinterop_client_main.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTO $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libinterop_client_main.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBINTEROP_CLIENT_MAIN_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBINTEROP_CLIENT_MAIN_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libinterop_client_main.a endif @@ -4287,7 +4383,7 @@ $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PRO $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBINTEROP_SERVER_HELPER_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBINTEROP_SERVER_HELPER_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a endif @@ -4339,7 +4435,7 @@ $(LIBDIR)/$(CONFIG)/libinterop_server_main.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTO $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libinterop_server_main.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBINTEROP_SERVER_MAIN_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBINTEROP_SERVER_MAIN_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libinterop_server_main.a endif @@ -4404,7 +4500,7 @@ 
$(LIBDIR)/$(CONFIG)/libqps.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(PROTOBUF_DEP) $(LIBQP $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libqps.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBQPS_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBQPS_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libqps.a endif @@ -4457,7 +4553,7 @@ $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC_C $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext.a $(LIBGRPC_CSHARP_EXT_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext.a $(LIBGRPC_CSHARP_EXT_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext.a endif @@ -4476,8 +4572,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT): $(LIBGRPC ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) else - $(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.0 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so.0 + $(Q) $(LD) $(LDFLAGS) $(if $(subst Linux,,$(SYSTEM)),,-Wl$(comma)-wrap$(comma)memcpy) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_csharp_ext.so.1 -o $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBGRPC_CSHARP_EXT_OBJS) $(LDLIBS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so.1 $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION).$(SHARED_EXT) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION).so endif endif @@ -4801,7 +4897,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl.a: $(ZLIB_DEP) $(LIBBORINGSSL_OBJS) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl.a $(LIBBORINGSSL_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl.a $(LIBBORINGSSL_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl.a endif @@ -4839,7 +4935,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIB $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBBORINGSSL_TEST_UTIL_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBBORINGSSL_TEST_UTIL_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a endif @@ -4877,7 +4973,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) 
$( $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBBORINGSSL_AES_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBBORINGSSL_AES_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a endif @@ -4915,7 +5011,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBBORINGSSL_ASN1_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBBORINGSSL_ASN1_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a endif @@ -4953,7 +5049,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBBORINGSSL_BASE64_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBBORINGSSL_BASE64_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a endif @@ -4991,7 +5087,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $( $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBBORINGSSL_BIO_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBBORINGSSL_BIO_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a endif @@ -5029,7 +5125,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(L $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBBORINGSSL_BN_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBBORINGSSL_BN_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a endif @@ -5067,7 +5163,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBBORINGSSL_BYTESTRING_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBBORINGSSL_BYTESTRING_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a endif @@ -5105,7 +5201,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBBORINGSSL_AEAD_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBBORINGSSL_AEAD_TEST_LIB_OBJS) 
ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a endif @@ -5143,7 +5239,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBBORINGSSL_CIPHER_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBBORINGSSL_CIPHER_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a endif @@ -5181,7 +5277,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBBORINGSSL_CMAC_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBBORINGSSL_CMAC_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a endif @@ -5210,7 +5306,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_constant_time_test_lib.a: $(ZLIB_DEP) $(LIBBOR $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_constant_time_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_constant_time_test_lib.a $(LIBBORINGSSL_CONSTANT_TIME_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_constant_time_test_lib.a $(LIBBORINGSSL_CONSTANT_TIME_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_constant_time_test_lib.a endif @@ -5246,7 +5342,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBBORINGSSL_ED25519_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBBORINGSSL_ED25519_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a endif @@ -5284,7 +5380,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBBORINGSSL_X25519_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBBORINGSSL_X25519_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a endif @@ -5322,7 +5418,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(L $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBBORINGSSL_DH_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBBORINGSSL_DH_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a endif @@ -5360,7 +5456,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(E) "[AR] Creating $@" $(Q) mkdir -p 
`dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBBORINGSSL_DIGEST_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBBORINGSSL_DIGEST_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a endif @@ -5389,7 +5485,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_dsa_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_DSA $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_dsa_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_dsa_test_lib.a $(LIBBORINGSSL_DSA_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_dsa_test_lib.a $(LIBBORINGSSL_DSA_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_dsa_test_lib.a endif @@ -5425,7 +5521,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(L $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBBORINGSSL_EC_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBBORINGSSL_EC_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a endif @@ -5454,7 +5550,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_example_mul_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_example_mul_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_example_mul_lib.a $(LIBBORINGSSL_EXAMPLE_MUL_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_example_mul_lib.a $(LIBBORINGSSL_EXAMPLE_MUL_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_example_mul_lib.a endif @@ -5490,7 +5586,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBBORINGSSL_ECDSA_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBBORINGSSL_ECDSA_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a endif @@ -5528,7 +5624,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $( $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBBORINGSSL_ERR_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBBORINGSSL_ERR_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a endif @@ -5566,7 +5662,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_D $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBBORINGSSL_EVP_EXTRA_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBBORINGSSL_EVP_EXTRA_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib 
-no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a endif @@ -5604,7 +5700,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $( $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBBORINGSSL_EVP_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBBORINGSSL_EVP_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a endif @@ -5642,7 +5738,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBBORINGSSL_PBKDF_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBBORINGSSL_PBKDF_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a endif @@ -5671,7 +5767,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_hkdf_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_HK $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_hkdf_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_hkdf_test_lib.a $(LIBBORINGSSL_HKDF_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_hkdf_test_lib.a $(LIBBORINGSSL_HKDF_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_hkdf_test_lib.a endif @@ -5707,7 +5803,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBBORINGSSL_HMAC_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBBORINGSSL_HMAC_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a endif @@ -5736,7 +5832,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_lhash_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_L $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_lhash_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_lhash_test_lib.a $(LIBBORINGSSL_LHASH_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_lhash_test_lib.a $(LIBBORINGSSL_LHASH_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_lhash_test_lib.a endif @@ -5763,7 +5859,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_gcm_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_GCM $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_gcm_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_gcm_test_lib.a $(LIBBORINGSSL_GCM_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_gcm_test_lib.a $(LIBBORINGSSL_GCM_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_gcm_test_lib.a endif @@ -5799,7 +5895,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a - $(Q) $(AR) 
$(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBBORINGSSL_PKCS12_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBBORINGSSL_PKCS12_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a endif @@ -5837,7 +5933,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBBORINGSSL_PKCS8_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBBORINGSSL_PKCS8_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a endif @@ -5875,7 +5971,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DE $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBBORINGSSL_POLY1305_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBBORINGSSL_POLY1305_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a endif @@ -5904,7 +6000,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_refcount_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSS $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_refcount_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_refcount_test_lib.a $(LIBBORINGSSL_REFCOUNT_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_refcount_test_lib.a $(LIBBORINGSSL_REFCOUNT_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_refcount_test_lib.a endif @@ -5940,7 +6036,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $( $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBBORINGSSL_RSA_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBBORINGSSL_RSA_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a endif @@ -5969,7 +6065,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_thread_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_thread_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_thread_test_lib.a $(LIBBORINGSSL_THREAD_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_thread_test_lib.a $(LIBBORINGSSL_THREAD_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_thread_test_lib.a endif @@ -5996,7 +6092,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_pkcs7_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_P $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_pkcs7_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_pkcs7_test_lib.a $(LIBBORINGSSL_PKCS7_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_pkcs7_test_lib.a $(LIBBORINGSSL_PKCS7_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols 
$(LIBDIR)/$(CONFIG)/libboringssl_pkcs7_test_lib.a endif @@ -6032,7 +6128,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBBORINGSSL_X509_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBBORINGSSL_X509_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a endif @@ -6061,7 +6157,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_tab_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_TAB $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_tab_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_tab_test_lib.a $(LIBBORINGSSL_TAB_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_tab_test_lib.a $(LIBBORINGSSL_TAB_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_tab_test_lib.a endif @@ -6088,7 +6184,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_v3name_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_v3name_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_v3name_test_lib.a $(LIBBORINGSSL_V3NAME_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_v3name_test_lib.a $(LIBBORINGSSL_V3NAME_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_v3name_test_lib.a endif @@ -6115,7 +6211,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_pqueue_test_lib.a: $(ZLIB_DEP) $(LIBBORINGSSL_ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_pqueue_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_pqueue_test_lib.a $(LIBBORINGSSL_PQUEUE_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_pqueue_test_lib.a $(LIBBORINGSSL_PQUEUE_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_pqueue_test_lib.a endif @@ -6151,7 +6247,7 @@ $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $( $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a $(LIBBORINGSSL_SSL_TEST_LIB_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a $(LIBBORINGSSL_SSL_TEST_LIB_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a endif @@ -6193,7 +6289,7 @@ $(LIBDIR)/$(CONFIG)/libz.a: $(LIBZ_OBJS) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libz.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libz.a $(LIBZ_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libz.a $(LIBZ_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libz.a endif @@ -6228,7 +6324,7 @@ $(LIBDIR)/$(CONFIG)/libbad_client_test.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBBAD_CL $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libbad_client_test.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libbad_client_test.a $(LIBBAD_CLIENT_TEST_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libbad_client_test.a $(LIBBAD_CLIENT_TEST_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols 
$(LIBDIR)/$(CONFIG)/libbad_client_test.a endif @@ -6267,7 +6363,7 @@ $(LIBDIR)/$(CONFIG)/libbad_ssl_test_server.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBBA $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libbad_ssl_test_server.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libbad_ssl_test_server.a $(LIBBAD_SSL_TEST_SERVER_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libbad_ssl_test_server.a $(LIBBAD_SSL_TEST_SERVER_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libbad_ssl_test_server.a endif @@ -6347,7 +6443,7 @@ $(LIBDIR)/$(CONFIG)/libend2end_tests.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBEND2END_ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libend2end_tests.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBEND2END_TESTS_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBEND2END_TESTS_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libend2end_tests.a endif @@ -6416,7 +6512,7 @@ $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a: $(ZLIB_DEP) $(LIBEND2END_NOSEC_TE $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBEND2END_NOSEC_TESTS_OBJS) + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBEND2END_NOSEC_TESTS_OBJS) ifeq ($(SYSTEM),Darwin) $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a endif @@ -10177,38 +10273,6 @@ endif endif -WORKQUEUE_TEST_SRC = \ - test/core/iomgr/workqueue_test.c \ - -WORKQUEUE_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(WORKQUEUE_TEST_SRC)))) -ifeq ($(NO_SECURE),true) - -# You can't build secure targets if you don't have OpenSSL. 
- -$(BINDIR)/$(CONFIG)/workqueue_test: openssl_dep_error - -else - - - -$(BINDIR)/$(CONFIG)/workqueue_test: $(WORKQUEUE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a - $(E) "[LD] Linking $@" - $(Q) mkdir -p `dirname $@` - $(Q) $(LD) $(LDFLAGS) $(WORKQUEUE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/workqueue_test - -endif - -$(OBJDIR)/$(CONFIG)/test/core/iomgr/workqueue_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a - -deps_workqueue_test: $(WORKQUEUE_TEST_OBJS:.o=.dep) - -ifneq ($(NO_SECURE),true) -ifneq ($(NO_DEPS),true) --include $(WORKQUEUE_TEST_OBJS:.o=.dep) -endif -endif - - ALARM_CPP_TEST_SRC = \ test/cpp/common/alarm_cpp_test.cc \ @@ -11003,16 +11067,16 @@ $(BINDIR)/$(CONFIG)/grpc_cli: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/grpc_cli: $(PROTOBUF_DEP) $(GRPC_CLI_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(BINDIR)/$(CONFIG)/grpc_cli: $(PROTOBUF_DEP) $(GRPC_CLI_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_reflection.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(GRPC_CLI_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/grpc_cli + $(Q) $(LDXX) $(LDFLAGS) $(GRPC_CLI_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_reflection.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/grpc_cli endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/util/grpc_cli.o: $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(OBJDIR)/$(CONFIG)/test/cpp/util/grpc_cli.o: $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_reflection.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a 
$(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a deps_grpc_cli: $(GRPC_CLI_OBJS:.o=.dep) @@ -14926,6 +14990,7 @@ test/cpp/util/byte_buffer_proto_helper.cc: $(OPENSSL_DEP) test/cpp/util/cli_call.cc: $(OPENSSL_DEP) test/cpp/util/create_test_channel.cc: $(OPENSSL_DEP) test/cpp/util/proto_file_parser.cc: $(OPENSSL_DEP) +test/cpp/util/proto_reflection_descriptor_database.cc: $(OPENSSL_DEP) test/cpp/util/string_ref_helper.cc: $(OPENSSL_DEP) test/cpp/util/subprocess.cc: $(OPENSSL_DEP) test/cpp/util/test_config.cc: $(OPENSSL_DEP) diff --git a/PYTHON-MANIFEST.in b/PYTHON-MANIFEST.in index 175a47f1576..96bcdb515c9 100644 --- a/PYTHON-MANIFEST.in +++ b/PYTHON-MANIFEST.in @@ -7,7 +7,7 @@ graft include/grpc graft third_party/boringssl graft third_party/nanopb graft third_party/zlib -include src/python/grpcio/build.py +include src/python/grpcio/_unixccompiler_patch.py include src/python/grpcio/commands.py include src/python/grpcio/grpc_version.py include src/python/grpcio/grpc_core_dependencies.py diff --git a/Rakefile b/Rakefile index f44946fe937..5b6f101d8ff 100755 --- a/Rakefile +++ b/Rakefile @@ -83,7 +83,7 @@ task 'dlls' do env += 'EMBED_ZLIB=true ' env += 'BUILDDIR=/tmp ' env += "V=#{verbose} " - out = '/tmp/libs/opt/grpc-0.dll' + out = '/tmp/libs/opt/grpc-1.dll' w64 = { cross: 'x86_64-w64-mingw32', out: 'grpc_c.64.ruby' } w32 = { cross: 'i686-w64-mingw32', out: 'grpc_c.32.ruby' } diff --git a/binding.gyp b/binding.gyp index e1130ad01ab..1dd7ee95555 100644 --- a/binding.gyp +++ b/binding.gyp @@ -568,6 +568,7 @@ 'src/core/lib/channel/channel_stack_builder.c', 'src/core/lib/channel/compress_filter.c', 'src/core/lib/channel/connected_channel.c', + 'src/core/lib/channel/handshaker.c', 'src/core/lib/channel/http_client_filter.c', 'src/core/lib/channel/http_server_filter.c', 'src/core/lib/compression/compression.c', diff --git a/build.yaml b/build.yaml index 1b20dde5631..37c95ccdc0a 100644 --- a/build.yaml +++ b/build.yaml @@ -7,7 +7,7 @@ settings: '#3': Use "-preN" suffixes to identify pre-release versions '#4': Per-language overrides are possible with (eg) ruby_version tag here '#5': See the expand_version.py for all the quirks here - version: 0.16.0-dev + version: 1.1.0-dev filegroups: - name: census public_headers: @@ -156,6 +156,7 @@ filegroups: - src/core/lib/channel/compress_filter.h - src/core/lib/channel/connected_channel.h - src/core/lib/channel/context.h + - src/core/lib/channel/handshaker.h - src/core/lib/channel/http_client_filter.h - src/core/lib/channel/http_server_filter.h - src/core/lib/compression/algorithm_metadata.h @@ -234,6 +235,7 @@ filegroups: - src/core/lib/channel/channel_stack_builder.c - src/core/lib/channel/compress_filter.c - src/core/lib/channel/connected_channel.c + - src/core/lib/channel/handshaker.c - src/core/lib/channel/http_client_filter.c - src/core/lib/channel/http_server_filter.c - src/core/lib/compression/compression.c @@ -768,6 +770,16 @@ filegroups: language: c++ public_headers: - include/grpc++/impl/codegen/config_protobuf.h +- name: grpc++_reflection_proto + language: c++ + public_headers: + - include/grpc++/ext/reflection.grpc.pb.h + - include/grpc++/ext/reflection.pb.h + src: + - src/cpp/ext/reflection.grpc.pb.cc + - src/cpp/ext/reflection.pb.cc + uses: + - grpc++_codegen_proto libs: - name: gpr build: all @@ -960,19 +972,20 @@ libs: language: c++ public_headers: - include/grpc++/ext/proto_server_reflection_plugin.h - - include/grpc++/ext/reflection.grpc.pb.h - - include/grpc++/ext/reflection.pb.h headers: - src/cpp/ext/proto_server_reflection.h src: - 
src/cpp/ext/proto_server_reflection.cc - src/cpp/ext/proto_server_reflection_plugin.cc - - src/cpp/ext/reflection.grpc.pb.cc - - src/cpp/ext/reflection.pb.cc deps: - grpc++ filegroups: - - grpc++_codegen_proto + - grpc++_reflection_proto +- name: grpc++_reflection_codegen + build: private + language: c++ + src: + - src/proto/grpc/reflection/v1alpha/reflection.proto - name: grpc++_test_config build: private language: c++ @@ -1030,10 +1043,13 @@ libs: headers: - test/cpp/util/cli_call.h - test/cpp/util/proto_file_parser.h + - test/cpp/util/proto_reflection_descriptor_database.h src: - test/cpp/util/cli_call.cc - test/cpp/util/proto_file_parser.cc + - test/cpp/util/proto_reflection_descriptor_database.cc deps: + - grpc++_reflection - grpc++ - grpc_plugin_support - name: grpc_plugin_support @@ -2430,20 +2446,6 @@ targets: - grpc - gpr_test_util - gpr -- name: workqueue_test - build: test - language: c - src: - - test/core/iomgr/workqueue_test.c - deps: - - grpc_test_util - - grpc - - gpr_test_util - - gpr - platforms: - - mac - - linux - - posix - name: alarm_cpp_test gtest: true build: test @@ -2671,6 +2673,7 @@ targets: - grpc_cli_libs - grpc++_test_util - grpc_test_util + - grpc++_reflection - grpc++ - grpc - gpr_test_util @@ -3367,6 +3370,7 @@ php_config_m4: - src/php/ext/grpc/channel.h - src/php/ext/grpc/channel_credentials.h - src/php/ext/grpc/completion_queue.h + - src/php/ext/grpc/php7_wrapper.h - src/php/ext/grpc/php_grpc.h - src/php/ext/grpc/server.h - src/php/ext/grpc/server_credentials.h diff --git a/composer.json b/composer.json index 6e7f24b451e..78366208edb 100644 --- a/composer.json +++ b/composer.json @@ -5,15 +5,9 @@ "keywords": ["rpc"], "homepage": "http://grpc.io", "license": "BSD-3-Clause", - "repositories": [ - { - "type": "vcs", - "url": "https://github.com/stanley-cheung/Protobuf-PHP" - } - ], "require": { "php": ">=5.5.0", - "datto/protobuf-php": "dev-master" + "stanley-cheung/protobuf-php": "v0.6" }, "require-dev": { "google/auth": "v0.9" diff --git a/config.m4 b/config.m4 index 0c3322d8efe..e4e410884d0 100644 --- a/config.m4 +++ b/config.m4 @@ -87,6 +87,7 @@ if test "$PHP_GRPC" != "no"; then src/core/lib/channel/channel_stack_builder.c \ src/core/lib/channel/compress_filter.c \ src/core/lib/channel/connected_channel.c \ + src/core/lib/channel/handshaker.c \ src/core/lib/channel/http_client_filter.c \ src/core/lib/channel/http_server_filter.c \ src/core/lib/compression/compression.c \ diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 00000000000..95464d3e664 --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1,2 @@ +build/ +src/ diff --git a/doc/c-style-guide.md b/doc/c-style-guide.md index d6f9bbd7d4a..369bd56a463 100644 --- a/doc/c-style-guide.md +++ b/doc/c-style-guide.md @@ -9,16 +9,17 @@ Here we document style rules for C usage in the gRPC Core library. General ------- -- Layout rules are defined by clang-format, and all code should be passed through - clang-format. A (docker-based) script to do so is included in - [tools/distrib/clang\_format\_code.sh] (../tools/distrib/clang_format_code.sh). +- Layout rules are defined by clang-format, and all code should be passed + through clang-format. A (docker-based) script to do so is included in + [tools/distrib/clang\_format\_code.sh](../tools/distrib/clang_format_code.sh). Header Files ------------ -- Public header files (those in the include/grpc tree) should compile as pedantic C89 -- Public header files should be includable from C++ programs. 
That is, they should - include the following: +- Public header files (those in the include/grpc tree) should compile as + pedantic C89. +- Public header files should be includable from C++ programs. That is, they + should include the following: ```c #ifdef __cplusplus extern "C" { ``` @@ -34,24 +35,34 @@ Header Files - All header files should have a #define guard to prevent multiple inclusion. To guarantee uniqueness they should be based on the file's path. - For public headers: include/grpc/grpc.h --> GRPC_GRPC_H + For public headers: `include/grpc/grpc.h` → `GRPC_GRPC_H` + + For private headers: + `src/core/channel/channel_stack.h` → + `GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H` + +Variable Initialization +----------------------- + +When declaring a (non-static) pointer variable, always initialize it to `NULL`. +Even in the case of static pointer variables, it's recommended to explicitly +initialize them to `NULL`. - For private headers: - src/core/channel/channel_stack.h --> GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_STACK_H C99 Features ------------ -- Variable sized arrays are not allowed -- Do not use the 'inline' keyword -- Flexible array members are allowed (https://en.wikipedia.org/wiki/Flexible_array_member) +- Variable sized arrays are not allowed. +- Do not use the 'inline' keyword. +- Flexible array members are allowed + (https://en.wikipedia.org/wiki/Flexible_array_member). Comments -------- Within public header files, only `/* */` comments are allowed. -Within implementation files and private headers, either single line `//` +Within implementation files and private headers, either single line `//` or multi line `/* */` comments are allowed. Only one comment style per file is allowed however (i.e. if single line comments are used anywhere within a file, ALL comments within that file must be single line comments). @@ -59,7 +70,15 @@ ALL comments within that file must be single line comments). Symbol Names ------------ -- Non-static functions must be prefixed by grpc_ -- static functions must not be prefixed by grpc_ -- enumeration values and #define names are uppercased, all others are lowercased -- Multiple word identifiers use underscore as a delimiter (NEVER camel casing) +- Non-static functions must be prefixed by `grpc_` +- Static functions must *not* be prefixed by `grpc_` +- Enumeration values and `#define` names must be uppercase. All other values + must be lowercase. +- Multiple word identifiers use underscore as a delimiter, *never* camel + case. E.g. `variable_name`. + +Functions +---------- + +- The use of [`atexit()`](http://man7.org/linux/man-pages/man3/atexit.3.html) is + forbidden in libgrpc. diff --git a/doc/command_line_tool.md b/doc/command_line_tool.md index 89a70548b8b..79a131c041a 100644 --- a/doc/command_line_tool.md +++ b/doc/command_line_tool.md @@ -15,6 +15,7 @@ The command line tool can do the following things: - Send unary rpc. - Attach metadata and display received metadata. - Handle common authentication to server. +- Infer request/response types from server reflection result. - Find the request/response types from a given proto file. - Read proto request in text form. - Read request in wire form (for protobuf messages, this means serialized binary form). @@ -24,7 +25,6 @@ The command line tool should support the following things: - List server services and methods through server reflection. -- Infer request/response types from server reflection result.
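The header-file conventions added to `doc/c-style-guide.md` above are easiest to see in one place. The sketch below is illustrative only: `include/grpc/example_widget.h` and the `grpc_example_widget_*` identifiers are hypothetical names, not files or symbols in the tree; the snippet just shows the path-based include guard, the `extern "C"` wrapper, `/* */`-only comments, and `grpc_`-prefixed lowercase, underscore-delimited identifiers working together.

```c
/* Hypothetical public header include/grpc/example_widget.h (illustration
   only, not part of gRPC). The include guard is derived from the path. */
#ifndef GRPC_EXAMPLE_WIDGET_H
#define GRPC_EXAMPLE_WIDGET_H

#ifdef __cplusplus
extern "C" {
#endif

/* Enumeration values and #define names are uppercase. */
#define GRPC_EXAMPLE_WIDGET_MAX_NAME_LEN 64

typedef enum {
  GRPC_EXAMPLE_WIDGET_OK = 0,
  GRPC_EXAMPLE_WIDGET_ERROR = 1
} grpc_example_widget_status;

/* Non-static functions are prefixed with grpc_ and use underscores,
   never camel case. Declarations stay pedantic-C89 clean. */
grpc_example_widget_status grpc_example_widget_init(const char *name);

#ifdef __cplusplus
}
#endif

#endif /* GRPC_EXAMPLE_WIDGET_H */
```

Per the Variable Initialization rule above, pointer variables in the corresponding implementation file would also be initialized to `NULL` at declaration.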
- Fine-grained auth control (such as, use this oauth token to talk to the server). - Send streaming rpc. @@ -46,24 +46,35 @@ https://github.com/grpc/grpc/blob/master/test/cpp/util/grpc_cli.cc Send a rpc to a helloworld server at `localhost:50051`: ``` -$ bins/opt/grpc_cli call localhost:50051 SayHello examples/protos/helloworld.proto \ - "name: 'world'" --enable_ssl=false +$ bins/opt/grpc_cli call localhost:50051 SayHello "name: 'world'" \ + --enable_ssl=false ``` On success, the tool will print out ``` Rpc succeeded with OK status -Response: +Response: message: "Hello world" ``` The `localhost:50051` part indicates the server you are connecting to. `SayHello` is (part of) the -gRPC method string. Then there is the path to the proto file containing the service definition, -if it is not under current directory, you can use `--proto_path` to specify a new search root. -`"name: 'world'"` is the text format of the request proto message. -We are not using ssl here by `--enable_ssl=false`. For information on more -flags, look at the comments of `grpc_cli.cc`. +gRPC method string. Then `"name: 'world'"` is the text format of the request proto message. We are +not using ssl here, as indicated by `--enable_ssl=false`. For information on more flags, look at the comments of `grpc_cli.cc`. + +### Use local proto files + +If the server does not have the server reflection service, you will need to provide local proto +files containing the service definition. The tool will try to find request/response types from +them. + +``` +$ bins/opt/grpc_cli call localhost:50051 SayHello "name: 'world'" \ + --protofiles=examples/protos/helloworld.proto --enable_ssl=false +``` + +If the proto files are not under the current directory, you can use `--proto_path` to specify a new +search root. ### Send non-proto rpc diff --git a/doc/images/load_balancing_design.png b/doc/images/load_balancing_design.png new file mode 100644 index 00000000000..86183966fb8 Binary files /dev/null and b/doc/images/load_balancing_design.png differ diff --git a/doc/load-balancing.md b/doc/load-balancing.md index 681be02a72f..dfaa7a7f33b 100644 --- a/doc/load-balancing.md +++ b/doc/load-balancing.md @@ -4,7 +4,7 @@ Load Balancing in gRPC # Objective To design a load balancing API between a gRPC client and a Load Balancer to -instruct the client how to send load to multiple backend servers. +instruct the client how to send load to multiple backend servers. # Background @@ -19,7 +19,7 @@ have temporary copies of the RPC request and response. This model also increases latency to the RPCs. The proxy model was deemed inefficient when considering request heavy services -like storage. +like storage. ### Balancing-aware Client @@ -28,7 +28,7 @@ example, the client could contain many load balancing policies (Round Robin, Random, etc) used to select servers from a list. In this model, a list of servers would be either statically configured in the client, provided by the name resolution system, an external load balancer, etc. In any case, the client -is responsible for choosing the preferred server from the list. +is responsible for choosing the preferred server from the list. One of the drawbacks of this approach is writing and maintaining the load balancing policies in multiple languages and/or versions of the clients. These @@ -53,14 +53,69 @@ unavailability or health issues. The load balancer will make any necessary complex decisions and inform the client. The load balancer may communicate with the backend servers to collect load and health information.
+ +## Requirements + +#### Simple API and client + +The gRPC client load balancing code must be simple and portable. The client +should only contain simple algorithms (such as Round Robin) for server selection. For +complex algorithms, the client should rely on a load balancer to provide load +balancing configuration and the list of servers to which the client should send +requests. The balancer will update the server list as needed to balance the load +as well as handle server unavailability or health issues. The load balancer will +make any necessary complex decisions and inform the client. The load balancer +may communicate with the backend servers to collect load and health information. + +#### Security + +The load balancer may be separate from the actual server backends and a +compromise of the load balancer should only lead to a compromise of the +load balancing functionality. In other words, a compromised load balancer should +not be able to cause a client to trust a (potentially malicious) backend server +any more than in a comparable situation without load balancing. + # Proposed Architecture -The gRPC load balancing approach follows the third approach, by having an -external load balancer which provides simple clients with a list of servers. +The gRPC load balancing design follows the external load balancer approach: +an external load balancer provides simple clients with an up-to-date list of +servers. + +![image](images/load_balancing_design.png) + +1. On startup, the gRPC client issues a name resolution request for the service. + The name will resolve to one or more IP addresses of gRPC servers, a hint on + whether the IP address(es) point to a load balancer or not, and also a + client config. +2. The gRPC client connects to a gRPC Server. + 1. If the name resolution has hinted that the endpoint is a load balancer, + the client's gRPC LB policy will attempt to open a stream to the load + balancer service. The server may respond in only one of the following + ways. + 1. `status::UNIMPLEMENTED`. There is no load balancing in use. The client + call will fail. + 2. "I am a Load Balancer and here is the server list." (Go to Step 4.) + 3. "Please contact Load Balancer X" (See Step 3.) The client will close + this connection and cancel the stream. + 4. If the server fails to respond, the client will wait for some timeout + and then re-resolve the name (proceed to Step 1 above). + 2. If the name resolution has not hinted that the endpoint is a load + balancer, the client connects directly to the service it wants to talk to. +3. The gRPC client's gRPC LB policy opens a separate connection to the Load + Balancer. If this fails, it will go back to step 1 and try another address. + 1. During channel initialization to the Load Balancer, the client will + attempt to open a stream to the Load Balancer service. + 2. The Load Balancer will return a server list to the gRPC client. If the + server list is empty, the call will wait until a non-empty one is + received. Optional: The Load Balancer will also open channels to the gRPC + servers if load reporting is needed. +4. The gRPC client will send RPCs to the gRPC servers contained in the server + list from the Load Balancer. +5. Optional: The gRPC servers may periodically report load to the Load Balancer. ## Client -When establishing a gRPC stream to the balancer, the client will send an initial +When establishing a gRPC _stream_ to the balancer, the client will send an initial request to the load balancer (via a regular gRPC message).
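The numbered client flow added to `doc/load-balancing.md` above can be condensed into a short sketch. Everything in it is hypothetical: the stub functions (`resolve_name`, `open_lb_stream`, `wait_for_server_list`, `send_rpcs`) and the two structs are placeholders that only print which protocol step would run, not gRPC APIs, and a real client would also handle `UNIMPLEMENTED`, balancer redirects, and timeouts rather than the happy path shown here.

```c
/* Hypothetical, self-contained sketch of the client-side steps above.
   Compiles with any C99 compiler; nothing here is a real gRPC API. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool is_load_balancer; /* hint returned by name resolution (step 1) */
} resolution_result;

typedef struct {
  int num_servers;
} server_list;

/* Stubs standing in for the resolver / channel / stream machinery. */
static resolution_result resolve_name(const char *name) {
  printf("step 1: resolving %s\n", name);
  resolution_result r = {true};
  return r;
}

static bool open_lb_stream(void) {
  printf("step 2.1/3.1: opening a stream to the load balancer service\n");
  return true; /* a real client might instead see UNIMPLEMENTED, a redirect
                  to another balancer, or a timeout */
}

static server_list wait_for_server_list(void) {
  printf("step 3.2: waiting for a non-empty server list\n");
  server_list l = {3};
  return l;
}

static void send_rpcs(int num_servers) {
  printf("step 4: sending RPCs to %d servers from the balancer's list\n",
         num_servers);
}

int main(void) {
  const char *service = "example.service"; /* hypothetical service name */
  for (;;) {
    resolution_result res = resolve_name(service); /* step 1 */
    if (!res.is_load_balancer) {
      printf("step 2.2: no balancer hinted; connecting directly\n");
      return 0;
    }
    if (!open_lb_stream()) {
      continue; /* step 2.1.4: wait, then re-resolve and start over */
    }
    server_list servers = wait_for_server_list(); /* step 3.2 */
    send_rpcs(servers.num_servers);                /* step 4 */
    return 0; /* step 5, load reporting, is optional and server-side */
  }
}
```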
The load balancer will respond with client config (including, for example, settings for flow control, RPC deadlines, etc.) or a redirect to another load balancer. If the @@ -87,11 +142,3 @@ balancer in order to compute the next list of servers. The gRPC Server is responsible for answering RPC requests and providing responses to the client. The server will also report load to the load balancer if a reporting stream was opened for this purpose. - -### Security - -The load balancer may be separate from the actual server backends and a -compromise of the load balancer should only lead to a compromise of the -loadbalancing functionality. In other words, a compromised load balancer should -not be able to cause a client to trust a (potentially malicious) backend server -any more than in a comparable situation without loadbalancing. diff --git a/doc/statuscodes.md b/doc/statuscodes.md index c918f9ed9ab..1cd72df30ad 100644 --- a/doc/statuscodes.md +++ b/doc/statuscodes.md @@ -18,6 +18,7 @@ Only a subset of the pre-defined status codes are generated by the gRPC librarie | Could not decompress, but compression algorithm supported (Server -> Client) | INTERNAL | Client | | Compression mechanism used by client not supported at server | UNIMPLEMENTED | Server | | Server temporarily out of resources (e.g., Flow-control resource limits reached) | RESOURCE_EXHAUSTED | Server| +| Client does not have enough memory to hold the server response | RESOURCE_EXHAUSTED | Client | | Flow-control protocol violation | INTERNAL | Both | | Error parsing returned status | UNKNOWN | Client | | Incorrect Auth metadata ( Credentials failed to get metadata, Incompatible credentials set on channel and call, Invalid host set in `:authority` metadata, etc.) | UNAUTHENTICATED | Both | diff --git a/etc/roots.pem b/etc/roots.pem index 7b4d5f10fba..e6df88ea923 100644 --- a/etc/roots.pem +++ b/etc/roots.pem @@ -2,33 +2,6 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-# Issuer: O=Equifax OU=Equifax Secure Certificate Authority -# Subject: O=Equifax OU=Equifax Secure Certificate Authority -# Label: "Equifax Secure CA" -# Serial: 903804111 -# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4 -# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a -# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78 ------BEGIN CERTIFICATE----- -MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV -UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy -dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1 -MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx -dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B -AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f -BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A -cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC -AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ -MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm -aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw -ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj -IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF -MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA -A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y -7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh -1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4 ------END CERTIFICATE----- - # Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA # Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA # Label: "GlobalSign Root CA" @@ -120,38 +93,6 @@ F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== -----END CERTIFICATE----- -# Issuer: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. 
- For authorized use only -# Label: "Verisign Class 4 Public Primary Certification Authority - G3" -# Serial: 314531972711909413743075096039378935511 -# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df -# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d -# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06 ------BEGIN CERTIFICATE----- -MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw -CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl -cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu -LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT -aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD -VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT -aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ -bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu -IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg -LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1 -GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ -+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd -U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm -NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY -ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ -ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1 -CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq -g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm -fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c -2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/ -bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== ------END CERTIFICATE----- - # Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Label: "Entrust.net Premium 2048 Secure Server CA" @@ -214,30 +155,6 @@ ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp -----END CERTIFICATE----- -# Issuer: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc. -# Subject: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc. 
-# Label: "Equifax Secure Global eBusiness CA" -# Serial: 1 -# MD5 Fingerprint: 8f:5d:77:06:27:c4:98:3c:5b:93:78:e7:d7:7d:9b:cc -# SHA1 Fingerprint: 7e:78:4a:10:1c:82:65:cc:2d:e1:f1:6d:47:b4:40:ca:d9:0a:19:45 -# SHA256 Fingerprint: 5f:0b:62:ea:b5:e3:53:ea:65:21:65:16:58:fb:b6:53:59:f4:43:28:0a:4a:fb:d1:04:d7:7d:10:f9:f0:4c:07 ------BEGIN CERTIFICATE----- -MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEc -MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBT -ZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIw -MDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0VxdWlmYXggU2Vj -dXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEdsb2JhbCBlQnVzaW5l -c3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRVPEnC -UdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc -58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/ -o5brhTMhHD4ePmBudpxnhcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAH -MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1dr -aGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUA -A4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkA -Z70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv -8qIYNMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV ------END CERTIFICATE----- - # Issuer: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network # Subject: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network # Label: "AddTrust Low-Value Services Root" @@ -907,70 +824,6 @@ Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M -----END CERTIFICATE----- -# Issuer: CN=Staat der Nederlanden Root CA O=Staat der Nederlanden -# Subject: CN=Staat der Nederlanden Root CA O=Staat der Nederlanden -# Label: "Staat der Nederlanden Root CA" -# Serial: 10000010 -# MD5 Fingerprint: 60:84:7c:5a:ce:db:0c:d4:cb:a7:e9:fe:02:c6:a9:c0 -# SHA1 Fingerprint: 10:1d:fa:3f:d5:0b:cb:bb:9b:b5:60:0c:19:55:a4:1a:f4:73:3a:04 -# SHA256 Fingerprint: d4:1d:82:9e:8c:16:59:82:2a:f9:3f:ce:62:bf:fc:de:26:4f:c8:4e:8b:95:0c:5f:f2:75:d0:52:35:46:95:a3 ------BEGIN CERTIFICATE----- -MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJO -TDEeMBwGA1UEChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFh -dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEy -MTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4wHAYDVQQKExVTdGFhdCBkZXIgTmVk -ZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxhbmRlbiBSb290IENB -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFtvszn -ExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw71 -9tV2U02PjLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MO -hXeiD+EwR+4A5zN9RGcaC1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+U -tFE5A3+y3qcym7RHjm+0Sq7lr7HcsBthvJly3uSJt3omXdozSVtSnA71iq3DuD3o -BmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn622r+I/q85Ej0ZytqERAh -SQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRVHSAAMDww -OgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMv -cm9vdC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA -7Jbg0zTBLL9s+DANBgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k -/rvuFbQvBgwp8qiSpGEN/KtcCFtREytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzm -eafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbwMVcoEoJz6TMvplW0C5GUR5z6 -u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3ynGQI0DvDKcWy -7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR -iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== ------END CERTIFICATE----- - -# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network 
OU=http://www.usertrust.com -# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com -# Label: "UTN DATACorp SGC Root CA" -# Serial: 91374294542884689855167577680241077609 -# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06 -# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4 -# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48 ------BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug -Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG -EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI ------END CERTIFICATE----- - # Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com # Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com # Label: "UTN USERFirst Hardware Root CA" @@ -1077,51 +930,6 @@ ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/ AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== -----END CERTIFICATE----- -# Issuer: CN=NetLock Kozjegyzoi (Class A) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. OU=Tanusitvanykiadok -# Subject: CN=NetLock Kozjegyzoi (Class A) Tanusitvanykiado O=NetLock Halozatbiztonsagi Kft. 
OU=Tanusitvanykiadok -# Label: "NetLock Notary (Class A) Root" -# Serial: 259 -# MD5 Fingerprint: 86:38:6d:5e:49:63:6c:85:5c:db:6d:dc:94:b7:d0:f7 -# SHA1 Fingerprint: ac:ed:5f:65:53:fd:25:ce:01:5f:1f:7a:48:3b:6a:74:9f:61:78:c6 -# SHA256 Fingerprint: 7f:12:cd:5f:7e:5e:29:0e:c7:d8:51:79:d5:b7:2c:20:a5:be:75:08:ff:db:5b:f8:1a:b9:68:4a:7f:c9:f6:67 ------BEGIN CERTIFICATE----- -MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhV -MRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMe -TmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0 -dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFzcyBB -KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oXDTE5MDIxOTIzMTQ0 -N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQHEwhC -dWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQu -MRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBL -b3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSMD7tM9DceqQWC2ObhbHDqeLVu0ThEDaiD -zl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZz+qMkjvN9wfcZnSX9EUi -3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC/tmwqcm8 -WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LY -Oph7tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2Esi -NCubMvJIH5+hCoR64sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCC -ApswDgYDVR0PAQH/BAQDAgAGMBIGA1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4 -QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZRUxFTSEgRXplbiB0 -YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRhdGFz -aSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu -IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtm -ZWxlbG9zc2VnLWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMg -ZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVs -amFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJhc2EgbWVndGFsYWxoYXRv -IGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBzOi8vd3d3 -Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6 -ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1 -YW5jZSBhbmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3Qg -dG8gdGhlIE5ldExvY2sgQ1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRs -b2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBjcHNAbmV0bG9jay5uZXQuMA0G -CSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5ayZrU3/b39/zcT0mwBQO -xmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjPytoUMaFP -0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQ -QeJBCWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxk -f1qbFFgBJ34TUMdrKuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK -8CtmdWOMovsEPoMOmzbwGOQmIMOM8CgHrTwXZoi1/baI ------END CERTIFICATE----- - # Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com # Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com # Label: "XRamp Global CA Root" @@ -1534,71 +1342,6 @@ rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2 9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis= -----END CERTIFICATE----- -# Issuer: CN=TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı O=(c) 2005 TÃœRKTRUST Bilgi Ä°letiÅŸim ve BiliÅŸim GüvenliÄŸi Hizmetleri A.Åž. -# Subject: CN=TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı O=(c) 2005 TÃœRKTRUST Bilgi Ä°letiÅŸim ve BiliÅŸim GüvenliÄŸi Hizmetleri A.Åž. 
-# Label: "TURKTRUST Certificate Services Provider Root 1" -# Serial: 1 -# MD5 Fingerprint: f1:6a:22:18:c9:cd:df:ce:82:1d:1d:b7:78:5c:a9:a5 -# SHA1 Fingerprint: 79:98:a3:08:e1:4d:65:85:e6:c2:1e:15:3a:71:9f:ba:5a:d3:4a:d9 -# SHA256 Fingerprint: 44:04:e3:3b:5e:14:0d:cf:99:80:51:fd:fc:80:28:c7:c8:16:15:c5:ee:73:7b:11:1b:58:82:33:a9:b5:35:a0 ------BEGIN CERTIFICATE----- -MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOc -UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx -c8SxMQswCQYDVQQGDAJUUjEPMA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykg -MjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8 -dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMxMDI3MTdaFw0xNTAz -MjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsgU2Vy -dGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYD -VQQHDAZBTktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kg -xLBsZXRpxZ9pbSB2ZSBCaWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEu -xZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7 -XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GXyGl8hMW0kWxsE2qkVa2k -heiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8iSi9BB35J -YbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5C -urKZ8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1 -JuTm5Rh8i27fbMx4W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51 -b0dewQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV -9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46sWrv7/hg0Uw2ZkUd82YCdAR7 -kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxEq8Sn5RTOPEFh -fEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy -B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdA -aLX/7KfS0zgYnNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKS -RGQDJereW26fyfJOrN3H ------END CERTIFICATE----- - -# Issuer: CN=TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı O=TÃœRKTRUST Bilgi Ä°letiÅŸim ve BiliÅŸim GüvenliÄŸi Hizmetleri A.Åž. (c) Kasım 2005 -# Subject: CN=TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı O=TÃœRKTRUST Bilgi Ä°letiÅŸim ve BiliÅŸim GüvenliÄŸi Hizmetleri A.Åž. 
(c) Kasım 2005 -# Label: "TURKTRUST Certificate Services Provider Root 2" -# Serial: 1 -# MD5 Fingerprint: 37:a5:6e:d4:b1:25:84:97:b7:fd:56:15:7a:f9:a2:00 -# SHA1 Fingerprint: b4:35:d4:e1:11:9d:1c:66:90:a7:49:eb:b3:94:bd:63:7b:a7:82:b7 -# SHA256 Fingerprint: c4:70:cf:54:7e:23:02:b9:77:fb:29:dd:71:a8:9a:7b:6c:1f:60:77:7b:03:29:f5:60:17:f3:28:bf:4f:6b:e6 ------BEGIN CERTIFICATE----- -MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOc -UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx -c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xS -S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg -SGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcNMDUxMTA3MTAwNzU3 -WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVrdHJv -bmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJU -UjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSw -bGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWe -LiAoYykgS2FzxLFtIDIwMDUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqeLCDe2JAOCtFp0if7qnef -J1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKIx+XlZEdh -R3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJ -Qv2gQrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGX -JHpsmxcPbe9TmJEr5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1p -zpwACPI2/z7woQ8arBT9pmAPAgMBAAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58S -Fq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8GA1UdEwEB/wQFMAMBAf8wDQYJ -KoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/nttRbj2hWyfIvwq -ECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 -Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFz -gw2lGh1uEpJ+hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotH -uFEJjOp9zYhys2AzsfAKRO8P9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LS -y3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5UrbnBEI= ------END CERTIFICATE----- - # Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG # Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG # Label: "SwissSign Gold CA - G2" @@ -2137,107 +1880,6 @@ t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== -----END CERTIFICATE----- -# Issuer: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA -# Subject: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA -# Label: "TC TrustCenter Class 2 CA II" -# Serial: 941389028203453866782103406992443 -# MD5 Fingerprint: ce:78:33:5c:59:78:01:6e:18:ea:b9:36:a0:b9:2e:23 -# SHA1 Fingerprint: ae:50:83:ed:7c:f4:5c:bc:8f:61:c6:21:fe:68:5d:79:42:21:15:6e -# SHA256 Fingerprint: e6:b8:f8:76:64:85:f8:07:ae:7f:8d:ac:16:70:46:1f:07:c0:a1:3e:ef:3a:1f:f7:17:53:8d:7a:ba:d3:91:b4 ------BEGIN CERTIFICATE----- -MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjEL -MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV -BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0 -Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYwMTEyMTQzODQzWhcNMjUxMjMxMjI1 -OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i -SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UEAxMc -VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jf -tMjWQ+nEdVl//OEd+DFwIxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKg -uNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2J -XjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQXa7pIXSSTYtZgo+U4+lK 
-8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7uSNQZu+99 -5OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1Ud -EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3 -kUrL84J6E1wIqzCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy -dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6 -Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz -JTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290 -Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u -TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iS -GNn3Bzn1LL4GdXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprt -ZjluS5TmVfwLG4t3wVMTZonZKNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8 -au0WOB9/WIFaGusyiC2y8zl3gK9etmF1KdsjTYjKUCjLhdLTEKJZbtOTVAB6okaV -hgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kPJOzHdiEoZa5X6AeI -dUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfkvQ== ------END CERTIFICATE----- - -# Issuer: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA -# Subject: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA -# Label: "TC TrustCenter Class 3 CA II" -# Serial: 1506523511417715638772220530020799 -# MD5 Fingerprint: 56:5f:aa:80:61:12:17:f6:67:21:e6:2b:6d:61:56:8e -# SHA1 Fingerprint: 80:25:ef:f4:6e:70:c8:d4:72:24:65:84:fe:40:3b:8a:8d:6a:db:f5 -# SHA256 Fingerprint: 8d:a0:84:fc:f9:9c:e0:77:22:f8:9b:32:05:93:98:06:fa:5c:b8:11:e1:c8:13:f6:a1:08:c7:d3:36:b3:40:8e ------BEGIN CERTIFICATE----- -MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjEL -MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV -BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0 -Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYwMTEyMTQ0MTU3WhcNMjUxMjMxMjI1 -OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i -SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UEAxMc -VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJW -Ht4bNwcwIi9v8Qbxq63WyKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+Q -Vl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo6SI7dYnWRBpl8huXJh0obazovVkdKyT2 -1oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZuV3bOx4a+9P/FRQI2Alq -ukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk2ZyqBwi1 -Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1Ud -EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NX -XAek0CSnwPIA1DCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy -dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6 -Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz -JTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290 -Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u -TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlN -irTzwppVMXzEO2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8 -TtXqluJucsG7Kv5sbviRmEb8yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6 -g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9IJqDnxrcOfHFcqMRA/07QlIp2+gB -95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal092Y+tTmBvTwtiBj -S+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc5A== ------END CERTIFICATE----- - -# Issuer: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA -# Subject: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA -# Label: "TC TrustCenter Universal CA I" -# Serial: 601024842042189035295619584734726 -# MD5 Fingerprint: 45:e1:a5:72:c5:a9:36:64:40:9e:f5:e4:58:84:67:8c 
-# SHA1 Fingerprint: 6b:2f:34:ad:89:58:be:62:fd:b0:6b:5c:ce:bb:9d:d9:4f:4e:39:f3 -# SHA256 Fingerprint: eb:f3:c0:2a:87:89:b1:fb:7d:51:19:95:d6:63:b7:29:06:d9:13:ce:0d:5e:10:56:8a:8a:77:e2:58:61:67:e7 ------BEGIN CERTIFICATE----- -MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTEL -MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV -BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1 -c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcNMDYwMzIyMTU1NDI4WhcNMjUxMjMx -MjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIg -R21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYwJAYD -VQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSR -JJZ4Hgmgm5qVSkr1YnwCqMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3T -fCZdzHd55yx4Oagmcw6iXSVphU9VDprvxrlE4Vc93x9UIuVvZaozhDrzznq+VZeu -jRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtwag+1m7Z3W0hZneTvWq3z -wZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9OgdwZu5GQ -fezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYD -VR0jBBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAO -BgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0G -CSqGSIb3DQEBBQUAA4IBAQAo0uCG1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X1 -7caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/CyvwbZ71q+s2IhtNerNXxTPqYn -8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3ghUJGooWMNjs -ydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT -ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/ -2TYcuiUaUj0a7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY ------END CERTIFICATE----- - # Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center # Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center # Label: "Deutsche Telekom Root CA 2" @@ -2268,36 +1910,6 @@ xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU Cm26OWMohpLzGITY+9HPBVZkVw== -----END CERTIFICATE----- -# Issuer: CN=ComSign Secured CA O=ComSign -# Subject: CN=ComSign Secured CA O=ComSign -# Label: "ComSign Secured CA" -# Serial: 264725503855295744117309814499492384489 -# MD5 Fingerprint: 40:01:25:06:8d:21:43:6a:0e:43:00:9c:e7:43:f3:d5 -# SHA1 Fingerprint: f9:cd:0e:2c:da:76:24:c1:8f:bd:f0:f0:ab:b6:45:b8:f7:fe:d5:7a -# SHA256 Fingerprint: 50:79:41:c7:44:60:a0:b4:70:86:22:0d:4e:99:32:57:2a:b5:d1:b5:bb:cb:89:80:ab:1c:b1:76:51:a8:44:d2 ------BEGIN CERTIFICATE----- -MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAw -PDEbMBkGA1UEAxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWdu -MQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwx -GzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBDQTEQMA4GA1UEChMHQ29tU2lnbjEL -MAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGtWhf -HZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs49oh -gHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sW -v+bznkqH7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ue -Mv5WJDmyVIRD9YTC2LxBkMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr -9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d19guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt -6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUwAwEB/zBEBgNVHR8EPTA7 -MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29tU2lnblNl -Y3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58 -ADsAj8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkq -hkiG9w0BAQUFAAOCAQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7p -iL1DRYHjZiM/EoZNGeQFsOY3wo3aBijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtC 
-dsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtpFhpFfTMDZflScZAmlaxMDPWL -kz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP51qJThRv4zdL -hfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz -OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== ------END CERTIFICATE----- - # Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc # Subject: CN=Cybertrust Global Root O=Cybertrust, Inc # Label: "Cybertrust Global Root" @@ -2435,34 +2047,6 @@ h7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5wwDX3OaJdZtB7WZ+oRxKaJyOk LY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho -----END CERTIFICATE----- -# Issuer: CN=Buypass Class 3 CA 1 O=Buypass AS-983163327 -# Subject: CN=Buypass Class 3 CA 1 O=Buypass AS-983163327 -# Label: "Buypass Class 3 CA 1" -# Serial: 2 -# MD5 Fingerprint: df:3c:73:59:81:e7:39:50:81:04:4c:34:a2:cb:b3:7b -# SHA1 Fingerprint: 61:57:3a:11:df:0e:d8:7e:d5:92:65:22:ea:d0:56:d7:44:b3:23:71 -# SHA256 Fingerprint: b7:b1:2b:17:1f:82:1d:aa:99:0c:d0:fe:50:87:b1:28:44:8b:a8:e5:18:4f:84:c5:1e:02:b5:c8:fb:96:2b:24 ------BEGIN CERTIFICATE----- -MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEd -MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3Mg -Q2xhc3MgMyBDQSAxMB4XDTA1MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzEL -MAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MR0wGwYD -VQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKxifZg -isRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//z -NIqeKNc0n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI -+MkcVyzwPX6UvCWThOiaAJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2R -hzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+ -mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNCMEAwDwYDVR0TAQH/BAUw -AwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0PAQH/BAQD -AgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFP -Bdy7pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27s -EzNxZy5p+qksP2bAEllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2 -mSlf56oBzKwzqBwKu5HEA6BvtjT5htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yC -e/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQjel/wroQk5PMr+4okoyeYZdow -dXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 ------END CERTIFICATE----- - # Issuer: CN=EBG Elektronik Sertifika Hizmet SaÄŸlayıcısı O=EBG BiliÅŸim Teknolojileri ve Hizmetleri A.Åž. # Subject: CN=EBG Elektronik Sertifika Hizmet SaÄŸlayıcısı O=EBG BiliÅŸim Teknolojileri ve Hizmetleri A.Åž. # Label: "EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1" @@ -2843,38 +2427,6 @@ Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== -----END CERTIFICATE----- -# Issuer: CN=CA Disig O=Disig a.s. -# Subject: CN=CA Disig O=Disig a.s. 
-# Label: "CA Disig" -# Serial: 1 -# MD5 Fingerprint: 3f:45:96:39:e2:50:87:f7:bb:fe:98:0c:3c:20:98:e6 -# SHA1 Fingerprint: 2a:c8:d5:8b:57:ce:bf:2f:49:af:f2:fc:76:8f:51:14:62:90:7a:41 -# SHA256 Fingerprint: 92:bf:51:19:ab:ec:ca:d0:b1:33:2d:c4:e1:d0:5f:ba:75:b5:67:90:44:ee:0c:a2:6e:93:1f:74:4f:2f:33:cf ------BEGIN CERTIFICATE----- -MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzET -MBEGA1UEBxMKQnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UE -AxMIQ0EgRGlzaWcwHhcNMDYwMzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQsw -CQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcg -YS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgmGErE -Nx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnX -mjxUizkDPw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYD -XcDtab86wYqg6I7ZuUUohwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhW -S8+2rT+MitcE5eN4TPWGqvWP+j1scaMtymfraHtuM6kMgiioTGohQBUgDCZbg8Kp -FhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8wgfwwDwYDVR0TAQH/BAUw -AwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0PAQH/BAQD -AgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cu -ZGlzaWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5z -ay9jYS9jcmwvY2FfZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2sv -Y2EvY3JsL2NhX2Rpc2lnLmNybDAaBgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEw -DQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59tWDYcPQuBDRIrRhCA/ec8J9B6 -yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3mkkp7M5+cTxq -EEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ -CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeB -EicTXxChds6KezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFN -PGO+I++MzVpQuGhU+QqZMxEA4Z7CRneC9VkGjCFMhwnN5ag= ------END CERTIFICATE----- - # Issuer: CN=Juur-SK O=AS Sertifitseerimiskeskus # Subject: CN=Juur-SK O=AS Sertifitseerimiskeskus # Label: "Juur-SK" @@ -3042,36 +2594,6 @@ tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW -----END CERTIFICATE----- -# Issuer: CN=e-Guven Kok Elektronik Sertifika Hizmet Saglayicisi O=Elektronik Bilgi Guvenligi A.S. -# Subject: CN=e-Guven Kok Elektronik Sertifika Hizmet Saglayicisi O=Elektronik Bilgi Guvenligi A.S. 
-# Label: "E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi" -# Serial: 91184789765598910059173000485363494069 -# MD5 Fingerprint: 3d:41:29:cb:1e:aa:11:74:cd:5d:b0:62:af:b0:43:5b -# SHA1 Fingerprint: dd:e1:d2:a9:01:80:2e:1d:87:5e:84:b3:80:7e:4b:b1:fd:99:41:34 -# SHA256 Fingerprint: e6:09:07:84:65:a4:19:78:0c:b6:ac:4c:1c:0b:fb:46:53:d9:d9:cc:6e:b3:94:6e:b7:f3:d6:99:97:ba:d5:98 ------BEGIN CERTIFICATE----- -MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1 -MQswCQYDVQQGEwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxp -Z2kgQS5TLjE8MDoGA1UEAxMzZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZp -a2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3MDEwNDExMzI0OFoXDTE3MDEwNDEx -MzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0cm9uaWsgQmlsZ2kg -R3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9uaWsg -U2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdU -MZTe1RK6UxYC6lhj71vY8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlT -L/jDj/6z/P2douNffb7tC+Bg62nsM+3YjfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H -5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAIJjjcJRFHLfO6IxClv7wC -90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk9Ok0oSy1 -c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoE -VtstxNulMA0GCSqGSIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLP -qk/CaOv/gKlR6D1id4k9CnU58W5dF4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S -/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwqD2fK/A+JYZ1lpTzlvBNbCNvj -/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4Vwpm+Vganf2X -KWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq -fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX ------END CERTIFICATE----- - # Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 # Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 # Label: "GlobalSign Root CA - R3" @@ -3610,37 +3132,6 @@ ducTZnV+ZfsBn5OHiJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmC IoaZM3Fa6hlXPZHNqcCjbgcTpsnt+GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= -----END CERTIFICATE----- -# Issuer: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. Datenverkehr GmbH OU=A-Trust-nQual-03 -# Subject: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. 
Datenverkehr GmbH OU=A-Trust-nQual-03 -# Label: "A-Trust-nQual-03" -# Serial: 93214 -# MD5 Fingerprint: 49:63:ae:27:f4:d5:95:3d:d8:db:24:86:b8:9c:07:53 -# SHA1 Fingerprint: d3:c0:63:f2:19:ed:07:3e:34:ad:5d:75:0b:32:76:29:ff:d5:9a:f2 -# SHA256 Fingerprint: 79:3c:bf:45:59:b9:fd:e3:8a:b2:2d:f1:68:69:f6:98:81:ae:14:c4:b0:13:9a:c7:88:a7:8a:1a:fc:ca:02:fb ------BEGIN CERTIFICATE----- -MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJB -VDFIMEYGA1UECgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBp -bSBlbGVrdHIuIERhdGVudmVya2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5R -dWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5RdWFsLTAzMB4XDTA1MDgxNzIyMDAw -MFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgwRgYDVQQKDD9BLVRy -dXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0ZW52 -ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMM -EEEtVHJ1c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQCtPWFuA/OQO8BBC4SAzewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUj -lUC5B3ilJfYKvUWG6Nm9wASOhURh73+nyfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZ -znF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPESU7l0+m0iKsMrmKS1GWH -2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4iHQF63n1 -k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs -2e3Vcuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYD -VR0OBAoECERqlWdVeRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC -AQEAVdRU0VlIXLOThaq/Yy/kgM40ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fG -KOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmrsQd7TZjTXLDR8KdCoLXEjq/+ -8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZdJXDRZslo+S4R -FGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS -mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmE -DNuxUCAKGkq6ahq97BvIxYSazQ== ------END CERTIFICATE----- - # Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Label: "TWCA Root Certification Authority" @@ -3699,6 +3190,45 @@ t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 -----END CERTIFICATE----- +# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes +# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes +# Label: "EC-ACC" +# Serial: -23701579247955709139626555126524820479 +# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09 +# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8 +# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99 +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB +8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy +dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1 +YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3 +dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh +IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD +LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG +EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g +KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD +ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu 
+bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg +ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R +85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm +4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV +HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd +QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t +lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB +o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4 +opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo +dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW +ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN +AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y +/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k +SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy +Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS +Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl +nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI= +-----END CERTIFICATE----- + # Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority # Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority # Label: "Hellenic Academic and Research Institutions RootCA 2011" @@ -5112,3 +4642,748 @@ KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg 515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO xwy8p2Fp8fc74SrL+SvzZpA3 -----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden +# Label: "Staat der Nederlanden Root CA - G3" +# Serial: 10003001 +# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37 +# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc +# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28 +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX +DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP +cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW +IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX +xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy +KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR +9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az +5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 +6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 +Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP +bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt +BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt +XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd 
+INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp +LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 +Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp +gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh +/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw +0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A +fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq +4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR +1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ +QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM +94B7IWcnMFk= +-----END CERTIFICATE----- + +# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden +# Label: "Staat der Nederlanden EV Root CA" +# Serial: 10000013 +# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba +# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb +# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS +v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: 
df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V 
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ +2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. 
OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority +# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority +# Label: "CFCA EV ROOT" +# Serial: 407555286 +# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30 +# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83 +# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS 
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy +P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı H5 O=TÃœRKTRUST Bilgi Ä°letiÅŸim ve BiliÅŸim GüvenliÄŸi Hizmetleri A.Åž. +# Subject: CN=TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı H5 O=TÃœRKTRUST Bilgi Ä°letiÅŸim ve BiliÅŸim GüvenliÄŸi Hizmetleri A.Åž. +# Label: "TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı H5" +# Serial: 156233699172481 +# MD5 Fingerprint: da:70:8e:f0:22:df:93:26:f6:5f:9f:d3:15:06:52:4e +# SHA1 Fingerprint: c4:18:f6:4d:46:d1:df:00:3d:27:30:13:72:43:a9:12:11:c6:75:fb +# SHA256 Fingerprint: 49:35:1b:90:34:44:c1:85:cc:dc:5c:69:3d:24:d8:55:5c:b2:08:d6:a8:14:13:07:69:9f:4a:f0:63:19:9d:78 +-----BEGIN CERTIFICATE----- +MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UE +BhMCVFIxDzANBgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxn +aSDEsGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkg +QS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1QgRWxla3Ryb25payBTZXJ0aWZpa2Eg +SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAwODA3MDFaFw0yMzA0 +MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0wSwYD +VQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8 +dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApCUZ4WWe60ghUEoI5RHwWrom +/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537jVJp45wnEFPzpALFp/kR +Gml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1mep5Fimh3 +4khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z +5UNP9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0 +hO8EuPbJbKoCPrZV4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QID +AQABo0IwQDAdBgNVHQ4EFgQUVpkHHtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJ5FdnsX +SDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPoBP5yCccLqh0l +VX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq +URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nf +peYVhDfwwvJllpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CF +Yv4HAqGEVka+lgqaE9chTLd8B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW ++qtB4Uu2NQvAmxU= +-----END CERTIFICATE----- + +# Issuer: CN=TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı H6 O=TÃœRKTRUST Bilgi Ä°letiÅŸim ve BiliÅŸim GüvenliÄŸi Hizmetleri A.Åž. +# Subject: CN=TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı H6 O=TÃœRKTRUST Bilgi Ä°letiÅŸim ve BiliÅŸim GüvenliÄŸi Hizmetleri A.Åž. 
+# Label: "TÃœRKTRUST Elektronik Sertifika Hizmet SaÄŸlayıcısı H6" +# Serial: 138134509972618 +# MD5 Fingerprint: f8:c5:ee:2a:6b:be:95:8d:08:f7:25:4a:ea:71:3e:46 +# SHA1 Fingerprint: 8a:5c:8c:ee:a5:03:e6:05:56:ba:d8:1b:d4:f6:c9:b0:ed:e5:2f:e0 +# SHA256 Fingerprint: 8d:e7:86:55:e1:be:7f:78:47:80:0b:93:f6:94:d2:1d:36:8c:c0:6e:03:3e:7f:ab:04:bb:5e:b9:9d:a6:b7:00 +-----BEGIN CERTIFICATE----- +MIIEJjCCAw6gAwIBAgIGfaHyZeyKMA0GCSqGSIb3DQEBCwUAMIGxMQswCQYDVQQG +EwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0wSwYDVQQKDERUw5xSS1RSVVNUIEJpbGdp +IMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBB +LsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBI +aXptZXQgU2HEn2xhecSxY8Sxc8SxIEg2MB4XDTEzMTIxODA5MDQxMFoXDTIzMTIx +NjA5MDQxMFowgbExCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExTTBLBgNV +BAoMRFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBCaWxpxZ9pbSBHw7x2 +ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMUIwQAYDVQQDDDlUw5xSS1RSVVNUIEVs +ZWt0cm9uaWsgU2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLEgSDYwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCdsGjW6L0UlqMACprx9MfMkU1x +eHe59yEmFXNRFpQJRwXiM/VomjX/3EsvMsew7eKC5W/a2uqsxgbPJQ1BgfbBOCK9 ++bGlprMBvD9QFyv26WZV1DOzXPhDIHiTVRZwGTLmiddk671IUP320EEDwnS3/faA +z1vFq6TWlRKb55cTMgPp1KtDWxbtMyJkKbbSk60vbNg9tvYdDjTu0n2pVQ8g9P0p +u5FbHH3GQjhtQiht1AH7zYiXSX6484P4tZgvsycLSF5W506jM7NE1qXyGJTtHB6p +lVxiSvgNZ1GpryHV+DKdeboaX+UEVU0TRv/yz3THGmNtwx8XEsMeED5gCLMxAgMB +AAGjQjBAMB0GA1UdDgQWBBTdVRcT9qzoSCHK77Wv0QAy7Z6MtTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAb1gNl0Oq +FlQ+v6nfkkU/hQu7VtMMUszIv3ZnXuaqs6fvuay0EBQNdH49ba3RfdCaqaXKGDsC +QC4qnFAUi/5XfldcEQlLNkVS9z2sFP1E34uXI9TDwe7UU5X+LEr+DXCqu4svLcsy +o4LyVN/Y8t3XSHLuSqMplsNEzm61kod2pLv0kmzOLBQJZo6NrRa1xxsJYTvjIKID +gI6tflEATseWhvtDmHd9KMeP2Cpu54Rvl0EpABZeTeIT6lnAY2c6RPuY/ATTMHKm +9ocJV612ph1jmv3XZch4gyt1O6VbuA1df74jrlZVlFjvH4GMKrLN5ptjnhi85WsG +tAuYSyher4hYyw== +-----END CERTIFICATE----- + +# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 +# Label: "Certinomis - Root CA" +# Serial: 1 +# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f +# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8 +# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58 +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb +BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz +MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx +FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g +Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 +fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl +LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV +WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF +TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb +5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc +CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri +wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ +wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG +m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 +F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng +WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 
+2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ +0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw +F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS +g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj +qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN +h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ +ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V +btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj +Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ +8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW +gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=Certification Authority of WoSign G2 O=WoSign CA Limited +# Subject: CN=Certification Authority of WoSign G2 O=WoSign CA Limited +# Label: "Certification Authority of WoSign G2" +# Serial: 142423943073812161787490648904721057092 +# MD5 Fingerprint: c8:1c:7d:19:aa:cb:71:93:f2:50:f8:52:a8:1e:ba:60 +# SHA1 Fingerprint: fb:ed:dc:90:65:b7:27:20:37:bc:55:0c:9c:56:de:bb:f2:78:94:e1 +# SHA256 Fingerprint: d4:87:a5:6f:83:b0:74:82:e8:5e:96:33:94:c1:ec:c2:c9:e5:1d:09:03:ee:94:6b:02:c3:01:58:1e:d9:9e:16 +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBY +MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNV +BAMTJENlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDEx +MDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgxCzAJBgNVBAYTAkNOMRowGAYDVQQK 
+ExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPX +JYY1kBaiXW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgO +gHzKtB0TiGsOqCR3A9DuW/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg +5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg95k4ot+vElbGs/V6r+kHLXZ1L3PR8du9n +fwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BKv0mUYQs4kI9dJGwlezt5 +2eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJ +KoZIhvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8 +fHulwqZm46qwtyeYP0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G +3CE4Q3RM+zD4F3LBMvzIkRfEzFg3TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yy +SrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu+sif/a+RZQp4OBXllxcU3fng +LDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+7Q9LGOHSJDy7 +XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg= +-----END CERTIFICATE----- + +# Issuer: CN=CA WoSign ECC Root O=WoSign CA Limited +# Subject: CN=CA WoSign ECC Root O=WoSign CA Limited +# Label: "CA WoSign ECC Root" +# Serial: 138625735294506723296996289575837012112 +# MD5 Fingerprint: 80:c6:53:ee:61:82:28:72:f0:ff:21:b9:17:ca:b2:20 +# SHA1 Fingerprint: d2:7a:d2:be:ed:94:c0:a1:3c:c7:25:21:ea:5d:71:be:81:19:f3:2b +# SHA256 Fingerprint: 8b:45:da:1c:06:f7:91:eb:0c:ab:f2:6b:e5:88:f5:fb:23:16:5c:2e:61:4b:f8:85:56:2d:0d:ce:50:b2:9b:02 +-----BEGIN CERTIFICATE----- +MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQsw +CQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMT +EkNBIFdvU2lnbiBFQ0MgUm9vdDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4 +NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEb +MBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACID +YgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiUt5v8 +KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES +1ns2o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUqv3VWqP2h4syhf3RMluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB +1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0Daupn75OcsqF1NnstTJFGG+rrQIwfcf3 +aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYua/GRspBl9JrmkO5K +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. 
+# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=Certplus Root CA G1 O=Certplus +# Subject: CN=Certplus Root CA G1 O=Certplus +# Label: "Certplus Root CA G1" +# Serial: 1491911565779898356709731176965615564637713 +# MD5 Fingerprint: 7f:09:9c:f7:d9:b9:5c:69:69:56:d5:37:3e:14:0d:42 +# SHA1 Fingerprint: 22:fd:d0:b7:fd:a2:4e:0d:ac:49:2c:a0:ac:a6:7b:6a:1f:e3:f7:66 +# SHA256 Fingerprint: 15:2a:40:2b:fc:df:2c:d5:48:05:4d:22:75:b3:9c:7f:ca:3e:c0:97:80:78:b0:f0:ea:76:e5:61:a6:c7:43:3e +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUA +MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy +dHBsdXMgUm9vdCBDQSBHMTAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBa +MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy +dHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHNr49a +iZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt +6kuJPKNxQv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP +0FG7Yn2ksYyy/yARujVjBYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f +6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTvLRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDE +EW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2z4QTd28n6v+WZxcIbekN +1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc4nBvCGrc +h2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCT +mehd4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV +4EJQeIQEQWGw9CEjjy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPO +WftwenMGE9nTdDckQQoRb5fc5+R+ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1Ud +DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSowcCbkahDFXxd +Bie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHYlwuBsTANBgkq +hkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh +66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7 +/SMNkPX0XtPGYX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BS +S7CTKtQ+FjPlnsZlFT5kOwQ/2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j +2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F6ALEUz65noe8zDUa3qHpimOHZR4R +Kttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilXCNQ314cnrUlZp5Gr +RHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWetUNy 
+6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEV +V/xuZDDCVRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5 +g4VCXA9DO2pJNdWY9BW/+mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl +++O/QmueD6i9a5jc2NvLi6Td11n0bt3+qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo= +-----END CERTIFICATE----- + +# Issuer: CN=Certplus Root CA G2 O=Certplus +# Subject: CN=Certplus Root CA G2 O=Certplus +# Label: "Certplus Root CA G2" +# Serial: 1492087096131536844209563509228951875861589 +# MD5 Fingerprint: a7:ee:c4:78:2d:1b:ee:2d:b9:29:ce:d6:a7:96:32:31 +# SHA1 Fingerprint: 4f:65:8e:1f:e9:06:d8:28:02:e9:54:47:41:c9:54:25:5d:69:cc:1a +# SHA256 Fingerprint: 6c:c0:50:41:e6:44:5e:74:69:6c:4c:fb:c9:f8:0f:54:3b:7e:ab:bb:44:b4:ce:6f:78:7c:6a:99:71:c4:2f:17 +-----BEGIN CERTIFICATE----- +MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4x +CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs +dXMgUm9vdCBDQSBHMjAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4x +CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs +dXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABM0PW1aC3/BFGtat +93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uNAm8x +Ik0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0P +AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwj +FNiPwyCrKGBZMB8GA1UdIwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqG +SM49BAMDA2gAMGUCMHD+sAvZ94OX7PNVHdTcswYO/jOYnYs5kGuUIe22113WTNch +p+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjlvPl5adytRSv3tjFzzAal +U5ORGpOucGpnutee5WEaXw== +-----END CERTIFICATE----- + +# Issuer: CN=OpenTrust Root CA G1 O=OpenTrust +# Subject: CN=OpenTrust Root CA G1 O=OpenTrust +# Label: "OpenTrust Root CA G1" +# Serial: 1492036577811947013770400127034825178844775 +# MD5 Fingerprint: 76:00:cc:81:29:cd:55:5e:88:6a:7a:2e:f7:4d:39:da +# SHA1 Fingerprint: 79:91:e8:34:f7:e2:ee:dd:08:95:01:52:e9:55:2d:14:e9:58:d5:7e +# SHA256 Fingerprint: 56:c7:71:28:d9:8c:18:d9:1b:4c:fd:ff:bc:25:ee:91:03:d4:75:8e:a2:ab:ad:82:6a:90:f3:45:7d:46:0e:b4 +-----BEGIN CERTIFICATE----- +MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUA +MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w +ZW5UcnVzdCBSb290IENBIEcxMB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAw +MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU +T3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7faYp6b +wiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX +/uMftk87ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR0 +77F9jAHiOH3BX2pfJLKOYheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGP +uY4zbGneWK2gDqdkVBFpRGZPTBKnjix9xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLx +p2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO9z0M+Yo0FMT7MzUj8czx +Kselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq3ywgsNw2 +TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+W +G+Oin6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPw +vFEVVJSmdz7QdFG9URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYY +EQRVzXR7z2FwefR7LFxckvzluFqrTJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUl0YhVyE1 +2jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/PxN3DlCPaTKbYw +DQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E +PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kf +gLMtMrpkZ2CvuVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbS +FXJfLkur1J1juONI5f6ELlgKn0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0 +V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLhX4SPgPL0DTatdrOjteFkdjpY3H1P 
+XlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80nR14SohWZ25g/4/I +i+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcmGS3t +TAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L91 +09S5zvE/bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/Ky +Pu1svf0OnWZzsD2097+o4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJ +AwSQiumPv+i2tCqjI40cHLI5kqiPAlxAOXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj +1oxx +-----END CERTIFICATE----- + +# Issuer: CN=OpenTrust Root CA G2 O=OpenTrust +# Subject: CN=OpenTrust Root CA G2 O=OpenTrust +# Label: "OpenTrust Root CA G2" +# Serial: 1492012448042702096986875987676935573415441 +# MD5 Fingerprint: 57:24:b6:59:24:6b:ae:c8:fe:1c:0c:20:f2:c0:4e:eb +# SHA1 Fingerprint: 79:5f:88:60:c5:ab:7c:3d:92:e6:cb:f4:8d:e1:45:cd:11:ef:60:0b +# SHA256 Fingerprint: 27:99:58:29:fe:6a:75:15:c1:bf:e8:48:f9:c4:76:1d:b1:6c:22:59:29:25:7b:f4:0d:08:94:f2:9e:a8:ba:f2 +-----BEGIN CERTIFICATE----- +MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUA +MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w +ZW5UcnVzdCBSb290IENBIEcyMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAw +MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU +T3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+Ntmh +/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78e +CbY2albz4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/6 +1UWY0jUJ9gNDlP7ZvyCVeYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fE +FY8ElggGQgT4hNYdvJGmQr5J1WqIP7wtUdGejeBSzFfdNTVY27SPJIjki9/ca1TS +gSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz3GIZ38i1MH/1PCZ1Eb3X +G7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj3CzMpSZy +YhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaH +vGOz9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4 +t/bQWVyJ98LVtZR00dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/ +gh7PU3+06yzbXfZqfUAkBXKJOAGTy3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUajn6QiL3 +5okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59M4PLuG53hq8w +DQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz +Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0 +nXGEL8pZ0keImUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qT +RmTFAHneIWv2V6CG1wZy7HBGS4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpT +wm+bREx50B1ws9efAvSyB7DH5fitIw6mVskpEndI2S9G/Tvw/HRwkqWOOAgfZDC2 +t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ6e18CL13zSdkzJTa +TkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97krgCf2 +o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU +3jg9CcCoSmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eA +iN1nE28daCSLT7d0geX0YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14f +WKGVyasvc0rQLW6aWQ9VGHgtPFGml4vmu7JwqkwR3v98KzfUetF3NI/n+UL3PIEM +S1IK +-----END CERTIFICATE----- + +# Issuer: CN=OpenTrust Root CA G3 O=OpenTrust +# Subject: CN=OpenTrust Root CA G3 O=OpenTrust +# Label: "OpenTrust Root CA G3" +# Serial: 1492104908271485653071219941864171170455615 +# MD5 Fingerprint: 21:37:b4:17:16:92:7b:67:46:70:a9:96:d7:a8:13:24 +# SHA1 Fingerprint: 6e:26:64:f3:56:bf:34:55:bf:d1:93:3f:7c:01:de:d8:13:da:8a:a6 +# SHA256 Fingerprint: b7:c3:62:31:70:6e:81:07:8c:36:7c:b8:96:19:8f:1e:32:08:dd:92:69:49:dd:8f:57:09:a4:10:f7:5b:62:92 +-----BEGIN CERTIFICATE----- +MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAx +CzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5U +cnVzdCBSb290IENBIEczMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFow 
+QDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwUT3Bl +blRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARK7liuTcpm +3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5Bta1d +oYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5 +DMlv4VBN0BBY3JWIbTAfBgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAK +BggqhkjOPQQDAwNpADBmAjEAj6jcnboMBBf6Fek9LykBl7+BFjNAk2z8+e2AcG+q +j9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta3U1fJAuwACEl74+nBCZx +4nxp5V2a+EEfOzmTk51V6s2N8fvB +-----END CERTIFICATE----- diff --git a/examples/cpp/helloworld/CMakeLists.txt b/examples/cpp/helloworld/CMakeLists.txt new file mode 100644 index 00000000000..8f098c91a6b --- /dev/null +++ b/examples/cpp/helloworld/CMakeLists.txt @@ -0,0 +1,49 @@ +# Minimum CMake required +cmake_minimum_required(VERSION 2.8) + +# Project +project(HelloWorld CXX) + +# Protobuf +set(protobuf_MODULE_COMPATIBLE TRUE) +find_package(protobuf CONFIG REQUIRED) +message(STATUS "Using protobuf ${protobuf_VERSION}") + +# gRPC +find_package(gRPC CONFIG REQUIRED) +message(STATUS "Using gRPC ${gRPC_VERSION}") + +# gRPC C++ plugin +get_target_property(gRPC_CPP_PLUGIN_EXECUTABLE gRPC::grpc_cpp_plugin + IMPORTED_LOCATION_RELEASE) + +# Proto file +get_filename_component(hw_proto "../../protos/helloworld.proto" ABSOLUTE) +get_filename_component(hw_proto_path "${hw_proto}" PATH) + +# Generated sources +protobuf_generate_cpp(hw_proto_srcs hw_proto_hdrs "${hw_proto}") +set(hw_grpc_srcs "${CMAKE_CURRENT_BINARY_DIR}/helloworld.grpc.pb.cc") +set(hw_grpc_hdrs "${CMAKE_CURRENT_BINARY_DIR}/helloworld.grpc.pb.h") +add_custom_command( + OUTPUT "${hw_grpc_srcs}" "${hw_grpc_hdrs}" + COMMAND protobuf::protoc + ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}" -I "${hw_proto_path}" + --plugin=protoc-gen-grpc="${gRPC_CPP_PLUGIN_EXECUTABLE}" + "${hw_proto}" + DEPENDS "${hw_proto}") + +# Generated include directory +include_directories("${CMAKE_CURRENT_BINARY_DIR}") + +# Targets greeter_[async_](client|server) +foreach(_target + greeter_client greeter_server + greeter_async_client greeter_async_server) + add_executable(${_target} "${_target}.cc" + ${hw_proto_srcs} + ${hw_grpc_srcs}) + target_link_libraries(${_target} + protobuf::libprotobuf + gRPC::grpc++_unsecure) +endforeach() diff --git a/examples/cpp/helloworld/Makefile b/examples/cpp/helloworld/Makefile index b45b3c7ee0c..df82f4688fc 100644 --- a/examples/cpp/helloworld/Makefile +++ b/examples/cpp/helloworld/Makefile @@ -29,10 +29,20 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # +HOST_SYSTEM = $(shell uname | cut -f 1 -d_) +SYSTEM ?= $(HOST_SYSTEM) CXX = g++ CPPFLAGS += -I/usr/local/include -pthread CXXFLAGS += -std=c++11 -LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++ grpc` -lprotobuf -lpthread -ldl +ifeq ($(SYSTEM),Darwin) +LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++ grpc` \ + -lgrpc++_reflection \ + -lprotobuf -lpthread -ldl +else +LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++ grpc` \ + -Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed \ + -lprotobuf -lpthread -ldl +endif PROTOC = protoc GRPC_CPP_PLUGIN = grpc_cpp_plugin GRPC_CPP_PLUGIN_PATH ?= `which $(GRPC_CPP_PLUGIN)` diff --git a/examples/cpp/route_guide/Makefile b/examples/cpp/route_guide/Makefile index 50ecf041f56..ba5e45c05c2 100644 --- a/examples/cpp/route_guide/Makefile +++ b/examples/cpp/route_guide/Makefile @@ -29,10 +29,20 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# +HOST_SYSTEM = $(shell uname | cut -f 1 -d_) +SYSTEM ?= $(HOST_SYSTEM) CXX = g++ CPPFLAGS += -I/usr/local/include -pthread CXXFLAGS += -std=c++11 -LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++` -lprotobuf -lpthread -ldl +ifeq ($(SYSTEM),Darwin) +LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++` \ + -lgrpc++_reflection \ + -lprotobuf -lpthread -ldl +else +LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++` \ + -Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed \ + -lprotobuf -lpthread -ldl +endif PROTOC = protoc GRPC_CPP_PLUGIN = grpc_cpp_plugin GRPC_CPP_PLUGIN_PATH ?= `which $(GRPC_CPP_PLUGIN)` diff --git a/examples/csharp/helloworld/README.md b/examples/csharp/helloworld/README.md index 63131ed98c0..d13c9ac9db4 100644 --- a/examples/csharp/helloworld/README.md +++ b/examples/csharp/helloworld/README.md @@ -5,23 +5,16 @@ BACKGROUND ------------- For this sample, we've already generated the server and client stubs from [helloworld.proto][]. -Example projects depend on the [Grpc](https://www.nuget.org/packages/Grpc/) +Example projects depend on the [Grpc](https://www.nuget.org/packages/Grpc/), [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) and [Google.Protobuf](https://www.nuget.org/packages/Google.Protobuf/) NuGet packages which have been already added to the project for you. PREREQUISITES ------------- -**Windows** -- .NET 4.5+ -- Visual Studio 2013 or 2015 -**Linux** -- Mono 4.0+ -- Monodevelop 5.9+ (with NuGet plugin installed) - -**Mac OS X** -- Xamarin Studio 5.9+ -- [homebrew][] +- Windows: .NET Framework 4.5+, Visual Studio 2013 or 2015 +- Linux: Mono 4+, MonoDevelop 5.9+ (with NuGet add-in installed) +- Mac OS X: Xamarin Studio 5.9+ BUILD ------- @@ -56,6 +49,5 @@ Tutorial You can find a more detailed tutorial in [gRPC Basics: C#][] -[homebrew]:http://brew.sh [helloworld.proto]:../../protos/helloworld.proto [gRPC Basics: C#]:http://www.grpc.io/docs/tutorials/basic/csharp.html diff --git a/examples/objective-c/auth_sample/AuthTestService.podspec b/examples/objective-c/auth_sample/AuthTestService.podspec index d246653ea79..d709c7e636f 100644 --- a/examples/objective-c/auth_sample/AuthTestService.podspec +++ b/examples/objective-c/auth_sample/AuthTestService.podspec @@ -13,27 +13,51 @@ Pod::Spec.new do |s| # Base directory where the .proto files are. src = "../../protos" + # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. + s.dependency "!ProtoCompiler-gRPCPlugin", "~> 1.0.0-pre1" + + # Pods directory corresponding to this app's Podfile, relative to the location of this podspec. + pods_root = 'Pods' + + # Path where Cocoapods downloads protoc and the gRPC plugin. + protoc_dir = "#{pods_root}/!ProtoCompiler" + protoc = "#{protoc_dir}/protoc" + plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin" + # Directory where the generated files will be placed. - dir = "Pods/" + s.name + dir = "#{pods_root}/#{s.name}" - # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. 
s.prepare_command = <<-CMD mkdir -p #{dir} - protoc -I #{src} --objc_out=#{dir} --objcgrpc_out=#{dir} #{src}/auth_sample.proto + #{protoc} \ + --plugin=protoc-gen-grpc=#{plugin} \ + --objc_out=#{dir} \ + --grpc_out=#{dir} \ + -I #{src} \ + -I #{protoc_dir} \ + #{src}/auth_sample.proto CMD + # Files generated by protoc s.subspec "Messages" do |ms| ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}" ms.header_mappings_dir = dir ms.requires_arc = false - ms.dependency "Protobuf", "~> 3.0.0-alpha-4" + # The generated files depend on the protobuf runtime. + ms.dependency "Protobuf" + # This is needed by all pods that depend on Protobuf: + ms.pod_target_xcconfig = { + 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', + } end + # Files generated by the gRPC plugin s.subspec "Services" do |ss| ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}" ss.header_mappings_dir = dir ss.requires_arc = true - ss.dependency "gRPC", "~> 0.12" + # The generated files depend on the gRPC runtime, and on the files generated by protoc. + ss.dependency "gRPC-ProtoRPC" ss.dependency "#{s.name}/Messages" end end diff --git a/examples/objective-c/auth_sample/Misc/GoogleService-Info.plist b/examples/objective-c/auth_sample/Misc/GoogleService-Info.plist index 86909d84a31..ff225074238 100644 --- a/examples/objective-c/auth_sample/Misc/GoogleService-Info.plist +++ b/examples/objective-c/auth_sample/Misc/GoogleService-Info.plist @@ -2,9 +2,39 @@ + AD_UNIT_ID_FOR_BANNER_TEST + redacted + AD_UNIT_ID_FOR_INTERSTITIAL_TEST + redacted CLIENT_ID 15087385131-lh9bpkiai9nls53uadju0if6k7un3uih.apps.googleusercontent.com REVERSED_CLIENT_ID com.googleusercontent.apps.15087385131-lh9bpkiai9nls53uadju0if6k7un3uih + API_KEY + redacted + GCM_SENDER_ID + redacted + PLIST_VERSION + 1 + BUNDLE_ID + io.grpc.AuthSample + PROJECT_ID + grpc-authsample + STORAGE_BUCKET + grpc-authsample.appspot.com + IS_ADS_ENABLED + + IS_ANALYTICS_ENABLED + + IS_APPINVITE_ENABLED + + IS_GCM_ENABLED + + IS_SIGNIN_ENABLED + + GOOGLE_APP_ID + 1:15087385131:ios:d547168abe3c362f + DATABASE_URL + https://grpc-authsample.firebaseio.com - \ No newline at end of file + diff --git a/examples/objective-c/auth_sample/Podfile b/examples/objective-c/auth_sample/Podfile index 32157a9dcee..a25d20f477d 100644 --- a/examples/objective-c/auth_sample/Podfile +++ b/examples/objective-c/auth_sample/Podfile @@ -3,44 +3,10 @@ platform :ios, '8.0' install! 'cocoapods', :deterministic_uuids => false -# Location of gRPC's repo root relative to this file. -GRPC_LOCAL_SRC = '../../..' - target 'AuthSample' do # Depend on the generated AuthTestService library. pod 'AuthTestService', :path => '.' # Depend on Google's OAuth2 library pod 'Google/SignIn' - - # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following - # lines in your application. - pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" - - pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" - - pod 'gRPC', :path => GRPC_LOCAL_SRC - pod 'gRPC-Core', :path => GRPC_LOCAL_SRC - pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC - pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC -end - -# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in -# your application. -pre_install do |installer| - # This is the gRPC-Core podspec object, as initialized by its podspec file. 
- grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec - - # Copied from gRPC-Core.podspec, except for the adjusted src_root: - src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}" - grpc_core_spec.pod_target_xcconfig = { - 'GRPC_SRC_ROOT' => src_root, - 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', - 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', - # If we don't set these two settings, `include/grpc/support/time.h` and - # `src/core/lib/support/string.h` shadow the system `` and ``, breaking the - # build. - 'USE_HEADERMAP' => 'NO', - 'ALWAYS_SEARCH_USER_PATHS' => 'NO', - } end diff --git a/examples/objective-c/helloworld/HelloWorld.podspec b/examples/objective-c/helloworld/HelloWorld.podspec index 17b016b31a1..48364fc0af1 100644 --- a/examples/objective-c/helloworld/HelloWorld.podspec +++ b/examples/objective-c/helloworld/HelloWorld.podspec @@ -13,27 +13,51 @@ Pod::Spec.new do |s| # Base directory where the .proto files are. src = "../../protos" + # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. + s.dependency "!ProtoCompiler-gRPCPlugin", "~> 1.0.0-pre1" + + # Pods directory corresponding to this app's Podfile, relative to the location of this podspec. + pods_root = 'Pods' + + # Path where Cocoapods downloads protoc and the gRPC plugin. + protoc_dir = "#{pods_root}/!ProtoCompiler" + protoc = "#{protoc_dir}/protoc" + plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin" + # Directory where the generated files will be placed. - dir = "Pods/" + s.name + dir = "#{pods_root}/#{s.name}" - # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. s.prepare_command = <<-CMD mkdir -p #{dir} - protoc -I #{src} --objc_out=#{dir} --objcgrpc_out=#{dir} #{src}/helloworld.proto + #{protoc} \ + --plugin=protoc-gen-grpc=#{plugin} \ + --objc_out=#{dir} \ + --grpc_out=#{dir} \ + -I #{src} \ + -I #{protoc_dir} \ + #{src}/helloworld.proto CMD + # Files generated by protoc s.subspec "Messages" do |ms| ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}" ms.header_mappings_dir = dir ms.requires_arc = false - ms.dependency "Protobuf", "~> 3.0.0-alpha-4" + # The generated files depend on the protobuf runtime. + ms.dependency "Protobuf" + # This is needed by all pods that depend on Protobuf: + ms.pod_target_xcconfig = { + 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', + } end + # Files generated by the gRPC plugin s.subspec "Services" do |ss| ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}" ss.header_mappings_dir = dir ss.requires_arc = true - ss.dependency "gRPC", "~> 0.12" + # The generated files depend on the gRPC runtime, and on the files generated by protoc. + ss.dependency "gRPC-ProtoRPC" ss.dependency "#{s.name}/Messages" end end diff --git a/examples/objective-c/helloworld/Podfile b/examples/objective-c/helloworld/Podfile index e1bb4ddfd5e..0c3feaa47eb 100644 --- a/examples/objective-c/helloworld/Podfile +++ b/examples/objective-c/helloworld/Podfile @@ -3,41 +3,7 @@ platform :ios, '8.0' install! 'cocoapods', :deterministic_uuids => false -# Location of gRPC's repo root relative to this file. -GRPC_LOCAL_SRC = '../../..' - target 'HelloWorld' do # Depend on the generated HelloWorld library. pod 'HelloWorld', :path => '.' - - # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following - # lines in your application. 
- pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" - - pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" - - pod 'gRPC', :path => GRPC_LOCAL_SRC - pod 'gRPC-Core', :path => GRPC_LOCAL_SRC - pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC - pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC -end - -# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in -# your application. -pre_install do |installer| - # This is the gRPC-Core podspec object, as initialized by its podspec file. - grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec - - # Copied from gRPC-Core.podspec, except for the adjusted src_root: - src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}" - grpc_core_spec.pod_target_xcconfig = { - 'GRPC_SRC_ROOT' => src_root, - 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', - 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', - # If we don't set these two settings, `include/grpc/support/time.h` and - # `src/core/lib/support/string.h` shadow the system `` and ``, breaking the - # build. - 'USE_HEADERMAP' => 'NO', - 'ALWAYS_SEARCH_USER_PATHS' => 'NO', - } end diff --git a/examples/objective-c/route_guide/Podfile b/examples/objective-c/route_guide/Podfile index 943f5464d89..b77eb1b11d1 100644 --- a/examples/objective-c/route_guide/Podfile +++ b/examples/objective-c/route_guide/Podfile @@ -3,41 +3,7 @@ platform :ios, '8.0' install! 'cocoapods', :deterministic_uuids => false -# Location of gRPC's repo root relative to this file. -GRPC_LOCAL_SRC = '../../..' - target 'RouteGuideClient' do # Depend on the generated RouteGuide library. pod 'RouteGuide', :path => '.' - - # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following - # lines in your application. - pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" - - pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" - - pod 'gRPC', :path => GRPC_LOCAL_SRC - pod 'gRPC-Core', :path => GRPC_LOCAL_SRC - pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC - pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC -end - -# This pre_install hook is only needed to use the local version of gRPC-Core. You don't need it in -# your application. -pre_install do |installer| - # This is the gRPC-Core podspec object, as initialized by its podspec file. - grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec - - # Copied from gRPC-Core.podspec, except for the adjusted src_root: - src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}" - grpc_core_spec.pod_target_xcconfig = { - 'GRPC_SRC_ROOT' => src_root, - 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"', - 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"', - # If we don't set these two settings, `include/grpc/support/time.h` and - # `src/core/lib/support/string.h` shadow the system `` and ``, breaking the - # build. - 'USE_HEADERMAP' => 'NO', - 'ALWAYS_SEARCH_USER_PATHS' => 'NO', - } end diff --git a/examples/objective-c/route_guide/RouteGuide.podspec b/examples/objective-c/route_guide/RouteGuide.podspec index 97a61ff51a5..04bc10bc0b5 100644 --- a/examples/objective-c/route_guide/RouteGuide.podspec +++ b/examples/objective-c/route_guide/RouteGuide.podspec @@ -13,27 +13,51 @@ Pod::Spec.new do |s| # Base directory where the .proto files are. src = "../../protos" + # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. 
+ s.dependency "!ProtoCompiler-gRPCPlugin", "~> 1.0.0-pre1" + + # Pods directory corresponding to this app's Podfile, relative to the location of this podspec. + pods_root = 'Pods' + + # Path where Cocoapods downloads protoc and the gRPC plugin. + protoc_dir = "#{pods_root}/!ProtoCompiler" + protoc = "#{protoc_dir}/protoc" + plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin" + # Directory where the generated files will be placed. - dir = "Pods/" + s.name + dir = "#{pods_root}/#{s.name}" - # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. s.prepare_command = <<-CMD mkdir -p #{dir} - protoc -I #{src} --objc_out=#{dir} --objcgrpc_out=#{dir} #{src}/route_guide.proto + #{protoc} \ + --plugin=protoc-gen-grpc=#{plugin} \ + --objc_out=#{dir} \ + --grpc_out=#{dir} \ + -I #{src} \ + -I #{protoc_dir} \ + #{src}/route_guide.proto CMD + # Files generated by protoc s.subspec "Messages" do |ms| ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}" ms.header_mappings_dir = dir ms.requires_arc = false - ms.dependency "Protobuf", "~> 3.0.0-alpha-4" + # The generated files depend on the protobuf runtime. + ms.dependency "Protobuf" + # This is needed by all pods that depend on Protobuf: + ms.pod_target_xcconfig = { + 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', + } end + # Files generated by the gRPC plugin s.subspec "Services" do |ss| ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}" ss.header_mappings_dir = dir ss.requires_arc = true - ss.dependency "gRPC", "~> 0.12" + # The generated files depend on the gRPC runtime, and on the files generated by protoc. + ss.dependency "gRPC-ProtoRPC" ss.dependency "#{s.name}/Messages" end end diff --git a/examples/php/README.md b/examples/php/README.md index 6889a6cb7e0..54cc97d8c2f 100644 --- a/examples/php/README.md +++ b/examples/php/README.md @@ -11,7 +11,7 @@ INSTALL - Install the gRPC PHP extension ```sh - $ [sudo] pecl install grpc-beta + $ [sudo] pecl install grpc ``` - Clone this repository diff --git a/examples/php/composer.json b/examples/php/composer.json index 950e11367d0..d40b5db059e 100644 --- a/examples/php/composer.json +++ b/examples/php/composer.json @@ -2,13 +2,7 @@ "name": "grpc/grpc-demo", "description": "gRPC example for PHP", "minimum-stability": "dev", - "repositories": [ - { - "type": "vcs", - "url": "https://github.com/stanley-cheung/Protobuf-PHP" - } - ], "require": { - "grpc/grpc": "v0.15.0" + "grpc/grpc": "v0.15.2" } } diff --git a/examples/python/helloworld/greeter_client.py b/examples/python/helloworld/greeter_client.py index 40d637fb7b4..44d42c102b5 100644 --- a/examples/python/helloworld/greeter_client.py +++ b/examples/python/helloworld/greeter_client.py @@ -31,17 +31,15 @@ from __future__ import print_function -from grpc.beta import implementations +import grpc import helloworld_pb2 -_TIMEOUT_SECONDS = 10 - def run(): - channel = implementations.insecure_channel('localhost', 50051) - stub = helloworld_pb2.beta_create_Greeter_stub(channel) - response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'), _TIMEOUT_SECONDS) + channel = grpc.insecure_channel('localhost:50051') + stub = helloworld_pb2.GreeterStub(channel) + response = stub.SayHello(helloworld_pb2.HelloRequest(name='you')) print("Greeter client received: " + response.message) diff --git a/examples/python/helloworld/greeter_server.py b/examples/python/helloworld/greeter_server.py index 2cde5add435..37d8bd49ccd 100644 --- 
a/examples/python/helloworld/greeter_server.py +++ b/examples/python/helloworld/greeter_server.py @@ -29,21 +29,25 @@ """The Python implementation of the GRPC helloworld.Greeter server.""" +from concurrent import futures import time +import grpc + import helloworld_pb2 _ONE_DAY_IN_SECONDS = 60 * 60 * 24 -class Greeter(helloworld_pb2.BetaGreeterServicer): +class Greeter(helloworld_pb2.GreeterServicer): def SayHello(self, request, context): return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name) def serve(): - server = helloworld_pb2.beta_create_Greeter_server(Greeter()) + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + helloworld_pb2.add_GreeterServicer_to_server(Greeter(), server) server.add_insecure_port('[::]:50051') server.start() try: diff --git a/examples/python/helloworld/helloworld_pb2.py b/examples/python/helloworld/helloworld_pb2.py index 1ee80e4034c..3ce33fbf2bf 100644 --- a/examples/python/helloworld/helloworld_pb2.py +++ b/examples/python/helloworld/helloworld_pb2.py @@ -107,13 +107,55 @@ _sym_db.RegisterMessage(HelloReply) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW')) -import abc -import six +import grpc from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities + +class GreeterStub(object): + """The greeting service definition. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=HelloRequest.SerializeToString, + response_deserializer=HelloReply.FromString, + ) + + +class GreeterServicer(object): + """The greeting service definition. + """ + + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=HelloRequest.FromString, + response_serializer=HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + class BetaGreeterServicer(object): """The greeting service definition. """ @@ -122,23 +164,23 @@ class BetaGreeterServicer(object): """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + class BetaGreeterStub(object): """The greeting service definition. 
""" - def SayHello(self, request, timeout): + def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): """Sends a greeting """ raise NotImplementedError() SayHello.future = None + def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import helloworld_pb2 - import helloworld_pb2 request_deserializers = { - ('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloRequest.FromString, + ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, } response_serializers = { - ('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloReply.SerializeToString, + ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, } method_implementations = { ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), @@ -146,14 +188,13 @@ def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_time server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) return beta_implementations.server(method_implementations, options=server_options) + def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import helloworld_pb2 - import helloworld_pb2 request_serializers = { - ('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloRequest.SerializeToString, + ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, } response_deserializers = { - ('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloReply.FromString, + ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, } cardinalities = { 'SayHello': cardinality.Cardinality.UNARY_UNARY, diff --git a/src/python/grpcio/grpc/framework/core/__init__.py b/examples/python/helloworld/run_codegen.py similarity index 86% rename from src/python/grpcio/grpc/framework/core/__init__.py rename to examples/python/helloworld/run_codegen.py index 70865191060..4835ec2b4d2 100644 --- a/src/python/grpcio/grpc/framework/core/__init__.py +++ b/examples/python/helloworld/run_codegen.py @@ -27,4 +27,16 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +"""Runs protoc with the gRPC plugin to generate messages and gRPC stubs.""" +from grpc.tools import protoc + +protoc.main( + ( + '', + '-I../../protos', + '--python_out=.', + '--grpc_python_out=.', + '../../protos/helloworld.proto', + ) +) diff --git a/examples/python/multiplex/.gitignore b/examples/python/multiplex/.gitignore new file mode 100644 index 00000000000..0d20b6487c6 --- /dev/null +++ b/examples/python/multiplex/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/examples/python/multiplex/README.md b/examples/python/multiplex/README.md new file mode 100644 index 00000000000..bad3a42b370 --- /dev/null +++ b/examples/python/multiplex/README.md @@ -0,0 +1 @@ +An example showing two stubs sharing a channel and two servicers sharing a server. diff --git a/examples/python/multiplex/helloworld_pb2.py b/examples/python/multiplex/helloworld_pb2.py new file mode 100644 index 00000000000..3ce33fbf2bf --- /dev/null +++ b/examples/python/multiplex/helloworld_pb2.py @@ -0,0 +1,204 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: helloworld.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='helloworld.proto', + package='helloworld', + syntax='proto3', + serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_HELLOREQUEST = _descriptor.Descriptor( + name='HelloRequest', + full_name='helloworld.HelloRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='helloworld.HelloRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=32, + serialized_end=60, +) + + +_HELLOREPLY = _descriptor.Descriptor( + name='HelloReply', + full_name='helloworld.HelloReply', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='message', full_name='helloworld.HelloReply.message', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=62, + serialized_end=91, +) + +DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST +DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY + +HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict( + DESCRIPTOR = _HELLOREQUEST, + __module__ = 'helloworld_pb2' + # @@protoc_insertion_point(class_scope:helloworld.HelloRequest) + )) +_sym_db.RegisterMessage(HelloRequest) + +HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict( + DESCRIPTOR = _HELLOREPLY, + __module__ = 'helloworld_pb2' + # @@protoc_insertion_point(class_scope:helloworld.HelloReply) + )) +_sym_db.RegisterMessage(HelloReply) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW')) +import grpc +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class 
GreeterStub(object): + """The greeting service definition. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=HelloRequest.SerializeToString, + response_deserializer=HelloReply.FromString, + ) + + +class GreeterServicer(object): + """The greeting service definition. + """ + + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=HelloRequest.FromString, + response_serializer=HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaGreeterServicer(object): + """The greeting service definition. + """ + def SayHello(self, request, context): + """Sends a greeting + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaGreeterStub(object): + """The greeting service definition. + """ + def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Sends a greeting + """ + raise NotImplementedError() + SayHello.future = None + + +def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, + } + response_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, + } + method_implementations = { + ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, + } + response_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, + } + cardinalities = { + 'SayHello': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options) +# @@protoc_insertion_point(module_scope) diff --git a/examples/python/multiplex/multiplex_client.py b/examples/python/multiplex/multiplex_client.py new file mode 100644 index 00000000000..2e8162926b7 --- /dev/null +++ b/examples/python/multiplex/multiplex_client.py @@ -0,0 +1,139 @@ +# Copyright 2016, Google Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A client that makes both Greeter and RouteGuide RPCs.""" + +from __future__ import print_function + +import random +import time + +import grpc + +import helloworld_pb2 +import route_guide_pb2 +import route_guide_resources + + +def make_route_note(message, latitude, longitude): + return route_guide_pb2.RouteNote( + message=message, + location=route_guide_pb2.Point(latitude=latitude, longitude=longitude)) + + +def guide_get_one_feature(route_guide_stub, point): + feature = route_guide_stub.GetFeature(point) + if not feature.location: + print("Server returned incomplete feature") + return + + if feature.name: + print("Feature called %s at %s" % (feature.name, feature.location)) + else: + print("Found no feature at %s" % feature.location) + + +def guide_get_feature(route_guide_stub): + guide_get_one_feature( + route_guide_stub, + route_guide_pb2.Point(latitude=409146138, longitude=-746188906)) + guide_get_one_feature( + route_guide_stub, route_guide_pb2.Point(latitude=0, longitude=0)) + + +def guide_list_features(route_guide_stub): + rectangle = route_guide_pb2.Rectangle( + lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000), + hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000)) + print("Looking for features between 40, -75 and 42, -73") + + features = route_guide_stub.ListFeatures(rectangle) + + for feature in features: + print("Feature called %s at %s" % (feature.name, feature.location)) + + +def generate_route(feature_list): + for _ in range(0, 10): + random_feature = feature_list[random.randint(0, len(feature_list) - 1)] + print("Visiting point %s" % random_feature.location) + yield random_feature.location + time.sleep(random.uniform(0.5, 1.5)) + + +def guide_record_route(route_guide_stub): + feature_list = route_guide_resources.read_route_guide_database() + + route_iterator = generate_route(feature_list) + route_summary = route_guide_stub.RecordRoute(route_iterator) + print("Finished trip with %s points " % route_summary.point_count) + print("Passed %s features " % 
route_summary.feature_count) + print("Travelled %s meters " % route_summary.distance) + print("It took %s seconds " % route_summary.elapsed_time) + + +def generate_messages(): + messages = [ + make_route_note("First message", 0, 0), + make_route_note("Second message", 0, 1), + make_route_note("Third message", 1, 0), + make_route_note("Fourth message", 0, 0), + make_route_note("Fifth message", 1, 0), + ] + for msg in messages: + print("Sending %s at %s" % (msg.message, msg.location)) + yield msg + time.sleep(random.uniform(0.5, 1.0)) + + +def guide_route_chat(route_guide_stub): + responses = route_guide_stub.RouteChat(generate_messages()) + for response in responses: + print("Received message %s at %s" % (response.message, response.location)) + + +def run(): + channel = grpc.insecure_channel('localhost:50051') + greeter_stub = helloworld_pb2.GreeterStub(channel) + route_guide_stub = route_guide_pb2.RouteGuideStub(channel) + greeter_response = greeter_stub.SayHello( + helloworld_pb2.HelloRequest(name='you')) + print("Greeter client received: " + greeter_response.message) + print("-------------- GetFeature --------------") + guide_get_feature(route_guide_stub) + print("-------------- ListFeatures --------------") + guide_list_features(route_guide_stub) + print("-------------- RecordRoute --------------") + guide_record_route(route_guide_stub) + print("-------------- RouteChat --------------") + guide_route_chat(route_guide_stub) + + +if __name__ == '__main__': + run() diff --git a/examples/python/multiplex/multiplex_server.py b/examples/python/multiplex/multiplex_server.py new file mode 100644 index 00000000000..32a4ee4a497 --- /dev/null +++ b/examples/python/multiplex/multiplex_server.py @@ -0,0 +1,149 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
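For reference, `multiplex_client.py` above drives two stubs over one shared channel and exercises all four RPC cardinalities. The sketch below is not part of the diff; it simply maps each cardinality to its call shape on the generated `RouteGuideStub`, assuming a server such as `multiplex_server.py` is listening on `localhost:50051`.

```python
# Illustrative only: the four RPC cardinalities as seen from the generated stub.
import grpc

import route_guide_pb2

channel = grpc.insecure_channel('localhost:50051')
stub = route_guide_pb2.RouteGuideStub(channel)

# Unary-unary: one request in, one response out.
feature = stub.GetFeature(
    route_guide_pb2.Point(latitude=409146138, longitude=-746188906))

# Unary-stream: one request in, an iterator of responses out.
rectangle = route_guide_pb2.Rectangle(
    lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
    hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
for listed in stub.ListFeatures(rectangle):
    print(listed.name)

# Stream-unary: an iterator of requests in, one response out.
summary = stub.RecordRoute(iter([feature.location]))

# Stream-stream: request and response are both iterators.
for note in stub.RouteChat(iter([route_guide_pb2.RouteNote(message='Hi')])):
    print(note.message)
```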
+ +"""A gRPC server servicing both Greeter and RouteGuide RPCs.""" + +from concurrent import futures +import time +import math + +import grpc + +import helloworld_pb2 +import route_guide_pb2 +import route_guide_resources + +_ONE_DAY_IN_SECONDS = 60 * 60 * 24 + + +def _get_feature(feature_db, point): + """Returns Feature at given location or None.""" + for feature in feature_db: + if feature.location == point: + return feature + return None + + +def _get_distance(start, end): + """Distance between two points.""" + coord_factor = 10000000.0 + lat_1 = start.latitude / coord_factor + lat_2 = end.latitude / coord_factor + lon_1 = start.longitude / coord_factor + lon_2 = end.longitude / coord_factor + lat_rad_1 = math.radians(lat_1) + lat_rad_2 = math.radians(lat_2) + delta_lat_rad = math.radians(lat_2 - lat_1) + delta_lon_rad = math.radians(lon_2 - lon_1) + + a = (pow(math.sin(delta_lat_rad / 2), 2) + + (math.cos(lat_rad_1) * math.cos(lat_rad_2) * + pow(math.sin(delta_lon_rad / 2), 2))) + c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) + R = 6371000; # metres + return R * c; + + +class _GreeterServicer(helloworld_pb2.GreeterServicer): + + def SayHello(self, request, context): + return helloworld_pb2.HelloReply(message='Hello, {}!'.format(request.name)) + + +class _RouteGuideServicer(route_guide_pb2.RouteGuideServicer): + """Provides methods that implement functionality of route guide server.""" + + def __init__(self): + self.db = route_guide_resources.read_route_guide_database() + + def GetFeature(self, request, context): + feature = _get_feature(self.db, request) + if feature is None: + return route_guide_pb2.Feature(name="", location=request) + else: + return feature + + def ListFeatures(self, request, context): + left = min(request.lo.longitude, request.hi.longitude) + right = max(request.lo.longitude, request.hi.longitude) + top = max(request.lo.latitude, request.hi.latitude) + bottom = min(request.lo.latitude, request.hi.latitude) + for feature in self.db: + if (feature.location.longitude >= left and + feature.location.longitude <= right and + feature.location.latitude >= bottom and + feature.location.latitude <= top): + yield feature + + def RecordRoute(self, request_iterator, context): + point_count = 0 + feature_count = 0 + distance = 0.0 + prev_point = None + + start_time = time.time() + for point in request_iterator: + point_count += 1 + if _get_feature(self.db, point): + feature_count += 1 + if prev_point: + distance += _get_distance(prev_point, point) + prev_point = point + + elapsed_time = time.time() - start_time + return route_guide_pb2.RouteSummary(point_count=point_count, + feature_count=feature_count, + distance=int(distance), + elapsed_time=int(elapsed_time)) + + def RouteChat(self, request_iterator, context): + prev_notes = [] + for new_note in request_iterator: + for prev_note in prev_notes: + if prev_note.location == new_note.location: + yield prev_note + prev_notes.append(new_note) + + +def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + helloworld_pb2.add_GreeterServicer_to_server(_GreeterServicer(), server) + route_guide_pb2.add_RouteGuideServicer_to_server( + _RouteGuideServicer(), server) + server.add_insecure_port('[::]:50051') + server.start() + try: + while True: + time.sleep(_ONE_DAY_IN_SECONDS) + except KeyboardInterrupt: + server.stop(0) + + +if __name__ == '__main__': + serve() diff --git a/examples/python/multiplex/route_guide_db.json b/examples/python/multiplex/route_guide_db.json new file mode 100644 index 
00000000000..9d6a980ab7d --- /dev/null +++ b/examples/python/multiplex/route_guide_db.json @@ -0,0 +1,601 @@ +[{ + "location": { + "latitude": 407838351, + "longitude": -746143763 + }, + "name": "Patriots Path, Mendham, NJ 07945, USA" +}, { + "location": { + "latitude": 408122808, + "longitude": -743999179 + }, + "name": "101 New Jersey 10, Whippany, NJ 07981, USA" +}, { + "location": { + "latitude": 413628156, + "longitude": -749015468 + }, + "name": "U.S. 6, Shohola, PA 18458, USA" +}, { + "location": { + "latitude": 419999544, + "longitude": -740371136 + }, + "name": "5 Conners Road, Kingston, NY 12401, USA" +}, { + "location": { + "latitude": 414008389, + "longitude": -743951297 + }, + "name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA" +}, { + "location": { + "latitude": 419611318, + "longitude": -746524769 + }, + "name": "287 Flugertown Road, Livingston Manor, NY 12758, USA" +}, { + "location": { + "latitude": 406109563, + "longitude": -742186778 + }, + "name": "4001 Tremley Point Road, Linden, NJ 07036, USA" +}, { + "location": { + "latitude": 416802456, + "longitude": -742370183 + }, + "name": "352 South Mountain Road, Wallkill, NY 12589, USA" +}, { + "location": { + "latitude": 412950425, + "longitude": -741077389 + }, + "name": "Bailey Turn Road, Harriman, NY 10926, USA" +}, { + "location": { + "latitude": 412144655, + "longitude": -743949739 + }, + "name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA" +}, { + "location": { + "latitude": 415736605, + "longitude": -742847522 + }, + "name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA" +}, { + "location": { + "latitude": 413843930, + "longitude": -740501726 + }, + "name": "162 Merrill Road, Highland Mills, NY 10930, USA" +}, { + "location": { + "latitude": 410873075, + "longitude": -744459023 + }, + "name": "Clinton Road, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 412346009, + "longitude": -744026814 + }, + "name": "16 Old Brook Lane, Warwick, NY 10990, USA" +}, { + "location": { + "latitude": 402948455, + "longitude": -747903913 + }, + "name": "3 Drake Lane, Pennington, NJ 08534, USA" +}, { + "location": { + "latitude": 406337092, + "longitude": -740122226 + }, + "name": "6324 8th Avenue, Brooklyn, NY 11220, USA" +}, { + "location": { + "latitude": 406421967, + "longitude": -747727624 + }, + "name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA" +}, { + "location": { + "latitude": 416318082, + "longitude": -749677716 + }, + "name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA" +}, { + "location": { + "latitude": 415301720, + "longitude": -748416257 + }, + "name": "282 Lakeview Drive Road, Highland Lake, NY 12743, USA" +}, { + "location": { + "latitude": 402647019, + "longitude": -747071791 + }, + "name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA" +}, { + "location": { + "latitude": 412567807, + "longitude": -741058078 + }, + "name": "New York State Reference Route 987E, Southfields, NY 10975, USA" +}, { + "location": { + "latitude": 416855156, + "longitude": -744420597 + }, + "name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA" +}, { + "location": { + "latitude": 404663628, + "longitude": -744820157 + }, + "name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA" +}, { + "location": { + "latitude": 407113723, + "longitude": -749746483 + }, + "name": "" +}, { + "location": { + "latitude": 402133926, + "longitude": -743613249 + }, + "name": "" +}, { + "location": { + "latitude": 400273442, + "longitude": -741220915 + }, + "name": "" +}, { + 
"location": { + "latitude": 411236786, + "longitude": -744070769 + }, + "name": "" +}, { + "location": { + "latitude": 411633782, + "longitude": -746784970 + }, + "name": "211-225 Plains Road, Augusta, NJ 07822, USA" +}, { + "location": { + "latitude": 415830701, + "longitude": -742952812 + }, + "name": "" +}, { + "location": { + "latitude": 413447164, + "longitude": -748712898 + }, + "name": "165 Pedersen Ridge Road, Milford, PA 18337, USA" +}, { + "location": { + "latitude": 405047245, + "longitude": -749800722 + }, + "name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA" +}, { + "location": { + "latitude": 418858923, + "longitude": -746156790 + }, + "name": "" +}, { + "location": { + "latitude": 417951888, + "longitude": -748484944 + }, + "name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA" +}, { + "location": { + "latitude": 407033786, + "longitude": -743977337 + }, + "name": "26 East 3rd Street, New Providence, NJ 07974, USA" +}, { + "location": { + "latitude": 417548014, + "longitude": -740075041 + }, + "name": "" +}, { + "location": { + "latitude": 410395868, + "longitude": -744972325 + }, + "name": "" +}, { + "location": { + "latitude": 404615353, + "longitude": -745129803 + }, + "name": "" +}, { + "location": { + "latitude": 406589790, + "longitude": -743560121 + }, + "name": "611 Lawrence Avenue, Westfield, NJ 07090, USA" +}, { + "location": { + "latitude": 414653148, + "longitude": -740477477 + }, + "name": "18 Lannis Avenue, New Windsor, NY 12553, USA" +}, { + "location": { + "latitude": 405957808, + "longitude": -743255336 + }, + "name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA" +}, { + "location": { + "latitude": 411733589, + "longitude": -741648093 + }, + "name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA" +}, { + "location": { + "latitude": 412676291, + "longitude": -742606606 + }, + "name": "1270 Lakes Road, Monroe, NY 10950, USA" +}, { + "location": { + "latitude": 409224445, + "longitude": -748286738 + }, + "name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA" +}, { + "location": { + "latitude": 406523420, + "longitude": -742135517 + }, + "name": "652 Garden Street, Elizabeth, NJ 07202, USA" +}, { + "location": { + "latitude": 401827388, + "longitude": -740294537 + }, + "name": "349 Sea Spray Court, Neptune City, NJ 07753, USA" +}, { + "location": { + "latitude": 410564152, + "longitude": -743685054 + }, + "name": "13-17 Stanley Street, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 408472324, + "longitude": -740726046 + }, + "name": "47 Industrial Avenue, Teterboro, NJ 07608, USA" +}, { + "location": { + "latitude": 412452168, + "longitude": -740214052 + }, + "name": "5 White Oak Lane, Stony Point, NY 10980, USA" +}, { + "location": { + "latitude": 409146138, + "longitude": -746188906 + }, + "name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA" +}, { + "location": { + "latitude": 404701380, + "longitude": -744781745 + }, + "name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 409642566, + "longitude": -746017679 + }, + "name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA" +}, { + "location": { + "latitude": 408031728, + "longitude": -748645385 + }, + "name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA" +}, { + "location": { + "latitude": 413700272, + "longitude": -742135189 + }, + "name": "367 Prospect Road, Chester, NY 10918, USA" +}, { + "location": { + "latitude": 404310607, + "longitude": -740282632 + }, + "name": "10 Simon Lake Drive, 
Atlantic Highlands, NJ 07716, USA" +}, { + "location": { + "latitude": 409319800, + "longitude": -746201391 + }, + "name": "11 Ward Street, Mount Arlington, NJ 07856, USA" +}, { + "location": { + "latitude": 406685311, + "longitude": -742108603 + }, + "name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA" +}, { + "location": { + "latitude": 419018117, + "longitude": -749142781 + }, + "name": "43 Dreher Road, Roscoe, NY 12776, USA" +}, { + "location": { + "latitude": 412856162, + "longitude": -745148837 + }, + "name": "Swan Street, Pine Island, NY 10969, USA" +}, { + "location": { + "latitude": 416560744, + "longitude": -746721964 + }, + "name": "66 Pleasantview Avenue, Monticello, NY 12701, USA" +}, { + "location": { + "latitude": 405314270, + "longitude": -749836354 + }, + "name": "" +}, { + "location": { + "latitude": 414219548, + "longitude": -743327440 + }, + "name": "" +}, { + "location": { + "latitude": 415534177, + "longitude": -742900616 + }, + "name": "565 Winding Hills Road, Montgomery, NY 12549, USA" +}, { + "location": { + "latitude": 406898530, + "longitude": -749127080 + }, + "name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA" +}, { + "location": { + "latitude": 407586880, + "longitude": -741670168 + }, + "name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA" +}, { + "location": { + "latitude": 400106455, + "longitude": -742870190 + }, + "name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA" +}, { + "location": { + "latitude": 400066188, + "longitude": -746793294 + }, + "name": "" +}, { + "location": { + "latitude": 418803880, + "longitude": -744102673 + }, + "name": "40 Mountain Road, Napanoch, NY 12458, USA" +}, { + "location": { + "latitude": 414204288, + "longitude": -747895140 + }, + "name": "" +}, { + "location": { + "latitude": 414777405, + "longitude": -740615601 + }, + "name": "" +}, { + "location": { + "latitude": 415464475, + "longitude": -747175374 + }, + "name": "48 North Road, Forestburgh, NY 12777, USA" +}, { + "location": { + "latitude": 404062378, + "longitude": -746376177 + }, + "name": "" +}, { + "location": { + "latitude": 405688272, + "longitude": -749285130 + }, + "name": "" +}, { + "location": { + "latitude": 400342070, + "longitude": -748788996 + }, + "name": "" +}, { + "location": { + "latitude": 401809022, + "longitude": -744157964 + }, + "name": "" +}, { + "location": { + "latitude": 404226644, + "longitude": -740517141 + }, + "name": "9 Thompson Avenue, Leonardo, NJ 07737, USA" +}, { + "location": { + "latitude": 410322033, + "longitude": -747871659 + }, + "name": "" +}, { + "location": { + "latitude": 407100674, + "longitude": -747742727 + }, + "name": "" +}, { + "location": { + "latitude": 418811433, + "longitude": -741718005 + }, + "name": "213 Bush Road, Stone Ridge, NY 12484, USA" +}, { + "location": { + "latitude": 415034302, + "longitude": -743850945 + }, + "name": "" +}, { + "location": { + "latitude": 411349992, + "longitude": -743694161 + }, + "name": "" +}, { + "location": { + "latitude": 404839914, + "longitude": -744759616 + }, + "name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 414638017, + "longitude": -745957854 + }, + "name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA" +}, { + "location": { + "latitude": 412127800, + "longitude": -740173578 + }, + "name": "" +}, { + "location": { + "latitude": 401263460, + "longitude": -747964303 + }, + "name": "" +}, { + "location": { + "latitude": 412843391, + "longitude": -749086026 + }, + "name": "" +}, { + 
"location": { + "latitude": 418512773, + "longitude": -743067823 + }, + "name": "" +}, { + "location": { + "latitude": 404318328, + "longitude": -740835638 + }, + "name": "42-102 Main Street, Belford, NJ 07718, USA" +}, { + "location": { + "latitude": 419020746, + "longitude": -741172328 + }, + "name": "" +}, { + "location": { + "latitude": 404080723, + "longitude": -746119569 + }, + "name": "" +}, { + "location": { + "latitude": 401012643, + "longitude": -744035134 + }, + "name": "" +}, { + "location": { + "latitude": 404306372, + "longitude": -741079661 + }, + "name": "" +}, { + "location": { + "latitude": 403966326, + "longitude": -748519297 + }, + "name": "" +}, { + "location": { + "latitude": 405002031, + "longitude": -748407866 + }, + "name": "" +}, { + "location": { + "latitude": 409532885, + "longitude": -742200683 + }, + "name": "" +}, { + "location": { + "latitude": 416851321, + "longitude": -742674555 + }, + "name": "" +}, { + "location": { + "latitude": 406411633, + "longitude": -741722051 + }, + "name": "3387 Richmond Terrace, Staten Island, NY 10303, USA" +}, { + "location": { + "latitude": 413069058, + "longitude": -744597778 + }, + "name": "261 Van Sickle Road, Goshen, NY 10924, USA" +}, { + "location": { + "latitude": 418465462, + "longitude": -746859398 + }, + "name": "" +}, { + "location": { + "latitude": 411733222, + "longitude": -744228360 + }, + "name": "" +}, { + "location": { + "latitude": 410248224, + "longitude": -747127767 + }, + "name": "3 Hasta Way, Newton, NJ 07860, USA" +}] diff --git a/examples/python/multiplex/route_guide_pb2.py b/examples/python/multiplex/route_guide_pb2.py new file mode 100644 index 00000000000..924e186e06e --- /dev/null +++ b/examples/python/multiplex/route_guide_pb2.py @@ -0,0 +1,516 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: route_guide.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='route_guide.proto', + package='routeguide', + syntax='proto3', + serialized_pb=_b('\n\x11route_guide.proto\x12\nrouteguide\",\n\x05Point\x12\x10\n\x08latitude\x18\x01 \x01(\x05\x12\x11\n\tlongitude\x18\x02 \x01(\x05\"I\n\tRectangle\x12\x1d\n\x02lo\x18\x01 \x01(\x0b\x32\x11.routeguide.Point\x12\x1d\n\x02hi\x18\x02 \x01(\x0b\x32\x11.routeguide.Point\"<\n\x07\x46\x65\x61ture\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x08location\x18\x02 \x01(\x0b\x32\x11.routeguide.Point\"A\n\tRouteNote\x12#\n\x08location\x18\x01 \x01(\x0b\x32\x11.routeguide.Point\x12\x0f\n\x07message\x18\x02 \x01(\t\"b\n\x0cRouteSummary\x12\x13\n\x0bpoint_count\x18\x01 \x01(\x05\x12\x15\n\rfeature_count\x18\x02 \x01(\x05\x12\x10\n\x08\x64istance\x18\x03 \x01(\x05\x12\x14\n\x0c\x65lapsed_time\x18\x04 \x01(\x05\x32\x85\x02\n\nRouteGuide\x12\x36\n\nGetFeature\x12\x11.routeguide.Point\x1a\x13.routeguide.Feature\"\x00\x12>\n\x0cListFeatures\x12\x15.routeguide.Rectangle\x1a\x13.routeguide.Feature\"\x00\x30\x01\x12>\n\x0bRecordRoute\x12\x11.routeguide.Point\x1a\x18.routeguide.RouteSummary\"\x00(\x01\x12?\n\tRouteChat\x12\x15.routeguide.RouteNote\x1a\x15.routeguide.RouteNote\"\x00(\x01\x30\x01\x42\x36\n\x1bio.grpc.examples.routeguideB\x0fRouteGuideProtoP\x01\xa2\x02\x03RTGb\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_POINT = _descriptor.Descriptor( + name='Point', + full_name='routeguide.Point', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='latitude', full_name='routeguide.Point.latitude', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='longitude', full_name='routeguide.Point.longitude', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=33, + serialized_end=77, +) + + +_RECTANGLE = _descriptor.Descriptor( + name='Rectangle', + full_name='routeguide.Rectangle', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='lo', full_name='routeguide.Rectangle.lo', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hi', full_name='routeguide.Rectangle.hi', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=79, + serialized_end=152, +) + + +_FEATURE = _descriptor.Descriptor( + name='Feature', + full_name='routeguide.Feature', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='routeguide.Feature.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='location', full_name='routeguide.Feature.location', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=154, + serialized_end=214, +) + + +_ROUTENOTE = _descriptor.Descriptor( + name='RouteNote', + full_name='routeguide.RouteNote', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='location', full_name='routeguide.RouteNote.location', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='message', full_name='routeguide.RouteNote.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=216, + serialized_end=281, +) + + +_ROUTESUMMARY = _descriptor.Descriptor( + name='RouteSummary', + full_name='routeguide.RouteSummary', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='point_count', full_name='routeguide.RouteSummary.point_count', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='feature_count', full_name='routeguide.RouteSummary.feature_count', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='distance', full_name='routeguide.RouteSummary.distance', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='elapsed_time', full_name='routeguide.RouteSummary.elapsed_time', index=3, + number=4, type=5, cpp_type=1, label=1, + 
has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=283, + serialized_end=381, +) + +_RECTANGLE.fields_by_name['lo'].message_type = _POINT +_RECTANGLE.fields_by_name['hi'].message_type = _POINT +_FEATURE.fields_by_name['location'].message_type = _POINT +_ROUTENOTE.fields_by_name['location'].message_type = _POINT +DESCRIPTOR.message_types_by_name['Point'] = _POINT +DESCRIPTOR.message_types_by_name['Rectangle'] = _RECTANGLE +DESCRIPTOR.message_types_by_name['Feature'] = _FEATURE +DESCRIPTOR.message_types_by_name['RouteNote'] = _ROUTENOTE +DESCRIPTOR.message_types_by_name['RouteSummary'] = _ROUTESUMMARY + +Point = _reflection.GeneratedProtocolMessageType('Point', (_message.Message,), dict( + DESCRIPTOR = _POINT, + __module__ = 'route_guide_pb2' + # @@protoc_insertion_point(class_scope:routeguide.Point) + )) +_sym_db.RegisterMessage(Point) + +Rectangle = _reflection.GeneratedProtocolMessageType('Rectangle', (_message.Message,), dict( + DESCRIPTOR = _RECTANGLE, + __module__ = 'route_guide_pb2' + # @@protoc_insertion_point(class_scope:routeguide.Rectangle) + )) +_sym_db.RegisterMessage(Rectangle) + +Feature = _reflection.GeneratedProtocolMessageType('Feature', (_message.Message,), dict( + DESCRIPTOR = _FEATURE, + __module__ = 'route_guide_pb2' + # @@protoc_insertion_point(class_scope:routeguide.Feature) + )) +_sym_db.RegisterMessage(Feature) + +RouteNote = _reflection.GeneratedProtocolMessageType('RouteNote', (_message.Message,), dict( + DESCRIPTOR = _ROUTENOTE, + __module__ = 'route_guide_pb2' + # @@protoc_insertion_point(class_scope:routeguide.RouteNote) + )) +_sym_db.RegisterMessage(RouteNote) + +RouteSummary = _reflection.GeneratedProtocolMessageType('RouteSummary', (_message.Message,), dict( + DESCRIPTOR = _ROUTESUMMARY, + __module__ = 'route_guide_pb2' + # @@protoc_insertion_point(class_scope:routeguide.RouteSummary) + )) +_sym_db.RegisterMessage(RouteSummary) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG')) +import grpc +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class RouteGuideStub(object): + """Interface exported by the server. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=Point.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=Rectangle.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=Point.SerializeToString, + response_deserializer=RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=RouteNote.SerializeToString, + response_deserializer=RouteNote.FromString, + ) + + +class RouteGuideServicer(object): + """Interface exported by the server. + """ + + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=Point.FromString, + response_serializer=Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=Rectangle.FromString, + response_serializer=Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=Point.FromString, + response_serializer=RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=RouteNote.FromString, + response_serializer=RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaRouteGuideServicer(object): + """Interface exported by the server. + """ + def GetFeature(self, request, context): + """A simple RPC. 
+ + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaRouteGuideStub(object): + """Interface exported by the server. + """ + def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + raise NotImplementedError() + GetFeature.future = None + def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + raise NotImplementedError() + def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + raise NotImplementedError() + RecordRoute.future = None + def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). 
+ """ + raise NotImplementedError() + + +def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + response_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + method_implementations = { + ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), + ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures), + ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute), + ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + response_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + cardinalities = { + 'GetFeature': cardinality.Cardinality.UNARY_UNARY, + 'ListFeatures': cardinality.Cardinality.UNARY_STREAM, + 'RecordRoute': cardinality.Cardinality.STREAM_UNARY, + 'RouteChat': cardinality.Cardinality.STREAM_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options) +# @@protoc_insertion_point(module_scope) diff --git a/src/python/grpcio/grpc/framework/foundation/later.py b/examples/python/multiplex/route_guide_resources.py similarity index 69% rename from src/python/grpcio/grpc/framework/foundation/later.py rename to examples/python/multiplex/route_guide_resources.py index 1d1e0650413..30c7711019f 100644 --- a/src/python/grpcio/grpc/framework/foundation/later.py +++ b/examples/python/multiplex/route_guide_resources.py @@ -27,25 +27,27 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-"""Enables scheduling execution at a later time.""" +"""Common resources used in the gRPC route guide example.""" -import time +import json -from grpc.framework.foundation import _timer_future +import route_guide_pb2 -def later(delay, computation): - """Schedules later execution of a callable. - - Args: - delay: Any numeric value. Represents the minimum length of time in seconds - to allow to pass before beginning the computation. No guarantees are made - about the maximum length of time that will pass. - computation: A callable that accepts no arguments. +def read_route_guide_database(): + """Reads the route guide database. Returns: - A Future representing the scheduled computation. + The full contents of the route guide database as a sequence of + route_guide_pb2.Features. """ - timer_future = _timer_future.TimerFuture(time.time() + delay, computation) - timer_future.start() - return timer_future + feature_list = [] + with open("route_guide_db.json") as route_guide_db_file: + for item in json.load(route_guide_db_file): + feature = route_guide_pb2.Feature( + name=item["name"], + location=route_guide_pb2.Point( + latitude=item["location"]["latitude"], + longitude=item["location"]["longitude"])) + feature_list.append(feature) + return feature_list diff --git a/src/python/grpcio/grpc/framework/interfaces/links/utilities.py b/examples/python/multiplex/run_codegen.py old mode 100644 new mode 100755 similarity index 77% rename from src/python/grpcio/grpc/framework/interfaces/links/utilities.py rename to examples/python/multiplex/run_codegen.py index 6e4fd76d932..7922a0f5c7f --- a/src/python/grpcio/grpc/framework/interfaces/links/utilities.py +++ b/examples/python/multiplex/run_codegen.py @@ -1,4 +1,4 @@ -# Copyright 2015, Google Inc. +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,18 +27,25 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-"""Utilities provided as part of the links interface.""" +"""Generates protocol messages and gRPC stubs.""" -from grpc.framework.interfaces.links import links +from grpc.tools import protoc - -class _NullLink(links.Link): - """A do-nothing links.Link.""" - - def accept_ticket(self, ticket): - pass - - def join_link(self, link): - pass - -NULL_LINK = _NullLink() +protoc.main( + ( + '', + '-I../../protos', + '--python_out=.', + '--grpc_python_out=.', + '../../protos/helloworld.proto', + ) +) +protoc.main( + ( + '', + '-I../../protos', + '--python_out=.', + '--grpc_python_out=.', + '../../protos/route_guide.proto', + ) +) diff --git a/examples/python/route_guide/route_guide_client.py b/examples/python/route_guide/route_guide_client.py index ffcbd061d6e..8a80ed892de 100644 --- a/examples/python/route_guide/route_guide_client.py +++ b/examples/python/route_guide/route_guide_client.py @@ -34,13 +34,11 @@ from __future__ import print_function import random import time -from grpc.beta import implementations +import grpc import route_guide_pb2 import route_guide_resources -_TIMEOUT_SECONDS = 30 - def make_route_note(message, latitude, longitude): return route_guide_pb2.RouteNote( @@ -49,7 +47,7 @@ def make_route_note(message, latitude, longitude): def guide_get_one_feature(stub, point): - feature = stub.GetFeature(point, _TIMEOUT_SECONDS) + feature = stub.GetFeature(point) if not feature.location: print("Server returned incomplete feature") return @@ -66,14 +64,12 @@ def guide_get_feature(stub): def guide_list_features(stub): - rect = route_guide_pb2.Rectangle( - lo=route_guide_pb2.Point( - latitude=400000000, longitude = -750000000), - hi=route_guide_pb2.Point( - latitude = 420000000, longitude = -730000000)) + rectangle = route_guide_pb2.Rectangle( + lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000), + hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000)) print("Looking for features between 40, -75 and 42, -73") - features = stub.ListFeatures(rect, _TIMEOUT_SECONDS) + features = stub.ListFeatures(rectangle) for feature in features: print("Feature called %s at %s" % (feature.name, feature.location)) @@ -90,8 +86,8 @@ def generate_route(feature_list): def guide_record_route(stub): feature_list = route_guide_resources.read_route_guide_database() - route_iter = generate_route(feature_list) - route_summary = stub.RecordRoute(route_iter, _TIMEOUT_SECONDS) + route_iterator = generate_route(feature_list) + route_summary = stub.RecordRoute(route_iterator) print("Finished trip with %s points " % route_summary.point_count) print("Passed %s features " % route_summary.feature_count) print("Travelled %s meters " % route_summary.distance) @@ -113,14 +109,14 @@ def generate_messages(): def guide_route_chat(stub): - responses = stub.RouteChat(generate_messages(), _TIMEOUT_SECONDS) + responses = stub.RouteChat(generate_messages()) for response in responses: print("Received message %s at %s" % (response.message, response.location)) def run(): - channel = implementations.insecure_channel('localhost', 50051) - stub = route_guide_pb2.beta_create_RouteGuide_stub(channel) + channel = grpc.insecure_channel('localhost:50051') + stub = route_guide_pb2.RouteGuideStub(channel) print("-------------- GetFeature --------------") guide_get_feature(stub) print("-------------- ListFeatures --------------") diff --git a/examples/python/route_guide/route_guide_pb2.py b/examples/python/route_guide/route_guide_pb2.py index 81d5d075274..924e186e06e 100644 --- 
a/examples/python/route_guide/route_guide_pb2.py +++ b/examples/python/route_guide/route_guide_pb2.py @@ -277,13 +277,122 @@ _sym_db.RegisterMessage(RouteSummary) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG')) -import abc -import six +import grpc from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities + +class RouteGuideStub(object): + """Interface exported by the server. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=Point.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=Rectangle.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=Point.SerializeToString, + response_deserializer=RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=RouteNote.SerializeToString, + response_deserializer=RouteNote.FromString, + ) + + +class RouteGuideServicer(object): + """Interface exported by the server. + """ + + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=Point.FromString, + response_serializer=Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=Rectangle.FromString, + response_serializer=Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=Point.FromString, + response_serializer=RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=RouteNote.FromString, + response_serializer=RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + class BetaRouteGuideServicer(object): """Interface exported by the server. """ @@ -320,10 +429,11 @@ class BetaRouteGuideServicer(object): """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + class BetaRouteGuideStub(object): """Interface exported by the server. """ - def GetFeature(self, request, timeout): + def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): """A simple RPC. Obtains the feature at a given position. @@ -333,7 +443,7 @@ class BetaRouteGuideStub(object): """ raise NotImplementedError() GetFeature.future = None - def ListFeatures(self, request, timeout): + def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): """A server-to-client streaming RPC. Obtains the Features available within the given Rectangle. Results are @@ -342,7 +452,7 @@ class BetaRouteGuideStub(object): huge number of features. """ raise NotImplementedError() - def RecordRoute(self, request_iterator, timeout): + def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): """A client-to-server streaming RPC. Accepts a stream of Points on a route being traversed, returning a @@ -350,7 +460,7 @@ class BetaRouteGuideStub(object): """ raise NotImplementedError() RecordRoute.future = None - def RouteChat(self, request_iterator, timeout): + def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): """A Bidirectional streaming RPC. 
Accepts a stream of RouteNotes sent while a route is being traversed, @@ -358,26 +468,19 @@ class BetaRouteGuideStub(object): """ raise NotImplementedError() + def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 request_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): route_guide_pb2.Point.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): route_guide_pb2.Rectangle.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): route_guide_pb2.Point.FromString, - ('routeguide.RouteGuide', 'RouteChat'): route_guide_pb2.RouteNote.FromString, + ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, } response_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): route_guide_pb2.Feature.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): route_guide_pb2.Feature.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): route_guide_pb2.RouteSummary.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): route_guide_pb2.RouteNote.SerializeToString, + ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, } method_implementations = { ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), @@ -388,26 +491,19 @@ def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_t server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) return beta_implementations.server(method_implementations, options=server_options) + def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 - import route_guide_pb2 request_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): route_guide_pb2.Point.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): route_guide_pb2.Rectangle.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): route_guide_pb2.Point.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): route_guide_pb2.RouteNote.SerializeToString, + ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, } response_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): route_guide_pb2.Feature.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): route_guide_pb2.Feature.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): route_guide_pb2.RouteSummary.FromString, - 
('routeguide.RouteGuide', 'RouteChat'): route_guide_pb2.RouteNote.FromString, + ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, } cardinalities = { 'GetFeature': cardinality.Cardinality.UNARY_UNARY, diff --git a/examples/python/route_guide/route_guide_resources.py b/examples/python/route_guide/route_guide_resources.py old mode 100755 new mode 100644 diff --git a/examples/python/route_guide/route_guide_server.py b/examples/python/route_guide/route_guide_server.py index 2d8b33ac178..4e780a70a19 100644 --- a/examples/python/route_guide/route_guide_server.py +++ b/examples/python/route_guide/route_guide_server.py @@ -29,9 +29,12 @@ """The Python implementation of the gRPC route guide server.""" +from concurrent import futures import time import math +import grpc + import route_guide_pb2 import route_guide_resources @@ -121,7 +124,9 @@ class RouteGuideServicer(route_guide_pb2.BetaRouteGuideServicer): def serve(): - server = route_guide_pb2.beta_create_RouteGuide_server(RouteGuideServicer()) + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + route_guide_pb2.add_RouteGuideServicer_to_server( + RouteGuideServicer(), server) server.add_insecure_port('[::]:50051') server.start() try: diff --git a/src/python/grpcio/grpc/framework/crust/__init__.py b/examples/python/route_guide/run_codegen.py similarity index 86% rename from src/python/grpcio/grpc/framework/crust/__init__.py rename to examples/python/route_guide/run_codegen.py index 70865191060..c7c60085809 100644 --- a/src/python/grpcio/grpc/framework/crust/__init__.py +++ b/examples/python/route_guide/run_codegen.py @@ -27,4 +27,16 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +"""Runs protoc with the gRPC plugin to generate messages and gRPC stubs.""" +from grpc.tools import protoc + +protoc.main( + ( + '', + '-I../../protos', + '--python_out=.', + '--grpc_python_out=.', + '../../protos/route_guide.proto', + ) +) diff --git a/examples/python/route_guide/run_codegen.sh b/examples/python/route_guide/run_codegen.sh deleted file mode 100755 index a377a1ab409..00000000000 --- a/examples/python/route_guide/run_codegen.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
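The route_guide_server.py hunk above switches serve() to the GA grpc.server API, but the hunk ends at the try: that follows server.start(). Because a GA server's start() does not block, the conventional completion of that function looks roughly like the sketch below; the loop body and constant names are assumptions for illustration, since the unchanged lines are outside this hunk:

    from concurrent import futures
    import time

    import grpc
    import route_guide_pb2

    def serve(servicer):
        # Assumed sketch of the usual GA serve loop; names are illustrative.
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        route_guide_pb2.add_RouteGuideServicer_to_server(servicer, server)
        server.add_insecure_port('[::]:50051')
        server.start()  # does not block
        try:
            while True:
                time.sleep(60 * 60 * 24)  # keep the process alive
        except KeyboardInterrupt:
            server.stop(0)  # GA servers are stopped explicitly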
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Runs the protoc with gRPC plugin to generate protocol messages and gRPC stubs. -python -m grpc.tools.protoc -I../../protos --python_out=. --grpc_python_out=. ../../protos/route_guide.proto diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index e10e05387b1..10e9d00d707 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -35,7 +35,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-Core' - version = '0.14.0' + version = '1.0.0-pre1' s.version = version s.summary = 'Core cross-platform gRPC library, written in C' s.homepage = 'http://www.grpc.io' @@ -44,7 +44,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}", + :tag => "objective-c-v#{version}", # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules. :submodules => true, } @@ -101,6 +101,8 @@ Pod::Spec.new do |s| 'ALWAYS_SEARCH_USER_PATHS' => 'NO', } + s.default_subspecs = 'Interface', 'Implementation' + # Like many other C libraries, gRPC-Core has its public headers under `include//` and its # sources and private headers in other directories outside `include/`. Cocoapods' linter doesn't # allow any header to be listed outside the `header_mappings_dir` (even though doing so works in @@ -191,7 +193,7 @@ Pod::Spec.new do |s| ss.header_mappings_dir = '.' ss.libraries = 'z' ss.dependency "#{s.name}/Interface", version - ss.dependency 'BoringSSL', '~> 4.0' + ss.dependency 'BoringSSL', '~> 5.0' # To save you from scrolling, this is the last part of the podspec. ss.source_files = 'src/core/lib/profiling/timers.h', @@ -256,6 +258,7 @@ Pod::Spec.new do |s| 'src/core/lib/channel/compress_filter.h', 'src/core/lib/channel/connected_channel.h', 'src/core/lib/channel/context.h', + 'src/core/lib/channel/handshaker.h', 'src/core/lib/channel/http_client_filter.h', 'src/core/lib/channel/http_server_filter.h', 'src/core/lib/compression/algorithm_metadata.h', @@ -411,6 +414,7 @@ Pod::Spec.new do |s| 'src/core/lib/channel/channel_stack_builder.c', 'src/core/lib/channel/compress_filter.c', 'src/core/lib/channel/connected_channel.c', + 'src/core/lib/channel/handshaker.c', 'src/core/lib/channel/http_client_filter.c', 'src/core/lib/channel/http_server_filter.c', 'src/core/lib/compression/compression.c', @@ -609,6 +613,7 @@ Pod::Spec.new do |s| 'src/core/lib/channel/compress_filter.h', 'src/core/lib/channel/connected_channel.h', 'src/core/lib/channel/context.h', + 'src/core/lib/channel/handshaker.h', 'src/core/lib/channel/http_client_filter.h', 'src/core/lib/channel/http_server_filter.h', 'src/core/lib/compression/algorithm_metadata.h', @@ -759,4 +764,26 @@ Pod::Spec.new do |s| 'src/core/ext/census/mlog.h', 'src/core/ext/census/rpc_metric_id.h' end + + s.subspec 'Cronet-Interface' do |ss| + ss.header_mappings_dir = 'include/grpc' + ss.source_files = 'include/grpc/grpc_cronet.h' + end + + s.subspec 'Cronet-Tests' do |ss| + ss.header_mappings_dir = '.' 
+ + ss.source_files = 'src/core/ext/transport/cronet/client/secure/cronet_channel_create.c', + 'src/core/ext/transport/cronet/transport/cronet_transport.c', + 'test/core/end2end/cq_verifier.{c,h}', + 'test/core/end2end/end2end_tests.{c,h}', + 'test/core/end2end/tests/*.{c,h}', + 'test/core/end2end/data/*.{c,h}', + 'test/core/util/test_config.{c,h}', + 'test/core/util/port.h', + 'test/core/util/port_posix.c', + 'test/core/util/port_server_client.{c,h}' + + ss.dependency 'CronetFramework' + end end diff --git a/gRPC-ProtoRPC.podspec b/gRPC-ProtoRPC.podspec index 9cc33c7dbd0..741067b23f4 100644 --- a/gRPC-ProtoRPC.podspec +++ b/gRPC-ProtoRPC.podspec @@ -36,7 +36,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-ProtoRPC' - version = '0.14.0' + version = '1.0.0-pre1' s.version = version s.summary = 'RPC library for Protocol Buffers, based on gRPC' s.homepage = 'http://www.grpc.io' @@ -45,7 +45,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}", + :tag => "objective-c-v#{version}", } s.ios.deployment_target = '7.1' diff --git a/gRPC-RxLibrary.podspec b/gRPC-RxLibrary.podspec index 6263878213e..862ed8974ba 100644 --- a/gRPC-RxLibrary.podspec +++ b/gRPC-RxLibrary.podspec @@ -36,7 +36,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-RxLibrary' - version = '0.14.0' + version = '1.0.0-pre1' s.version = version s.summary = 'Reactive Extensions library for iOS/OSX.' s.homepage = 'http://www.grpc.io' @@ -45,7 +45,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}", + :tag => "objective-c-v#{version}", } s.ios.deployment_target = '7.1' diff --git a/gRPC.podspec b/gRPC.podspec index e5556cc5448..eda965a3280 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -36,7 +36,7 @@ Pod::Spec.new do |s| s.name = 'gRPC' - version = '0.14.0' + version = '1.0.0-pre1' s.version = version s.summary = 'gRPC client library for iOS/OSX' s.homepage = 'http://www.grpc.io' @@ -45,7 +45,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}", + :tag => "objective-c-v#{version}", } s.ios.deployment_target = '7.1' diff --git a/grpc.def b/grpc.def index 0849f84e0bc..c2f83f577fe 100644 --- a/grpc.def +++ b/grpc.def @@ -148,6 +148,7 @@ EXPORTS gpr_slice_ref gpr_slice_unref gpr_slice_new + gpr_slice_new_with_user_data gpr_slice_new_with_len gpr_slice_malloc gpr_slice_from_copied_string diff --git a/grpc.gemspec b/grpc.gemspec index 369851b0f24..6b8beba9078 100755 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -177,6 +177,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/channel/compress_filter.h ) s.files += %w( src/core/lib/channel/connected_channel.h ) s.files += %w( src/core/lib/channel/context.h ) + s.files += %w( src/core/lib/channel/handshaker.h ) s.files += %w( src/core/lib/channel/http_client_filter.h ) s.files += %w( src/core/lib/channel/http_server_filter.h ) s.files += %w( src/core/lib/compression/algorithm_metadata.h ) @@ -332,6 +333,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/channel/channel_stack_builder.c ) s.files += %w( src/core/lib/channel/compress_filter.c ) s.files += %w( src/core/lib/channel/connected_channel.c ) + s.files += %w( src/core/lib/channel/handshaker.c ) s.files += %w( src/core/lib/channel/http_client_filter.c ) s.files += %w( src/core/lib/channel/http_server_filter.c ) s.files += %w( 
src/core/lib/compression/compression.c ) diff --git a/include/grpc++/impl/codegen/async_stream.h b/include/grpc++/impl/codegen/async_stream.h index e96d224ddbe..70533aa4d9f 100644 --- a/include/grpc++/impl/codegen/async_stream.h +++ b/include/grpc++/impl/codegen/async_stream.h @@ -330,6 +330,9 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncReaderInterface { meta_ops_.set_output_tag(tag); meta_ops_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + meta_ops_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; call_.PerformOps(&meta_ops_); } @@ -345,6 +348,9 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncReaderInterface { if (!ctx_->sent_initial_metadata_) { finish_ops_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + finish_ops_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } // The response is dropped if the status is not OK. @@ -363,6 +369,9 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncReaderInterface { if (!ctx_->sent_initial_metadata_) { finish_ops_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + finish_ops_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status); @@ -400,6 +409,9 @@ class ServerAsyncWriter GRPC_FINAL : public ServerAsyncWriterInterface { meta_ops_.set_output_tag(tag); meta_ops_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + meta_ops_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; call_.PerformOps(&meta_ops_); } @@ -409,6 +421,9 @@ class ServerAsyncWriter GRPC_FINAL : public ServerAsyncWriterInterface { if (!ctx_->sent_initial_metadata_) { write_ops_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + write_ops_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } // TODO(ctiller): don't assert @@ -421,6 +436,9 @@ class ServerAsyncWriter GRPC_FINAL : public ServerAsyncWriterInterface { if (!ctx_->sent_initial_metadata_) { finish_ops_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + finish_ops_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status); @@ -459,6 +477,9 @@ class ServerAsyncReaderWriter GRPC_FINAL meta_ops_.set_output_tag(tag); meta_ops_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + meta_ops_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; call_.PerformOps(&meta_ops_); } @@ -474,6 +495,9 @@ class ServerAsyncReaderWriter GRPC_FINAL if (!ctx_->sent_initial_metadata_) { write_ops_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + write_ops_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } // TODO(ctiller): don't assert @@ -486,6 +510,9 @@ class ServerAsyncReaderWriter GRPC_FINAL if (!ctx_->sent_initial_metadata_) { 
finish_ops_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + finish_ops_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status); diff --git a/include/grpc++/impl/codegen/async_unary_call.h b/include/grpc++/impl/codegen/async_unary_call.h index 47ac5bee925..5ceab73cea6 100644 --- a/include/grpc++/impl/codegen/async_unary_call.h +++ b/include/grpc++/impl/codegen/async_unary_call.h @@ -65,7 +65,7 @@ class ClientAsyncResponseReader GRPC_FINAL const W& request) : context_(context), call_(channel->CreateCall(method, context, cq)), - collection_(new CallOpSetCollection) { + collection_(std::make_shared()) { collection_->init_buf_.SetCollection(collection_); collection_->init_buf_.SendInitialMetadata( context->send_initial_metadata_, context->initial_metadata_flags()); @@ -126,6 +126,9 @@ class ServerAsyncResponseWriter GRPC_FINAL meta_buf_.set_output_tag(tag); meta_buf_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + meta_buf_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; call_.PerformOps(&meta_buf_); } @@ -135,6 +138,9 @@ class ServerAsyncResponseWriter GRPC_FINAL if (!ctx_->sent_initial_metadata_) { finish_buf_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + finish_buf_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } // The response is dropped if the status is not OK. @@ -153,6 +159,9 @@ class ServerAsyncResponseWriter GRPC_FINAL if (!ctx_->sent_initial_metadata_) { finish_buf_.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + finish_buf_.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } finish_buf_.ServerSendStatus(ctx_->trailing_metadata_, status); diff --git a/include/grpc++/impl/codegen/call.h b/include/grpc++/impl/codegen/call.h index fab85d15176..dfac177970a 100644 --- a/include/grpc++/impl/codegen/call.h +++ b/include/grpc++/impl/codegen/call.h @@ -180,17 +180,23 @@ class CallNoOp { class CallOpSendInitialMetadata { public: - CallOpSendInitialMetadata() : send_(false) {} + CallOpSendInitialMetadata() : send_(false) { + maybe_compression_level_.is_set = false; + } void SendInitialMetadata( const std::multimap& metadata, uint32_t flags) { + maybe_compression_level_.is_set = false; send_ = true; flags_ = flags; initial_metadata_count_ = metadata.size(); initial_metadata_ = FillMetadataArray(metadata); - // TODO(dgq): expose compression level in API so it can be properly set. - maybe_compression_level_.is_set = false; + } + + void set_compression_level(grpc_compression_level level) { + maybe_compression_level_.is_set = true; + maybe_compression_level_.level = level; } protected: diff --git a/include/grpc++/impl/codegen/client_context.h b/include/grpc++/impl/codegen/client_context.h index a132c9a57aa..012bcc2bbe5 100644 --- a/include/grpc++/impl/codegen/client_context.h +++ b/include/grpc++/impl/codegen/client_context.h @@ -41,7 +41,7 @@ /// /// Context settings are only relevant to the call they are invoked with, that /// is to say, they aren't sticky. 
Some of these settings, such as the -/// compression options, can be made persistant at channel construction time +/// compression options, can be made persistent at channel construction time /// (see \a grpc::CreateCustomChannel). /// /// \warning ClientContext instances should \em not be reused across rpcs. diff --git a/include/grpc++/impl/codegen/config_protobuf.h b/include/grpc++/impl/codegen/config_protobuf.h index 4bee1bc4227..8620d4b2183 100644 --- a/include/grpc++/impl/codegen/config_protobuf.h +++ b/include/grpc++/impl/codegen/config_protobuf.h @@ -40,16 +40,21 @@ #endif #ifndef GRPC_CUSTOM_MESSAGE +#ifdef GRPC_USE_PROTO_LITE +#include +#define GRPC_CUSTOM_MESSAGE ::google::protobuf::MessageLite +#else #include #define GRPC_CUSTOM_MESSAGE ::google::protobuf::Message #endif +#endif #ifndef GRPC_CUSTOM_DESCRIPTOR #include #include #define GRPC_CUSTOM_DESCRIPTOR ::google::protobuf::Descriptor #define GRPC_CUSTOM_DESCRIPTORPOOL ::google::protobuf::DescriptorPool -#define GPRC_CUSTOM_FIELDDESCRIPTOR ::google::protobuf::FieldDescriptor +#define GRPC_CUSTOM_FIELDDESCRIPTOR ::google::protobuf::FieldDescriptor #define GRPC_CUSTOM_FILEDESCRIPTOR ::google::protobuf::FileDescriptor #define GRPC_CUSTOM_FILEDESCRIPTORPROTO ::google::protobuf::FileDescriptorProto #define GRPC_CUSTOM_METHODDESCRIPTOR ::google::protobuf::MethodDescriptor @@ -57,6 +62,13 @@ #define GRPC_CUSTOM_SOURCELOCATION ::google::protobuf::SourceLocation #endif +#ifndef GRPC_CUSTOM_DESCRIPTORDATABASE +#include +#define GRPC_CUSTOM_DESCRIPTORDATABASE ::google::protobuf::DescriptorDatabase +#define GRPC_CUSTOM_SIMPLEDESCRIPTORDATABASE \ + ::google::protobuf::SimpleDescriptorDatabase +#endif + #ifndef GRPC_CUSTOM_ZEROCOPYOUTPUTSTREAM #include #include @@ -75,11 +87,13 @@ typedef GRPC_CUSTOM_PROTOBUF_INT64 int64; typedef GRPC_CUSTOM_DESCRIPTOR Descriptor; typedef GRPC_CUSTOM_DESCRIPTORPOOL DescriptorPool; -typedef GPRC_CUSTOM_FIELDDESCRIPTOR FieldDescriptor; +typedef GRPC_CUSTOM_DESCRIPTORDATABASE DescriptorDatabase; +typedef GRPC_CUSTOM_FIELDDESCRIPTOR FieldDescriptor; typedef GRPC_CUSTOM_FILEDESCRIPTOR FileDescriptor; typedef GRPC_CUSTOM_FILEDESCRIPTORPROTO FileDescriptorProto; typedef GRPC_CUSTOM_METHODDESCRIPTOR MethodDescriptor; typedef GRPC_CUSTOM_SERVICEDESCRIPTOR ServiceDescriptor; +typedef GRPC_CUSTOM_SIMPLEDESCRIPTORDATABASE SimpleDescriptorDatabase; typedef GRPC_CUSTOM_SOURCELOCATION SourceLocation; namespace io { diff --git a/include/grpc++/impl/codegen/impl/async_stream.h b/include/grpc++/impl/codegen/impl/async_stream.h deleted file mode 100644 index 7d7a9568077..00000000000 --- a/include/grpc++/impl/codegen/impl/async_stream.h +++ /dev/null @@ -1,486 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef GRPCXX_IMPL_CODEGEN_IMPL_ASYNC_STREAM_H -#define GRPCXX_IMPL_CODEGEN_IMPL_ASYNC_STREAM_H - -#include -#include -#include -#include -#include -#include - -namespace grpc { - -class CompletionQueue; - -/// Common interface for all client side asynchronous streaming. -class ClientAsyncStreamingInterface { - public: - virtual ~ClientAsyncStreamingInterface() {} - - /// Request notification of the reading of the initial metadata. Completion - /// will be notified by \a tag on the associated completion queue. - /// - /// \param[in] tag Tag identifying this request. - virtual void ReadInitialMetadata(void* tag) = 0; - - /// Request notification completion. - /// - /// \param[out] status To be updated with the operation status. - /// \param[in] tag Tag identifying this request. - virtual void Finish(Status* status, void* tag) = 0; -}; - -/// An interface that yields a sequence of messages of type \a R. -template -class AsyncReaderInterface { - public: - virtual ~AsyncReaderInterface() {} - - /// Read a message of type \a R into \a msg. Completion will be notified by \a - /// tag on the associated completion queue. - /// - /// \param[out] msg Where to eventually store the read message. - /// \param[in] tag The tag identifying the operation. - virtual void Read(R* msg, void* tag) = 0; -}; - -/// An interface that can be fed a sequence of messages of type \a W. -template -class AsyncWriterInterface { - public: - virtual ~AsyncWriterInterface() {} - - /// Request the writing of \a msg with identifying tag \a tag. - /// - /// Only one write may be outstanding at any given time. This means that - /// after calling Write, one must wait to receive \a tag from the completion - /// queue BEFORE calling Write again. - /// - /// \param[in] msg The message to be written. - /// \param[in] tag The tag identifying the operation. - virtual void Write(const W& msg, void* tag) = 0; -}; - -template -class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface, - public AsyncReaderInterface {}; - -template -class ClientAsyncReader GRPC_FINAL : public ClientAsyncReaderInterface { - public: - /// Create a stream and write the first request out. 
- template - ClientAsyncReader(ChannelInterface* channel, CompletionQueue* cq, - const RpcMethod& method, ClientContext* context, - const W& request, void* tag) - : context_(context), call_(channel->CreateCall(method, context, cq)) { - init_ops_.set_output_tag(tag); - init_ops_.SendInitialMetadata(context->send_initial_metadata_); - // TODO(ctiller): don't assert - GPR_CODEGEN_ASSERT(init_ops_.SendMessage(request).ok()); - init_ops_.ClientSendClose(); - call_.PerformOps(&init_ops_); - } - - void ReadInitialMetadata(void* tag) GRPC_OVERRIDE { - GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); - - meta_ops_.set_output_tag(tag); - meta_ops_.RecvInitialMetadata(context_); - call_.PerformOps(&meta_ops_); - } - - void Read(R* msg, void* tag) GRPC_OVERRIDE { - read_ops_.set_output_tag(tag); - if (!context_->initial_metadata_received_) { - read_ops_.RecvInitialMetadata(context_); - } - read_ops_.RecvMessage(msg); - call_.PerformOps(&read_ops_); - } - - void Finish(Status* status, void* tag) GRPC_OVERRIDE { - finish_ops_.set_output_tag(tag); - if (!context_->initial_metadata_received_) { - finish_ops_.RecvInitialMetadata(context_); - } - finish_ops_.ClientRecvStatus(context_, status); - call_.PerformOps(&finish_ops_); - } - - private: - ClientContext* context_; - Call call_; - CallOpSet - init_ops_; - CallOpSet meta_ops_; - CallOpSet> read_ops_; - CallOpSet finish_ops_; -}; - -/// Common interface for client side asynchronous writing. -template -class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface, - public AsyncWriterInterface { - public: - /// Signal the client is done with the writes. - /// - /// \param[in] tag The tag identifying the operation. - virtual void WritesDone(void* tag) = 0; -}; - -template -class ClientAsyncWriter GRPC_FINAL : public ClientAsyncWriterInterface { - public: - template - ClientAsyncWriter(ChannelInterface* channel, CompletionQueue* cq, - const RpcMethod& method, ClientContext* context, - R* response, void* tag) - : context_(context), call_(channel->CreateCall(method, context, cq)) { - finish_ops_.RecvMessage(response); - - init_ops_.set_output_tag(tag); - init_ops_.SendInitialMetadata(context->send_initial_metadata_); - call_.PerformOps(&init_ops_); - } - - void ReadInitialMetadata(void* tag) GRPC_OVERRIDE { - GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); - - meta_ops_.set_output_tag(tag); - meta_ops_.RecvInitialMetadata(context_); - call_.PerformOps(&meta_ops_); - } - - void Write(const W& msg, void* tag) GRPC_OVERRIDE { - write_ops_.set_output_tag(tag); - // TODO(ctiller): don't assert - GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok()); - call_.PerformOps(&write_ops_); - } - - void WritesDone(void* tag) GRPC_OVERRIDE { - writes_done_ops_.set_output_tag(tag); - writes_done_ops_.ClientSendClose(); - call_.PerformOps(&writes_done_ops_); - } - - void Finish(Status* status, void* tag) GRPC_OVERRIDE { - finish_ops_.set_output_tag(tag); - if (!context_->initial_metadata_received_) { - finish_ops_.RecvInitialMetadata(context_); - } - finish_ops_.ClientRecvStatus(context_, status); - call_.PerformOps(&finish_ops_); - } - - private: - ClientContext* context_; - Call call_; - CallOpSet init_ops_; - CallOpSet meta_ops_; - CallOpSet write_ops_; - CallOpSet writes_done_ops_; - CallOpSet - finish_ops_; -}; - -/// Client-side interface for asynchronous bi-directional streaming. 
-template -class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface, - public AsyncWriterInterface, - public AsyncReaderInterface { - public: - /// Signal the client is done with the writes. - /// - /// \param[in] tag The tag identifying the operation. - virtual void WritesDone(void* tag) = 0; -}; - -template -class ClientAsyncReaderWriter GRPC_FINAL - : public ClientAsyncReaderWriterInterface { - public: - ClientAsyncReaderWriter(ChannelInterface* channel, CompletionQueue* cq, - const RpcMethod& method, ClientContext* context, - void* tag) - : context_(context), call_(channel->CreateCall(method, context, cq)) { - init_ops_.set_output_tag(tag); - init_ops_.SendInitialMetadata(context->send_initial_metadata_); - call_.PerformOps(&init_ops_); - } - - void ReadInitialMetadata(void* tag) GRPC_OVERRIDE { - GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_); - - meta_ops_.set_output_tag(tag); - meta_ops_.RecvInitialMetadata(context_); - call_.PerformOps(&meta_ops_); - } - - void Read(R* msg, void* tag) GRPC_OVERRIDE { - read_ops_.set_output_tag(tag); - if (!context_->initial_metadata_received_) { - read_ops_.RecvInitialMetadata(context_); - } - read_ops_.RecvMessage(msg); - call_.PerformOps(&read_ops_); - } - - void Write(const W& msg, void* tag) GRPC_OVERRIDE { - write_ops_.set_output_tag(tag); - // TODO(ctiller): don't assert - GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok()); - call_.PerformOps(&write_ops_); - } - - void WritesDone(void* tag) GRPC_OVERRIDE { - writes_done_ops_.set_output_tag(tag); - writes_done_ops_.ClientSendClose(); - call_.PerformOps(&writes_done_ops_); - } - - void Finish(Status* status, void* tag) GRPC_OVERRIDE { - finish_ops_.set_output_tag(tag); - if (!context_->initial_metadata_received_) { - finish_ops_.RecvInitialMetadata(context_); - } - finish_ops_.ClientRecvStatus(context_, status); - call_.PerformOps(&finish_ops_); - } - - private: - ClientContext* context_; - Call call_; - CallOpSet init_ops_; - CallOpSet meta_ops_; - CallOpSet> read_ops_; - CallOpSet write_ops_; - CallOpSet writes_done_ops_; - CallOpSet finish_ops_; -}; - -template -class ServerAsyncReaderInterface : public ServerAsyncStreamingInterface, - public AsyncReaderInterface { - public: - virtual void Finish(const W& msg, const Status& status, void* tag) = 0; - - virtual void FinishWithError(const Status& status, void* tag) = 0; -}; - -template -class ServerAsyncReader GRPC_FINAL : public ServerAsyncReaderInterface { - public: - explicit ServerAsyncReader(ServerContext* ctx) - : call_(nullptr, nullptr, nullptr), ctx_(ctx) {} - - void SendInitialMetadata(void* tag) GRPC_OVERRIDE { - GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); - - meta_ops_.set_output_tag(tag); - meta_ops_.SendInitialMetadata(ctx_->initial_metadata_); - ctx_->sent_initial_metadata_ = true; - call_.PerformOps(&meta_ops_); - } - - void Read(R* msg, void* tag) GRPC_OVERRIDE { - read_ops_.set_output_tag(tag); - read_ops_.RecvMessage(msg); - call_.PerformOps(&read_ops_); - } - - void Finish(const W& msg, const Status& status, void* tag) GRPC_OVERRIDE { - finish_ops_.set_output_tag(tag); - if (!ctx_->sent_initial_metadata_) { - finish_ops_.SendInitialMetadata(ctx_->initial_metadata_); - ctx_->sent_initial_metadata_ = true; - } - // The response is dropped if the status is not OK. 
- if (status.ok()) { - finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, - finish_ops_.SendMessage(msg)); - } else { - finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status); - } - call_.PerformOps(&finish_ops_); - } - - void FinishWithError(const Status& status, void* tag) GRPC_OVERRIDE { - GPR_CODEGEN_ASSERT(!status.ok()); - finish_ops_.set_output_tag(tag); - if (!ctx_->sent_initial_metadata_) { - finish_ops_.SendInitialMetadata(ctx_->initial_metadata_); - ctx_->sent_initial_metadata_ = true; - } - finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status); - call_.PerformOps(&finish_ops_); - } - - private: - void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; } - - Call call_; - ServerContext* ctx_; - CallOpSet meta_ops_; - CallOpSet> read_ops_; - CallOpSet - finish_ops_; -}; - -template -class ServerAsyncWriterInterface : public ServerAsyncStreamingInterface, - public AsyncWriterInterface { - public: - virtual void Finish(const Status& status, void* tag) = 0; -}; - -template -class ServerAsyncWriter GRPC_FINAL : public ServerAsyncWriterInterface { - public: - explicit ServerAsyncWriter(ServerContext* ctx) - : call_(nullptr, nullptr, nullptr), ctx_(ctx) {} - - void SendInitialMetadata(void* tag) GRPC_OVERRIDE { - GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); - - meta_ops_.set_output_tag(tag); - meta_ops_.SendInitialMetadata(ctx_->initial_metadata_); - ctx_->sent_initial_metadata_ = true; - call_.PerformOps(&meta_ops_); - } - - void Write(const W& msg, void* tag) GRPC_OVERRIDE { - write_ops_.set_output_tag(tag); - if (!ctx_->sent_initial_metadata_) { - write_ops_.SendInitialMetadata(ctx_->initial_metadata_); - ctx_->sent_initial_metadata_ = true; - } - // TODO(ctiller): don't assert - GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok()); - call_.PerformOps(&write_ops_); - } - - void Finish(const Status& status, void* tag) GRPC_OVERRIDE { - finish_ops_.set_output_tag(tag); - if (!ctx_->sent_initial_metadata_) { - finish_ops_.SendInitialMetadata(ctx_->initial_metadata_); - ctx_->sent_initial_metadata_ = true; - } - finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status); - call_.PerformOps(&finish_ops_); - } - - private: - void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; } - - Call call_; - ServerContext* ctx_; - CallOpSet meta_ops_; - CallOpSet write_ops_; - CallOpSet finish_ops_; -}; - -/// Server-side interface for asynchronous bi-directional streaming. 
-template -class ServerAsyncReaderWriterInterface : public ServerAsyncStreamingInterface, - public AsyncWriterInterface, - public AsyncReaderInterface { - public: - virtual void Finish(const Status& status, void* tag) = 0; -}; - -template -class ServerAsyncReaderWriter GRPC_FINAL - : public ServerAsyncReaderWriterInterface { - public: - explicit ServerAsyncReaderWriter(ServerContext* ctx) - : call_(nullptr, nullptr, nullptr), ctx_(ctx) {} - - void SendInitialMetadata(void* tag) GRPC_OVERRIDE { - GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_); - - meta_ops_.set_output_tag(tag); - meta_ops_.SendInitialMetadata(ctx_->initial_metadata_); - ctx_->sent_initial_metadata_ = true; - call_.PerformOps(&meta_ops_); - } - - void Read(R* msg, void* tag) GRPC_OVERRIDE { - read_ops_.set_output_tag(tag); - read_ops_.RecvMessage(msg); - call_.PerformOps(&read_ops_); - } - - void Write(const W& msg, void* tag) GRPC_OVERRIDE { - write_ops_.set_output_tag(tag); - if (!ctx_->sent_initial_metadata_) { - write_ops_.SendInitialMetadata(ctx_->initial_metadata_); - ctx_->sent_initial_metadata_ = true; - } - // TODO(ctiller): don't assert - GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok()); - call_.PerformOps(&write_ops_); - } - - void Finish(const Status& status, void* tag) GRPC_OVERRIDE { - finish_ops_.set_output_tag(tag); - if (!ctx_->sent_initial_metadata_) { - finish_ops_.SendInitialMetadata(ctx_->initial_metadata_); - ctx_->sent_initial_metadata_ = true; - } - finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status); - call_.PerformOps(&finish_ops_); - } - - private: - friend class ::grpc::Server; - - void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; } - - Call call_; - ServerContext* ctx_; - CallOpSet meta_ops_; - CallOpSet> read_ops_; - CallOpSet write_ops_; - CallOpSet finish_ops_; -}; - -} // namespace grpc - -#endif // GRPCXX_IMPL_CODEGEN_IMPL_ASYNC_STREAM_H diff --git a/include/grpc++/impl/codegen/method_handler_impl.h b/include/grpc++/impl/codegen/method_handler_impl.h index 21ac6c4fb55..2f4be644bae 100644 --- a/include/grpc++/impl/codegen/method_handler_impl.h +++ b/include/grpc++/impl/codegen/method_handler_impl.h @@ -65,6 +65,9 @@ class RpcMethodHandler : public MethodHandler { ops; ops.SendInitialMetadata(param.server_context->initial_metadata_, param.server_context->initial_metadata_flags()); + if (param.server_context->compression_level_set()) { + ops.set_compression_level(param.server_context->compression_level()); + } if (status.ok()) { status = ops.SendMessage(rsp); } @@ -104,6 +107,9 @@ class ClientStreamingHandler : public MethodHandler { ops; ops.SendInitialMetadata(param.server_context->initial_metadata_, param.server_context->initial_metadata_flags()); + if (param.server_context->compression_level_set()) { + ops.set_compression_level(param.server_context->compression_level()); + } if (status.ok()) { status = ops.SendMessage(rsp); } @@ -144,6 +150,9 @@ class ServerStreamingHandler : public MethodHandler { if (!param.server_context->sent_initial_metadata_) { ops.SendInitialMetadata(param.server_context->initial_metadata_, param.server_context->initial_metadata_flags()); + if (param.server_context->compression_level_set()) { + ops.set_compression_level(param.server_context->compression_level()); + } } ops.ServerSendStatus(param.server_context->trailing_metadata_, status); param.call->PerformOps(&ops); @@ -177,6 +186,9 @@ class BidiStreamingHandler : public MethodHandler { if (!param.server_context->sent_initial_metadata_) { 
ops.SendInitialMetadata(param.server_context->initial_metadata_, param.server_context->initial_metadata_flags()); + if (param.server_context->compression_level_set()) { + ops.set_compression_level(param.server_context->compression_level()); + } } ops.ServerSendStatus(param.server_context->trailing_metadata_, status); param.call->PerformOps(&ops); @@ -199,6 +211,9 @@ class UnknownMethodHandler : public MethodHandler { if (!context->sent_initial_metadata_) { ops->SendInitialMetadata(context->initial_metadata_, context->initial_metadata_flags()); + if (context->compression_level_set()) { + ops->set_compression_level(context->compression_level()); + } context->sent_initial_metadata_ = true; } ops->ServerSendStatus(context->trailing_metadata_, status); diff --git a/include/grpc++/impl/codegen/server_context.h b/include/grpc++/impl/codegen/server_context.h index cea13a513f6..08212af861b 100644 --- a/include/grpc++/impl/codegen/server_context.h +++ b/include/grpc++/impl/codegen/server_context.h @@ -130,7 +130,13 @@ class ServerContext { grpc_compression_level compression_level() const { return compression_level_; } - void set_compression_level(grpc_compression_level level); + + void set_compression_level(grpc_compression_level level) { + compression_level_set_ = true; + compression_level_ = level; + } + + bool compression_level_set() const { return compression_level_set_; } grpc_compression_algorithm compression_algorithm() const { return compression_algorithm_; @@ -217,6 +223,7 @@ class ServerContext { std::multimap initial_metadata_; std::multimap trailing_metadata_; + bool compression_level_set_; grpc_compression_level compression_level_; grpc_compression_algorithm compression_algorithm_; }; diff --git a/include/grpc++/impl/codegen/sync_stream.h b/include/grpc++/impl/codegen/sync_stream.h index cbfa4106995..b2b972760db 100644 --- a/include/grpc++/impl/codegen/sync_stream.h +++ b/include/grpc++/impl/codegen/sync_stream.h @@ -347,6 +347,9 @@ class ServerReader GRPC_FINAL : public ReaderInterface { CallOpSet ops; ops.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + ops.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; call_->PerformOps(&ops); call_->cq()->Pluck(&ops); @@ -375,6 +378,9 @@ class ServerWriter GRPC_FINAL : public WriterInterface { CallOpSet ops; ops.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + ops.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; call_->PerformOps(&ops); call_->cq()->Pluck(&ops); @@ -389,6 +395,9 @@ class ServerWriter GRPC_FINAL : public WriterInterface { if (!ctx_->sent_initial_metadata_) { ops.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + ops.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } call_->PerformOps(&ops); @@ -413,6 +422,9 @@ class ServerReaderWriter GRPC_FINAL : public WriterInterface, CallOpSet ops; ops.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + ops.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; call_->PerformOps(&ops); call_->cq()->Pluck(&ops); @@ -434,6 +446,9 @@ class ServerReaderWriter GRPC_FINAL : public WriterInterface, if (!ctx_->sent_initial_metadata_) { 
ops.SendInitialMetadata(ctx_->initial_metadata_, ctx_->initial_metadata_flags()); + if (ctx_->compression_level_set()) { + ops.set_compression_level(ctx_->compression_level()); + } ctx_->sent_initial_metadata_ = true; } call_->PerformOps(&ops); diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 7a8858ef194..6876961e210 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -179,10 +179,13 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { grpc::mutex mu_; bool started_; bool shutdown_; + bool shutdown_notified_; // The number of threads which are running callbacks. int num_running_cb_; grpc::condition_variable callback_cv_; + grpc::condition_variable shutdown_cv_; + std::shared_ptr global_callbacks_; std::list* sync_methods_; diff --git a/include/grpc++/support/slice.h b/include/grpc++/support/slice.h index cec9062d4fd..5874b4f5ae3 100644 --- a/include/grpc++/support/slice.h +++ b/include/grpc++/support/slice.h @@ -77,6 +77,9 @@ class Slice GRPC_FINAL { /// Raw pointer to the end (one byte \em past the last element) of the slice. const uint8_t* end() const { return GPR_SLICE_END_PTR(slice_); } + /// Raw C slice. Caller needs to call gpr_slice_unref when done. + gpr_slice c_slice() const { return gpr_slice_ref(slice_); } + private: friend class ByteBuffer; diff --git a/include/grpc/impl/codegen/compression_types.h b/include/grpc/impl/codegen/compression_types.h index 9065d1edd02..3034182d4c5 100644 --- a/include/grpc/impl/codegen/compression_types.h +++ b/include/grpc/impl/codegen/compression_types.h @@ -46,12 +46,27 @@ extern "C" { #define GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY \ "grpc-internal-encoding-request" -/** To be used in channel arguments */ +/** To be used in channel arguments. + * + * \addtogroup grpc_arg_keys + * \{ */ +/** Default compression algorithm for the channel. + * Its value is an int from the \a grpc_compression_algorithm enum. */ #define GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM \ "grpc.default_compression_algorithm" +/** Default compression level for the channel. + * Its value is an int from the \a grpc_compression_level enum. */ #define GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL "grpc.default_compression_level" +/** Compression algorithms supported by the channel. + * Its value is a bitset (an int). Bits correspond to algorithms in \a + * grpc_compression_algorithm. For example, its LSB corresponds to + * GRPC_COMPRESS_NONE, the next bit to GRPC_COMPRESS_DEFLATE, etc. + * Unset bits disable support for the algorithm. By default all algorithms are + * supported. It's not possible to disable GRPC_COMPRESS_NONE (the attempt will + * be ignored). */ #define GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET \ "grpc.compression_enabled_algorithms_bitset" +/** \} */ /* The various compression algorithms supported by gRPC */ typedef enum { diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h index c0ed1395066..e5a82883be5 100644 --- a/include/grpc/impl/codegen/grpc_types.h +++ b/include/grpc/impl/codegen/grpc_types.h @@ -106,58 +106,66 @@ typedef struct { by grpc_arg; keys are strings to allow easy backwards-compatible extension by arbitrary parties. All evaluation is performed at channel creation time (i.e. the values in - this structure need only live through the creation invocation). */ + this structure need only live through the creation invocation). + + See the description of the \ref grpc_arg_keys "available args" for more + details. 
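Taken together, the ServerContext and handler changes above let a service implementation pick a per-call compression level before initial metadata goes out. A minimal sketch, assuming the helloworld example types (Greeter, HelloRequest, HelloReply) purely for illustration:

    #include <grpc++/grpc++.h>

    // Sketch only: sets the new per-call compression level from a sync handler.
    // Greeter/HelloRequest/HelloReply stand in for generated message/service types.
    class GreeterServiceImpl final : public Greeter::Service {
      grpc::Status SayHello(grpc::ServerContext* context,
                            const HelloRequest* request,
                            HelloReply* reply) override {
        // Must run before initial metadata is sent; the handlers patched above
        // copy the level into the send-initial-metadata op when
        // compression_level_set() is true.
        context->set_compression_level(GRPC_COMPRESS_LEVEL_HIGH);
        reply->set_message("Hello " + request->name());
        return grpc::Status::OK;
      }
    };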
*/ typedef struct { size_t num_args; grpc_arg *args; } grpc_channel_args; -/* Channel argument keys: */ -/** Enable census for tracing and stats collection */ +/** \defgroup grpc_arg_keys + * Channel argument keys. + * \{ + */ +/** If non-zero, enable census for tracing and stats collection. */ #define GRPC_ARG_ENABLE_CENSUS "grpc.census" -/** Enable load reporting */ +/** If non-zero, enable load reporting. */ #define GRPC_ARG_ENABLE_LOAD_REPORTING "grpc.loadreporting" /** Maximum number of concurrent incoming streams to allow on a http2 - connection */ + connection. Int valued. */ #define GRPC_ARG_MAX_CONCURRENT_STREAMS "grpc.max_concurrent_streams" -/** Maximum message length that the channel can receive */ +/** Maximum message length that the channel can receive. Int valued, bytes. */ #define GRPC_ARG_MAX_MESSAGE_LENGTH "grpc.max_message_length" -/** Initial sequence number for http2 transports */ +/** Initial sequence number for http2 transports. Int valued. */ #define GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER \ "grpc.http2.initial_sequence_number" /** Amount to read ahead on individual streams. Defaults to 64kb, larger values can help throughput on high-latency connections. NOTE: at some point we'd like to auto-tune this, and this parameter - will become a no-op. */ + will become a no-op. Int valued, bytes. */ #define GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES "grpc.http2.lookahead_bytes" -/** How much memory to use for hpack decoding */ +/** How much memory to use for hpack decoding. Int valued, bytes. */ #define GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER \ "grpc.http2.hpack_table_size.decoder" -/** How much memory to use for hpack encoding */ +/** How much memory to use for hpack encoding. Int valued, bytes. */ #define GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER \ "grpc.http2.hpack_table_size.encoder" -/** Default authority to pass if none specified on call construction */ +/** Default authority to pass if none specified on call construction. A string. + * */ #define GRPC_ARG_DEFAULT_AUTHORITY "grpc.default_authority" /** Primary user agent: goes at the start of the user-agent metadata - sent on each request */ + sent on each request. A string. */ #define GRPC_ARG_PRIMARY_USER_AGENT_STRING "grpc.primary_user_agent" /** Secondary user agent: goes at the end of the user-agent metadata - sent on each request */ + sent on each request. A string. */ #define GRPC_ARG_SECONDARY_USER_AGENT_STRING "grpc.secondary_user_agent" /** The maximum time between subsequent connection attempts, in ms */ #define GRPC_ARG_MAX_RECONNECT_BACKOFF_MS "grpc.max_reconnect_backoff_ms" /* The caller of the secure_channel_create functions may override the target name used for SSL host name checking using this channel argument which is of - type GRPC_ARG_STRING. This *should* be used for testing only. + type \a GRPC_ARG_STRING. This *should* be used for testing only. If this argument is not specified, the name used for SSL host name checking will be the target parameter (assuming that the secure channel is an SSL channel). If this parameter is specified and the underlying is not an SSL channel, it will just be ignored. */ #define GRPC_SSL_TARGET_NAME_OVERRIDE_ARG "grpc.ssl_target_name_override" -/* Maximum metadata size */ +/* Maximum metadata size, in bytes. */ #define GRPC_ARG_MAX_METADATA_SIZE "grpc.max_metadata_size" /** If non-zero, allow the use of SO_REUSEPORT if it's available (default 1) */ #define GRPC_ARG_ALLOW_REUSEPORT "grpc.so_reuseport" +/** \} */ /** Result of a grpc call. 
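The keys documented above are ordinary channel arguments; from C++ they can be supplied through grpc::ChannelArguments when the channel is created. A hedged sketch, with placeholder target and values:

    #include <memory>
    #include <grpc++/create_channel.h>
    #include <grpc++/security/credentials.h>
    #include <grpc++/support/channel_arguments.h>
    #include <grpc/grpc.h>

    // Sketch only: passes two of the arguments documented above.
    std::shared_ptr<grpc::Channel> MakeChannel() {
      grpc::ChannelArguments args;
      args.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, 16 * 1024 * 1024);  // int valued, bytes
      args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, GRPC_COMPRESS_GZIP);
      return grpc::CreateCustomChannel("localhost:50051",
                                       grpc::InsecureChannelCredentials(), args);
    }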
If the caller satisfies the prerequisites of a particular operation, the grpc_call_error returned will be GRPC_CALL_OK. diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h index 3ad665a7a29..7c67bad5ae1 100644 --- a/include/grpc/impl/codegen/port_platform.h +++ b/include/grpc/impl/codegen/port_platform.h @@ -119,7 +119,7 @@ // libraries; it should be integrated with the `__linux__` definitions below. #define GPR_PLATFORM_STRING "manylinux" #define GPR_POSIX_CRASH_HANDLER 1 -#define GPR_CPU_LINUX 1 +#define GPR_CPU_POSIX 1 #define GPR_GCC_ATOMIC 1 #define GPR_GCC_TLS 1 #define GPR_LINUX 1 diff --git a/include/grpc/impl/codegen/slice.h b/include/grpc/impl/codegen/slice.h index c684b7587d6..dfc0a774fcc 100644 --- a/include/grpc/impl/codegen/slice.h +++ b/include/grpc/impl/codegen/slice.h @@ -120,6 +120,14 @@ GPRAPI void gpr_slice_unref(gpr_slice s); passed in at destruction. */ GPRAPI gpr_slice gpr_slice_new(void *p, size_t len, void (*destroy)(void *)); +/* Equivalent to gpr_slice_new, but with a separate pointer that is + passed to the destroy function. This function can be useful when + the data is part of a larger structure that must be destroyed when + the data is no longer needed. */ +GPRAPI gpr_slice gpr_slice_new_with_user_data(void *p, size_t len, + void (*destroy)(void *), + void *user_data); + /* Equivalent to gpr_slice_new, but with a two argument destroy function that also takes the slice length. */ GPRAPI gpr_slice gpr_slice_new_with_len(void *p, size_t len, diff --git a/include/grpc/module.modulemap b/include/grpc/module.modulemap index ae11a78b74a..02fb2f39b20 100644 --- a/include/grpc/module.modulemap +++ b/include/grpc/module.modulemap @@ -1,5 +1,15 @@ framework module grpc { umbrella header "grpc.h" + + header "byte_buffer_reader.h" + header "grpc_security.h" + header "grpc_security_constants.h" + header "impl/codegen/alloc.h" + header "impl/codegen/byte_buffer_reader.h" + header "support/alloc.h" + header "support/port_platform.h" + header "support/string_util.h" + export * module * { export * } } diff --git a/package.json b/package.json index 1fec9cb40f8..0e229c9842b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "grpc", - "version": "0.16.0-dev", + "version": "1.1.0-dev", "author": "Google Inc.", "description": "gRPC Library for Node", "homepage": "http://www.grpc.io/", diff --git a/package.xml b/package.xml index 49a6d900f12..f395334745c 100644 --- a/package.xml +++ b/package.xml @@ -10,19 +10,19 @@ grpc-packages@google.com yes - 2016-06-30 + 2016-07-28 - 0.16.0 - 0.16.0 + 1.1.0 + 1.1.0 - beta - beta + stable + stable BSD -- Fix shutdown hang problem #4017 +- PHP7 Support continued, reduce code duplication #7543 @@ -46,6 +46,7 @@ + @@ -184,6 +185,7 @@ + @@ -339,6 +341,7 @@ + @@ -1086,18 +1089,49 @@ Update to wrap gRPC C Core version 0.10.0 - 0.15.1 - 0.15.1 + 1.0.0RC1 + 1.0.0RC1 - beta - beta + stable + stable - 2016-06-30 + 2016-07-13 BSD +- GA release - Fix shutdown hang problem #4017 + + + 1.0.0RC2 + 1.0.0RC2 + + + stable + stable + + 2016-07-21 + BSD + +- PHP7 Support #7464 + + + + + 1.0.0RC3 + 1.0.0RC3 + + + stable + stable + + 2016-07-28 + BSD + +- PHP7 Support continued, reduce code duplication #7543 + + diff --git a/setup.cfg b/setup.cfg index 7194716f61b..dd9161ca8b9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -9,5 +9,5 @@ build_base=python_build [build_ext] inplace=1 -[build_proto_modules] +[build_package_protos] exclude=.*protoc_plugin/protoc_plugin_test\.proto$ diff --git a/setup.py 
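The new gpr_slice_new_with_user_data above is meant for data embedded in a larger structure: the slice points at the payload, while the destroy callback receives the enclosing block. A hedged sketch against the declaration added in that hunk (my_record and its fields are invented for illustration):

    #include <string.h>
    #include <grpc/support/alloc.h>
    #include <grpc/support/slice.h>

    typedef struct { char header[16]; char payload[64]; } my_record;

    static void destroy_record(void *user_data) { gpr_free(user_data); }

    // Sketch only: the slice refers to rec->payload, but destruction frees the
    // whole record passed as user_data.
    gpr_slice record_payload_slice(void) {
      my_record *rec = (my_record *)gpr_malloc(sizeof(my_record));
      memcpy(rec->payload, "hello", 5);
      return gpr_slice_new_with_user_data(rec->payload, 5, destroy_record, rec);
    }

On the C++ side, the companion Slice::c_slice() accessor added above returns a new reference, so the caller is responsible for releasing it with gpr_slice_unref once done.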
b/setup.py index 700515b894d..cad300c144a 100644 --- a/setup.py +++ b/setup.py @@ -29,17 +29,18 @@ """A setup module for the GRPC Python package.""" +from distutils import extension as _extension +from distutils import util import os import os.path +import pkg_resources import platform +import re import shlex import shutil import sys import sysconfig -from distutils import core as _core -from distutils import extension as _extension -import pkg_resources import setuptools from setuptools.command import egg_info @@ -47,10 +48,10 @@ from setuptools.command import egg_info egg_info.manifest_maker.template = 'PYTHON-MANIFEST.in' PY3 = sys.version_info.major == 3 -PYTHON_STEM = './src/python/grpcio' -CORE_INCLUDE = ('./include', '.',) -BORINGSSL_INCLUDE = ('./third_party/boringssl/include',) -ZLIB_INCLUDE = ('./third_party/zlib',) +PYTHON_STEM = os.path.join('src', 'python', 'grpcio') +CORE_INCLUDE = ('include', '.',) +BORINGSSL_INCLUDE = (os.path.join('third_party', 'boringssl', 'include'),) +ZLIB_INCLUDE = (os.path.join('third_party', 'zlib'),) # Ensure we're in the proper directory whether or not we're being used by pip. os.chdir(os.path.dirname(os.path.abspath(__file__))) @@ -62,15 +63,17 @@ import commands import grpc_core_dependencies import grpc_version -# TODO(atash) make this conditional on being on a mingw32 build -_unixccompiler_patch.monkeypatch_unix_compiler() +if 'win32' in sys.platform: + _unixccompiler_patch.monkeypatch_unix_compiler() LICENSE = '3-clause BSD' # Environment variable to determine whether or not the Cython extension should # *use* Cython or use the generated C files. Note that this requires the C files -# to have been generated by building first *with* Cython support. +# to have been generated by building first *with* Cython support. Even if this +# is set to false, if the script detects that the generated `.c` file isn't +# present, then it will still attempt to use Cython. BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False) # Environment variable to determine whether or not to enable coverage analysis @@ -82,18 +85,46 @@ ENABLE_CYTHON_TRACING = os.environ.get( # entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support. # We use these environment variables to thus get around that without locking # ourselves in w.r.t. the multitude of operating systems this ought to build on. -# By default we assume a GCC-like compiler. -EXTRA_COMPILE_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_CFLAGS', '')) -EXTRA_LINK_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS', '')) +# We can also use these variables as a way to inject environment-specific +# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a +# reasonable default. +EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None) +EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None) +if EXTRA_ENV_COMPILE_ARGS is None: + EXTRA_ENV_COMPILE_ARGS = '-fno-wrapv' + if 'win32' in sys.platform: + # We use define flags here and don't directly add to DEFINE_MACROS below to + # ensure that the expert user/builder has a way of turning it off (via the + # envvars) without adding yet more GRPC-specific envvars. 
+ # See https://sourceforge.net/p/mingw-w64/bugs/363/ + if '32' in platform.architecture()[0]: + EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s' + else: + EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64' + elif "linux" in sys.platform or "darwin" in sys.platform: + EXTRA_ENV_COMPILE_ARGS += ' -fvisibility=hidden' +if EXTRA_ENV_LINK_ARGS is None: + EXTRA_ENV_LINK_ARGS = '-lpthread' + if 'win32' in sys.platform: + # TODO(atash) check if this is actually safe to just import and call on + # non-Windows (to avoid breaking import style) + from distutils.cygwinccompiler import get_msvcr + msvcr = get_msvcr()[0] + # TODO(atash) sift through the GCC specs to see if libstdc++ can have any + # influence on the linkage outcome on MinGW for non-C++ programs. + EXTRA_ENV_LINK_ARGS += ( + ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} ' + '-static'.format(msvcr=msvcr)) + elif "linux" in sys.platform: + EXTRA_ENV_LINK_ARGS += ' -Wl,-wrap,memcpy' +EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS) +EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS) CYTHON_EXTENSION_PACKAGE_NAMES = () CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',) -CYTHON_HELPER_C_FILES = ( - os.path.join(PYTHON_STEM, 'grpc/_cython/loader.c'), - os.path.join(PYTHON_STEM, 'grpc/_cython/imports.generated.c'), -) +CYTHON_HELPER_C_FILES = () CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES) @@ -108,7 +139,9 @@ if not "win32" in sys.platform: if "win32" in sys.platform: EXTENSION_LIBRARIES += ('ws2_32',) -DEFINE_MACROS = (('OPENSSL_NO_ASM', 1), ('_WIN32_WINNT', 0x600), ('GPR_BACKWARDS_COMPATIBILITY_MODE', 1),) +DEFINE_MACROS = ( + ('OPENSSL_NO_ASM', 1), ('_WIN32_WINNT', 0x600), + ('GPR_BACKWARDS_COMPATIBILITY_MODE', 1),) if "win32" in sys.platform: DEFINE_MACROS += (('OPENSSL_WINDOWS', 1), ('WIN32_LEAN_AND_MEAN', 1),) if '64bit' in platform.architecture()[0]: @@ -116,13 +149,8 @@ if "win32" in sys.platform: LDFLAGS = tuple(EXTRA_LINK_ARGS) CFLAGS = tuple(EXTRA_COMPILE_ARGS) -if "linux" in sys.platform: - LDFLAGS += ('-Wl,-wrap,memcpy',) if "linux" in sys.platform or "darwin" in sys.platform: - CFLAGS += ('-fvisibility=hidden',) - pymodinit_type = 'PyObject*' if PY3 else 'void' - pymodinit = '__attribute__((visibility ("default"))) {}'.format(pymodinit_type) DEFINE_MACROS += (('PyMODINIT_FUNC', pymodinit),) @@ -135,45 +163,32 @@ if 'darwin' in sys.platform and PY3: if mac_target and (pkg_resources.parse_version(mac_target) < pkg_resources.parse_version('10.7.0')): os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.7' - - -def cython_extensions(module_names, extra_sources, include_dirs, - libraries, define_macros, build_with_cython=False): - # Set compiler directives linetrace argument only if we care about tracing; - # this is due to Cython having different behavior between linetrace being - # False and linetrace being unset. See issue #5689. - cython_compiler_directives = {} - if ENABLE_CYTHON_TRACING: - define_macros = define_macros + [('CYTHON_TRACE_NOGIL', 1)] - cython_compiler_directives['linetrace'] = True - file_extension = 'pyx' if build_with_cython else 'c' - module_files = [os.path.join(PYTHON_STEM, - name.replace('.', '/') + '.' 
+ file_extension) - for name in module_names] + os.environ['_PYTHON_HOST_PLATFORM'] = re.sub( + r'macosx-[0-9]+\.[0-9]+-(.+)', + r'macosx-10.7-\1', + util.get_platform()) + +def cython_extensions_and_necessity(): + cython_module_files = [os.path.join(PYTHON_STEM, + name.replace('.', '/') + '.pyx') + for name in CYTHON_EXTENSION_MODULE_NAMES] extensions = [ _extension.Extension( name=module_name, - sources=[module_file] + extra_sources, - include_dirs=include_dirs, libraries=libraries, - define_macros=define_macros, + sources=[module_file] + list(CYTHON_HELPER_C_FILES) + list(CORE_C_FILES), + include_dirs=list(EXTENSION_INCLUDE_DIRECTORIES), + libraries=list(EXTENSION_LIBRARIES), + define_macros=list(DEFINE_MACROS), extra_compile_args=list(CFLAGS), extra_link_args=list(LDFLAGS), - ) for (module_name, module_file) in zip(module_names, module_files) + ) for (module_name, module_file) in zip(list(CYTHON_EXTENSION_MODULE_NAMES), cython_module_files) ] - if build_with_cython: - import Cython.Build - return Cython.Build.cythonize( - extensions, - include_path=include_dirs, - compiler_directives=cython_compiler_directives) - else: - return extensions - -CYTHON_EXTENSION_MODULES = cython_extensions( - list(CYTHON_EXTENSION_MODULE_NAMES), - list(CYTHON_HELPER_C_FILES) + list(CORE_C_FILES), - list(EXTENSION_INCLUDE_DIRECTORIES), list(EXTENSION_LIBRARIES), - list(DEFINE_MACROS), bool(BUILD_WITH_CYTHON)) + need_cython = BUILD_WITH_CYTHON + if not BUILD_WITH_CYTHON: + need_cython = need_cython or not commands.check_and_update_cythonization(extensions) + return commands.try_cythonize(extensions, linetracing=ENABLE_CYTHON_TRACING, mandatory=BUILD_WITH_CYTHON), need_cython + +CYTHON_EXTENSION_MODULES, need_cython = cython_extensions_and_necessity() PACKAGE_DIRECTORIES = { '': PYTHON_STEM, @@ -193,6 +208,15 @@ SETUP_REQUIRES = INSTALL_REQUIRES + ( 'sphinx_rtd_theme>=0.1.8', 'six>=1.10', ) +if BUILD_WITH_CYTHON: + sys.stderr.write( + "You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, " + "but do not have Cython installed. We won't stop you from using " + "other commands, but the extension files will fail to build.\n") +elif need_cython: + sys.stderr.write( + 'We could not find Cython. 
Setup may take 10-20 minutes.\n') + SETUP_REQUIRES += ('cython>=0.23',) COMMAND_CLASS = { 'doc': commands.SphinxDocumentation, @@ -203,12 +227,13 @@ COMMAND_CLASS = { } # Ensure that package data is copied over before any commands have been run: -credentials_dir = os.path.join(PYTHON_STEM, 'grpc/_cython/_credentials') +credentials_dir = os.path.join(PYTHON_STEM, 'grpc', '_cython', '_credentials') try: os.mkdir(credentials_dir) except OSError: pass -shutil.copyfile('etc/roots.pem', os.path.join(credentials_dir, 'roots.pem')) +shutil.copyfile(os.path.join('etc', 'roots.pem'), + os.path.join(credentials_dir, 'roots.pem')) PACKAGE_DATA = { # Binaries that may or may not be present in the final installation, but are diff --git a/src/compiler/config.h b/src/compiler/config.h index 1cbd842f0af..ba44cd8a31c 100644 --- a/src/compiler/config.h +++ b/src/compiler/config.h @@ -60,7 +60,8 @@ #ifndef GRPC_CUSTOM_PARSEGENERATORPARAMETER #include -#define GRPC_CUSTOM_PARSEGENERATORPARAMETER ::google::protobuf::compiler::ParseGeneratorParameter +#define GRPC_CUSTOM_PARSEGENERATORPARAMETER \ + ::google::protobuf::compiler::ParseGeneratorParameter #endif #ifndef GRPC_CUSTOM_STRING @@ -81,8 +82,8 @@ static inline int PluginMain(int argc, char* argv[], const CodeGenerator* generator) { return GRPC_CUSTOM_PLUGINMAIN(argc, argv, generator); } -static inline void ParseGeneratorParameter(const string& parameter, - std::vector >* options) { +static inline void ParseGeneratorParameter( + const string& parameter, std::vector >* options) { GRPC_CUSTOM_PARSEGENERATORPARAMETER(parameter, options); } diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc index 2288ba41636..c386115ec20 100644 --- a/src/compiler/cpp_generator.cc +++ b/src/compiler/cpp_generator.cc @@ -64,19 +64,22 @@ grpc::string FilenameIdentifier(const grpc::string &filename) { } } // namespace -template -T *array_end(T (&array)[N]) { return array + N; } +template +T *array_end(T (&array)[N]) { + return array + N; +} -void PrintIncludes(Printer *printer, const std::vector& headers, const Parameters ¶ms) { +void PrintIncludes(Printer *printer, const std::vector &headers, + const Parameters ¶ms) { std::map vars; vars["l"] = params.use_system_headers ? '<' : '"'; vars["r"] = params.use_system_headers ? '>' : '"'; - auto& s = params.grpc_search_path; + auto &s = params.grpc_search_path; if (!s.empty()) { vars["l"] += s; - if (s[s.size()-1] != '/') { + if (s[s.size() - 1] != '/') { vars["l"] += '/'; } } @@ -101,7 +104,7 @@ grpc::string GetHeaderPrologue(File *file, const Parameters & /*params*/) { printer->Print(vars, "// Generated by the gRPC protobuf plugin.\n"); printer->Print(vars, - "// If you make any local change, they will be lost.\n"); + "// If you make any local change, they will be lost.\n"); printer->Print(vars, "// source: $filename$\n"); grpc::string leading_comments = file->GetLeadingComments(); if (!leading_comments.empty()) { @@ -117,8 +120,7 @@ grpc::string GetHeaderPrologue(File *file, const Parameters & /*params*/) { return output; } -grpc::string GetHeaderIncludes(File *file, - const Parameters ¶ms) { +grpc::string GetHeaderIncludes(File *file, const Parameters ¶ms) { grpc::string output; { // Scope the output stream so it closes and finalizes output to the string. 
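As a reading aid for the PrintIncludes change above: the use_system_headers and grpc_search_path generator parameters only affect the delimiter and prefix of each emitted include line. Roughly, with illustrative parameter values:

    // use_system_headers=true,  grpc_search_path=""                 ->
    #include <grpc++/impl/codegen/async_stream.h>
    // use_system_headers=false, grpc_search_path="third_party/grpc" ->
    #include "third_party/grpc/grpc++/impl/codegen/async_stream.h"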
@@ -126,15 +128,14 @@ grpc::string GetHeaderIncludes(File *file, std::map vars; static const char *headers_strs[] = { - "grpc++/impl/codegen/async_stream.h", - "grpc++/impl/codegen/async_unary_call.h", - "grpc++/impl/codegen/proto_utils.h", - "grpc++/impl/codegen/rpc_method.h", - "grpc++/impl/codegen/service_type.h", - "grpc++/impl/codegen/status.h", - "grpc++/impl/codegen/stub_options.h", - "grpc++/impl/codegen/sync_stream.h" - }; + "grpc++/impl/codegen/async_stream.h", + "grpc++/impl/codegen/async_unary_call.h", + "grpc++/impl/codegen/proto_utils.h", + "grpc++/impl/codegen/rpc_method.h", + "grpc++/impl/codegen/service_type.h", + "grpc++/impl/codegen/status.h", + "grpc++/impl/codegen/stub_options.h", + "grpc++/impl/codegen/sync_stream.h"}; std::vector headers(headers_strs, array_end(headers_strs)); PrintIncludes(printer.get(), headers, params); printer->Print(vars, "\n"); @@ -309,8 +310,7 @@ void PrintHeaderClientMethodInterfaces( } } -void PrintHeaderClientMethod(Printer *printer, - const Method *method, +void PrintHeaderClientMethod(Printer *printer, const Method *method, std::map *vars, bool is_public) { (*vars)["Method"] = method->name(); @@ -490,10 +490,8 @@ void PrintHeaderServerMethodSync(Printer *printer, const Method *method, printer->Print(method->GetTrailingComments().c_str()); } -void PrintHeaderServerMethodAsync( - Printer *printer, - const Method *method, - std::map *vars) { +void PrintHeaderServerMethodAsync(Printer *printer, const Method *method, + std::map *vars) { (*vars)["Method"] = method->name(); (*vars)["Request"] = method->input_type_name(); (*vars)["Response"] = method->output_type_name(); @@ -607,8 +605,7 @@ void PrintHeaderServerMethodAsync( } void PrintHeaderServerMethodGeneric( - Printer *printer, - const Method *method, + Printer *printer, const Method *method, std::map *vars) { (*vars)["Method"] = method->name(); (*vars)["Request"] = method->input_type_name(); @@ -677,8 +674,7 @@ void PrintHeaderServerMethodGeneric( printer->Print(*vars, "};\n"); } -void PrintHeaderService(Printer *printer, - const Service *service, +void PrintHeaderService(Printer *printer, const Service *service, std::map *vars) { (*vars)["Service"] = service->name(); @@ -696,14 +692,16 @@ void PrintHeaderService(Printer *printer, printer->Print("virtual ~StubInterface() {}\n"); for (int i = 0; i < service->method_count(); ++i) { printer->Print(service->method(i)->GetLeadingComments().c_str()); - PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars, true); + PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars, + true); printer->Print(service->method(i)->GetTrailingComments().c_str()); } printer->Outdent(); printer->Print("private:\n"); printer->Indent(); for (int i = 0; i < service->method_count(); ++i) { - PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars, false); + PrintHeaderClientMethodInterfaces(printer, service->method(i).get(), vars, + false); } printer->Outdent(); printer->Print("};\n"); @@ -711,7 +709,8 @@ void PrintHeaderService(Printer *printer, "class Stub GRPC_FINAL : public StubInterface" " {\n public:\n"); printer->Indent(); - printer->Print("Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n"); + printer->Print( + "Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n"); for (int i = 0; i < service->method_count(); ++i) { PrintHeaderClientMethod(printer, service->method(i).get(), vars, true); } @@ -776,8 +775,7 @@ void PrintHeaderService(Printer *printer, 
printer->Print(service->GetTrailingComments().c_str()); } -grpc::string GetHeaderServices(File *file, - const Parameters ¶ms) { +grpc::string GetHeaderServices(File *file, const Parameters ¶ms) { grpc::string output; { // Scope the output stream so it closes and finalizes output to the string. @@ -849,7 +847,7 @@ grpc::string GetSourcePrologue(File *file, const Parameters & /*params*/) { printer->Print(vars, "// Generated by the gRPC protobuf plugin.\n"); printer->Print(vars, - "// If you make any local change, they will be lost.\n"); + "// If you make any local change, they will be lost.\n"); printer->Print(vars, "// source: $filename$\n\n"); printer->Print(vars, "#include \"$filename_base$$message_header_ext$\"\n"); @@ -860,8 +858,7 @@ grpc::string GetSourcePrologue(File *file, const Parameters & /*params*/) { return output; } -grpc::string GetSourceIncludes(File *file, - const Parameters ¶ms) { +grpc::string GetSourceIncludes(File *file, const Parameters ¶ms) { grpc::string output; { // Scope the output stream so it closes and finalizes output to the string. @@ -869,15 +866,14 @@ grpc::string GetSourceIncludes(File *file, std::map vars; static const char *headers_strs[] = { - "grpc++/impl/codegen/async_stream.h", - "grpc++/impl/codegen/async_unary_call.h", - "grpc++/impl/codegen/channel_interface.h", - "grpc++/impl/codegen/client_unary_call.h", - "grpc++/impl/codegen/method_handler_impl.h", - "grpc++/impl/codegen/rpc_service_method.h", - "grpc++/impl/codegen/service_type.h", - "grpc++/impl/codegen/sync_stream.h" - }; + "grpc++/impl/codegen/async_stream.h", + "grpc++/impl/codegen/async_unary_call.h", + "grpc++/impl/codegen/channel_interface.h", + "grpc++/impl/codegen/client_unary_call.h", + "grpc++/impl/codegen/method_handler_impl.h", + "grpc++/impl/codegen/rpc_service_method.h", + "grpc++/impl/codegen/service_type.h", + "grpc++/impl/codegen/sync_stream.h"}; std::vector headers(headers_strs, array_end(headers_strs)); PrintIncludes(printer.get(), headers, params); @@ -895,8 +891,7 @@ grpc::string GetSourceIncludes(File *file, return output; } -void PrintSourceClientMethod(Printer *printer, - const Method *method, +void PrintSourceClientMethod(Printer *printer, const Method *method, std::map *vars) { (*vars)["Method"] = method->name(); (*vars)["Request"] = method->input_type_name(); @@ -996,8 +991,7 @@ void PrintSourceClientMethod(Printer *printer, } } -void PrintSourceServerMethod(Printer *printer, - const Method *method, +void PrintSourceServerMethod(Printer *printer, const Method *method, std::map *vars) { (*vars)["Method"] = method->name(); (*vars)["Request"] = method->input_type_name(); @@ -1055,8 +1049,7 @@ void PrintSourceServerMethod(Printer *printer, } } -void PrintSourceService(Printer *printer, - const Service *service, +void PrintSourceService(Printer *printer, const Service *service, std::map *vars) { (*vars)["Service"] = service->name(); @@ -1168,8 +1161,7 @@ void PrintSourceService(Printer *printer, } } -grpc::string GetSourceServices(File *file, - const Parameters ¶ms) { +grpc::string GetSourceServices(File *file, const Parameters ¶ms) { grpc::string output; { // Scope the output stream so it closes and finalizes output to the string. 
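For orientation, the header these PrintHeader* helpers assemble has roughly the shape below for a single unary method. This is heavily trimmed and illustrative only; Greeter/HelloRequest/HelloReply are the helloworld example types, and real output also contains async stubs, streaming variants, and the WithAsyncMethod/WithGenericMethod service wrappers.

    class Greeter GRPC_FINAL {
     public:
      class StubInterface {
       public:
        virtual ~StubInterface() {}
        virtual ::grpc::Status SayHello(::grpc::ClientContext* context,
                                        const HelloRequest& request,
                                        HelloReply* response) = 0;
      };
      class Stub GRPC_FINAL : public StubInterface {
       public:
        Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);
        ::grpc::Status SayHello(::grpc::ClientContext* context,
                                const HelloRequest& request,
                                HelloReply* response) GRPC_OVERRIDE;
      };
      class Service : public ::grpc::Service {
       public:
        virtual ::grpc::Status SayHello(::grpc::ServerContext* context,
                                        const HelloRequest* request,
                                        HelloReply* response);
      };
    };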
diff --git a/src/compiler/cpp_plugin.cc b/src/compiler/cpp_plugin.cc index fc0296cd282..38f8f738ed9 100644 --- a/src/compiler/cpp_plugin.cc +++ b/src/compiler/cpp_plugin.cc @@ -48,7 +48,7 @@ using grpc_cpp_generator::GetCppComments; class ProtoBufMethod : public grpc_cpp_generator::Method { public: ProtoBufMethod(const grpc::protobuf::MethodDescriptor *method) - : method_(method) {} + : method_(method) {} grpc::string name() const { return method_->name(); } @@ -90,14 +90,14 @@ class ProtoBufMethod : public grpc_cpp_generator::Method { class ProtoBufService : public grpc_cpp_generator::Service { public: ProtoBufService(const grpc::protobuf::ServiceDescriptor *service) - : service_(service) {} + : service_(service) {} grpc::string name() const { return service_->name(); } int method_count() const { return service_->method_count(); }; std::unique_ptr method(int i) const { return std::unique_ptr( - new ProtoBufMethod(service_->method(i))); + new ProtoBufMethod(service_->method(i))); }; grpc::string GetLeadingComments() const { @@ -115,7 +115,7 @@ class ProtoBufService : public grpc_cpp_generator::Service { class ProtoBufPrinter : public grpc_cpp_generator::Printer { public: ProtoBufPrinter(grpc::string *str) - : output_stream_(str), printer_(&output_stream_, '$') {} + : output_stream_(str), printer_(&output_stream_, '$') {} void Print(const std::map &vars, const char *string_template) { @@ -152,13 +152,14 @@ class ProtoBufFile : public grpc_cpp_generator::File { int service_count() const { return file_->service_count(); }; std::unique_ptr service(int i) const { - return std::unique_ptr ( - new ProtoBufService(file_->service(i))); + return std::unique_ptr( + new ProtoBufService(file_->service(i))); } - std::unique_ptr CreatePrinter(grpc::string *str) const { + std::unique_ptr CreatePrinter( + grpc::string *str) const { return std::unique_ptr( - new ProtoBufPrinter(str)); + new ProtoBufPrinter(str)); } grpc::string GetLeadingComments() const { @@ -197,12 +198,11 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { if (!parameter.empty()) { std::vector parameters_list = - grpc_generator::tokenize(parameter, ","); + grpc_generator::tokenize(parameter, ","); for (auto parameter_string = parameters_list.begin(); - parameter_string != parameters_list.end(); - parameter_string++) { + parameter_string != parameters_list.end(); parameter_string++) { std::vector param = - grpc_generator::tokenize(*parameter_string, "="); + grpc_generator::tokenize(*parameter_string, "="); if (param[0] == "services_namespace") { generator_parameters.services_namespace = param[1]; } else if (param[0] == "use_system_headers") { @@ -232,8 +232,7 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { grpc_cpp_generator::GetHeaderEpilogue(&pbfile, generator_parameters); std::unique_ptr header_output( context->Open(file_name + ".grpc.pb.h")); - grpc::protobuf::io::CodedOutputStream header_coded_out( - header_output.get()); + grpc::protobuf::io::CodedOutputStream header_coded_out(header_output.get()); header_coded_out.WriteRaw(header_code.data(), header_code.size()); grpc::string source_code = @@ -243,8 +242,7 @@ class CppGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { grpc_cpp_generator::GetSourceEpilogue(&pbfile, generator_parameters); std::unique_ptr source_output( context->Open(file_name + ".grpc.pb.cc")); - grpc::protobuf::io::CodedOutputStream source_coded_out( - source_output.get()); + grpc::protobuf::io::CodedOutputStream 
source_coded_out(source_output.get()); source_coded_out.WriteRaw(source_code.data(), source_code.size()); return true; diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc index f5a0876cf98..591e5ae3d42 100644 --- a/src/compiler/csharp_generator.cc +++ b/src/compiler/csharp_generator.cc @@ -36,11 +36,10 @@ #include #include -#include "src/compiler/csharp_generator.h" #include "src/compiler/config.h" -#include "src/compiler/csharp_generator_helpers.h" #include "src/compiler/csharp_generator.h" - +#include "src/compiler/csharp_generator.h" +#include "src/compiler/csharp_generator_helpers.h" using google::protobuf::compiler::csharp::GetFileNamespace; using google::protobuf::compiler::csharp::GetClassName; @@ -61,7 +60,6 @@ using grpc_generator::StringReplace; using std::map; using std::vector; - namespace grpc_csharp_generator { namespace { @@ -70,34 +68,43 @@ namespace { // Currently, we cannot easily reuse the functionality as // google/protobuf/compiler/csharp/csharp_doc_comment.h is not a public header. // TODO(jtattermusch): reuse the functionality from google/protobuf. -void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer* printer, grpc::protobuf::SourceLocation location) { - grpc::string comments = location.leading_comments.empty() ? - location.trailing_comments : location.leading_comments; +void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer, + grpc::protobuf::SourceLocation location) { + grpc::string comments = location.leading_comments.empty() + ? location.trailing_comments + : location.leading_comments; if (comments.empty()) { return; } - // XML escaping... no need for apostrophes etc as the whole text is going to be a child + // XML escaping... no need for apostrophes etc as the whole text is going to + // be a child // node of a summary element, not part of an attribute. comments = grpc_generator::StringReplace(comments, "&", "&", true); comments = grpc_generator::StringReplace(comments, "<", "<", true); std::vector lines; grpc_generator::Split(comments, '\n', &lines); - // TODO: We really should work out which part to put in the summary and which to put in the remarks... - // but that needs to be part of a bigger effort to understand the markdown better anyway. + // TODO: We really should work out which part to put in the summary and which + // to put in the remarks... + // but that needs to be part of a bigger effort to understand the markdown + // better anyway. printer->Print("/// \n"); bool last_was_empty = false; - // We squash multiple blank lines down to one, and remove any trailing blank lines. We need - // to preserve the blank lines themselves, as this is relevant in the markdown. - // Note that we can't remove leading or trailing whitespace as *that's* relevant in markdown too. + // We squash multiple blank lines down to one, and remove any trailing blank + // lines. We need + // to preserve the blank lines themselves, as this is relevant in the + // markdown. + // Note that we can't remove leading or trailing whitespace as *that's* + // relevant in markdown too. // (We don't skip "just whitespace" lines, either.) 
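The escaping step above rewrites '&' and '<' as their XML entities so the comment text can sit safely inside the generated <summary> element. A hedged sketch of that step in isolation (the helper name is invented for illustration):

    #include "src/compiler/generator_helpers.h"

    // Sketch only: same escaping as above, using the standard XML entities.
    grpc::string XmlEscapeForSummary(grpc::string comments) {
      comments = grpc_generator::StringReplace(comments, "&", "&amp;", true);
      comments = grpc_generator::StringReplace(comments, "<", "&lt;", true);
      return comments;  // e.g. "a < b && c" -> "a &lt; b &amp;&amp; c"
    }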
- for (std::vector::iterator it = lines.begin(); it != lines.end(); ++it) { + for (std::vector::iterator it = lines.begin(); + it != lines.end(); ++it) { grpc::string line = *it; if (line.empty()) { last_was_empty = true; } else { if (last_was_empty) { - printer->Print("///\n"); + printer->Print("///\n"); } last_was_empty = false; printer->Print("/// $line$\n", "line", *it); @@ -107,23 +114,23 @@ void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer* printer, grpc::prot } template -void GenerateDocCommentBody( - grpc::protobuf::io::Printer* printer, const DescriptorType* descriptor) { +void GenerateDocCommentBody(grpc::protobuf::io::Printer *printer, + const DescriptorType *descriptor) { grpc::protobuf::SourceLocation location; if (descriptor->GetSourceLocation(&location)) { GenerateDocCommentBodyImpl(printer, location); } } -std::string GetServiceClassName(const ServiceDescriptor* service) { +std::string GetServiceClassName(const ServiceDescriptor *service) { return service->name(); } -std::string GetClientClassName(const ServiceDescriptor* service) { +std::string GetClientClassName(const ServiceDescriptor *service) { return service->name() + "Client"; } -std::string GetServerClassName(const ServiceDescriptor* service) { +std::string GetServerClassName(const ServiceDescriptor *service) { return service->name() + "Base"; } @@ -138,13 +145,11 @@ std::string GetCSharpMethodType(MethodType method_type) { case METHODTYPE_BIDI_STREAMING: return "MethodType.DuplexStreaming"; } - GOOGLE_LOG(FATAL)<< "Can't get here."; + GOOGLE_LOG(FATAL) << "Can't get here."; return ""; } -std::string GetServiceNameFieldName() { - return "__ServiceName"; -} +std::string GetServiceNameFieldName() { return "__ServiceName"; } std::string GetMarshallerFieldName(const Descriptor *message) { return "__Marshaller_" + message->name(); @@ -155,7 +160,7 @@ std::string GetMethodFieldName(const MethodDescriptor *method) { } std::string GetMethodRequestParamMaybe(const MethodDescriptor *method, - bool invocation_param=false) { + bool invocation_param = false) { if (method->client_streaming()) { return ""; } @@ -174,16 +179,16 @@ std::string GetMethodReturnTypeClient(const MethodDescriptor *method) { case METHODTYPE_NO_STREAMING: return "AsyncUnaryCall<" + GetClassName(method->output_type()) + ">"; case METHODTYPE_CLIENT_STREAMING: - return "AsyncClientStreamingCall<" + GetClassName(method->input_type()) - + ", " + GetClassName(method->output_type()) + ">"; + return "AsyncClientStreamingCall<" + GetClassName(method->input_type()) + + ", " + GetClassName(method->output_type()) + ">"; case METHODTYPE_SERVER_STREAMING: - return "AsyncServerStreamingCall<" + GetClassName(method->output_type()) - + ">"; + return "AsyncServerStreamingCall<" + GetClassName(method->output_type()) + + ">"; case METHODTYPE_BIDI_STREAMING: - return "AsyncDuplexStreamingCall<" + GetClassName(method->input_type()) - + ", " + GetClassName(method->output_type()) + ">"; + return "AsyncDuplexStreamingCall<" + GetClassName(method->input_type()) + + ", " + GetClassName(method->output_type()) + ">"; } - GOOGLE_LOG(FATAL)<< "Can't get here."; + GOOGLE_LOG(FATAL) << "Can't get here."; return ""; } @@ -194,10 +199,10 @@ std::string GetMethodRequestParamServer(const MethodDescriptor *method) { return GetClassName(method->input_type()) + " request"; case METHODTYPE_CLIENT_STREAMING: case METHODTYPE_BIDI_STREAMING: - return "IAsyncStreamReader<" + GetClassName(method->input_type()) - + "> requestStream"; + return "IAsyncStreamReader<" + 
GetClassName(method->input_type()) + + "> requestStream"; } - GOOGLE_LOG(FATAL)<< "Can't get here."; + GOOGLE_LOG(FATAL) << "Can't get here."; return ""; } @@ -205,12 +210,13 @@ std::string GetMethodReturnTypeServer(const MethodDescriptor *method) { switch (GetMethodType(method)) { case METHODTYPE_NO_STREAMING: case METHODTYPE_CLIENT_STREAMING: - return "global::System.Threading.Tasks.Task<" + GetClassName(method->output_type()) + ">"; + return "global::System.Threading.Tasks.Task<" + + GetClassName(method->output_type()) + ">"; case METHODTYPE_SERVER_STREAMING: case METHODTYPE_BIDI_STREAMING: return "global::System.Threading.Tasks.Task"; } - GOOGLE_LOG(FATAL)<< "Can't get here."; + GOOGLE_LOG(FATAL) << "Can't get here."; return ""; } @@ -221,18 +227,19 @@ std::string GetMethodResponseStreamMaybe(const MethodDescriptor *method) { return ""; case METHODTYPE_SERVER_STREAMING: case METHODTYPE_BIDI_STREAMING: - return ", IServerStreamWriter<" + GetClassName(method->output_type()) - + "> responseStream"; + return ", IServerStreamWriter<" + GetClassName(method->output_type()) + + "> responseStream"; } - GOOGLE_LOG(FATAL)<< "Can't get here."; + GOOGLE_LOG(FATAL) << "Can't get here."; return ""; } // Gets vector of all messages used as input or output types. -std::vector GetUsedMessages( +std::vector GetUsedMessages( const ServiceDescriptor *service) { - std::set descriptor_set; - std::vector result; // vector is to maintain stable ordering + std::set descriptor_set; + std::vector + result; // vector is to maintain stable ordering for (int i = 0; i < service->method_count(); i++) { const MethodDescriptor *method = service->method(i); if (descriptor_set.find(method->input_type()) == descriptor_set.end()) { @@ -247,21 +254,25 @@ std::vector GetUsedMessages( return result; } -void GenerateMarshallerFields(Printer* out, const ServiceDescriptor *service) { - std::vector used_messages = GetUsedMessages(service); +void GenerateMarshallerFields(Printer *out, const ServiceDescriptor *service) { + std::vector used_messages = GetUsedMessages(service); for (size_t i = 0; i < used_messages.size(); i++) { const Descriptor *message = used_messages[i]; out->Print( - "static readonly Marshaller<$type$> $fieldname$ = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), $type$.Parser.ParseFrom);\n", + "static readonly Marshaller<$type$> $fieldname$ = " + "Marshallers.Create((arg) => " + "global::Google.Protobuf.MessageExtensions.ToByteArray(arg), " + "$type$.Parser.ParseFrom);\n", "fieldname", GetMarshallerFieldName(message), "type", GetClassName(message)); } out->Print("\n"); } -void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) { +void GenerateStaticMethodField(Printer *out, const MethodDescriptor *method) { out->Print( - "static readonly Method<$request$, $response$> $fieldname$ = new Method<$request$, $response$>(\n", + "static readonly Method<$request$, $response$> $fieldname$ = new " + "Method<$request$, $response$>(\n", "fieldname", GetMethodFieldName(method), "request", GetClassName(method->input_type()), "response", GetClassName(method->output_type())); @@ -270,7 +281,7 @@ void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) { out->Print("$methodtype$,\n", "methodtype", GetCSharpMethodType(GetMethodType(method))); out->Print("$servicenamefield$,\n", "servicenamefield", - GetServiceNameFieldName()); + GetServiceNameFieldName()); out->Print("\"$methodname$\",\n", "methodname", method->name()); 
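All of these out->Print calls go through protobuf's io::Printer with '$' as the variable delimiter (see ProtoBufPrinter in cpp_plugin.cc), so each "name", value pair fills the matching $name$ placeholder in the template string. A hedged mini-example with made-up values:

    #include <google/protobuf/io/printer.h>

    // Sketch only: the $var$ substitution used throughout the generators.
    void EmitMethodField(google::protobuf::io::Printer *out) {
      out->Print("static readonly Method<$request$, $response$> $fieldname$;\n",
                 "request", "HelloRequest",        // values are illustrative
                 "response", "HelloReply",
                 "fieldname", "__Method_SayHello");
    }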
out->Print("$requestmarshaller$,\n", "requestmarshaller", GetMarshallerFieldName(method->input_type())); @@ -281,11 +292,14 @@ void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) { out->Outdent(); } -void GenerateServiceDescriptorProperty(Printer* out, const ServiceDescriptor *service) { +void GenerateServiceDescriptorProperty(Printer *out, + const ServiceDescriptor *service) { std::ostringstream index; index << service->index(); out->Print("/// Service descriptor\n"); - out->Print("public static global::Google.Protobuf.Reflection.ServiceDescriptor Descriptor\n"); + out->Print( + "public static global::Google.Protobuf.Reflection.ServiceDescriptor " + "Descriptor\n"); out->Print("{\n"); out->Print(" get { return $umbrella$.Descriptor.Services[$index$]; }\n", "umbrella", GetReflectionClassName(service->file()), "index", @@ -294,9 +308,11 @@ void GenerateServiceDescriptorProperty(Printer* out, const ServiceDescriptor *se out->Print("\n"); } -void GenerateServerClass(Printer* out, const ServiceDescriptor *service) { - out->Print("/// Base class for server-side implementations of $servicename$\n", - "servicename", GetServiceClassName(service)); +void GenerateServerClass(Printer *out, const ServiceDescriptor *service) { + out->Print( + "/// Base class for server-side implementations of " + "$servicename$\n", + "servicename", GetServiceClassName(service)); out->Print("public abstract class $name$\n", "name", GetServerClassName(service)); out->Print("{\n"); @@ -305,7 +321,8 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) { const MethodDescriptor *method = service->method(i); GenerateDocCommentBody(out, method); out->Print( - "public virtual $returntype$ $methodname$($request$$response_stream_maybe$, " + "public virtual $returntype$ " + "$methodname$($request$$response_stream_maybe$, " "ServerCallContext context)\n", "methodname", method->name(), "returntype", GetMethodReturnTypeServer(method), "request", @@ -313,8 +330,9 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) { GetMethodResponseStreamMaybe(method)); out->Print("{\n"); out->Indent(); - out->Print("throw new RpcException(" - "new Status(StatusCode.Unimplemented, \"\"));\n"); + out->Print( + "throw new RpcException(" + "new Status(StatusCode.Unimplemented, \"\"));\n"); out->Outdent(); out->Print("}\n\n"); } @@ -323,41 +341,49 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) { out->Print("\n"); } -void GenerateClientStub(Printer* out, const ServiceDescriptor *service) { - out->Print("/// Client for $servicename$\n", - "servicename", GetServiceClassName(service)); - out->Print( - "public class $name$ : ClientBase<$name$>\n", - "name", GetClientClassName(service)); +void GenerateClientStub(Printer *out, const ServiceDescriptor *service) { + out->Print("/// Client for $servicename$\n", "servicename", + GetServiceClassName(service)); + out->Print("public class $name$ : ClientBase<$name$>\n", "name", + GetClientClassName(service)); out->Print("{\n"); out->Indent(); // constructors - out->Print("/// Creates a new client for $servicename$\n" - "/// The channel to use to make remote calls.\n", - "servicename", GetServiceClassName(service)); - out->Print("public $name$(Channel channel) : base(channel)\n", - "name", GetClientClassName(service)); + out->Print( + "/// Creates a new client for $servicename$\n" + "/// The channel to use to make remote " + "calls.\n", + "servicename", GetServiceClassName(service)); + out->Print("public $name$(Channel 
channel) : base(channel)\n", "name", + GetClientClassName(service)); out->Print("{\n"); out->Print("}\n"); - out->Print("/// Creates a new client for $servicename$ that uses a custom CallInvoker.\n" - "/// The callInvoker to use to make remote calls.\n", - "servicename", GetServiceClassName(service)); + out->Print( + "/// Creates a new client for $servicename$ that uses a custom " + "CallInvoker.\n" + "/// The callInvoker to use to make remote " + "calls.\n", + "servicename", GetServiceClassName(service)); out->Print("public $name$(CallInvoker callInvoker) : base(callInvoker)\n", "name", GetClientClassName(service)); out->Print("{\n"); out->Print("}\n"); - out->Print("/// Protected parameterless constructor to allow creation" - " of test doubles.\n"); - out->Print("protected $name$() : base()\n", - "name", GetClientClassName(service)); + out->Print( + "/// Protected parameterless constructor to allow creation" + " of test doubles.\n"); + out->Print("protected $name$() : base()\n", "name", + GetClientClassName(service)); out->Print("{\n"); out->Print("}\n"); - out->Print("/// Protected constructor to allow creation of configured clients.\n" - "/// The client configuration.\n"); - out->Print("protected $name$(ClientBaseConfiguration configuration)" - " : base(configuration)\n", - "name", GetClientClassName(service)); + out->Print( + "/// Protected constructor to allow creation of configured " + "clients.\n" + "/// The client configuration.\n"); + out->Print( + "protected $name$(ClientBaseConfiguration configuration)" + " : base(configuration)\n", + "name", GetClientClassName(service)); out->Print("{\n"); out->Print("}\n\n"); @@ -368,27 +394,36 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) { if (method_type == METHODTYPE_NO_STREAMING) { // unary calls have an extra synchronous stub method GenerateDocCommentBody(out, method); - out->Print("public virtual $response$ $methodname$($request$ request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))\n", + out->Print( + "public virtual $response$ $methodname$($request$ request, Metadata " + "headers = null, DateTime? 
deadline = null, CancellationToken " + "cancellationToken = default(CancellationToken))\n", "methodname", method->name(), "request", GetClassName(method->input_type()), "response", GetClassName(method->output_type())); out->Print("{\n"); out->Indent(); - out->Print("return $methodname$(request, new CallOptions(headers, deadline, cancellationToken));\n", - "methodname", method->name()); + out->Print( + "return $methodname$(request, new CallOptions(headers, deadline, " + "cancellationToken));\n", + "methodname", method->name()); out->Outdent(); out->Print("}\n"); // overload taking CallOptions as a param GenerateDocCommentBody(out, method); - out->Print("public virtual $response$ $methodname$($request$ request, CallOptions options)\n", + out->Print( + "public virtual $response$ $methodname$($request$ request, " + "CallOptions options)\n", "methodname", method->name(), "request", GetClassName(method->input_type()), "response", GetClassName(method->output_type())); out->Print("{\n"); out->Indent(); - out->Print("return CallInvoker.BlockingUnaryCall($methodfield$, null, options, request);\n", - "methodfield", GetMethodFieldName(method)); + out->Print( + "return CallInvoker.BlockingUnaryCall($methodfield$, null, options, " + "request);\n", + "methodfield", GetMethodFieldName(method)); out->Outdent(); out->Print("}\n"); } @@ -399,23 +434,28 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) { } GenerateDocCommentBody(out, method); out->Print( - "public virtual $returntype$ $methodname$($request_maybe$Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))\n", - "methodname", method_name, "request_maybe", - GetMethodRequestParamMaybe(method), "returntype", - GetMethodReturnTypeClient(method)); + "public virtual $returntype$ $methodname$($request_maybe$Metadata " + "headers = null, DateTime? 
deadline = null, CancellationToken " + "cancellationToken = default(CancellationToken))\n", + "methodname", method_name, "request_maybe", + GetMethodRequestParamMaybe(method), "returntype", + GetMethodReturnTypeClient(method)); out->Print("{\n"); out->Indent(); - out->Print("return $methodname$($request_maybe$new CallOptions(headers, deadline, cancellationToken));\n", - "methodname", method_name, - "request_maybe", GetMethodRequestParamMaybe(method, true)); + out->Print( + "return $methodname$($request_maybe$new CallOptions(headers, deadline, " + "cancellationToken));\n", + "methodname", method_name, "request_maybe", + GetMethodRequestParamMaybe(method, true)); out->Outdent(); out->Print("}\n"); // overload taking CallOptions as a param GenerateDocCommentBody(out, method); out->Print( - "public virtual $returntype$ $methodname$($request_maybe$CallOptions options)\n", + "public virtual $returntype$ $methodname$($request_maybe$CallOptions " + "options)\n", "methodname", method_name, "request_maybe", GetMethodRequestParamMaybe(method), "returntype", GetMethodReturnTypeClient(method)); @@ -423,36 +463,45 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) { out->Indent(); switch (GetMethodType(method)) { case METHODTYPE_NO_STREAMING: - out->Print("return CallInvoker.AsyncUnaryCall($methodfield$, null, options, request);\n", - "methodfield", GetMethodFieldName(method)); + out->Print( + "return CallInvoker.AsyncUnaryCall($methodfield$, null, options, " + "request);\n", + "methodfield", GetMethodFieldName(method)); break; case METHODTYPE_CLIENT_STREAMING: - out->Print("return CallInvoker.AsyncClientStreamingCall($methodfield$, null, options);\n", - "methodfield", GetMethodFieldName(method)); + out->Print( + "return CallInvoker.AsyncClientStreamingCall($methodfield$, null, " + "options);\n", + "methodfield", GetMethodFieldName(method)); break; case METHODTYPE_SERVER_STREAMING: out->Print( - "return CallInvoker.AsyncServerStreamingCall($methodfield$, null, options, request);\n", + "return CallInvoker.AsyncServerStreamingCall($methodfield$, null, " + "options, request);\n", "methodfield", GetMethodFieldName(method)); break; case METHODTYPE_BIDI_STREAMING: - out->Print("return CallInvoker.AsyncDuplexStreamingCall($methodfield$, null, options);\n", - "methodfield", GetMethodFieldName(method)); + out->Print( + "return CallInvoker.AsyncDuplexStreamingCall($methodfield$, null, " + "options);\n", + "methodfield", GetMethodFieldName(method)); break; default: - GOOGLE_LOG(FATAL)<< "Can't get here."; + GOOGLE_LOG(FATAL) << "Can't get here."; } out->Outdent(); out->Print("}\n"); } // override NewInstance method - out->Print("protected override $name$ NewInstance(ClientBaseConfiguration configuration)\n", - "name", GetClientClassName(service)); + out->Print( + "protected override $name$ NewInstance(ClientBaseConfiguration " + "configuration)\n", + "name", GetClientClassName(service)); out->Print("{\n"); out->Indent(); - out->Print("return new $name$(configuration);\n", - "name", GetClientClassName(service)); + out->Print("return new $name$(configuration);\n", "name", + GetClientClassName(service)); out->Outdent(); out->Print("}\n"); @@ -461,11 +510,13 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) { out->Print("\n"); } -void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service) { +void GenerateBindServiceMethod(Printer *out, const ServiceDescriptor *service) { out->Print( - "/// Creates service definition that can be registered with a 
server\n"); + "/// Creates service definition that can be registered with a " + "server\n"); out->Print( - "public static ServerServiceDefinition BindService($implclass$ serviceImpl)\n", + "public static ServerServiceDefinition BindService($implclass$ " + "serviceImpl)\n", "implclass", GetServerClassName(service)); out->Print("{\n"); out->Indent(); @@ -491,7 +542,7 @@ void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service) { out->Print("\n"); } -void GenerateService(Printer* out, const ServiceDescriptor *service, +void GenerateService(Printer *out, const ServiceDescriptor *service, bool generate_client, bool generate_server, bool internal_access) { GenerateDocCommentBody(out, service); diff --git a/src/compiler/csharp_generator_helpers.h b/src/compiler/csharp_generator_helpers.h index 9bdf6fb5356..f5d36f257a2 100644 --- a/src/compiler/csharp_generator_helpers.h +++ b/src/compiler/csharp_generator_helpers.h @@ -41,14 +41,16 @@ namespace grpc_csharp_generator { inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file, grpc::string *file_name_or_error) { - *file_name_or_error = grpc_generator::FileNameInUpperCamel(file, false) + "Grpc.cs"; + *file_name_or_error = + grpc_generator::FileNameInUpperCamel(file, false) + "Grpc.cs"; return true; } // Get leading or trailing comments in a string. Comment lines start with "// ". // Leading detached comments are put in in front of leading comments. template -inline grpc::string GetCsharpComments(const DescriptorType *desc, bool leading) { +inline grpc::string GetCsharpComments(const DescriptorType *desc, + bool leading) { return grpc_generator::GetPrefixedComments(desc, leading, "//"); } diff --git a/src/compiler/csharp_plugin.cc b/src/compiler/csharp_plugin.cc index 5350e73f108..7def72c54c7 100644 --- a/src/compiler/csharp_plugin.cc +++ b/src/compiler/csharp_plugin.cc @@ -67,10 +67,8 @@ class CSharpGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { } } - grpc::string code = grpc_csharp_generator::GetServices(file, - generate_client, - generate_server, - internal_access); + grpc::string code = grpc_csharp_generator::GetServices( + file, generate_client, generate_server, internal_access); if (code.size() == 0) { return true; // don't generate a file if there are no services } diff --git a/src/compiler/generator_helpers.h b/src/compiler/generator_helpers.h index 9a88c2bfccd..88d96c00809 100644 --- a/src/compiler/generator_helpers.h +++ b/src/compiler/generator_helpers.h @@ -84,7 +84,7 @@ inline grpc::string StringReplace(grpc::string str, const grpc::string &from, } str.replace(pos, from.length(), to); pos += to.length(); - } while(replace_all); + } while (replace_all); return str; } @@ -139,8 +139,8 @@ inline grpc::string LowerUnderscoreToUpperCamel(grpc::string str) { return result; } -inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *file, - bool include_package_path) { +inline grpc::string FileNameInUpperCamel( + const grpc::protobuf::FileDescriptor *file, bool include_package_path) { std::vector tokens = tokenize(StripProto(file->name()), "/"); grpc::string result = ""; if (include_package_path) { @@ -152,7 +152,8 @@ inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *f return result; } -inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *file) { +inline grpc::string FileNameInUpperCamel( + const grpc::protobuf::FileDescriptor *file) { return FileNameInUpperCamel(file, true); } @@ -163,7 +164,8 @@ enum MethodType { 
METHODTYPE_BIDI_STREAMING }; -inline MethodType GetMethodType(const grpc::protobuf::MethodDescriptor *method) { +inline MethodType GetMethodType( + const grpc::protobuf::MethodDescriptor *method) { if (method->client_streaming()) { if (method->server_streaming()) { return METHODTYPE_BIDI_STREAMING; @@ -254,7 +256,7 @@ inline grpc::string GenerateCommentsWithPrefix( const std::vector &in, const grpc::string &prefix) { std::ostringstream oss; for (auto it = in.begin(); it != in.end(); it++) { - const grpc::string& elem = *it; + const grpc::string &elem = *it; if (elem.empty()) { oss << prefix << "\n"; } else if (elem[0] == ' ') { diff --git a/src/compiler/node_generator.cc b/src/compiler/node_generator.cc index 1fe090d17af..c3852020a3a 100644 --- a/src/compiler/node_generator.cc +++ b/src/compiler/node_generator.cc @@ -67,15 +67,15 @@ grpc::string ModuleAlias(const grpc::string filename) { // Given a filename like foo/bar/baz.proto, returns the corresponding JavaScript // message file foo/bar/baz.js -grpc::string GetJSMessageFilename(const grpc::string& filename) { +grpc::string GetJSMessageFilename(const grpc::string &filename) { grpc::string name = filename; return grpc_generator::StripProto(name) + "_pb.js"; } // Given a filename like foo/bar/baz.proto, returns the root directory // path ../../ -grpc::string GetRootPath(const grpc::string& from_filename, - const grpc::string& to_filename) { +grpc::string GetRootPath(const grpc::string &from_filename, + const grpc::string &to_filename) { if (to_filename.find("google/protobuf") == 0) { // Well-known types (.proto files in the google/protobuf directory) are // assumed to come from the 'google-protobuf' npm package. We may want to @@ -96,21 +96,24 @@ grpc::string GetRootPath(const grpc::string& from_filename, // Return the relative path to load to_file from the directory containing // from_file, assuming that both paths are relative to the same directory -grpc::string GetRelativePath(const grpc::string& from_file, - const grpc::string& to_file) { +grpc::string GetRelativePath(const grpc::string &from_file, + const grpc::string &to_file) { return GetRootPath(from_file, to_file) + to_file; } /* Finds all message types used in all services in the file, and returns them * as a map of fully qualified message type name to message descriptor */ -map GetAllMessages(const FileDescriptor *file) { - map message_types; - for (int service_num = 0; service_num < file->service_count(); service_num++) { - const ServiceDescriptor* service = file->service(service_num); - for (int method_num = 0; method_num < service->method_count(); method_num++) { - const MethodDescriptor* method = service->method(method_num); - const Descriptor* input_type = method->input_type(); - const Descriptor* output_type = method->output_type(); +map GetAllMessages( + const FileDescriptor *file) { + map message_types; + for (int service_num = 0; service_num < file->service_count(); + service_num++) { + const ServiceDescriptor *service = file->service(service_num); + for (int method_num = 0; method_num < service->method_count(); + method_num++) { + const MethodDescriptor *method = service->method(method_num); + const Descriptor *input_type = method->input_type(); + const Descriptor *output_type = method->output_type(); message_types[input_type->name()] = input_type; message_types[output_type->name()] = output_type; } @@ -118,7 +121,7 @@ map GetAllMessages(const FileDescriptor *file) return message_types; } -grpc::string MessageIdentifierName(const grpc::string& name) { +grpc::string 
MessageIdentifierName(const grpc::string &name) { return grpc_generator::StringReplace(name, ".", "_"); } @@ -194,18 +197,18 @@ void PrintService(const ServiceDescriptor *service, Printer *out) { out->Print(template_vars, "var $name$Service = exports.$name$Service = {\n"); out->Indent(); for (int i = 0; i < service->method_count(); i++) { - grpc::string method_name = grpc_generator::LowercaseFirstLetter( - service->method(i)->name()); + grpc::string method_name = + grpc_generator::LowercaseFirstLetter(service->method(i)->name()); out->Print(GetNodeComments(service->method(i), true).c_str()); - out->Print("$method_name$: ", - "method_name", method_name); + out->Print("$method_name$: ", "method_name", method_name); PrintMethod(service->method(i), out); out->Print(",\n"); out->Print(GetNodeComments(service->method(i), false).c_str()); } out->Outdent(); out->Print("};\n\n"); - out->Print(template_vars, "exports.$name$Client = " + out->Print(template_vars, + "exports.$name$Client = " "grpc.makeGenericClientConstructor($name$Service);\n"); out->Print(GetNodeComments(service, false).c_str()); } @@ -213,27 +216,25 @@ void PrintService(const ServiceDescriptor *service, Printer *out) { void PrintImports(const FileDescriptor *file, Printer *out) { out->Print("var grpc = require('grpc');\n"); if (file->message_type_count() > 0) { - grpc::string file_path = GetRelativePath(file->name(), - GetJSMessageFilename( - file->name())); - out->Print("var $module_alias$ = require('$file_path$');\n", - "module_alias", ModuleAlias(file->name()), - "file_path", file_path); + grpc::string file_path = + GetRelativePath(file->name(), GetJSMessageFilename(file->name())); + out->Print("var $module_alias$ = require('$file_path$');\n", "module_alias", + ModuleAlias(file->name()), "file_path", file_path); } for (int i = 0; i < file->dependency_count(); i++) { grpc::string file_path = GetRelativePath( file->name(), GetJSMessageFilename(file->dependency(i)->name())); - out->Print("var $module_alias$ = require('$file_path$');\n", - "module_alias", ModuleAlias(file->dependency(i)->name()), - "file_path", file_path); + out->Print("var $module_alias$ = require('$file_path$');\n", "module_alias", + ModuleAlias(file->dependency(i)->name()), "file_path", + file_path); } out->Print("\n"); } void PrintTransformers(const FileDescriptor *file, Printer *out) { - map messages = GetAllMessages(file); - for (std::map::iterator it = + map messages = GetAllMessages(file); + for (std::map::iterator it = messages.begin(); it != messages.end(); it++) { PrintMessageTransformer(it->second, out); @@ -246,7 +247,6 @@ void PrintServices(const FileDescriptor *file, Printer *out) { PrintService(file->service(i), out); } } - } grpc::string GenerateFile(const FileDescriptor *file) { diff --git a/src/compiler/node_generator_helpers.h b/src/compiler/node_generator_helpers.h index 5862772841c..efe94ab00d6 100644 --- a/src/compiler/node_generator_helpers.h +++ b/src/compiler/node_generator_helpers.h @@ -48,7 +48,7 @@ inline grpc::string GetJSServiceFilename(const grpc::string& filename) { // Get leading or trailing comments in a string. Comment lines start with "// ". // Leading detached comments are put in in front of leading comments. 
template -inline grpc::string GetNodeComments(const DescriptorType *desc, bool leading) { +inline grpc::string GetNodeComments(const DescriptorType* desc, bool leading) { return grpc_generator::GetPrefixedComments(desc, leading, "//"); } diff --git a/src/compiler/objective_c_generator.cc b/src/compiler/objective_c_generator.cc index 4be8cb41879..1d7faf120dc 100644 --- a/src/compiler/objective_c_generator.cc +++ b/src/compiler/objective_c_generator.cc @@ -49,9 +49,9 @@ using ::std::map; namespace grpc_objective_c_generator { namespace { -void PrintProtoRpcDeclarationAsPragma(Printer *printer, - const MethodDescriptor *method, - map< ::grpc::string, ::grpc::string> vars) { +void PrintProtoRpcDeclarationAsPragma( + Printer *printer, const MethodDescriptor *method, + map< ::grpc::string, ::grpc::string> vars) { vars["client_stream"] = method->client_streaming() ? "stream " : ""; vars["server_stream"] = method->server_streaming() ? "stream " : ""; @@ -61,7 +61,7 @@ void PrintProtoRpcDeclarationAsPragma(Printer *printer, } template -static void PrintAllComments(const DescriptorType* desc, Printer* printer) { +static void PrintAllComments(const DescriptorType *desc, Printer *printer) { std::vector comments; grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING_DETACHED, &comments); @@ -100,7 +100,8 @@ void PrintMethodSignature(Printer *printer, const MethodDescriptor *method, if (method->server_streaming()) { printer->Print(vars, " eventHandler:(void(^)(BOOL done, " - "$response_class$ *_Nullable response, NSError *_Nullable error))eventHandler"); + "$response_class$ *_Nullable response, NSError *_Nullable " + "error))eventHandler"); } else { printer->Print(vars, " handler:(void(^)($response_class$ *_Nullable response, " @@ -123,7 +124,8 @@ void PrintAdvancedSignature(Printer *printer, const MethodDescriptor *method, PrintMethodSignature(printer, method, vars); } -inline map< ::grpc::string, ::grpc::string> GetMethodVars(const MethodDescriptor *method) { +inline map< ::grpc::string, ::grpc::string> GetMethodVars( + const MethodDescriptor *method) { map< ::grpc::string, ::grpc::string> res; res["method_name"] = method->name(); res["request_type"] = method->input_type()->name(); @@ -210,7 +212,8 @@ void PrintMethodImplementations(Printer *printer, grpc::protobuf::io::StringOutputStream output_stream(&output); Printer printer(&output_stream, '$'); - map< ::grpc::string, ::grpc::string> vars = {{"service_class", ServiceClassName(service)}}; + map< ::grpc::string, ::grpc::string> vars = { + {"service_class", ServiceClassName(service)}}; printer.Print(vars, "@protocol $service_class$ \n\n"); @@ -237,21 +240,23 @@ void PrintMethodImplementations(Printer *printer, } ::grpc::string GetSource(const ServiceDescriptor *service) { - ::grpc::string output; + ::grpc::string output; { // Scope the output stream so it closes and finalizes output to the string. 
grpc::protobuf::io::StringOutputStream output_stream(&output); Printer printer(&output_stream, '$'); - map< ::grpc::string,::grpc::string> vars = {{"service_name", service->name()}, - {"service_class", ServiceClassName(service)}, - {"package", service->file()->package()}}; + map< ::grpc::string, ::grpc::string> vars = { + {"service_name", service->name()}, + {"service_class", ServiceClassName(service)}, + {"package", service->file()->package()}}; printer.Print(vars, "@implementation $service_class$\n\n"); printer.Print("// Designated initializer\n"); printer.Print("- (instancetype)initWithHost:(NSString *)host {\n"); - printer.Print(vars, + printer.Print( + vars, " return (self = [super initWithHost:host" " packageName:@\"$package$\" serviceName:@\"$service_name$\"]);\n"); printer.Print("}\n\n"); diff --git a/src/compiler/objective_c_generator_helpers.h b/src/compiler/objective_c_generator_helpers.h index 1f8c80014f0..b482f028a10 100644 --- a/src/compiler/objective_c_generator_helpers.h +++ b/src/compiler/objective_c_generator_helpers.h @@ -53,6 +53,5 @@ inline string ServiceClassName(const ServiceDescriptor *service) { string prefix = file->options().objc_class_prefix(); return prefix + service->name(); } - } #endif // GRPC_INTERNAL_COMPILER_OBJECTIVE_C_GENERATOR_HELPERS_H diff --git a/src/compiler/objective_c_plugin.cc b/src/compiler/objective_c_plugin.cc index be647764024..8de0997ebea 100644 --- a/src/compiler/objective_c_plugin.cc +++ b/src/compiler/objective_c_plugin.cc @@ -42,7 +42,8 @@ #include using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName; -using ::google::protobuf::compiler::objectivec::IsProtobufLibraryBundledProtoFile; +using ::google::protobuf::compiler::objectivec:: + IsProtobufLibraryBundledProtoFile; class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { public: @@ -53,7 +54,6 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { const ::grpc::string ¶meter, grpc::protobuf::compiler::GeneratorContext *context, ::grpc::string *error) const { - if (file->service_count() == 0) { // No services. Do nothing. 
return true; @@ -66,29 +66,32 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { // Generate .pbrpc.h ::grpc::string imports = ::grpc::string("#import \"") + file_name + - ".pbobjc.h\"\n\n" - "#import \n" - "#import \n" - "#import \n"; + ".pbobjc.h\"\n\n" + "#import \n" + "#import \n" + "#import \n"; // TODO(jcanizales): Instead forward-declare the input and output types // and import the files in the .pbrpc.m ::grpc::string proto_imports; for (int i = 0; i < file->dependency_count(); i++) { - ::grpc::string header = grpc_objective_c_generator::MessageHeaderName( - file->dependency(i)); + ::grpc::string header = + grpc_objective_c_generator::MessageHeaderName(file->dependency(i)); const grpc::protobuf::FileDescriptor *dependency = file->dependency(i); if (IsProtobufLibraryBundledProtoFile(dependency)) { ::grpc::string base_name = header; grpc_generator::StripPrefix(&base_name, "google/protobuf/"); // create the import code snippet proto_imports += - "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n" - " #import <" + ::grpc::string(ProtobufLibraryFrameworkName) + - "/" + base_name + ">\n" - "#else\n" - " #import \"" + header + "\"\n" - "#endif\n"; + "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n" + " #import <" + + ::grpc::string(ProtobufLibraryFrameworkName) + "/" + base_name + + ">\n" + "#else\n" + " #import \"" + + header + + "\"\n" + "#endif\n"; } else { proto_imports += ::grpc::string("#import \"") + header + "\"\n"; } @@ -100,21 +103,22 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { declarations += grpc_objective_c_generator::GetHeader(service); } - static const ::grpc::string kNonNullBegin = "\nNS_ASSUME_NONNULL_BEGIN\n\n"; + static const ::grpc::string kNonNullBegin = + "\nNS_ASSUME_NONNULL_BEGIN\n\n"; static const ::grpc::string kNonNullEnd = "\nNS_ASSUME_NONNULL_END\n"; - Write(context, file_name + ".pbrpc.h", - imports + '\n' + proto_imports + '\n' + kNonNullBegin + - declarations + kNonNullEnd); + Write(context, file_name + ".pbrpc.h", imports + '\n' + proto_imports + + '\n' + kNonNullBegin + + declarations + kNonNullEnd); } { // Generate .pbrpc.m ::grpc::string imports = ::grpc::string("#import \"") + file_name + - ".pbrpc.h\"\n\n" - "#import \n" - "#import \n"; + ".pbrpc.h\"\n\n" + "#import \n" + "#import \n"; ::grpc::string definitions; for (int i = 0; i < file->service_count(); i++) { @@ -131,7 +135,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { private: // Write the given code into the given file. void Write(grpc::protobuf::compiler::GeneratorContext *context, - const ::grpc::string &filename, const ::grpc::string &code) const { + const ::grpc::string &filename, const ::grpc::string &code) const { std::unique_ptr output( context->Open(filename)); grpc::protobuf::io::CodedOutputStream coded_out(output.get()); diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc index c5f1ed80611..0f61b1fb6c0 100644 --- a/src/compiler/python_generator.cc +++ b/src/compiler/python_generator.cc @@ -74,16 +74,18 @@ PythonGrpcGenerator::PythonGrpcGenerator(const GeneratorConfiguration& config) PythonGrpcGenerator::~PythonGrpcGenerator() {} -bool PythonGrpcGenerator::Generate( - const FileDescriptor* file, const grpc::string& parameter, - GeneratorContext* context, grpc::string* error) const { +bool PythonGrpcGenerator::Generate(const FileDescriptor* file, + const grpc::string& parameter, + GeneratorContext* context, + grpc::string* error) const { // Get output file name. 
grpc::string file_name; static const int proto_suffix_length = strlen(".proto"); if (file->name().size() > static_cast(proto_suffix_length) && file->name().find_last_of(".proto") == file->name().size() - 1) { - file_name = file->name().substr( - 0, file->name().size() - proto_suffix_length) + "_pb2.py"; + file_name = + file->name().substr(0, file->name().size() - proto_suffix_length) + + "_pb2.py"; } else { *error = "Invalid proto file name. Proto file must end with .proto"; return false; @@ -115,7 +117,7 @@ map ListToDict( assert(values.size() % 2 == 0); map value_map; auto value_iter = values.begin(); - for (unsigned i = 0; i < values.size()/2; ++i) { + for (unsigned i = 0; i < values.size() / 2; ++i) { grpc::string key = *value_iter; ++value_iter; grpc::string value = *value_iter; @@ -138,9 +140,7 @@ class IndentScope { printer_->Indent(); } - ~IndentScope() { - printer_->Outdent(); - } + ~IndentScope() { printer_->Outdent(); } private: Printer* printer_; @@ -173,7 +173,6 @@ grpc::string ModuleAlias(const grpc::string& filename) { return module_name; } - bool GetModuleAndMessagePath(const Descriptor* type, const ServiceDescriptor* service, grpc::string* out) { @@ -182,7 +181,7 @@ bool GetModuleAndMessagePath(const Descriptor* type, do { message_path.push_back(path_elem_type); path_elem_type = path_elem_type->containing_type(); - } while (path_elem_type); // implicit nullptr comparison; don't be explicit + } while (path_elem_type); // implicit nullptr comparison; don't be explicit grpc::string file_name = type->file()->name(); static const int proto_suffix_length = strlen(".proto"); if (!(file_name.size() > static_cast(proto_suffix_length) && @@ -190,11 +189,11 @@ bool GetModuleAndMessagePath(const Descriptor* type, return false; } grpc::string service_file_name = service->file()->name(); - grpc::string module = service_file_name == file_name ? - "" : ModuleAlias(file_name) + "."; + grpc::string module = + service_file_name == file_name ? "" : ModuleAlias(file_name) + "."; grpc::string message_type; - for (auto path_iter = message_path.rbegin(); - path_iter != message_path.rend(); ++path_iter) { + for (auto path_iter = message_path.rbegin(); path_iter != message_path.rend(); + ++path_iter) { message_type += (*path_iter)->name() + "."; } // no pop_back prior to C++11 @@ -229,8 +228,7 @@ static void PrintAllComments(const DescriptorType* desc, Printer* printer) { printer->Print("\"\"\"\n"); } -bool PrintBetaServicer(const ServiceDescriptor* service, - Printer* out) { +bool PrintBetaServicer(const ServiceDescriptor* service, Printer* out) { out->Print("\n\n"); out->Print("class Beta$Service$Servicer(object):\n", "Service", service->name()); @@ -239,10 +237,10 @@ bool PrintBetaServicer(const ServiceDescriptor* service, PrintAllComments(service, out); for (int i = 0; i < service->method_count(); ++i) { auto meth = service->method(i); - grpc::string arg_name = meth->client_streaming() ? - "request_iterator" : "request"; - out->Print("def $Method$(self, $ArgName$, context):\n", - "Method", meth->name(), "ArgName", arg_name); + grpc::string arg_name = + meth->client_streaming() ? 
"request_iterator" : "request"; + out->Print("def $Method$(self, $ArgName$, context):\n", "Method", + meth->name(), "ArgName", arg_name); { IndentScope raii_method_indent(out); PrintAllComments(meth, out); @@ -253,8 +251,7 @@ bool PrintBetaServicer(const ServiceDescriptor* service, return true; } -bool PrintBetaStub(const ServiceDescriptor* service, - Printer* out) { +bool PrintBetaStub(const ServiceDescriptor* service, Printer* out) { out->Print("\n\n"); out->Print("class Beta$Service$Stub(object):\n", "Service", service->name()); { @@ -262,10 +259,12 @@ bool PrintBetaStub(const ServiceDescriptor* service, PrintAllComments(service, out); for (int i = 0; i < service->method_count(); ++i) { const MethodDescriptor* meth = service->method(i); - grpc::string arg_name = meth->client_streaming() ? - "request_iterator" : "request"; + grpc::string arg_name = + meth->client_streaming() ? "request_iterator" : "request"; auto methdict = ListToDict({"Method", meth->name(), "ArgName", arg_name}); - out->Print(methdict, "def $Method$(self, $ArgName$, timeout, metadata=None, with_call=False, protocol_options=None):\n"); + out->Print(methdict, + "def $Method$(self, $ArgName$, timeout, metadata=None, " + "with_call=False, protocol_options=None):\n"); { IndentScope raii_method_indent(out); PrintAllComments(meth, out); @@ -282,9 +281,10 @@ bool PrintBetaStub(const ServiceDescriptor* service, bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, const ServiceDescriptor* service, Printer* out) { out->Print("\n\n"); - out->Print("def beta_create_$Service$_server(servicer, pool=None, " - "pool_size=None, default_timeout=None, maximum_timeout=None):\n", - "Service", service->name()); + out->Print( + "def beta_create_$Service$_server(servicer, pool=None, " + "pool_size=None, default_timeout=None, maximum_timeout=None):\n", + "Service", service->name()); { IndentScope raii_create_server_indent(out); map method_implementation_constructors; @@ -315,58 +315,62 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, } out->Print("request_deserializers = {\n"); for (auto name_and_input_module_class_pair = - input_message_modules_and_classes.begin(); + input_message_modules_and_classes.begin(); name_and_input_module_class_pair != - input_message_modules_and_classes.end(); + input_message_modules_and_classes.end(); name_and_input_module_class_pair++) { IndentScope raii_indent(out); - out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): " - "$InputTypeModuleAndClass$.FromString,\n", - "PackageQualifiedServiceName", package_qualified_service_name, - "MethodName", name_and_input_module_class_pair->first, - "InputTypeModuleAndClass", - name_and_input_module_class_pair->second); + out->Print( + "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): " + "$InputTypeModuleAndClass$.FromString,\n", + "PackageQualifiedServiceName", package_qualified_service_name, + "MethodName", name_and_input_module_class_pair->first, + "InputTypeModuleAndClass", name_and_input_module_class_pair->second); } out->Print("}\n"); out->Print("response_serializers = {\n"); for (auto name_and_output_module_class_pair = - output_message_modules_and_classes.begin(); + output_message_modules_and_classes.begin(); name_and_output_module_class_pair != - output_message_modules_and_classes.end(); + output_message_modules_and_classes.end(); name_and_output_module_class_pair++) { IndentScope raii_indent(out); - out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): " - 
"$OutputTypeModuleAndClass$.SerializeToString,\n", - "PackageQualifiedServiceName", package_qualified_service_name, - "MethodName", name_and_output_module_class_pair->first, - "OutputTypeModuleAndClass", - name_and_output_module_class_pair->second); + out->Print( + "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): " + "$OutputTypeModuleAndClass$.SerializeToString,\n", + "PackageQualifiedServiceName", package_qualified_service_name, + "MethodName", name_and_output_module_class_pair->first, + "OutputTypeModuleAndClass", + name_and_output_module_class_pair->second); } out->Print("}\n"); out->Print("method_implementations = {\n"); for (auto name_and_implementation_constructor = - method_implementation_constructors.begin(); - name_and_implementation_constructor != - method_implementation_constructors.end(); - name_and_implementation_constructor++) { + method_implementation_constructors.begin(); + name_and_implementation_constructor != + method_implementation_constructors.end(); + name_and_implementation_constructor++) { IndentScope raii_descriptions_indent(out); const grpc::string method_name = name_and_implementation_constructor->first; - out->Print("(\'$PackageQualifiedServiceName$\', \'$Method$\'): " - "face_utilities.$Constructor$(servicer.$Method$),\n", - "PackageQualifiedServiceName", package_qualified_service_name, - "Method", name_and_implementation_constructor->first, - "Constructor", name_and_implementation_constructor->second); + out->Print( + "(\'$PackageQualifiedServiceName$\', \'$Method$\'): " + "face_utilities.$Constructor$(servicer.$Method$),\n", + "PackageQualifiedServiceName", package_qualified_service_name, + "Method", name_and_implementation_constructor->first, "Constructor", + name_and_implementation_constructor->second); } out->Print("}\n"); - out->Print("server_options = beta_implementations.server_options(" - "request_deserializers=request_deserializers, " - "response_serializers=response_serializers, " - "thread_pool=pool, thread_pool_size=pool_size, " - "default_timeout=default_timeout, " - "maximum_timeout=maximum_timeout)\n"); - out->Print("return beta_implementations.server(method_implementations, " - "options=server_options)\n"); + out->Print( + "server_options = beta_implementations.server_options(" + "request_deserializers=request_deserializers, " + "response_serializers=response_serializers, " + "thread_pool=pool, thread_pool_size=pool_size, " + "default_timeout=default_timeout, " + "maximum_timeout=maximum_timeout)\n"); + out->Print( + "return beta_implementations.server(method_implementations, " + "options=server_options)\n"); } return true; } @@ -374,10 +378,11 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, const ServiceDescriptor* service, Printer* out) { map dict = ListToDict({ - "Service", service->name(), - }); + "Service", service->name(), + }); out->Print("\n\n"); - out->Print(dict, "def beta_create_$Service$_stub(channel, host=None," + out->Print(dict, + "def beta_create_$Service$_stub(channel, host=None," " metadata_transformer=None, pool=None, pool_size=None):\n"); { IndentScope raii_create_server_indent(out); @@ -387,8 +392,7 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, for (int i = 0; i < service->method_count(); ++i) { const MethodDescriptor* method = service->method(i); const grpc::string method_cardinality = - grpc::string(method->client_streaming() ? 
"STREAM" : "UNARY") + - "_" + + grpc::string(method->client_streaming() ? "STREAM" : "UNARY") + "_" + grpc::string(method->server_streaming() ? "STREAM" : "UNARY"); grpc::string input_message_module_and_class; if (!GetModuleAndMessagePath(method->input_type(), service, @@ -409,32 +413,33 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, } out->Print("request_serializers = {\n"); for (auto name_and_input_module_class_pair = - input_message_modules_and_classes.begin(); + input_message_modules_and_classes.begin(); name_and_input_module_class_pair != - input_message_modules_and_classes.end(); + input_message_modules_and_classes.end(); name_and_input_module_class_pair++) { IndentScope raii_indent(out); - out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): " - "$InputTypeModuleAndClass$.SerializeToString,\n", - "PackageQualifiedServiceName", package_qualified_service_name, - "MethodName", name_and_input_module_class_pair->first, - "InputTypeModuleAndClass", - name_and_input_module_class_pair->second); + out->Print( + "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): " + "$InputTypeModuleAndClass$.SerializeToString,\n", + "PackageQualifiedServiceName", package_qualified_service_name, + "MethodName", name_and_input_module_class_pair->first, + "InputTypeModuleAndClass", name_and_input_module_class_pair->second); } out->Print("}\n"); out->Print("response_deserializers = {\n"); for (auto name_and_output_module_class_pair = - output_message_modules_and_classes.begin(); + output_message_modules_and_classes.begin(); name_and_output_module_class_pair != - output_message_modules_and_classes.end(); + output_message_modules_and_classes.end(); name_and_output_module_class_pair++) { IndentScope raii_indent(out); - out->Print("(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): " - "$OutputTypeModuleAndClass$.FromString,\n", - "PackageQualifiedServiceName", package_qualified_service_name, - "MethodName", name_and_output_module_class_pair->first, - "OutputTypeModuleAndClass", - name_and_output_module_class_pair->second); + out->Print( + "(\'$PackageQualifiedServiceName$\', \'$MethodName$\'): " + "$OutputTypeModuleAndClass$.FromString,\n", + "PackageQualifiedServiceName", package_qualified_service_name, + "MethodName", name_and_output_module_class_pair->first, + "OutputTypeModuleAndClass", + name_and_output_module_class_pair->second); } out->Print("}\n"); out->Print("cardinalities = {\n"); @@ -443,17 +448,19 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, name_and_cardinality++) { IndentScope raii_descriptions_indent(out); out->Print("\'$Method$\': cardinality.Cardinality.$Cardinality$,\n", - "Method", name_and_cardinality->first, - "Cardinality", name_and_cardinality->second); + "Method", name_and_cardinality->first, "Cardinality", + name_and_cardinality->second); } out->Print("}\n"); - out->Print("stub_options = beta_implementations.stub_options(" - "host=host, metadata_transformer=metadata_transformer, " - "request_serializers=request_serializers, " - "response_deserializers=response_deserializers, " - "thread_pool=pool, thread_pool_size=pool_size)\n"); out->Print( - "return beta_implementations.dynamic_stub(channel, \'$PackageQualifiedServiceName$\', " + "stub_options = beta_implementations.stub_options(" + "host=host, metadata_transformer=metadata_transformer, " + "request_serializers=request_serializers, " + "response_deserializers=response_deserializers, " + "thread_pool=pool, thread_pool_size=pool_size)\n"); + 
out->Print( + "return beta_implementations.dynamic_stub(channel, " + "\'$PackageQualifiedServiceName$\', " "cardinalities, options=stub_options)\n", "PackageQualifiedServiceName", package_qualified_service_name); } @@ -476,43 +483,41 @@ bool PrintStub(const grpc::string& package_qualified_service_name, out->Print("Args:\n"); { IndentScope raii_args_indent(out); - out->Print("channel: A grpc.Channel.\n"); + out->Print("channel: A grpc.Channel.\n"); } out->Print("\"\"\"\n"); for (int i = 0; i < service->method_count(); ++i) { auto method = service->method(i); - auto multi_callable_constructor = - grpc::string(method->client_streaming() ? "stream" : "unary") + - "_" + - grpc::string(method->server_streaming() ? "stream" : "unary"); - grpc::string request_module_and_class; - if (!GetModuleAndMessagePath(method->input_type(), service, - &request_module_and_class)) { - return false; - } - grpc::string response_module_and_class; - if (!GetModuleAndMessagePath(method->output_type(), service, - &response_module_and_class)) { + auto multi_callable_constructor = + grpc::string(method->client_streaming() ? "stream" : "unary") + + "_" + grpc::string(method->server_streaming() ? "stream" : "unary"); + grpc::string request_module_and_class; + if (!GetModuleAndMessagePath(method->input_type(), service, + &request_module_and_class)) { + return false; + } + grpc::string response_module_and_class; + if (!GetModuleAndMessagePath(method->output_type(), service, + &response_module_and_class)) { return false; - } - out->Print("self.$Method$ = channel.$MultiCallableConstructor$(\n", - "Method", method->name(), - "MultiCallableConstructor", multi_callable_constructor); - { + } + out->Print("self.$Method$ = channel.$MultiCallableConstructor$(\n", + "Method", method->name(), "MultiCallableConstructor", + multi_callable_constructor); + { IndentScope raii_first_attribute_indent(out); IndentScope raii_second_attribute_indent(out); - out->Print( - "'/$PackageQualifiedService$/$Method$',\n", - "PackageQualifiedService", package_qualified_service_name, - "Method", method->name()); - out->Print( - "request_serializer=$RequestModuleAndClass$.SerializeToString,\n", - "RequestModuleAndClass", request_module_and_class); - out->Print( + out->Print("'/$PackageQualifiedService$/$Method$',\n", + "PackageQualifiedService", package_qualified_service_name, + "Method", method->name()); + out->Print( + "request_serializer=$RequestModuleAndClass$.SerializeToString,\n", + "RequestModuleAndClass", request_module_and_class); + out->Print( "response_deserializer=$ResponseModuleAndClass$.FromString,\n", - "ResponseModuleAndClass", response_module_and_class); - out->Print(")\n"); - } + "ResponseModuleAndClass", response_module_and_class); + out->Print(")\n"); + } } } } @@ -527,11 +532,11 @@ bool PrintServicer(const ServiceDescriptor* service, Printer* out) { PrintAllComments(service, out); for (int i = 0; i < service->method_count(); ++i) { auto method = service->method(i); - grpc::string arg_name = method->client_streaming() ? - "request_iterator" : "request"; + grpc::string arg_name = + method->client_streaming() ? 
"request_iterator" : "request"; out->Print("\n"); - out->Print("def $Method$(self, $ArgName$, context):\n", - "Method", method->name(), "ArgName", arg_name); + out->Print("def $Method$(self, $ArgName$, context):\n", "Method", + method->name(), "ArgName", arg_name); { IndentScope raii_method_indent(out); PrintAllComments(method, out); @@ -544,11 +549,12 @@ bool PrintServicer(const ServiceDescriptor* service, Printer* out) { return true; } -bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name, - const ServiceDescriptor* service, Printer* out) { +bool PrintAddServicerToServer( + const grpc::string& package_qualified_service_name, + const ServiceDescriptor* service, Printer* out) { out->Print("\n\n"); out->Print("def add_$Service$Servicer_to_server(servicer, server):\n", - "Service", service->name()); + "Service", service->name()); { IndentScope raii_class_indent(out); out->Print("rpc_method_handlers = {\n"); @@ -557,34 +563,37 @@ bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name IndentScope raii_dict_second_indent(out); for (int i = 0; i < service->method_count(); ++i) { auto method = service->method(i); - auto method_handler_constructor = + auto method_handler_constructor = grpc::string(method->client_streaming() ? "stream" : "unary") + - "_" + + "_" + grpc::string(method->server_streaming() ? "stream" : "unary") + "_rpc_method_handler"; - grpc::string request_module_and_class; - if (!GetModuleAndMessagePath(method->input_type(), service, - &request_module_and_class)) { - return false; - } - grpc::string response_module_and_class; - if (!GetModuleAndMessagePath(method->output_type(), service, - &response_module_and_class)) { + grpc::string request_module_and_class; + if (!GetModuleAndMessagePath(method->input_type(), service, + &request_module_and_class)) { return false; - } - out->Print("'$Method$': grpc.$MethodHandlerConstructor$(\n", - "Method", method->name(), - "MethodHandlerConstructor", method_handler_constructor); - { + } + grpc::string response_module_and_class; + if (!GetModuleAndMessagePath(method->output_type(), service, + &response_module_and_class)) { + return false; + } + out->Print("'$Method$': grpc.$MethodHandlerConstructor$(\n", "Method", + method->name(), "MethodHandlerConstructor", + method_handler_constructor); + { IndentScope raii_call_first_indent(out); - IndentScope raii_call_second_indent(out); - out->Print("servicer.$Method$,\n", "Method", method->name()); - out->Print("request_deserializer=$RequestModuleAndClass$.FromString,\n", - "RequestModuleAndClass", request_module_and_class); - out->Print("response_serializer=$ResponseModuleAndClass$.SerializeToString,\n", - "ResponseModuleAndClass", response_module_and_class); - } - out->Print("),\n"); + IndentScope raii_call_second_indent(out); + out->Print("servicer.$Method$,\n", "Method", method->name()); + out->Print( + "request_deserializer=$RequestModuleAndClass$.FromString,\n", + "RequestModuleAndClass", request_module_and_class); + out->Print( + "response_serializer=$ResponseModuleAndClass$.SerializeToString," + "\n", + "ResponseModuleAndClass", response_module_and_class); + } + out->Print("),\n"); } } out->Print("}\n"); @@ -593,7 +602,7 @@ bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name IndentScope raii_call_first_indent(out); IndentScope raii_call_second_indent(out); out->Print("'$PackageQualifiedServiceName$', rpc_method_handlers)\n", - "PackageQualifiedServiceName", package_qualified_service_name); + 
"PackageQualifiedServiceName", package_qualified_service_name); } out->Print("server.add_generic_rpc_handlers((generic_handler,))\n"); } @@ -605,10 +614,12 @@ bool PrintPreamble(const FileDescriptor* file, out->Print("import $Package$\n", "Package", config.grpc_package_root); out->Print("from $Package$ import implementations as beta_implementations\n", "Package", config.beta_package_root); - out->Print("from $Package$ import interfaces as beta_interfaces\n", - "Package", config.beta_package_root); + out->Print("from $Package$ import interfaces as beta_interfaces\n", "Package", + config.beta_package_root); out->Print("from grpc.framework.common import cardinality\n"); - out->Print("from grpc.framework.interfaces.face import utilities as face_utilities\n"); + out->Print( + "from grpc.framework.interfaces.face import utilities as " + "face_utilities\n"); return true; } @@ -632,12 +643,14 @@ pair GetServices(const FileDescriptor* file, auto service = file->service(i); auto package_qualified_service_name = package + service->name(); if (!(PrintStub(package_qualified_service_name, service, &out) && - PrintServicer(service, &out) && - PrintAddServicerToServer(package_qualified_service_name, service, &out) && - PrintBetaServicer(service, &out) && - PrintBetaStub(service, &out) && - PrintBetaServerFactory(package_qualified_service_name, service, &out) && - PrintBetaStubFactory(package_qualified_service_name, service, &out))) { + PrintServicer(service, &out) && + PrintAddServicerToServer(package_qualified_service_name, service, + &out) && + PrintBetaServicer(service, &out) && PrintBetaStub(service, &out) && + PrintBetaServerFactory(package_qualified_service_name, service, + &out) && + PrintBetaStubFactory(package_qualified_service_name, service, + &out))) { return make_pair(false, ""); } } diff --git a/src/compiler/python_generator.h b/src/compiler/python_generator.h index 7ed99eff0bf..9bbb83bca6e 100644 --- a/src/compiler/python_generator.h +++ b/src/compiler/python_generator.h @@ -57,6 +57,7 @@ class PythonGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { const grpc::string& parameter, grpc::protobuf::compiler::GeneratorContext* context, grpc::string* error) const; + private: GeneratorConfiguration config_; }; diff --git a/src/compiler/ruby_generator.cc b/src/compiler/ruby_generator.cc index 1501c3f3e01..02202568cb4 100644 --- a/src/compiler/ruby_generator.cc +++ b/src/compiler/ruby_generator.cc @@ -55,17 +55,20 @@ namespace { // Prints out the method using the ruby gRPC DSL. 
void PrintMethod(const MethodDescriptor *method, const grpc::string &package, Printer *out) { - grpc::string input_type = RubyTypeOf(method->input_type()->full_name(), package); + grpc::string input_type = + RubyTypeOf(method->input_type()->full_name(), package); if (method->client_streaming()) { input_type = "stream(" + input_type + ")"; } - grpc::string output_type = RubyTypeOf(method->output_type()->full_name(), package); + grpc::string output_type = + RubyTypeOf(method->output_type()->full_name(), package); if (method->server_streaming()) { output_type = "stream(" + output_type + ")"; } - std::map method_vars = - ListToDict({"mth.name", method->name(), "input.type", input_type, - "output.type", output_type, }); + std::map method_vars = ListToDict({ + "mth.name", method->name(), "input.type", input_type, "output.type", + output_type, + }); out->Print(GetRubyComments(method, true).c_str()); out->Print(method_vars, "rpc :$mth.name$, $input.type$, $output.type$\n"); out->Print(GetRubyComments(method, false).c_str()); @@ -79,8 +82,9 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package, } // Begin the service module - std::map module_vars = - ListToDict({"module.name", CapitalizeFirst(service->name()), }); + std::map module_vars = ListToDict({ + "module.name", CapitalizeFirst(service->name()), + }); out->Print(module_vars, "module $module.name$\n"); out->Indent(); @@ -130,8 +134,9 @@ grpc::string GetServices(const FileDescriptor *file) { } // Write out a file header. - std::map header_comment_vars = ListToDict( - {"file.name", file->name(), "file.package", file->package(), }); + std::map header_comment_vars = ListToDict({ + "file.name", file->name(), "file.package", file->package(), + }); out.Print("# Generated by the protocol buffer compiler. DO NOT EDIT!\n"); out.Print(header_comment_vars, "# Source: $file.name$ for package '$file.package$'\n"); @@ -147,16 +152,18 @@ grpc::string GetServices(const FileDescriptor *file) { // Write out require statemment to import the separately generated file // that defines the messages used by the service. This is generated by the // main ruby plugin. 
- std::map dep_vars = - ListToDict({"dep.name", MessagesRequireName(file), }); + std::map dep_vars = ListToDict({ + "dep.name", MessagesRequireName(file), + }); out.Print(dep_vars, "require '$dep.name$'\n"); // Write out services within the modules out.Print("\n"); std::vector modules = Split(file->package(), '.'); for (size_t i = 0; i < modules.size(); ++i) { - std::map module_vars = - ListToDict({"module.name", CapitalizeFirst(modules[i]), }); + std::map module_vars = ListToDict({ + "module.name", CapitalizeFirst(modules[i]), + }); out.Print(module_vars, "module $module.name$\n"); out.Indent(); } diff --git a/src/compiler/ruby_generator_helpers-inl.h b/src/compiler/ruby_generator_helpers-inl.h index ff6939ed9fe..aa3c22fea6d 100644 --- a/src/compiler/ruby_generator_helpers-inl.h +++ b/src/compiler/ruby_generator_helpers-inl.h @@ -48,7 +48,7 @@ inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file, file->name().find_last_of(".proto") == file->name().size() - 1) { *file_name_or_error = file->name().substr(0, file->name().size() - proto_suffix_length) + - "_services.rb"; + "_services_pb.rb"; return true; } else { *file_name_or_error = "Invalid proto file name: must end with .proto"; @@ -58,7 +58,7 @@ inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file, inline grpc::string MessagesRequireName( const grpc::protobuf::FileDescriptor *file) { - return Replace(file->name(), ".proto", ""); + return Replace(file->name(), ".proto", "_pb"); } // Get leading or trailing comments in a string. Comment lines start with "# ". diff --git a/src/compiler/ruby_generator_map-inl.h b/src/compiler/ruby_generator_map-inl.h index 6b87774f213..75ba7fc8f19 100644 --- a/src/compiler/ruby_generator_map-inl.h +++ b/src/compiler/ruby_generator_map-inl.h @@ -36,8 +36,8 @@ #include "src/compiler/config.h" -#include #include +#include #include #include // NOLINT #include @@ -53,8 +53,7 @@ namespace grpc_ruby_generator { inline std::map ListToDict( const initializer_list &values) { if (values.size() % 2 != 0) { - std::cerr << "Not every 'key' has a value in `values`." - << std::endl; + std::cerr << "Not every 'key' has a value in `values`." 
<< std::endl; } std::map value_map; auto value_iter = values.begin(); diff --git a/src/core/ext/client_config/channel_connectivity.c b/src/core/ext/client_config/channel_connectivity.c index c1220e3a8c3..ce3c13a4ee3 100644 --- a/src/core/ext/client_config/channel_connectivity.c +++ b/src/core/ext/client_config/channel_connectivity.c @@ -59,7 +59,7 @@ grpc_connectivity_state grpc_channel_check_connectivity_state( } gpr_log(GPR_ERROR, "grpc_channel_check_connectivity_state called on something that is " - "not a (u)client channel, but '%s'", + "not a client channel, but '%s'", client_channel_elem->filter->name); grpc_exec_ctx_finish(&exec_ctx); return GRPC_CHANNEL_SHUTDOWN; diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c index 85f9efb3b63..6f6855584a7 100644 --- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c +++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c @@ -45,6 +45,7 @@ #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/compress_filter.h" +#include "src/core/lib/channel/handshaker.h" #include "src/core/lib/channel/http_client_filter.h" #include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/surface/api_trace.h" @@ -63,6 +64,8 @@ typedef struct { grpc_endpoint *tcp; grpc_closure connected; + + grpc_handshake_manager *handshake_mgr; } connector; static void connector_ref(grpc_connector *con) { @@ -74,6 +77,7 @@ static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) { connector *c = (connector *)con; if (gpr_unref(&c->refs)) { /* c->initial_string_buffer does not need to be destroyed */ + grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr); gpr_free(c); } } @@ -83,9 +87,22 @@ static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg, connector_unref(exec_ctx, arg); } +static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint, + grpc_channel_args *args, void *user_data, + grpc_error *error) { + connector *c = user_data; + c->result->transport = + grpc_create_chttp2_transport(exec_ctx, args, endpoint, 1); + GPR_ASSERT(c->result->transport); + grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL, 0); + c->result->channel_args = args; + grpc_closure *notify = c->notify; + c->notify = NULL; + grpc_exec_ctx_sched(exec_ctx, notify, error, NULL); +} + static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { connector *c = arg; - grpc_closure *notify; grpc_endpoint *tcp = c->tcp; if (tcp != NULL) { if (!GPR_SLICE_IS_EMPTY(c->args.initial_connect_string)) { @@ -97,19 +114,17 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { connector_ref(arg); grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer, &c->initial_string_sent); + } else { + grpc_handshake_manager_do_handshake( + exec_ctx, c->handshake_mgr, tcp, c->args.channel_args, + c->args.deadline, NULL /* acceptor */, on_handshake_done, c); } - c->result->transport = - grpc_create_chttp2_transport(exec_ctx, c->args.channel_args, tcp, 1); - grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL, - 0); - GPR_ASSERT(c->result->transport); - c->result->channel_args = grpc_channel_args_copy(c->args.channel_args); } else { memset(c->result, 0, sizeof(*c->result)); + grpc_closure *notify = c->notify; + c->notify = NULL; + grpc_exec_ctx_sched(exec_ctx, notify, 
GRPC_ERROR_REF(error), NULL); } - notify = c->notify; - c->notify = NULL; - grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_REF(error), NULL); } static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {} @@ -171,6 +186,7 @@ static grpc_subchannel *client_channel_factory_create_subchannel( memset(c, 0, sizeof(*c)); c->base.vtable = &connector_vtable; gpr_ref_init(&c->refs, 1); + c->handshake_mgr = grpc_handshake_manager_create(); args->args = final_args; s = grpc_subchannel_create(exec_ctx, &c->base, args); grpc_connector_unref(exec_ctx, &c->base); diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c index 721ba82d8f7..4e33b6fa612 100644 --- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c +++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c @@ -44,6 +44,7 @@ #include "src/core/ext/client_config/resolver_registry.h" #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/handshaker.h" #include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/security/context/security_context.h" #include "src/core/lib/security/credentials/credentials.h" @@ -69,6 +70,11 @@ typedef struct { grpc_endpoint *newly_connecting_endpoint; grpc_closure connected_closure; + + grpc_handshake_manager *handshake_mgr; + + // TODO(roth): Remove once we eliminate on_secure_handshake_done(). + grpc_channel_args *tmp_args; } connector; static void connector_ref(grpc_connector *con) { @@ -80,6 +86,8 @@ static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) { connector *c = (connector *)con; if (gpr_unref(&c->refs)) { /* c->initial_string_buffer does not need to be destroyed */ + grpc_channel_args_destroy(c->tmp_args); + grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr); gpr_free(c); } } @@ -89,13 +97,14 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *secure_endpoint, grpc_auth_context *auth_context) { connector *c = arg; - grpc_closure *notify; gpr_mu_lock(&c->mu); + grpc_error *error = GRPC_ERROR_NONE; if (c->connecting_endpoint == NULL) { memset(c->result, 0, sizeof(*c->result)); gpr_mu_unlock(&c->mu); } else if (status != GRPC_SECURITY_OK) { - gpr_log(GPR_ERROR, "Secure handshake failed with error %d.", status); + error = grpc_error_set_int(GRPC_ERROR_CREATE("Secure handshake failed"), + GRPC_ERROR_INT_SECURITY_STATUS, status); memset(c->result, 0, sizeof(*c->result)); c->connecting_endpoint = NULL; gpr_mu_unlock(&c->mu); @@ -108,25 +117,43 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL, 0); auth_context_arg = grpc_auth_context_to_arg(auth_context); - c->result->channel_args = grpc_channel_args_copy_and_add( - c->args.channel_args, &auth_context_arg, 1); + c->result->channel_args = + grpc_channel_args_copy_and_add(c->tmp_args, &auth_context_arg, 1); } - notify = c->notify; + grpc_closure *notify = c->notify; c->notify = NULL; - grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_NONE, NULL); + grpc_exec_ctx_sched(exec_ctx, notify, error, NULL); +} + +static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint, + grpc_channel_args *args, void *user_data, + grpc_error *error) { + connector *c = user_data; + if (error != GRPC_ERROR_NONE) { + grpc_closure *notify = c->notify; + 
c->notify = NULL; + grpc_exec_ctx_sched(exec_ctx, notify, error, NULL); + } else { + // TODO(roth, jboeuf): Convert security connector handshaking to use new + // handshake API, and then move the code from on_secure_handshake_done() + // into this function. + c->tmp_args = args; + grpc_channel_security_connector_do_handshake( + exec_ctx, c->security_connector, endpoint, c->args.deadline, + on_secure_handshake_done, c); + } } static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { connector *c = arg; - grpc_channel_security_connector_do_handshake( - exec_ctx, c->security_connector, c->connecting_endpoint, c->args.deadline, - on_secure_handshake_done, c); + grpc_handshake_manager_do_handshake( + exec_ctx, c->handshake_mgr, c->connecting_endpoint, c->args.channel_args, + c->args.deadline, NULL /* acceptor */, on_handshake_done, c); } static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { connector *c = arg; - grpc_closure *notify; grpc_endpoint *tcp = c->newly_connecting_endpoint; if (tcp != NULL) { gpr_mu_lock(&c->mu); @@ -142,13 +169,13 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer, &c->initial_string_sent); } else { - grpc_channel_security_connector_do_handshake( - exec_ctx, c->security_connector, tcp, c->args.deadline, - on_secure_handshake_done, c); + grpc_handshake_manager_do_handshake( + exec_ctx, c->handshake_mgr, tcp, c->args.channel_args, + c->args.deadline, NULL /* acceptor */, on_handshake_done, c); } } else { memset(c->result, 0, sizeof(*c->result)); - notify = c->notify; + grpc_closure *notify = c->notify; c->notify = NULL; grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_REF(error), NULL); } @@ -227,6 +254,7 @@ static grpc_subchannel *client_channel_factory_create_subchannel( memset(c, 0, sizeof(*c)); c->base.vtable = &connector_vtable; c->security_connector = f->security_connector; + c->handshake_mgr = grpc_handshake_manager_create(); gpr_mu_init(&c->mu); gpr_ref_init(&c->refs, 1); args->args = final_args; diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c index e5c987925c3..9cd374777e9 100644 --- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c +++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c @@ -37,35 +37,74 @@ #include #include #include + #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/handshaker.h" #include "src/core/lib/channel/http_server_filter.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/tcp_server.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/server.h" -static void new_transport(grpc_exec_ctx *exec_ctx, void *server, - grpc_endpoint *tcp, grpc_pollset *accepting_pollset, - grpc_tcp_server_acceptor *acceptor) { - /* - * Beware that the call to grpc_create_chttp2_transport() has to happen before - * grpc_tcp_server_destroy(). This is fine here, but similar code - * asynchronously doing a handshake instead of calling grpc_tcp_server_start() - * (as in server_secure_chttp2.c) needs to add synchronization to avoid this - * case. 
- */ - grpc_transport *transport = grpc_create_chttp2_transport( - exec_ctx, grpc_server_get_channel_args(server), tcp, 0); - grpc_server_setup_transport(exec_ctx, server, transport, accepting_pollset, - grpc_server_get_channel_args(server)); - grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0); +typedef struct server_connect_state { + grpc_server *server; + grpc_pollset *accepting_pollset; + grpc_tcp_server_acceptor *acceptor; + grpc_handshake_manager *handshake_mgr; +} server_connect_state; + +static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint, + grpc_channel_args *args, void *user_data, + grpc_error *error) { + server_connect_state *state = user_data; + if (error != GRPC_ERROR_NONE) { + const char *error_str = grpc_error_string(error); + gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str); + grpc_error_free_string(error_str); + GRPC_ERROR_UNREF(error); + grpc_handshake_manager_shutdown(exec_ctx, state->handshake_mgr); + } else { + // Beware that the call to grpc_create_chttp2_transport() has to happen + // before grpc_tcp_server_destroy(). This is fine here, but similar code + // asynchronously doing a handshake instead of calling + // grpc_tcp_server_start() (as in server_secure_chttp2.c) needs to add + // synchronization to avoid this case. + grpc_transport *transport = + grpc_create_chttp2_transport(exec_ctx, args, endpoint, 0); + grpc_server_setup_transport(exec_ctx, state->server, transport, + state->accepting_pollset, + grpc_server_get_channel_args(state->server)); + grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0); + } + // Clean up. + grpc_channel_args_destroy(args); + grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr); + gpr_free(state); +} + +static void on_accept(grpc_exec_ctx *exec_ctx, void *server, grpc_endpoint *tcp, + grpc_pollset *accepting_pollset, + grpc_tcp_server_acceptor *acceptor) { + server_connect_state *state = gpr_malloc(sizeof(server_connect_state)); + state->server = server; + state->accepting_pollset = accepting_pollset; + state->acceptor = acceptor; + state->handshake_mgr = grpc_handshake_manager_create(); + // TODO(roth): We should really get this timeout value from channel + // args instead of hard-coding it. 
+ const gpr_timespec deadline = gpr_time_add( + gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN)); + grpc_handshake_manager_do_handshake( + exec_ctx, state->handshake_mgr, tcp, grpc_server_get_channel_args(server), + deadline, acceptor, on_handshake_done, state); } /* Server callback: start listening on our ports */ static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp, grpc_pollset **pollsets, size_t pollset_count) { grpc_tcp_server *tcp = tcpp; - grpc_tcp_server_start(exec_ctx, tcp, pollsets, pollset_count, new_transport, + grpc_tcp_server_start(exec_ctx, tcp, pollsets, pollset_count, on_accept, server); } @@ -74,6 +113,7 @@ static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp, static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp, grpc_closure *destroy_done) { grpc_tcp_server *tcp = tcpp; + grpc_tcp_server_shutdown_listeners(exec_ctx, tcp); grpc_tcp_server_unref(exec_ctx, tcp); grpc_exec_ctx_sched(exec_ctx, destroy_done, GRPC_ERROR_NONE, NULL); } diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c index c42810e9130..ccea15a648d 100644 --- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c +++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c @@ -42,6 +42,7 @@ #include #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/handshaker.h" #include "src/core/lib/channel/http_server_filter.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/resolve_address.h" @@ -58,7 +59,7 @@ typedef struct server_secure_state { grpc_tcp_server *tcp; grpc_server_security_connector *sc; grpc_server_credentials *creds; - int is_shutdown; + bool is_shutdown; gpr_mu mu; gpr_refcount refcount; grpc_closure destroy_closure; @@ -68,6 +69,12 @@ typedef struct server_secure_state { typedef struct server_secure_connect { server_secure_state *state; grpc_pollset *accepting_pollset; + grpc_tcp_server_acceptor *acceptor; + grpc_handshake_manager *handshake_mgr; + // TODO(roth): Remove the following two fields when we eliminate + // grpc_server_security_connector_do_handshake(). 
+ gpr_timespec deadline; + grpc_channel_args *args; } server_secure_connect; static void state_ref(server_secure_state *state) { gpr_ref(&state->refcount); } @@ -89,21 +96,18 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *secure_endpoint, grpc_auth_context *auth_context) { server_secure_connect *state = statep; - grpc_transport *transport; if (status == GRPC_SECURITY_OK) { if (secure_endpoint) { gpr_mu_lock(&state->state->mu); if (!state->state->is_shutdown) { - transport = grpc_create_chttp2_transport( + grpc_transport *transport = grpc_create_chttp2_transport( exec_ctx, grpc_server_get_channel_args(state->state->server), secure_endpoint, 0); - grpc_channel_args *args_copy; grpc_arg args_to_add[2]; args_to_add[0] = grpc_server_credentials_to_arg(state->state->creds); args_to_add[1] = grpc_auth_context_to_arg(auth_context); - args_copy = grpc_channel_args_copy_and_add( - grpc_server_get_channel_args(state->state->server), args_to_add, - GPR_ARRAY_SIZE(args_to_add)); + grpc_channel_args *args_copy = grpc_channel_args_copy_and_add( + state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add)); grpc_server_setup_transport(exec_ctx, state->state->server, transport, state->accepting_pollset, args_copy); grpc_channel_args_destroy(args_copy); @@ -118,10 +122,38 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep, } else { gpr_log(GPR_ERROR, "Secure transport failed with error %d", status); } + grpc_channel_args_destroy(state->args); state_unref(state->state); gpr_free(state); } +static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint, + grpc_channel_args *args, void *user_data, + grpc_error *error) { + server_secure_connect *state = user_data; + if (error != GRPC_ERROR_NONE) { + const char *error_str = grpc_error_string(error); + gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str); + grpc_error_free_string(error_str); + GRPC_ERROR_UNREF(error); + grpc_handshake_manager_shutdown(exec_ctx, state->handshake_mgr); + grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr); + grpc_channel_args_destroy(args); + state_unref(state->state); + gpr_free(state); + return; + } + grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr); + state->handshake_mgr = NULL; + // TODO(roth, jboeuf): Convert security connector handshaking to use new + // handshake API, and then move the code from on_secure_handshake_done() + // into this function. + state->args = args; + grpc_server_security_connector_do_handshake( + exec_ctx, state->state->sc, state->acceptor, endpoint, state->deadline, + on_secure_handshake_done, state); +} + static void on_accept(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *tcp, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor) { @@ -129,11 +161,16 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *tcp, state->state = statep; state_ref(state->state); state->accepting_pollset = accepting_pollset; - grpc_server_security_connector_do_handshake( - exec_ctx, state->state->sc, acceptor, tcp, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_seconds(120, GPR_TIMESPAN)), - on_secure_handshake_done, state); + state->acceptor = acceptor; + state->handshake_mgr = grpc_handshake_manager_create(); + // TODO(roth): We should really get this timeout value from channel + // args instead of hard-coding it. 
+ state->deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), + gpr_time_from_seconds(120, GPR_TIMESPAN)); + grpc_handshake_manager_do_handshake( + exec_ctx, state->handshake_mgr, tcp, + grpc_server_get_channel_args(state->state->server), state->deadline, + acceptor, on_handshake_done, state); } /* Server callback: start listening on our ports */ @@ -162,10 +199,11 @@ static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep, server_secure_state *state = statep; grpc_tcp_server *tcp; gpr_mu_lock(&state->mu); - state->is_shutdown = 1; + state->is_shutdown = true; state->destroy_callback = callback; tcp = state->tcp; gpr_mu_unlock(&state->mu); + grpc_tcp_server_shutdown_listeners(exec_ctx, tcp); grpc_tcp_server_unref(exec_ctx, tcp); } @@ -226,7 +264,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr, state->tcp = tcp; state->sc = sc; state->creds = grpc_server_credentials_ref(creds); - state->is_shutdown = 0; + state->is_shutdown = false; gpr_mu_init(&state->mu); gpr_ref_init(&state->refcount, 1); diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c index bd87253ed32..7d5279b9da4 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c @@ -36,11 +36,14 @@ #include "src/core/lib/debug/trace.h" #include "src/core/lib/transport/metadata.h" +extern int grpc_http_write_state_trace; + void grpc_chttp2_plugin_init(void) { grpc_chttp2_base64_encode_and_huffman_compress = grpc_chttp2_base64_encode_and_huffman_compress_impl; grpc_register_tracer("http", &grpc_http_trace); grpc_register_tracer("flowctl", &grpc_flowctl_trace); + grpc_register_tracer("http_write_state", &grpc_http_write_state_trace); } void grpc_chttp2_plugin_shutdown(void) {} diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 5aae753c07d..d050467a020 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -48,6 +48,7 @@ #include "src/core/ext/transport/chttp2/transport/status_conversion.h" #include "src/core/ext/transport/chttp2/transport/timeout_encoding.h" #include "src/core/lib/http/parser.h" +#include "src/core/lib/iomgr/workqueue.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/string.h" #include "src/core/lib/transport/static_metadata.h" @@ -60,9 +61,9 @@ #define DEFAULT_MAX_HEADER_LIST_SIZE (16 * 1024) #define MAX_CLIENT_STREAM_ID 0x7fffffffu - int grpc_http_trace = 0; int grpc_flowctl_trace = 0; +int grpc_http_write_state_trace = 0; #define TRANSPORT_FROM_WRITING(tw) \ ((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \ @@ -88,10 +89,16 @@ static const grpc_transport_vtable vtable; static void writing_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); static void reading_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); static void parsing_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); +static void initiate_writing(grpc_exec_ctx *exec_ctx, void *t, + grpc_error *error); + +static void start_writing(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t); +static void end_waiting_for_write(grpc_exec_ctx *exec_ctx, + grpc_chttp2_transport *t, grpc_error *error); /** Set a transport level setting, and push it to our peer */ -static void push_setting(grpc_chttp2_transport *t, 
grpc_chttp2_setting_id id, - uint32_t value); +static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, + grpc_chttp2_setting_id id, uint32_t value); /** Start disconnection chain */ static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, @@ -137,7 +144,7 @@ static void check_read_ops(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global); static void incoming_byte_stream_update_flow_control( - grpc_chttp2_transport_global *transport_global, + grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global, grpc_chttp2_stream_global *stream_global, size_t max_size_hint, size_t have_already); static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx, @@ -201,6 +208,7 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx, gpr_free(t); } +/*#define REFCOUNTING_DEBUG 1*/ #ifdef REFCOUNTING_DEBUG #define REF_TRANSPORT(t, r) ref_transport(t, r, __FILE__, __LINE__) #define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t, r, __FILE__, __LINE__) @@ -231,7 +239,7 @@ static void ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); } static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, const grpc_channel_args *channel_args, - grpc_endpoint *ep, uint8_t is_client) { + grpc_endpoint *ep, bool is_client) { size_t i; int j; @@ -273,6 +281,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_closure_init(&t->writing_action, writing_action, t); grpc_closure_init(&t->reading_action, reading_action, t); grpc_closure_init(&t->parsing_action, parsing_action, t); + grpc_closure_init(&t->initiate_writing, initiate_writing, t); gpr_slice_buffer_init(&t->parsing.qbuf); grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser); @@ -286,6 +295,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, gpr_slice_buffer_add( &t->global.qbuf, gpr_slice_from_copied_string(GRPC_CHTTP2_CLIENT_CONNECT_STRING)); + grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "initial_write"); } /* 8 is a random stab in the dark as to a good initial size: it's small enough that it shouldn't waste memory for infrequently used connections, yet @@ -311,11 +321,12 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, /* configure http2 the way we like it */ if (is_client) { - push_setting(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0); - push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0); + push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0); + push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0); } - push_setting(t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, DEFAULT_WINDOW); - push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, + push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, + DEFAULT_WINDOW); + push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, DEFAULT_MAX_HEADER_LIST_SIZE); if (channel_args) { @@ -329,7 +340,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, gpr_log(GPR_ERROR, "%s: must be an integer", GRPC_ARG_MAX_CONCURRENT_STREAMS); } else { - push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, + push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, (uint32_t)channel_args->args[i].value.integer); } } else if (0 == strcmp(channel_args->args[i].key, @@ -368,7 +379,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, gpr_log(GPR_ERROR, "%s: must be non-negative", 
GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER); } else { - push_setting(t, GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE, + push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE, (uint32_t)channel_args->args[i].value.integer); } } else if (0 == strcmp(channel_args->args[i].key, @@ -393,7 +404,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, gpr_log(GPR_ERROR, "%s: must be non-negative", GRPC_ARG_MAX_METADATA_SIZE); } else { - push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, + push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, (uint32_t)channel_args->args[i].value.integer); } } @@ -444,6 +455,9 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_error *error) { if (!t->closed) { + if (grpc_http_write_state_trace) { + gpr_log(GPR_DEBUG, "W:%p close transport", t); + } t->closed = 1; connectivity_state_set(exec_ctx, &t->global, GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error), "close_transport"); @@ -590,7 +604,8 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer_destroy( &s->global.received_trailing_metadata); gpr_slice_buffer_destroy(&s->writing.flow_controlled_buffer); - GRPC_ERROR_UNREF(s->global.removal_error); + GRPC_ERROR_UNREF(s->global.read_closed_error); + GRPC_ERROR_UNREF(s->global.write_closed_error); UNREF_TRANSPORT(exec_ctx, t, "stream"); @@ -634,6 +649,36 @@ grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream( * LOCK MANAGEMENT */ +static const char *write_state_name(grpc_chttp2_write_state state) { + switch (state) { + case GRPC_CHTTP2_WRITING_INACTIVE: + return "INACTIVE"; + case GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER: + return "REQUESTED[p=0]"; + case GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER: + return "REQUESTED[p=1]"; + case GRPC_CHTTP2_WRITE_SCHEDULED: + return "SCHEDULED"; + case GRPC_CHTTP2_WRITING: + return "WRITING"; + case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER: + return "WRITING[p=1]"; + case GRPC_CHTTP2_WRITING_STALE_NO_POLLER: + return "WRITING[p=0]"; + } + GPR_UNREACHABLE_CODE(return "UNKNOWN"); +} + +static void set_write_state(grpc_chttp2_transport *t, + grpc_chttp2_write_state state, const char *reason) { + if (grpc_http_write_state_trace) { + gpr_log(GPR_DEBUG, "W:%p %s -> %s because %s", t, + write_state_name(t->executor.write_state), write_state_name(state), + reason); + } + t->executor.write_state = state; +} + static void finish_global_actions(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) { grpc_chttp2_executor_action_header *hdr; @@ -642,13 +687,6 @@ static void finish_global_actions(grpc_exec_ctx *exec_ctx, GPR_TIMER_BEGIN("finish_global_actions", 0); for (;;) { - if (!t->executor.writing_active && !t->closed && - grpc_chttp2_unlocking_check_writes(exec_ctx, &t->global, &t->writing)) { - t->executor.writing_active = 1; - REF_TRANSPORT(t, "writing"); - prevent_endpoint_shutdown(t); - grpc_exec_ctx_sched(exec_ctx, &t->writing_action, GRPC_ERROR_NONE, NULL); - } check_read_ops(exec_ctx, &t->global); gpr_mu_lock(&t->executor.mu); @@ -669,8 +707,28 @@ static void finish_global_actions(grpc_exec_ctx *exec_ctx, continue; } else { t->executor.global_active = false; + switch (t->executor.write_state) { + case GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER: + set_write_state(t, GRPC_CHTTP2_WRITE_SCHEDULED, "unlocking"); + REF_TRANSPORT(t, "initiate_writing"); + gpr_mu_unlock(&t->executor.mu); + grpc_exec_ctx_sched( + exec_ctx, &t->initiate_writing, GRPC_ERROR_NONE, + t->ep != NULL ? 
grpc_endpoint_get_workqueue(t->ep) : NULL); + break; + case GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER: + start_writing(exec_ctx, t); + gpr_mu_unlock(&t->executor.mu); + break; + case GRPC_CHTTP2_WRITING_INACTIVE: + case GRPC_CHTTP2_WRITING: + case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER: + case GRPC_CHTTP2_WRITING_STALE_NO_POLLER: + case GRPC_CHTTP2_WRITE_SCHEDULED: + gpr_mu_unlock(&t->executor.mu); + break; + } } - gpr_mu_unlock(&t->executor.mu); break; } @@ -741,16 +799,118 @@ void grpc_chttp2_run_with_global_lock(grpc_exec_ctx *exec_ctx, * OUTPUT PROCESSING */ -void grpc_chttp2_become_writable(grpc_chttp2_transport_global *transport_global, - grpc_chttp2_stream_global *stream_global) { +void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx, + grpc_chttp2_transport_global *transport_global, + bool covered_by_poller, const char *reason) { + /* Perform state checks, and transition to a scheduled state if appropriate. + Each time we finish the global lock execution, we check if we need to + write. If we do: + - (if there is a poller surrounding the write) schedule + initiate_writing, which locks and calls initiate_writing_locked to... + - call start_writing, which verifies (under the global lock) that there + are things that need to be written by calling + grpc_chttp2_unlocking_check_writes, and if so schedules writing_action + against the current exec_ctx, to be executed OUTSIDE of the global lock + - eventually writing_action results in grpc_chttp2_terminate_writing being + called, which re-takes the global lock, updates state, checks if we need + to do *another* write immediately, and if so loops back to + start_writing. + + Current problems: + - too much lock entry/exiting + - the writing thread can become stuck indefinitely (punt through the + workqueue periodically to fix) */ + + grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global); + switch (t->executor.write_state) { + case GRPC_CHTTP2_WRITING_INACTIVE: + set_write_state(t, covered_by_poller + ? GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER + : GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER, + reason); + break; + case GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER: + /* nothing to do: write already requested */ + break; + case GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER: + if (covered_by_poller) { + /* upgrade to note poller is available to cover the write */ + set_write_state(t, GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER, reason); + } + break; + case GRPC_CHTTP2_WRITE_SCHEDULED: + /* nothing to do: write already scheduled */ + break; + case GRPC_CHTTP2_WRITING: + set_write_state(t, + covered_by_poller ? 
GRPC_CHTTP2_WRITING_STALE_WITH_POLLER + : GRPC_CHTTP2_WRITING_STALE_NO_POLLER, + reason); + break; + case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER: + /* nothing to do: write already requested */ + break; + case GRPC_CHTTP2_WRITING_STALE_NO_POLLER: + if (covered_by_poller) { + /* upgrade to note poller is available to cover the write */ + set_write_state(t, GRPC_CHTTP2_WRITING_STALE_WITH_POLLER, reason); + } + break; + } +} + +static void start_writing(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) { + GPR_ASSERT(t->executor.write_state == GRPC_CHTTP2_WRITE_SCHEDULED || + t->executor.write_state == GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER); + if (!t->closed && + grpc_chttp2_unlocking_check_writes(exec_ctx, &t->global, &t->writing)) { + set_write_state(t, GRPC_CHTTP2_WRITING, "start_writing"); + REF_TRANSPORT(t, "writing"); + prevent_endpoint_shutdown(t); + grpc_exec_ctx_sched(exec_ctx, &t->writing_action, GRPC_ERROR_NONE, NULL); + } else { + if (t->closed) { + set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE, + "start_writing:transport_closed"); + } else { + set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE, + "start_writing:nothing_to_write"); + } + end_waiting_for_write(exec_ctx, t, GRPC_ERROR_CREATE("Nothing to write")); + if (t->ep && !t->endpoint_reading) { + destroy_endpoint(exec_ctx, t); + } + } +} + +static void initiate_writing_locked(grpc_exec_ctx *exec_ctx, + grpc_chttp2_transport *t, + grpc_chttp2_stream *s_unused, + void *arg_ignored) { + start_writing(exec_ctx, t); + UNREF_TRANSPORT(exec_ctx, t, "initiate_writing"); +} + +static void initiate_writing(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + grpc_chttp2_run_with_global_lock(exec_ctx, arg, NULL, initiate_writing_locked, + NULL, 0); +} + +void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx, + grpc_chttp2_transport_global *transport_global, + grpc_chttp2_stream_global *stream_global, + bool covered_by_poller, const char *reason) { if (!TRANSPORT_FROM_GLOBAL(transport_global)->closed && grpc_chttp2_list_add_writable_stream(transport_global, stream_global)) { GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing"); + grpc_chttp2_initiate_write(exec_ctx, transport_global, covered_by_poller, + reason); } } -static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id, - uint32_t value) { +static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, + grpc_chttp2_setting_id id, uint32_t value) { const grpc_chttp2_setting_parameters *sp = &grpc_chttp2_settings_parameters[id]; uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value); @@ -761,9 +921,22 @@ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id, if (use_value != t->global.settings[GRPC_LOCAL_SETTINGS][id]) { t->global.settings[GRPC_LOCAL_SETTINGS][id] = use_value; t->global.dirtied_local_settings = 1; + grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "push_setting"); } } +static void end_waiting_for_write(grpc_exec_ctx *exec_ctx, + grpc_chttp2_transport *t, grpc_error *error) { + grpc_chttp2_stream_global *stream_global; + while (grpc_chttp2_list_pop_closed_waiting_for_writing(&t->global, + &stream_global)) { + fail_pending_writes(exec_ctx, &t->global, stream_global, + GRPC_ERROR_REF(error)); + GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "finish_writes"); + } + GRPC_ERROR_UNREF(error); +} + static void terminate_writing_with_lock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s_ignored, @@ -778,24 +951,32 @@ static void 
terminate_writing_with_lock(grpc_exec_ctx *exec_ctx, grpc_chttp2_cleanup_writing(exec_ctx, &t->global, &t->writing); - grpc_chttp2_stream_global *stream_global; - while (grpc_chttp2_list_pop_closed_waiting_for_writing(&t->global, - &stream_global)) { - fail_pending_writes(exec_ctx, &t->global, stream_global, - GRPC_ERROR_REF(error)); - GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "finish_writes"); + end_waiting_for_write(exec_ctx, t, error); + + switch (t->executor.write_state) { + case GRPC_CHTTP2_WRITING_INACTIVE: + case GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER: + case GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER: + case GRPC_CHTTP2_WRITE_SCHEDULED: + GPR_UNREACHABLE_CODE(break); + case GRPC_CHTTP2_WRITING: + set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE, "terminate_writing"); + break; + case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER: + set_write_state(t, GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER, + "terminate_writing"); + break; + case GRPC_CHTTP2_WRITING_STALE_NO_POLLER: + set_write_state(t, GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER, + "terminate_writing"); + break; } - /* leave the writing flag up on shutdown to prevent further writes in - unlock() - from starting */ - t->executor.writing_active = 0; if (t->ep && !t->endpoint_reading) { destroy_endpoint(exec_ctx, t); } UNREF_TRANSPORT(exec_ctx, t, "writing"); - GRPC_ERROR_UNREF(error); } void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx, @@ -878,7 +1059,8 @@ static void maybe_start_some_streams( stream_global->id, STREAM_FROM_GLOBAL(stream_global)); stream_global->in_stream_map = true; transport_global->concurrent_stream_count++; - grpc_chttp2_become_writable(transport_global, stream_global); + grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, true, + "new_stream"); } /* cancel out streams that will never be started */ while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID && @@ -1018,9 +1200,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, maybe_start_some_streams(exec_ctx, transport_global); } else { GPR_ASSERT(stream_global->id != 0); - grpc_chttp2_become_writable(transport_global, stream_global); + grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, + true, "op.send_initial_metadata"); } } else { + stream_global->send_trailing_metadata = NULL; grpc_chttp2_complete_closure_step( exec_ctx, transport_global, stream_global, &stream_global->send_initial_metadata_finished, @@ -1042,7 +1226,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, } else { stream_global->send_message = op->send_message; if (stream_global->id != 0) { - grpc_chttp2_become_writable(transport_global, stream_global); + grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, + true, "op.send_message"); } } } @@ -1075,6 +1260,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_list_add_check_read_ops(transport_global, stream_global); } if (stream_global->write_closed) { + stream_global->send_trailing_metadata = NULL; grpc_chttp2_complete_closure_step( exec_ctx, transport_global, stream_global, &stream_global->send_trailing_metadata_finished, @@ -1085,7 +1271,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, } else if (stream_global->id != 0) { /* TODO(ctiller): check if there's flow control for any outstanding bytes before going writable */ - grpc_chttp2_become_writable(transport_global, stream_global); + grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, + true, "op.send_trailing_metadata"); } } } @@ -1106,8 
+1293,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, (stream_global->incoming_frames.head == NULL || stream_global->incoming_frames.head->is_tail)) { incoming_byte_stream_update_flow_control( - transport_global, stream_global, transport_global->stream_lookahead, - 0); + exec_ctx, transport_global, stream_global, + transport_global->stream_lookahead, 0); } grpc_chttp2_list_add_check_read_ops(transport_global, stream_global); } @@ -1135,7 +1322,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, sizeof(*op)); } -static void send_ping_locked(grpc_chttp2_transport *t, grpc_closure *on_recv) { +static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, + grpc_closure *on_recv) { grpc_chttp2_outstanding_ping *p = gpr_malloc(sizeof(*p)); p->next = &t->global.pings; p->prev = p->next->prev; @@ -1150,6 +1338,7 @@ static void send_ping_locked(grpc_chttp2_transport *t, grpc_closure *on_recv) { p->id[7] = (uint8_t)(t->global.ping_counter & 0xff); p->on_recv = on_recv; gpr_slice_buffer_add(&t->global.qbuf, grpc_chttp2_ping_create(0, p->id)); + grpc_chttp2_initiate_write(exec_ctx, &t->global, true, "send_ping"); } static void ack_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, @@ -1209,6 +1398,7 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx, close_transport = grpc_chttp2_has_streams(t) ? GRPC_ERROR_NONE : GRPC_ERROR_CREATE("GOAWAY sent"); + grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "goaway_sent"); } if (op->set_accept_stream) { @@ -1226,7 +1416,7 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx, } if (op->send_ping) { - send_ping_locked(t, op->send_ping); + send_ping_locked(exec_ctx, t, op->send_ping); } if (close_transport != GRPC_ERROR_NONE) { @@ -1414,6 +1604,8 @@ static void cancel_from_api(grpc_exec_ctx *exec_ctx, &transport_global->qbuf, grpc_chttp2_rst_stream_create(stream_global->id, (uint32_t)http_error, &stream_global->stats.outgoing)); + grpc_chttp2_initiate_write(exec_ctx, transport_global, false, + "rst_stream"); } const char *msg = @@ -1473,10 +1665,39 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, } } +static void add_error(grpc_error *error, grpc_error **refs, size_t *nrefs) { + if (error == GRPC_ERROR_NONE) return; + for (size_t i = 0; i < *nrefs; i++) { + if (error == refs[i]) { + return; + } + } + refs[*nrefs] = error; + ++*nrefs; +} + +static grpc_error *removal_error(grpc_error *extra_error, + grpc_chttp2_stream_global *stream_global) { + grpc_error *refs[3]; + size_t nrefs = 0; + add_error(stream_global->read_closed_error, refs, &nrefs); + add_error(stream_global->write_closed_error, refs, &nrefs); + add_error(extra_error, refs, &nrefs); + grpc_error *error = GRPC_ERROR_NONE; + if (nrefs > 0) { + error = GRPC_ERROR_CREATE_REFERENCING("Failed due to stream removal", refs, + nrefs); + } + GRPC_ERROR_UNREF(extra_error); + return error; +} + static void fail_pending_writes(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global, grpc_chttp2_stream_global *stream_global, grpc_error *error) { + error = removal_error(error, stream_global); + stream_global->send_message = NULL; grpc_chttp2_complete_closure_step( exec_ctx, transport_global, stream_global, &stream_global->send_initial_metadata_finished, GRPC_ERROR_REF(error)); @@ -1499,14 +1720,17 @@ void grpc_chttp2_mark_stream_closed( } grpc_chttp2_list_add_check_read_ops(transport_global, stream_global); if (close_reads && !stream_global->read_closed) { + 
stream_global->read_closed_error = GRPC_ERROR_REF(error); stream_global->read_closed = true; stream_global->published_initial_metadata = true; stream_global->published_trailing_metadata = true; decrement_active_streams_locked(exec_ctx, transport_global, stream_global); } if (close_writes && !stream_global->write_closed) { + stream_global->write_closed_error = GRPC_ERROR_REF(error); stream_global->write_closed = true; - if (TRANSPORT_FROM_GLOBAL(transport_global)->executor.writing_active) { + if (TRANSPORT_FROM_GLOBAL(transport_global)->executor.write_state != + GRPC_CHTTP2_WRITING_INACTIVE) { GRPC_CHTTP2_STREAM_REF(stream_global, "finish_writes"); grpc_chttp2_list_add_closed_waiting_for_writing(transport_global, stream_global); @@ -1516,7 +1740,6 @@ void grpc_chttp2_mark_stream_closed( } } if (stream_global->read_closed && stream_global->write_closed) { - stream_global->removal_error = GRPC_ERROR_REF(error); if (stream_global->id != 0 && TRANSPORT_FROM_GLOBAL(transport_global)->executor.parsing_active) { grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global, @@ -1524,7 +1747,8 @@ void grpc_chttp2_mark_stream_closed( } else { if (stream_global->id != 0) { remove_stream(exec_ctx, TRANSPORT_FROM_GLOBAL(transport_global), - stream_global->id, GRPC_ERROR_REF(error)); + stream_global->id, + removal_error(GRPC_ERROR_REF(error), stream_global)); } GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2"); } @@ -1649,6 +1873,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1, 1, error); + grpc_chttp2_initiate_write(exec_ctx, transport_global, false, + "close_from_api"); } typedef struct { @@ -1678,8 +1904,14 @@ static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, } /** update window from a settings change */ +typedef struct { + grpc_chttp2_transport *t; + grpc_exec_ctx *exec_ctx; +} update_global_window_args; + static void update_global_window(void *args, uint32_t id, void *stream) { - grpc_chttp2_transport *t = args; + update_global_window_args *a = args; + grpc_chttp2_transport *t = a->t; grpc_chttp2_stream *s = stream; grpc_chttp2_transport_global *transport_global = &t->global; grpc_chttp2_stream_global *stream_global = &s->global; @@ -1693,7 +1925,8 @@ static void update_global_window(void *args, uint32_t id, void *stream) { is_zero = stream_global->outgoing_window <= 0; if (was_zero && !is_zero) { - grpc_chttp2_become_writable(transport_global, stream_global); + grpc_chttp2_become_writable(a->exec_ctx, transport_global, stream_global, + true, "update_global_window"); } } @@ -1772,6 +2005,7 @@ static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx, static void parsing_action(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_chttp2_transport *t = arg; + grpc_error *err = GRPC_ERROR_NONE; GPR_TIMER_BEGIN("reading_action.parse", 0); size_t i = 0; grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE, @@ -1780,15 +2014,13 @@ static void parsing_action(grpc_exec_ctx *exec_ctx, void *arg, errors[1] = grpc_chttp2_perform_read(exec_ctx, &t->parsing, t->read_buffer.slices[i]); }; - if (i != t->read_buffer.count) { + if (errors[1] == GRPC_ERROR_NONE) { + err = GRPC_ERROR_REF(error); + } else { errors[2] = try_http_parsing(exec_ctx, t); + err = GRPC_ERROR_CREATE_REFERENCING("Failed parsing HTTP/2", errors, + GPR_ARRAY_SIZE(errors)); } - grpc_error *err = - errors[0] == GRPC_ERROR_NONE && errors[1] == GRPC_ERROR_NONE && - errors[2] == GRPC_ERROR_NONE - ? 
GRPC_ERROR_NONE - : GRPC_ERROR_CREATE_REFERENCING("Failed parsing HTTP/2", errors, - GPR_ARRAY_SIZE(errors)); for (i = 0; i < GPR_ARRAY_SIZE(errors); i++) { GRPC_ERROR_UNREF(errors[i]); } @@ -1802,14 +2034,19 @@ static void post_parse_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_transport_global *transport_global = &t->global; grpc_chttp2_transport_parsing *transport_parsing = &t->parsing; /* copy parsing qbuf to global qbuf */ - gpr_slice_buffer_move_into(&t->parsing.qbuf, &t->global.qbuf); + if (t->parsing.qbuf.count > 0) { + gpr_slice_buffer_move_into(&t->parsing.qbuf, &t->global.qbuf); + grpc_chttp2_initiate_write(exec_ctx, transport_global, false, + "parsing_qbuf"); + } /* merge stream lists */ grpc_chttp2_stream_map_move_into(&t->new_stream_map, &t->parsing_stream_map); transport_global->concurrent_stream_count = (uint32_t)grpc_chttp2_stream_map_size(&t->parsing_stream_map); if (transport_parsing->initial_window_update != 0) { + update_global_window_args args = {t, exec_ctx}; grpc_chttp2_stream_map_for_each(&t->parsing_stream_map, - update_global_window, t); + update_global_window, &args); transport_parsing->initial_window_update = 0; } /* handle higher level things */ @@ -1832,7 +2069,7 @@ static void post_parse_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, GPR_ASSERT(stream_global->write_closed); GPR_ASSERT(stream_global->read_closed); remove_stream(exec_ctx, t, stream_global->id, - GRPC_ERROR_REF(stream_global->removal_error)); + removal_error(GRPC_ERROR_NONE, stream_global)); GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2"); } @@ -1855,11 +2092,12 @@ static void post_reading_action_locked(grpc_exec_ctx *exec_ctx, } drop_connection(exec_ctx, t, GRPC_ERROR_REF(error)); t->endpoint_reading = 0; - if (!t->executor.writing_active && t->ep) { - grpc_endpoint_destroy(exec_ctx, t->ep); - t->ep = NULL; - /* safe as we still have a ref for read */ - UNREF_TRANSPORT(exec_ctx, t, "disconnect"); + if (grpc_http_write_state_trace) { + gpr_log(GPR_DEBUG, "R:%p -> 0 ws=%s", t, + write_state_name(t->executor.write_state)); + } + if (t->executor.write_state == GRPC_CHTTP2_WRITING_INACTIVE && t->ep) { + destroy_endpoint(exec_ctx, t); } } else if (!t->closed) { keep_reading = true; @@ -1943,7 +2181,7 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx, } static void incoming_byte_stream_update_flow_control( - grpc_chttp2_transport_global *transport_global, + grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global, grpc_chttp2_stream_global *stream_global, size_t max_size_hint, size_t have_already) { uint32_t max_recv_bytes; @@ -1978,7 +2216,8 @@ static void incoming_byte_stream_update_flow_control( add_max_recv_bytes); grpc_chttp2_list_add_unannounced_incoming_window_available(transport_global, stream_global); - grpc_chttp2_become_writable(transport_global, stream_global); + grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, + false, "read_incoming_stream"); } } @@ -2000,8 +2239,9 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream_global *stream_global = &bs->stream->global; if (bs->is_tail) { - incoming_byte_stream_update_flow_control( - transport_global, stream_global, arg->max_size_hint, bs->slices.length); + incoming_byte_stream_update_flow_control(exec_ctx, transport_global, + stream_global, arg->max_size_hint, + bs->slices.length); } if (bs->slices.count > 0) { *arg->slice = gpr_slice_buffer_take_first(&bs->slices); @@ -2185,7 +2425,7 @@ static char 
*format_flowctl_context_var(const char *context, const char *var, if (context == NULL) { *scope = NULL; gpr_asprintf(&buf, "%s(%" PRId64 ")", var, val); - result = gpr_leftpad(buf, ' ', 40); + result = gpr_leftpad(buf, ' ', 60); gpr_free(buf); return result; } @@ -2198,7 +2438,7 @@ static char *format_flowctl_context_var(const char *context, const char *var, gpr_free(tmp); } gpr_asprintf(&buf, "%s.%s(%" PRId64 ")", underscore_pos + 1, var, val); - result = gpr_leftpad(buf, ' ', 40); + result = gpr_leftpad(buf, ' ', 60); gpr_free(buf); return result; } @@ -2231,7 +2471,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase, tmp_phase = gpr_leftpad(phase, ' ', 8); tmp_scope1 = gpr_leftpad(scope1, ' ', 11); - gpr_asprintf(&prefix, "FLOW %s: %s %s ", phase, clisvr, scope1); + gpr_asprintf(&prefix, "FLOW %s: %s %s ", tmp_phase, clisvr, scope1); gpr_free(tmp_phase); gpr_free(tmp_scope1); diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index 8d79e93ceb3..e1dcf5262a1 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -305,6 +305,22 @@ typedef struct grpc_chttp2_executor_action_header { void *arg; } grpc_chttp2_executor_action_header; +typedef enum { + /** no writing activity */ + GRPC_CHTTP2_WRITING_INACTIVE, + /** write has been requested, but not scheduled yet */ + GRPC_CHTTP2_WRITE_REQUESTED_WITH_POLLER, + GRPC_CHTTP2_WRITE_REQUESTED_NO_POLLER, + /** write has been requested and scheduled against the workqueue */ + GRPC_CHTTP2_WRITE_SCHEDULED, + /** write has been initiated after being reaped from the workqueue */ + GRPC_CHTTP2_WRITING, + /** write has been initiated, AND another write needs to be started once it's + done */ + GRPC_CHTTP2_WRITING_STALE_WITH_POLLER, + GRPC_CHTTP2_WRITING_STALE_NO_POLLER, +} grpc_chttp2_write_state; + struct grpc_chttp2_transport { grpc_transport base; /* must be first */ gpr_refcount refs; @@ -319,10 +335,10 @@ struct grpc_chttp2_transport { /** is a thread currently in the global lock */ bool global_active; - /** is a thread currently writing */ - bool writing_active; /** is a thread currently parsing */ bool parsing_active; + /** write execution state of the transport */ + grpc_chttp2_write_state write_state; grpc_chttp2_executor_action_header *pending_actions_head; grpc_chttp2_executor_action_header *pending_actions_tail; @@ -342,7 +358,8 @@ struct grpc_chttp2_transport { /** global state for reading/writing */ grpc_chttp2_transport_global global; /** state only accessible by the chain of execution that - set writing_active=1 */ + set writing_state >= GRPC_WRITING, and only by the writing closure + chain. 
*/ grpc_chttp2_transport_writing writing; /** state only accessible by the chain of execution that set parsing_active=1 */ @@ -363,6 +380,8 @@ struct grpc_chttp2_transport { grpc_closure reading_action; /** closure to actually do parsing */ grpc_closure parsing_action; + /** closure to initiate writing */ + grpc_closure initiate_writing; /** incoming read bytes */ gpr_slice_buffer read_buffer; @@ -436,8 +455,10 @@ typedef struct { bool seen_error; bool exceeded_metadata_size; - /** the error that resulted in this stream being removed */ - grpc_error *removal_error; + /** the error that resulted in this stream being read-closed */ + grpc_error *read_closed_error; + /** the error that resulted in this stream being write-closed */ + grpc_error *write_closed_error; bool published_initial_metadata; bool published_trailing_metadata; @@ -514,15 +535,20 @@ struct grpc_chttp2_stream { }; /** Transport writing call flow: - chttp2_transport.c calls grpc_chttp2_unlocking_check_writes to see if writes - are required; - if they are, chttp2_transport.c calls grpc_chttp2_perform_writes to do the - writes. - Once writes have been completed (meaning another write could potentially be - started), - grpc_chttp2_terminate_writing is called. This will call - grpc_chttp2_cleanup_writing, at which - point the write phase is complete. */ + grpc_chttp2_initiate_write() is called anywhere that we know bytes need to + go out on the wire. + If no other write has been started, a task is enqueued onto our workqueue. + When that task executes, it obtains the global lock, and gathers the data + to write. + The global lock is dropped and we do the syscall to write. + After writing, a follow-up check is made to see if another round of writing + should be performed. + + The actual call chain is documented in the implementation of this function. 
+ */ +void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx, + grpc_chttp2_transport_global *transport_global, + bool covered_by_poller, const char *reason); /** Someone is unlocking the transport mutex: check to see if writes are required, and schedule them if so */ @@ -610,9 +636,8 @@ int grpc_chttp2_list_pop_check_read_ops( void grpc_chttp2_list_add_writing_stalled_by_transport( grpc_chttp2_transport_writing *transport_writing, grpc_chttp2_stream_writing *stream_writing); -void grpc_chttp2_list_flush_writing_stalled_by_transport( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing, - bool is_window_available); +bool grpc_chttp2_list_flush_writing_stalled_by_transport( + grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing); void grpc_chttp2_list_add_stalled_by_transport( grpc_chttp2_transport_writing *transport_writing, @@ -822,7 +847,9 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, /** add a ref to the stream and add it to the writable list; ref will be dropped in writing.c */ -void grpc_chttp2_become_writable(grpc_chttp2_transport_global *transport_global, - grpc_chttp2_stream_global *stream_global); +void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx, + grpc_chttp2_transport_global *transport_global, + grpc_chttp2_stream_global *stream_global, + bool covered_by_poller, const char *reason); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */ diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c index 84eb5752f16..e1fc0ddee20 100644 --- a/src/core/ext/transport/chttp2/transport/parsing.c +++ b/src/core/ext/transport/chttp2/transport/parsing.c @@ -154,10 +154,8 @@ void grpc_chttp2_publish_reads( transport_parsing, outgoing_window); is_zero = transport_global->outgoing_window <= 0; if (was_zero && !is_zero) { - while (grpc_chttp2_list_pop_stalled_by_transport(transport_global, - &stream_global)) { - grpc_chttp2_become_writable(transport_global, stream_global); - } + grpc_chttp2_initiate_write(exec_ctx, transport_global, false, + "new_global_flow_control"); } if (transport_parsing->incoming_window < @@ -168,6 +166,8 @@ void grpc_chttp2_publish_reads( announce_incoming_window, announce_bytes); GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_parsing, incoming_window, announce_bytes); + grpc_chttp2_initiate_write(exec_ctx, transport_global, false, + "global incoming window"); } /* for each stream that saw an update, fixup global state */ @@ -190,7 +190,8 @@ void grpc_chttp2_publish_reads( outgoing_window); is_zero = stream_global->outgoing_window <= 0; if (was_zero && !is_zero) { - grpc_chttp2_become_writable(transport_global, stream_global); + grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, + false, "stream.read_flow_control"); } stream_global->max_recv_bytes -= (uint32_t)GPR_MIN( diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c index 8f3ab00e6df..2eb5f5f632e 100644 --- a/src/core/ext/transport/chttp2/transport/stream_lists.c +++ b/src/core/ext/transport/chttp2/transport/stream_lists.c @@ -329,6 +329,7 @@ void grpc_chttp2_list_add_writing_stalled_by_transport( grpc_chttp2_transport_writing *transport_writing, grpc_chttp2_stream_writing *stream_writing) { grpc_chttp2_stream *stream = STREAM_FROM_WRITING(stream_writing); + gpr_log(GPR_DEBUG, "writing stalled %d", stream->global.id); if (!stream->included[GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT]) { 
GRPC_CHTTP2_STREAM_REF(&stream->global, "chttp2_writing_stalled"); } @@ -336,27 +337,28 @@ void grpc_chttp2_list_add_writing_stalled_by_transport( GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT); } -void grpc_chttp2_list_flush_writing_stalled_by_transport( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing, - bool is_window_available) { +bool grpc_chttp2_list_flush_writing_stalled_by_transport( + grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing) { grpc_chttp2_stream *stream; + bool out = false; grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing); while (stream_list_pop(transport, &stream, GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) { - if (is_window_available) { - grpc_chttp2_become_writable(&transport->global, &stream->global); - } else { - grpc_chttp2_list_add_stalled_by_transport(transport_writing, - &stream->writing); - } + gpr_log(GPR_DEBUG, "move %d from writing stalled to just stalled", + stream->global.id); + grpc_chttp2_list_add_stalled_by_transport(transport_writing, + &stream->writing); GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &stream->global, "chttp2_writing_stalled"); + out = true; } + return out; } void grpc_chttp2_list_add_stalled_by_transport( grpc_chttp2_transport_writing *transport_writing, grpc_chttp2_stream_writing *stream_writing) { + gpr_log(GPR_DEBUG, "stalled %d", stream_writing->id); stream_list_add(TRANSPORT_FROM_WRITING(transport_writing), STREAM_FROM_WRITING(stream_writing), GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index a2ac8e8ec96..8f7a1f55c6e 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -75,9 +75,13 @@ int grpc_chttp2_unlocking_check_writes( GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window, transport_global, outgoing_window); - bool is_window_available = transport_writing->outgoing_window > 0; - grpc_chttp2_list_flush_writing_stalled_by_transport( - exec_ctx, transport_writing, is_window_available); + if (transport_writing->outgoing_window > 0) { + while (grpc_chttp2_list_pop_stalled_by_transport(transport_global, + &stream_global)) { + grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, + false, "transport.read_flow_control"); + } + } /* for each grpc_chttp2_stream that's become writable, frame it's data (according to available window sizes) and add to the output buffer */ @@ -337,6 +341,12 @@ void grpc_chttp2_cleanup_writing( grpc_chttp2_stream_writing *stream_writing; grpc_chttp2_stream_global *stream_global; + if (grpc_chttp2_list_flush_writing_stalled_by_transport(exec_ctx, + transport_writing)) { + grpc_chttp2_initiate_write(exec_ctx, transport_global, false, + "resume_stalled_stream"); + } + while (grpc_chttp2_list_pop_written_stream( transport_global, transport_writing, &stream_global, &stream_writing)) { if (stream_writing->sent_initial_metadata) { diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h index 653d04f4279..aec61ee7c62 100644 --- a/src/core/lib/channel/channel_args.h +++ b/src/core/lib/channel/channel_args.h @@ -37,6 +37,8 @@ #include #include +// Channel args are intentionally immutable, to avoid the need for locking. 
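To illustrate this convention (a sketch, not part of the patch): code never mutates an existing grpc_channel_args in place; it builds a new copy containing the extra arguments and later destroys that copy, which is the same grpc_channel_args_copy_and_add() / grpc_channel_args_destroy() pattern server_secure_chttp2.c uses earlier in this diff. The key name and helper name below are hypothetical.

#include <grpc/grpc.h>
#include "src/core/lib/channel/channel_args.h"

/* Build a new args instance that extends 'existing' with one extra
   integer argument; 'existing' itself is left untouched. */
static grpc_channel_args *with_extra_arg(const grpc_channel_args *existing) {
  grpc_arg extra;
  extra.type = GRPC_ARG_INTEGER;
  extra.key = "grpc.example.hypothetical_setting"; /* hypothetical key */
  extra.value.integer = 1;
  return grpc_channel_args_copy_and_add(existing, &extra, 1);
}
/* The caller releases the copy with grpc_channel_args_destroy() when done. */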
+ /** Copy the arguments in \a src into a new instance */ grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src); diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c new file mode 100644 index 00000000000..6c3ca198b72 --- /dev/null +++ b/src/core/lib/channel/handshaker.c @@ -0,0 +1,197 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include + +#include +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/handshaker.h" + +// +// grpc_handshaker +// + +void grpc_handshaker_init(const struct grpc_handshaker_vtable* vtable, + grpc_handshaker* handshaker) { + handshaker->vtable = vtable; +} + +void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker) { + handshaker->vtable->destroy(exec_ctx, handshaker); +} + +void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker) { + handshaker->vtable->shutdown(exec_ctx, handshaker); +} + +void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker, + grpc_endpoint* endpoint, + grpc_channel_args* args, + gpr_timespec deadline, + grpc_tcp_server_acceptor* acceptor, + grpc_handshaker_done_cb cb, void* user_data) { + handshaker->vtable->do_handshake(exec_ctx, handshaker, endpoint, args, + deadline, acceptor, cb, user_data); +} + +// +// grpc_handshake_manager +// + +// State used while chaining handshakers. +struct grpc_handshaker_state { + // The index of the handshaker to invoke next. + size_t index; + // The deadline for all handshakers. + gpr_timespec deadline; + // The acceptor to call the handshakers with. + grpc_tcp_server_acceptor* acceptor; + // The final callback and user_data to invoke after the last handshaker. + grpc_handshaker_done_cb final_cb; + void* final_user_data; +}; + +struct grpc_handshake_manager { + // An array of handshakers added via grpc_handshake_manager_add(). 
+ size_t count; + grpc_handshaker** handshakers; + // State used while chaining handshakers. + struct grpc_handshaker_state* state; +}; + +grpc_handshake_manager* grpc_handshake_manager_create() { + grpc_handshake_manager* mgr = gpr_malloc(sizeof(grpc_handshake_manager)); + memset(mgr, 0, sizeof(*mgr)); + return mgr; +} + +static bool is_power_of_2(size_t n) { return (n & (n - 1)) == 0; } + +void grpc_handshake_manager_add(grpc_handshake_manager* mgr, + grpc_handshaker* handshaker) { + // To avoid allocating memory for each handshaker we add, we double + // the number of elements every time we need more. + size_t realloc_count = 0; + if (mgr->count == 0) { + realloc_count = 2; + } else if (mgr->count >= 2 && is_power_of_2(mgr->count)) { + realloc_count = mgr->count * 2; + } + if (realloc_count > 0) { + mgr->handshakers = + gpr_realloc(mgr->handshakers, realloc_count * sizeof(grpc_handshaker*)); + } + mgr->handshakers[mgr->count++] = handshaker; +} + +void grpc_handshake_manager_destroy(grpc_exec_ctx* exec_ctx, + grpc_handshake_manager* mgr) { + for (size_t i = 0; i < mgr->count; ++i) { + grpc_handshaker_destroy(exec_ctx, mgr->handshakers[i]); + } + gpr_free(mgr->handshakers); + gpr_free(mgr); +} + +void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, + grpc_handshake_manager* mgr) { + for (size_t i = 0; i < mgr->count; ++i) { + grpc_handshaker_shutdown(exec_ctx, mgr->handshakers[i]); + } + if (mgr->state != NULL) { + gpr_free(mgr->state); + mgr->state = NULL; + } +} + +// A function used as the handshaker-done callback when chaining +// handshakers together. +static void call_next_handshaker(grpc_exec_ctx* exec_ctx, + grpc_endpoint* endpoint, + grpc_channel_args* args, void* user_data, + grpc_error* error) { + grpc_handshake_manager* mgr = user_data; + GPR_ASSERT(mgr->state != NULL); + GPR_ASSERT(mgr->state->index < mgr->count); + // If we got an error, skip all remaining handshakers and invoke the + // caller-supplied callback immediately. + if (error != GRPC_ERROR_NONE) { + mgr->state->final_cb(exec_ctx, endpoint, args, mgr->state->final_user_data, + error); + return; + } + grpc_handshaker_done_cb cb = call_next_handshaker; + // If this is the last handshaker, use the caller-supplied callback + // and user_data instead of chaining back to this function again. + if (mgr->state->index == mgr->count - 1) { + cb = mgr->state->final_cb; + user_data = mgr->state->final_user_data; + } + // Invoke handshaker. + grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->state->index], + endpoint, args, mgr->state->deadline, + mgr->state->acceptor, cb, user_data); + ++mgr->state->index; + // If this is the last handshaker, clean up state. + if (mgr->state->index == mgr->count) { + gpr_free(mgr->state); + mgr->state = NULL; + } +} + +void grpc_handshake_manager_do_handshake( + grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr, + grpc_endpoint* endpoint, const grpc_channel_args* args, + gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor, + grpc_handshaker_done_cb cb, void* user_data) { + grpc_channel_args* args_copy = grpc_channel_args_copy(args); + if (mgr->count == 0) { + // No handshakers registered, so we just immediately call the done + // callback with the passed-in endpoint. 
+ cb(exec_ctx, endpoint, args_copy, user_data, GRPC_ERROR_NONE); + } else { + GPR_ASSERT(mgr->state == NULL); + mgr->state = gpr_malloc(sizeof(struct grpc_handshaker_state)); + memset(mgr->state, 0, sizeof(*mgr->state)); + mgr->state->deadline = deadline; + mgr->state->acceptor = acceptor; + mgr->state->final_cb = cb; + mgr->state->final_user_data = user_data; + call_next_handshaker(exec_ctx, endpoint, args_copy, mgr, GRPC_ERROR_NONE); + } +} diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h new file mode 100644 index 00000000000..dfc469c417b --- /dev/null +++ b/src/core/lib/channel/handshaker.h @@ -0,0 +1,145 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H +#define GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H + +#include +#include + +#include "src/core/lib/iomgr/closure.h" +#include "src/core/lib/iomgr/endpoint.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/tcp_server.h" + +/// Handshakers are used to perform initial handshakes on a connection +/// before the client sends the initial request. Some examples of what +/// a handshaker can be used for includes support for HTTP CONNECT on +/// the client side and various types of security initialization. +/// +/// In general, handshakers should be used via a handshake manager. + +/// +/// grpc_handshaker +/// + +typedef struct grpc_handshaker grpc_handshaker; + +/// Callback type invoked when a handshaker is done. +/// Takes ownership of \a args. +typedef void (*grpc_handshaker_done_cb)(grpc_exec_ctx* exec_ctx, + grpc_endpoint* endpoint, + grpc_channel_args* args, + void* user_data, grpc_error* error); + +struct grpc_handshaker_vtable { + /// Destroys the handshaker. + void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker); + + /// Shuts down the handshaker (e.g., to clean up when the operation is + /// aborted in the middle). 
+ void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker); + + /// Performs handshaking. When finished, calls \a cb with \a user_data. + /// Takes ownership of \a args. + /// \a acceptor will be NULL for client-side handshakers. + void (*do_handshake)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker, + grpc_endpoint* endpoint, grpc_channel_args* args, + gpr_timespec deadline, + grpc_tcp_server_acceptor* acceptor, + grpc_handshaker_done_cb cb, void* user_data); +}; + +/// Base struct. To subclass, make this the first member of the +/// implementation struct. +struct grpc_handshaker { + const struct grpc_handshaker_vtable* vtable; +}; + +/// Called by concrete implementations to initialize the base struct. +void grpc_handshaker_init(const struct grpc_handshaker_vtable* vtable, + grpc_handshaker* handshaker); + +/// Convenient wrappers for invoking methods via the vtable. +/// These probably do not need to be called from anywhere but +/// grpc_handshake_manager. +void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker); +void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker); +void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker, + grpc_endpoint* endpoint, + grpc_channel_args* args, + gpr_timespec deadline, + grpc_tcp_server_acceptor* acceptor, + grpc_handshaker_done_cb cb, void* user_data); + +/// +/// grpc_handshake_manager +/// + +typedef struct grpc_handshake_manager grpc_handshake_manager; + +/// Creates a new handshake manager. Caller takes ownership. +grpc_handshake_manager* grpc_handshake_manager_create(); + +/// Adds a handshaker to the handshake manager. +/// Takes ownership of \a handshaker. +void grpc_handshake_manager_add(grpc_handshake_manager* mgr, + grpc_handshaker* handshaker); + +/// Destroys the handshake manager. +void grpc_handshake_manager_destroy(grpc_exec_ctx* exec_ctx, + grpc_handshake_manager* mgr); + +/// Shuts down the handshake manager (e.g., to clean up when the operation is +/// aborted in the middle). +/// The caller must still call grpc_handshake_manager_destroy() after +/// calling this function. +void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, + grpc_handshake_manager* mgr); + +/// Invokes handshakers in the order they were added. +/// Does NOT take ownership of \a args. Instead, makes a copy before +/// invoking the first handshaker. +/// \a acceptor will be NULL for client-side handshakers. +/// Invokes \a cb with \a user_data after either a handshaker fails or +/// all handshakers have completed successfully. 
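To make the handshaker/handshake-manager contract described in these comments concrete, here is a rough usage sketch (not part of the patch; the no-op handshaker and start_handshake() helper are hypothetical, and gpr_malloc/gpr_free come from the usual grpc support headers). The declaration of grpc_handshake_manager_do_handshake that this comment documents follows right after the sketch.

/* Hypothetical no-op handshaker, illustrating the first-member subclassing
   and vtable pattern described above. */
typedef struct {
  grpc_handshaker base; /* must be the first member */
} noop_handshaker;

static void noop_destroy(grpc_exec_ctx* exec_ctx, grpc_handshaker* h) {
  gpr_free(h);
}

static void noop_shutdown(grpc_exec_ctx* exec_ctx, grpc_handshaker* h) {}

static void noop_do_handshake(grpc_exec_ctx* exec_ctx, grpc_handshaker* h,
                              grpc_endpoint* endpoint, grpc_channel_args* args,
                              gpr_timespec deadline,
                              grpc_tcp_server_acceptor* acceptor,
                              grpc_handshaker_done_cb cb, void* user_data) {
  /* Nothing to negotiate: hand the endpoint (and ownership of args)
     straight back to the caller. */
  cb(exec_ctx, endpoint, args, user_data, GRPC_ERROR_NONE);
}

static const struct grpc_handshaker_vtable noop_vtable = {
    noop_destroy, noop_shutdown, noop_do_handshake};

static grpc_handshaker* noop_handshaker_create(void) {
  noop_handshaker* h = gpr_malloc(sizeof(*h));
  grpc_handshaker_init(&noop_vtable, &h->base);
  return &h->base;
}

/* Caller side: chain handshakers and kick off the handshake. The manager does
   not take ownership of args (it copies them); on_handshake_done receives
   ownership of the args it is passed and must eventually release them. */
static void start_handshake(grpc_exec_ctx* exec_ctx, grpc_endpoint* endpoint,
                            const grpc_channel_args* args,
                            gpr_timespec deadline,
                            grpc_handshaker_done_cb on_handshake_done) {
  grpc_handshake_manager* mgr = grpc_handshake_manager_create();
  grpc_handshake_manager_add(mgr, noop_handshaker_create());
  grpc_handshake_manager_do_handshake(exec_ctx, mgr, endpoint, args, deadline,
                                      NULL /* acceptor: client side */,
                                      on_handshake_done, NULL /* user_data */);
  /* grpc_handshake_manager_destroy() must still be called once handshaking
     completes. */
}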
+void grpc_handshake_manager_do_handshake( + grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr, + grpc_endpoint* endpoint, const grpc_channel_args* args, + gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor, + grpc_handshaker_done_cb cb, void* user_data); + +#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */ diff --git a/src/core/lib/iomgr/endpoint.c b/src/core/lib/iomgr/endpoint.c index 1ab3733d381..f901fcf9622 100644 --- a/src/core/lib/iomgr/endpoint.c +++ b/src/core/lib/iomgr/endpoint.c @@ -65,3 +65,7 @@ void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) { char* grpc_endpoint_get_peer(grpc_endpoint* ep) { return ep->vtable->get_peer(ep); } + +grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) { + return ep->vtable->get_workqueue(ep); +} diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h index f9808bbda18..894efc0b238 100644 --- a/src/core/lib/iomgr/endpoint.h +++ b/src/core/lib/iomgr/endpoint.h @@ -51,6 +51,7 @@ struct grpc_endpoint_vtable { gpr_slice_buffer *slices, grpc_closure *cb); void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, gpr_slice_buffer *slices, grpc_closure *cb); + grpc_workqueue *(*get_workqueue)(grpc_endpoint *ep); void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_pollset *pollset); void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, @@ -69,6 +70,9 @@ void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, char *grpc_endpoint_get_peer(grpc_endpoint *ep); +/* Retrieve a reference to the workqueue associated with this endpoint */ +grpc_workqueue *grpc_endpoint_get_workqueue(grpc_endpoint *ep); + /* Write slices out to the socket. If the connection is ready for more data after the end of the call, it diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c index cf0fe736a0b..6a63c4d1d18 100644 --- a/src/core/lib/iomgr/ev_epoll_linux.c +++ b/src/core/lib/iomgr/ev_epoll_linux.c @@ -57,6 +57,7 @@ #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" +#include "src/core/lib/iomgr/workqueue.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/block_annotate.h" @@ -113,9 +114,7 @@ struct grpc_fd { grpc_closure *read_closure; grpc_closure *write_closure; - /* The polling island to which this fd belongs to and the mutex protecting the - the field */ - gpr_mu pi_mu; + /* The polling island to which this fd belongs to (protected by mu) */ struct polling_island *polling_island; struct grpc_fd *freelist_next; @@ -152,16 +151,17 @@ static void fd_global_shutdown(void); * Polling island Declarations */ -// #define GRPC_PI_REF_COUNT_DEBUG +//#define GRPC_PI_REF_COUNT_DEBUG #ifdef GRPC_PI_REF_COUNT_DEBUG #define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__) -#define PI_UNREF(p, r) pi_unref_dbg((p), (r), __FILE__, __LINE__) +#define PI_UNREF(exec_ctx, p, r) \ + pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__) #else /* defined(GRPC_PI_REF_COUNT_DEBUG) */ #define PI_ADD_REF(p, r) pi_add_ref((p)) -#define PI_UNREF(p, r) pi_unref((p)) +#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p)) #endif /* !defined(GPRC_PI_REF_COUNT_DEBUG) */ @@ -172,7 +172,7 @@ typedef struct polling_island { Once the ref count becomes zero, this structure is destroyed which means we should ensure that there is never a scenario where a PI_ADD_REF() is racing with a PI_UNREF() that just made the ref_count zero. 
*/ - gpr_refcount ref_count; + gpr_atm ref_count; /* Pointer to the polling_island this merged into. * merged_to value is only set once in polling_island's lifetime (and that too @@ -184,6 +184,9 @@ typedef struct polling_island { * (except mu and ref_count) are invalid and must be ignored. */ gpr_atm merged_to; + /* The workqueue associated with this polling island */ + grpc_workqueue *workqueue; + /* The fd of the underlying epoll set */ int epoll_fd; @@ -191,11 +194,6 @@ typedef struct polling_island { size_t fd_cnt; size_t fd_capacity; grpc_fd **fds; - - /* Polling islands that are no longer needed are kept in a freelist so that - they can be reused. This field points to the next polling island in the - free list */ - struct polling_island *next_free; } polling_island; /******************************************************************************* @@ -253,13 +251,14 @@ struct grpc_pollset_set { * Common helpers */ -static void append_error(grpc_error **composite, grpc_error *error, +static bool append_error(grpc_error **composite, grpc_error *error, const char *desc) { - if (error == GRPC_ERROR_NONE) return; + if (error == GRPC_ERROR_NONE) return true; if (*composite == GRPC_ERROR_NONE) { *composite = GRPC_ERROR_CREATE(desc); } *composite = grpc_error_add_child(*composite, error); + return false; } /******************************************************************************* @@ -275,11 +274,8 @@ static void append_error(grpc_error **composite, grpc_error *error, threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */ static grpc_wakeup_fd polling_island_wakeup_fd; -/* Polling island freelist */ -static gpr_mu g_pi_freelist_mu; -static polling_island *g_pi_freelist = NULL; - -static void polling_island_delete(); /* Forward declaration */ +/* Forward declaration */ +static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi); #ifdef GRPC_TSAN /* Currently TSAN may incorrectly flag data races between epoll_ctl and @@ -293,28 +289,35 @@ gpr_atm g_epoll_sync; #endif /* defined(GRPC_TSAN) */ #ifdef GRPC_PI_REF_COUNT_DEBUG -void pi_add_ref(polling_island *pi); -void pi_unref(polling_island *pi); +static void pi_add_ref(polling_island *pi); +static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi); -void pi_add_ref_dbg(polling_island *pi, char *reason, char *file, int line) { - long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count)); +static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file, + int line) { + long old_cnt = gpr_atm_acq_load(&pi->ref_count); pi_add_ref(pi); gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)", (void *)pi, old_cnt, old_cnt + 1, reason, file, line); } -void pi_unref_dbg(polling_island *pi, char *reason, char *file, int line) { - long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count)); - pi_unref(pi); +static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi, + char *reason, char *file, int line) { + long old_cnt = gpr_atm_acq_load(&pi->ref_count); + pi_unref(exec_ctx, pi); gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)", (void *)pi, old_cnt, (old_cnt - 1), reason, file, line); } #endif -void pi_add_ref(polling_island *pi) { gpr_ref(&pi->ref_count); } +static void pi_add_ref(polling_island *pi) { + gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1); +} -void pi_unref(polling_island *pi) { - /* If ref count went to zero, delete the polling island. 
+static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) { + /* If ref count went to one, we're back to just the workqueue owning a ref. + Unref the workqueue to break the loop. + + If ref count went to zero, delete the polling island. Note that this deletion not be done under a lock. Once the ref count goes to zero, we are guaranteed that no one else holds a reference to the polling island (and that there is no racing pi_add_ref() call either). @@ -322,12 +325,20 @@ void pi_unref(polling_island *pi) { Also, if we are deleting the polling island and the merged_to field is non-empty, we should remove a ref to the merged_to polling island */ - if (gpr_unref(&pi->ref_count)) { - polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - polling_island_delete(pi); - if (next != NULL) { - PI_UNREF(next, "pi_delete"); /* Recursive call */ + switch (gpr_atm_full_fetch_add(&pi->ref_count, -1)) { + case 2: /* last external ref: the only one now owned is by the workqueue */ + GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island"); + break; + case 1: { + polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); + polling_island_delete(exec_ctx, pi); + if (next != NULL) { + PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */ + } + break; } + case 0: + GPR_UNREACHABLE_CODE(return ); } } @@ -462,69 +473,68 @@ static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd, } /* Might return NULL in case of an error */ -static polling_island *polling_island_create(grpc_fd *initial_fd, +static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, + grpc_fd *initial_fd, grpc_error **error) { polling_island *pi = NULL; - char *err_msg; const char *err_desc = "polling_island_create"; - /* Try to get one from the polling island freelist */ - gpr_mu_lock(&g_pi_freelist_mu); - if (g_pi_freelist != NULL) { - pi = g_pi_freelist; - g_pi_freelist = g_pi_freelist->next_free; - pi->next_free = NULL; - } - gpr_mu_unlock(&g_pi_freelist_mu); + *error = GRPC_ERROR_NONE; - /* Create new polling island if we could not get one from the free list */ - if (pi == NULL) { - pi = gpr_malloc(sizeof(*pi)); - gpr_mu_init(&pi->mu); - pi->fd_cnt = 0; - pi->fd_capacity = 0; - pi->fds = NULL; - } + pi = gpr_malloc(sizeof(*pi)); + gpr_mu_init(&pi->mu); + pi->fd_cnt = 0; + pi->fd_capacity = 0; + pi->fds = NULL; + pi->epoll_fd = -1; + pi->workqueue = NULL; - gpr_ref_init(&pi->ref_count, 0); + gpr_atm_rel_store(&pi->ref_count, 0); gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL); pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (pi->epoll_fd < 0) { - gpr_asprintf(&err_msg, "epoll_create1 failed with error %d (%s)", errno, - strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - gpr_free(err_msg); - } else { - polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error); - pi->next_free = NULL; + append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc); + goto done; + } - if (initial_fd != NULL) { - /* Lock the polling island here just in case we got this structure from - the freelist and the polling island lock was not released yet (by the - code that adds the polling island to the freelist) */ - gpr_mu_lock(&pi->mu); - polling_island_add_fds_locked(pi, &initial_fd, 1, true, error); - gpr_mu_unlock(&pi->mu); - } + polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error); + + if (initial_fd != NULL) { + polling_island_add_fds_locked(pi, &initial_fd, 1, true, error); + } + + if 
(append_error(error, grpc_workqueue_create(exec_ctx, &pi->workqueue), + err_desc) && + *error == GRPC_ERROR_NONE) { + polling_island_add_fds_locked(pi, &pi->workqueue->wakeup_read_fd, 1, true, + error); + GPR_ASSERT(pi->workqueue->wakeup_read_fd->polling_island == NULL); + pi->workqueue->wakeup_read_fd->polling_island = pi; + PI_ADD_REF(pi, "fd"); } +done: + if (*error != GRPC_ERROR_NONE) { + if (pi->workqueue != NULL) { + GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island"); + } + polling_island_delete(exec_ctx, pi); + pi = NULL; + } return pi; } -static void polling_island_delete(polling_island *pi) { +static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) { GPR_ASSERT(pi->fd_cnt == 0); - gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL); - - close(pi->epoll_fd); - pi->epoll_fd = -1; - - gpr_mu_lock(&g_pi_freelist_mu); - pi->next_free = g_pi_freelist; - g_pi_freelist = pi; - gpr_mu_unlock(&g_pi_freelist_mu); + if (pi->epoll_fd >= 0) { + close(pi->epoll_fd); + } + gpr_mu_destroy(&pi->mu); + gpr_free(pi->fds); + gpr_free(pi); } /* Attempts to gets the last polling island in the linked list (liked by the @@ -704,9 +714,6 @@ static polling_island *polling_island_merge(polling_island *p, static grpc_error *polling_island_global_init() { grpc_error *error = GRPC_ERROR_NONE; - gpr_mu_init(&g_pi_freelist_mu); - g_pi_freelist = NULL; - error = grpc_wakeup_fd_init(&polling_island_wakeup_fd); if (error == GRPC_ERROR_NONE) { error = grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd); @@ -716,18 +723,6 @@ static grpc_error *polling_island_global_init() { } static void polling_island_global_shutdown() { - polling_island *next; - gpr_mu_lock(&g_pi_freelist_mu); - gpr_mu_unlock(&g_pi_freelist_mu); - while (g_pi_freelist != NULL) { - next = g_pi_freelist->next_free; - gpr_mu_destroy(&g_pi_freelist->mu); - gpr_free(g_pi_freelist->fds); - gpr_free(g_pi_freelist); - g_pi_freelist = next; - } - gpr_mu_destroy(&g_pi_freelist_mu); - grpc_wakeup_fd_destroy(&polling_island_wakeup_fd); } @@ -845,7 +840,6 @@ static grpc_fd *fd_create(int fd, const char *name) { if (new_fd == NULL) { new_fd = gpr_malloc(sizeof(grpc_fd)); gpr_mu_init(&new_fd->mu); - gpr_mu_init(&new_fd->pi_mu); } /* Note: It is not really needed to get the new_fd->mu lock here. If this is a @@ -896,6 +890,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, const char *reason) { bool is_fd_closed = false; grpc_error *error = GRPC_ERROR_NONE; + polling_island *unref_pi = NULL; gpr_mu_lock(&fd->mu); fd->on_done_closure = on_done; @@ -923,21 +918,26 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - Unlock the latest polling island - Set fd->polling_island to NULL (but remove the ref on the polling island before doing this.) */ - gpr_mu_lock(&fd->pi_mu); if (fd->polling_island != NULL) { polling_island *pi_latest = polling_island_lock(fd->polling_island); polling_island_remove_fd_locked(pi_latest, fd, is_fd_closed, &error); gpr_mu_unlock(&pi_latest->mu); - PI_UNREF(fd->polling_island, "fd_orphan"); + unref_pi = fd->polling_island; fd->polling_island = NULL; } - gpr_mu_unlock(&fd->pi_mu); grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, error, NULL); gpr_mu_unlock(&fd->mu); UNREF_BY(fd, 2, reason); /* Drop the reference */ + if (unref_pi != NULL) { + /* Unref stale polling island here, outside the fd lock above. + The polling island owns a workqueue which owns an fd, and unreffing + inside the lock can cause an eventual lock loop that makes TSAN very + unhappy. 
*/ + PI_UNREF(exec_ctx, unref_pi, "fd_orphan"); + } GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error)); } @@ -1037,6 +1037,17 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_mu_unlock(&fd->mu); } +static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { + gpr_mu_lock(&fd->mu); + grpc_workqueue *workqueue = NULL; + if (fd->polling_island != NULL) { + workqueue = + GRPC_WORKQUEUE_REF(fd->polling_island->workqueue, "get_workqueue"); + } + gpr_mu_unlock(&fd->mu); + return workqueue; +} + /******************************************************************************* * Pollset Definitions */ @@ -1227,9 +1238,10 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { gpr_mu_unlock(&fd->mu); } -static void pollset_release_polling_island(grpc_pollset *ps, char *reason) { +static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, + grpc_pollset *ps, char *reason) { if (ps->polling_island != NULL) { - PI_UNREF(ps->polling_island, reason); + PI_UNREF(exec_ctx, ps->polling_island, reason); } ps->polling_island = NULL; } @@ -1242,7 +1254,7 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, pollset->finish_shutdown_called = true; /* Release the ref and set pollset->polling_island to NULL */ - pollset_release_polling_island(pollset, "ps_shutdown"); + pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown"); grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL); } @@ -1281,7 +1293,7 @@ static void pollset_reset(grpc_pollset *pollset) { pollset->finish_shutdown_called = false; pollset->kicked_without_pollers = false; pollset->shutdown_done = NULL; - pollset_release_polling_island(pollset, "ps_reset"); + GPR_ASSERT(pollset->polling_island == NULL); } #define GRPC_EPOLL_MAX_EVENTS 1000 @@ -1309,7 +1321,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, this function (i.e pollset_work_and_unlock()) is called */ if (pollset->polling_island == NULL) { - pollset->polling_island = polling_island_create(NULL, error); + pollset->polling_island = polling_island_create(exec_ctx, NULL, error); if (pollset->polling_island == NULL) { GPR_TIMER_END("pollset_work_and_unlock", 0); return; /* Fatal error. We cannot continue */ @@ -1329,7 +1341,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the polling island to be deleted */ PI_ADD_REF(pi, "ps"); - PI_UNREF(pollset->polling_island, "ps"); + PI_UNREF(exec_ctx, pollset->polling_island, "ps"); pollset->polling_island = pi; } @@ -1400,7 +1412,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, that we got before releasing the polling island lock). This is because pollset->polling_island pointer might get udpated in other parts of the code when there is an island merge while we are doing epoll_wait() above */ - PI_UNREF(pi, "ps_work"); + PI_UNREF(exec_ctx, pi, "ps_work"); GPR_TIMER_END("pollset_work_and_unlock", 0); } @@ -1517,10 +1529,11 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_error *error = GRPC_ERROR_NONE; gpr_mu_lock(&pollset->mu); - gpr_mu_lock(&fd->pi_mu); + gpr_mu_lock(&fd->mu); polling_island *pi_new = NULL; +retry: /* 1) If fd->polling_island and pollset->polling_island are both non-NULL and * equal, do nothing. 
* 2) If fd->polling_island and pollset->polling_island are both NULL, create @@ -1535,15 +1548,44 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, * polling_island fields in both fd and pollset to point to the merged * polling island. */ + + if (fd->orphaned) { + gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&pollset->mu); + /* early out */ + return; + } + if (fd->polling_island == pollset->polling_island) { pi_new = fd->polling_island; if (pi_new == NULL) { - pi_new = polling_island_create(fd, &error); - - GRPC_POLLING_TRACE( - "pollset_add_fd: Created new polling island. pi_new: %p (fd: %d, " - "pollset: %p)", - (void *)pi_new, fd->fd, (void *)pollset); + /* Unlock before creating a new polling island: the polling island will + create a workqueue which creates a file descriptor, and holding an fd + lock here can eventually cause a loop to appear to TSAN (making it + unhappy). We don't think it's a real loop (there's an epoch point where + that loop possibility disappears), but the advantages of keeping TSAN + happy outweigh any performance advantage we might have by keeping the + lock held. */ + gpr_mu_unlock(&fd->mu); + pi_new = polling_island_create(exec_ctx, fd, &error); + gpr_mu_lock(&fd->mu); + /* Need to reverify any assumptions made between the initial lock and + getting to this branch: if they've changed, we need to throw away our + work and figure things out again. */ + if (fd->polling_island != NULL) { + GRPC_POLLING_TRACE( + "pollset_add_fd: Raced creating new polling island. pi_new: %p " + "(fd: %d, pollset: %p)", + (void *)pi_new, fd->fd, (void *)pollset); + PI_ADD_REF(pi_new, "dance_of_destruction"); + PI_UNREF(exec_ctx, pi_new, "dance_of_destruction"); + goto retry; + } else { + GRPC_POLLING_TRACE( + "pollset_add_fd: Created new polling island. 
pi_new: %p (fd: %d, " + "pollset: %p)", + (void *)pi_new, fd->fd, (void *)pollset); + } } } else if (fd->polling_island == NULL) { pi_new = polling_island_lock(pollset->polling_island); @@ -1579,7 +1621,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (fd->polling_island != pi_new) { PI_ADD_REF(pi_new, "fd"); if (fd->polling_island != NULL) { - PI_UNREF(fd->polling_island, "fd"); + PI_UNREF(exec_ctx, fd->polling_island, "fd"); } fd->polling_island = pi_new; } @@ -1587,13 +1629,15 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (pollset->polling_island != pi_new) { PI_ADD_REF(pi_new, "ps"); if (pollset->polling_island != NULL) { - PI_UNREF(pollset->polling_island, "ps"); + PI_UNREF(exec_ctx, pollset->polling_island, "ps"); } pollset->polling_island = pi_new; } - gpr_mu_unlock(&fd->pi_mu); + gpr_mu_unlock(&fd->mu); gpr_mu_unlock(&pollset->mu); + + GRPC_LOG_IF_ERROR("pollset_add_fd", error); } /******************************************************************************* @@ -1744,9 +1788,9 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, void *grpc_fd_get_polling_island(grpc_fd *fd) { polling_island *pi; - gpr_mu_lock(&fd->pi_mu); + gpr_mu_lock(&fd->mu); pi = fd->polling_island; - gpr_mu_unlock(&fd->pi_mu); + gpr_mu_unlock(&fd->mu); return pi; } @@ -1794,6 +1838,7 @@ static const grpc_event_engine_vtable vtable = { .fd_notify_on_read = fd_notify_on_read, .fd_notify_on_write = fd_notify_on_write, .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, + .fd_get_workqueue = fd_get_workqueue, .pollset_init = pollset_init, .pollset_shutdown = pollset_shutdown, diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c index 9e306af5fac..c2107e5e393 100644 --- a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c +++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c @@ -725,6 +725,8 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher, GRPC_FD_UNREF(fd, "poll"); } +static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; } + /******************************************************************************* * pollset_posix.c */ @@ -2006,6 +2008,7 @@ static const grpc_event_engine_vtable vtable = { .fd_notify_on_read = fd_notify_on_read, .fd_notify_on_write = fd_notify_on_write, .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, + .fd_get_workqueue = fd_get_workqueue, .pollset_init = pollset_init, .pollset_shutdown = pollset_shutdown, diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c index 45c0a5e9546..16a5e3083e6 100644 --- a/src/core/lib/iomgr/ev_poll_posix.c +++ b/src/core/lib/iomgr/ev_poll_posix.c @@ -617,6 +617,8 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher, GRPC_FD_UNREF(fd, "poll"); } +static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; } + /******************************************************************************* * pollset_posix.c */ @@ -842,6 +844,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, *worker_hdl = &worker; grpc_error *error = GRPC_ERROR_NONE; + /* Avoid malloc for small number of elements. 
*/ + enum { inline_elements = 96 }; + struct pollfd pollfd_space[inline_elements]; + struct grpc_fd_watcher watcher_space[inline_elements]; + /* pollset->mu already held */ int added_worker = 0; int locked = 1; @@ -897,15 +904,23 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, int r; size_t i, fd_count; nfds_t pfd_count; - /* TODO(ctiller): inline some elements to avoid an allocation */ grpc_fd_watcher *watchers; struct pollfd *pfds; timeout = poll_deadline_to_millis_timeout(deadline, now); - /* TODO(ctiller): perform just one malloc here if we exceed the inline - * case */ - pfds = gpr_malloc(sizeof(*pfds) * (pollset->fd_count + 2)); - watchers = gpr_malloc(sizeof(*watchers) * (pollset->fd_count + 2)); + + if (pollset->fd_count + 2 <= inline_elements) { + pfds = pollfd_space; + watchers = watcher_space; + } else { + /* Allocate one buffer to hold both pfds and watchers arrays */ + const size_t pfd_size = sizeof(*pfds) * (pollset->fd_count + 2); + const size_t watch_size = sizeof(*watchers) * (pollset->fd_count + 2); + void *buf = gpr_malloc(pfd_size + watch_size); + pfds = buf; + watchers = (void *)((char *)buf + pfd_size); + } + fd_count = 0; pfd_count = 2; pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd); @@ -972,8 +987,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } } - gpr_free(pfds); - gpr_free(watchers); + if (pfds != pollfd_space) { + /* pfds and watchers are in the same memory block pointed to by pfds */ + gpr_free(pfds); + } + GPR_TIMER_END("maybe_work_and_unlock", 0); locked = 0; } else { @@ -1234,6 +1252,7 @@ static const grpc_event_engine_vtable vtable = { .fd_notify_on_read = fd_notify_on_read, .fd_notify_on_write = fd_notify_on_write, .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, + .fd_get_workqueue = fd_get_workqueue, .pollset_init = pollset_init, .pollset_shutdown = pollset_shutdown, diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c index a3c1e9db9a0..65366726859 100644 --- a/src/core/lib/iomgr/ev_posix.c +++ b/src/core/lib/iomgr/ev_posix.c @@ -148,6 +148,10 @@ grpc_fd *grpc_fd_create(int fd, const char *name) { return g_event_engine->fd_create(fd, name); } +grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd) { + return g_event_engine->fd_get_workqueue(fd); +} + int grpc_fd_wrapped_fd(grpc_fd *fd) { return g_event_engine->fd_wrapped_fd(fd); } diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h index 579c84ef707..c2aa1756ea2 100644 --- a/src/core/lib/iomgr/ev_posix.h +++ b/src/core/lib/iomgr/ev_posix.h @@ -56,6 +56,7 @@ typedef struct grpc_event_engine_vtable { void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure); bool (*fd_is_shutdown)(grpc_fd *fd); + grpc_workqueue *(*fd_get_workqueue)(grpc_fd *fd); grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx, grpc_fd *fd); @@ -107,6 +108,9 @@ const char *grpc_get_poll_strategy_name(); This takes ownership of closing fd. */ grpc_fd *grpc_fd_create(int fd, const char *name); +/* Get a workqueue that's associated with this fd */ +grpc_workqueue *grpc_fd_get_workqueue(grpc_fd *fd); + /* Return the wrapped fd, or -1 if it has been released or closed. 
*/ int grpc_fd_wrapped_fd(grpc_fd *fd); diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c index c44aafcddf0..ac7785ec135 100644 --- a/src/core/lib/iomgr/exec_ctx.c +++ b/src/core/lib/iomgr/exec_ctx.c @@ -37,6 +37,7 @@ #include #include +#include "src/core/lib/iomgr/workqueue.h" #include "src/core/lib/profiling/timers.h" bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) { @@ -85,14 +86,17 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) { void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_error *error, grpc_workqueue *offload_target_or_null) { - GPR_ASSERT(offload_target_or_null == NULL); - grpc_closure_list_append(&exec_ctx->closure_list, closure, error); + if (offload_target_or_null == NULL) { + grpc_closure_list_append(&exec_ctx->closure_list, closure, error); + } else { + grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error); + GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched"); + } } void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx, grpc_closure_list *list, grpc_workqueue *offload_target_or_null) { - GPR_ASSERT(offload_target_or_null == NULL); grpc_closure_list_move(list, &exec_ctx->closure_list); } diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h index 38f27d9b136..917f332f03d 100644 --- a/src/core/lib/iomgr/exec_ctx.h +++ b/src/core/lib/iomgr/exec_ctx.h @@ -93,7 +93,11 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx); /** Finish any pending work for a grpc_exec_ctx. Must be called before * the instance is destroyed, or work may be lost. */ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx); -/** Add a closure to be executed at the next flush/finish point */ +/** Add a closure to be executed in the future. + If \a offload_target_or_null is NULL, the closure will be executed at the + next exec_ctx.{finish,flush} point. + If \a offload_target_or_null is non-NULL, the closure will be scheduled + against the workqueue, and a reference to the workqueue will be consumed. 
*/ void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_error *error, grpc_workqueue *offload_target_or_null); diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c index 89292a153ed..d67d388b8c9 100644 --- a/src/core/lib/iomgr/iomgr.c +++ b/src/core/lib/iomgr/iomgr.c @@ -45,6 +45,7 @@ #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/network_status_tracker.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/support/env.h" #include "src/core/lib/support/string.h" @@ -62,6 +63,7 @@ void grpc_iomgr_init(void) { grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC)); g_root_object.next = g_root_object.prev = &g_root_object; g_root_object.name = "root"; + grpc_network_status_init(); grpc_iomgr_platform_init(); } @@ -140,6 +142,7 @@ void grpc_iomgr_shutdown(void) { grpc_iomgr_platform_shutdown(); grpc_exec_ctx_global_shutdown(); + grpc_network_status_shutdown(); gpr_mu_destroy(&g_mu); gpr_cv_destroy(&g_rcv); } diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.c index ccbe136db92..b4bb7e3cf7b 100644 --- a/src/core/lib/iomgr/network_status_tracker.c +++ b/src/core/lib/iomgr/network_status_tracker.c @@ -42,9 +42,8 @@ typedef struct endpoint_ll_node { static endpoint_ll_node *head = NULL; static gpr_mu g_endpoint_mutex; -static gpr_once g_once_init = GPR_ONCE_INIT; -static void destroy_network_status_monitor(void) { +void grpc_network_status_shutdown(void) { if (head != NULL) { gpr_log(GPR_ERROR, "Memory leaked as all network endpoints were not shut down"); @@ -52,14 +51,21 @@ static void destroy_network_status_monitor(void) { gpr_mu_destroy(&g_endpoint_mutex); } -static void initialize_network_status_monitor(void) { +void grpc_network_status_init(void) { gpr_mu_init(&g_endpoint_mutex); - atexit(destroy_network_status_monitor); // TODO(makarandd): Install callback with OS to monitor network status. 
} +void grpc_destroy_network_status_monitor() { + for (endpoint_ll_node *curr = head; curr != NULL;) { + endpoint_ll_node *next = curr->next; + gpr_free(curr); + curr = next; + } + gpr_mu_destroy(&g_endpoint_mutex); +} + void grpc_network_status_register_endpoint(grpc_endpoint *ep) { - gpr_once_init(&g_once_init, initialize_network_status_monitor); gpr_mu_lock(&g_endpoint_mutex); if (head == NULL) { head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node)); diff --git a/src/core/lib/iomgr/network_status_tracker.h b/src/core/lib/iomgr/network_status_tracker.h index 74a1aa8135f..67cb645f445 100644 --- a/src/core/lib/iomgr/network_status_tracker.h +++ b/src/core/lib/iomgr/network_status_tracker.h @@ -35,7 +35,11 @@ #define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H #include "src/core/lib/iomgr/endpoint.h" +void grpc_network_status_init(void); +void grpc_network_status_shutdown(void); + void grpc_network_status_register_endpoint(grpc_endpoint *ep); void grpc_network_status_unregister_endpoint(grpc_endpoint *ep); void grpc_network_status_shutdown_all_endpoints(); + #endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */ diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index 2ab45e33ce3..ec21e039448 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -284,7 +284,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, } /* returns true if done, false if pending; if returning true, *error is set */ -#define MAX_WRITE_IOVEC 16 +#define MAX_WRITE_IOVEC 1024 static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) { struct msghdr msg; struct iovec iov[MAX_WRITE_IOVEC]; @@ -450,9 +450,19 @@ static char *tcp_get_peer(grpc_endpoint *ep) { return gpr_strdup(tcp->peer_string); } -static const grpc_endpoint_vtable vtable = { - tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set, - tcp_shutdown, tcp_destroy, tcp_get_peer}; +static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) { + grpc_tcp *tcp = (grpc_tcp *)ep; + return grpc_fd_get_workqueue(tcp->em_fd); +} + +static const grpc_endpoint_vtable vtable = {tcp_read, + tcp_write, + tcp_get_workqueue, + tcp_add_to_pollset, + tcp_add_to_pollset_set, + tcp_shutdown, + tcp_destroy, + tcp_get_peer}; grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size, const char *peer_string) { diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h index 875a6ca14a7..5a25d39a0c4 100644 --- a/src/core/lib/iomgr/tcp_server.h +++ b/src/core/lib/iomgr/tcp_server.h @@ -105,4 +105,8 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, a call (exec_ctx!=NULL) to shutdown_complete. */ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s); +/* Shutdown the fds of listeners. 
*/ +void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, + grpc_tcp_server *s); + #endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_H */ diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c index d3803c3bd0e..38ebd2dbcb0 100644 --- a/src/core/lib/iomgr/tcp_server_posix.c +++ b/src/core/lib/iomgr/tcp_server_posix.c @@ -491,7 +491,8 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) { } for (unsigned i = 0; i < count; i++) { - int fd, port; + int fd = -1; + int port = -1; grpc_dualstack_mode dsmode; err = grpc_create_dualstack_socket(&listener->addr.sockaddr, SOCK_STREAM, 0, &dsmode, &fd); @@ -740,4 +741,17 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { } } +void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, + grpc_tcp_server *s) { + gpr_mu_lock(&s->mu); + /* shutdown all fd's */ + if (s->active_ports) { + grpc_tcp_listener *sp; + for (sp = s->head; sp; sp = sp->next) { + grpc_fd_shutdown(exec_ctx, sp->emfd); + } + } + gpr_mu_unlock(&s->mu); +} + #endif diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c index 7b0966704c2..1b125e7005b 100644 --- a/src/core/lib/iomgr/tcp_server_windows.c +++ b/src/core/lib/iomgr/tcp_server_windows.c @@ -540,4 +540,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, gpr_mu_unlock(&s->mu); } +void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, + grpc_tcp_server *s) {} + #endif /* GPR_WINSOCK_SOCKET */ diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c index 37ab59021e3..35054c42b55 100644 --- a/src/core/lib/iomgr/tcp_windows.c +++ b/src/core/lib/iomgr/tcp_windows.c @@ -389,9 +389,16 @@ static char *win_get_peer(grpc_endpoint *ep) { return gpr_strdup(tcp->peer_string); } -static grpc_endpoint_vtable vtable = { - win_read, win_write, win_add_to_pollset, win_add_to_pollset_set, - win_shutdown, win_destroy, win_get_peer}; +static grpc_workqueue *win_get_workqueue(grpc_endpoint *ep) { return NULL; } + +static grpc_endpoint_vtable vtable = {win_read, + win_write, + win_get_workqueue, + win_add_to_pollset, + win_add_to_pollset_set, + win_shutdown, + win_destroy, + win_get_peer}; grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) { grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp)); diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c index 1ebccf2ee2e..48032412a2e 100644 --- a/src/core/lib/iomgr/udp_server.c +++ b/src/core/lib/iomgr/udp_server.c @@ -60,6 +60,7 @@ #include #include #include +#include "src/core/lib/iomgr/error.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/sockaddr_utils.h" @@ -128,7 +129,7 @@ grpc_udp_server *grpc_udp_server_create(void) { } static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { - grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1, NULL); + grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL); gpr_mu_destroy(&s->mu); gpr_cv_destroy(&s->cv); @@ -138,7 +139,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { } static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, - bool success) { + grpc_error *error) { grpc_udp_server *s = server; gpr_mu_lock(&s->mu); s->destroyed_ports++; @@ -217,14 +218,23 @@ static int prepare_socket(int fd, const struct sockaddr *addr, goto error; } - if 
(!grpc_set_socket_nonblocking(fd, 1) || !grpc_set_socket_cloexec(fd, 1)) { - gpr_log(GPR_ERROR, "Unable to configure socket %d: %s", fd, - strerror(errno)); + if (grpc_set_socket_nonblocking(fd, 1) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Unable to set nonblocking %d: %s", fd, strerror(errno)); + goto error; + } + if (grpc_set_socket_cloexec(fd, 1) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Unable to set cloexec %d: %s", fd, strerror(errno)); + goto error; } - if (grpc_set_socket_ip_pktinfo_if_possible(fd) && - addr->sa_family == AF_INET6) { - grpc_set_socket_ipv6_recvpktinfo_if_possible(fd); + if (grpc_set_socket_ip_pktinfo_if_possible(fd) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Unable to set ip_pktinfo."); + goto error; + } else if (addr->sa_family == AF_INET6) { + if (grpc_set_socket_ipv6_recvpktinfo_if_possible(fd) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Unable to set ipv6_recvpktinfo."); + goto error; + } } GPR_ASSERT(addr_len < ~(socklen_t)0); @@ -241,13 +251,13 @@ static int prepare_socket(int fd, const struct sockaddr *addr, goto error; } - if (!grpc_set_socket_sndbuf(fd, buffer_size_bytes)) { + if (grpc_set_socket_sndbuf(fd, buffer_size_bytes) != GRPC_ERROR_NONE) { gpr_log(GPR_ERROR, "Failed to set send buffer size to %d bytes", buffer_size_bytes); goto error; } - if (!grpc_set_socket_rcvbuf(fd, buffer_size_bytes)) { + if (grpc_set_socket_rcvbuf(fd, buffer_size_bytes) != GRPC_ERROR_NONE) { gpr_log(GPR_ERROR, "Failed to set receive buffer size to %d bytes", buffer_size_bytes); goto error; @@ -263,10 +273,10 @@ error: } /* event manager callback when reads are ready */ -static void on_read(grpc_exec_ctx *exec_ctx, void *arg, bool success) { +static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { server_port *sp = arg; - if (!success) { + if (error != GRPC_ERROR_NONE) { gpr_mu_lock(&sp->server->mu); if (0 == --sp->server->active_ports) { gpr_mu_unlock(&sp->server->mu); @@ -369,7 +379,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr, /* Try listening on IPv6 first. 
*/ addr = (struct sockaddr *)&wild6; addr_len = sizeof(wild6); - fd = grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode); + // TODO(rjshade): Test and propagate the returned grpc_error*: + grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd); allocated_port1 = add_socket_to_server(s, fd, addr, addr_len, read_cb, orphan_cb); if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) { @@ -384,7 +395,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr, addr_len = sizeof(wild4); } - fd = grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode); + // TODO(rjshade): Test and propagate the returned grpc_error*: + grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd); if (fd < 0) { gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno)); } diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h index 5cc40eea505..7156e490d73 100644 --- a/src/core/lib/iomgr/workqueue.h +++ b/src/core/lib/iomgr/workqueue.h @@ -38,6 +38,7 @@ #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/pollset.h" +#include "src/core/lib/iomgr/pollset_set.h" #ifdef GPR_POSIX_SOCKET #include "src/core/lib/iomgr/workqueue_posix.h" #endif @@ -49,35 +50,45 @@ /* grpc_workqueue is forward declared in exec_ctx.h */ -/** Create a work queue */ -grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx, - grpc_workqueue **workqueue); - +/* Deprecated: do not use. + This has *already* been removed in a future commit. */ void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue); -#define GRPC_WORKQUEUE_REFCOUNT_DEBUG +/* Reference counting functions. Always use the macros + (GRPC_WORKQUEUE_{REF,UNREF}). + + Pass in a descriptive reason string for reffing/unreffing as the last + argument to each macro. When GRPC_WORKQUEUE_REFCOUNT_DEBUG is defined, that + string will be printed alongside the refcount. When it is not defined, the + string will be discarded at compilation time. */ + +//#define GRPC_WORKQUEUE_REFCOUNT_DEBUG #ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG #define GRPC_WORKQUEUE_REF(p, r) \ - grpc_workqueue_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_WORKQUEUE_UNREF(cl, p, r) \ - grpc_workqueue_unref((cl), (p), __FILE__, __LINE__, (r)) + (grpc_workqueue_ref((p), __FILE__, __LINE__, (r)), (p)) +#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \ + grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r)) void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line, const char *reason); void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, const char *file, int line, const char *reason); #else -#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p)) +#define GRPC_WORKQUEUE_REF(p, r) (grpc_workqueue_ref((p)), (p)) #define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p)) void grpc_workqueue_ref(grpc_workqueue *workqueue); void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue); #endif -/** Bind this workqueue to a pollset */ -void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx, - grpc_workqueue *workqueue, - grpc_pollset *pollset); +/** Add a work item to a workqueue. Items added to a work queue will be started + in approximately the order they were enqueued, on some thread that may or + may not be the current thread. Successive closures enqueued onto a workqueue + MAY be executed concurrently.
+ + It is generally more expensive to add a closure to a workqueue than to the + execution context, both in terms of CPU work and in execution latency. -/** Add a work item to a workqueue */ + Use work queues when it's important that other threads be given a chance to + tackle some workload. */ void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, grpc_closure *closure, grpc_error *error); diff --git a/src/core/lib/iomgr/workqueue_posix.c b/src/core/lib/iomgr/workqueue_posix.c index 45e0f6063b4..e0d6dac2308 100644 --- a/src/core/lib/iomgr/workqueue_posix.c +++ b/src/core/lib/iomgr/workqueue_posix.c @@ -70,7 +70,7 @@ grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx, static void workqueue_destroy(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) { - GPR_ASSERT(grpc_closure_list_empty(workqueue->closure_list)); + grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL); grpc_fd_shutdown(exec_ctx, workqueue->wakeup_read_fd); } @@ -100,12 +100,6 @@ void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) { } } -void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx, - grpc_workqueue *workqueue, - grpc_pollset *pollset) { - grpc_pollset_add_fd(exec_ctx, pollset, workqueue->wakeup_read_fd); -} - void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) { gpr_mu_lock(&workqueue->mu); grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL); diff --git a/src/core/lib/iomgr/workqueue_posix.h b/src/core/lib/iomgr/workqueue_posix.h index dcb47e7b59d..0f26ba58e27 100644 --- a/src/core/lib/iomgr/workqueue_posix.h +++ b/src/core/lib/iomgr/workqueue_posix.h @@ -50,4 +50,9 @@ struct grpc_workqueue { grpc_closure read_closure; }; +/** Create a work queue. Returns an error if creation fails. If creation + succeeds, sets *workqueue to point to it. */ +grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx, + grpc_workqueue **workqueue); + #endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H */ diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c index 275f040b1cc..23e2dea1859 100644 --- a/src/core/lib/iomgr/workqueue_windows.c +++ b/src/core/lib/iomgr/workqueue_windows.c @@ -37,4 +37,26 @@ #include "src/core/lib/iomgr/workqueue.h" +// Minimal implementation of grpc_workqueue for Windows +// Works by directly enqueuing workqueue items onto the current execution +// context, which is at least correct, if not performant or in the spirit of +// workqueues. 
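As a rough illustration of the offload semantics introduced above (a sketch only, not part of the patch; it assumes the reference returned by grpc_fd_get_workqueue() is the one consumed by grpc_exec_ctx_sched() when a non-NULL offload target is passed, per the exec_ctx.h comment earlier in this change):

#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/workqueue.h"

/* Schedule a closure on the workqueue associated with an fd if one exists,
   so another poller thread may pick it up; otherwise fall back to running it
   at the next exec_ctx flush/finish point. */
static void schedule_maybe_offloaded(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                                     grpc_closure *closure) {
  grpc_workqueue *wq = grpc_fd_get_workqueue(fd); /* ref'd, or NULL */
  if (wq != NULL) {
    /* A non-NULL offload target consumes one workqueue reference. */
    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, wq);
  } else {
    /* The poll-based pollers (and Windows) return NULL: no workqueue. */
    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
  }
}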
+ +void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {} + +#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG +void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line, + const char *reason) {} +void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, + const char *file, int line, const char *reason) {} +#else +void grpc_workqueue_ref(grpc_workqueue *workqueue) {} +void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {} +#endif + +void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, + grpc_closure *closure, grpc_error *error) { + grpc_exec_ctx_sched(exec_ctx, closure, error, NULL); +} + #endif /* GPR_WINDOWS */ diff --git a/src/core/lib/security/transport/handshake.c b/src/core/lib/security/transport/handshake.c index b374ca7633b..540a17283d3 100644 --- a/src/core/lib/security/transport/handshake.c +++ b/src/core/lib/security/transport/handshake.c @@ -357,8 +357,9 @@ void grpc_do_security_handshake( gpr_mu_unlock(&server_connector->mu); } send_handshake_bytes_to_peer(exec_ctx, h); - grpc_timer_init(exec_ctx, &h->timer, deadline, on_timeout, h, - gpr_now(deadline.clock_type)); + grpc_timer_init(exec_ctx, &h->timer, + gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), + on_timeout, h, gpr_now(GPR_CLOCK_MONOTONIC)); } void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c index 7650d68e892..bc50f9d1b00 100644 --- a/src/core/lib/security/transport/secure_endpoint.c +++ b/src/core/lib/security/transport/secure_endpoint.c @@ -360,11 +360,19 @@ static char *endpoint_get_peer(grpc_endpoint *secure_ep) { return grpc_endpoint_get_peer(ep->wrapped_ep); } -static const grpc_endpoint_vtable vtable = { - endpoint_read, endpoint_write, - endpoint_add_to_pollset, endpoint_add_to_pollset_set, - endpoint_shutdown, endpoint_destroy, - endpoint_get_peer}; +static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) { + secure_endpoint *ep = (secure_endpoint *)secure_ep; + return grpc_endpoint_get_workqueue(ep->wrapped_ep); +} + +static const grpc_endpoint_vtable vtable = {endpoint_read, + endpoint_write, + endpoint_get_workqueue, + endpoint_add_to_pollset, + endpoint_add_to_pollset_set, + endpoint_shutdown, + endpoint_destroy, + endpoint_get_peer}; grpc_endpoint *grpc_secure_endpoint_create( struct tsi_frame_protector *protector, grpc_endpoint *transport, diff --git a/src/core/lib/support/log.c b/src/core/lib/support/log.c index bae0957df72..899f1218b60 100644 --- a/src/core/lib/support/log.c +++ b/src/core/lib/support/log.c @@ -79,17 +79,18 @@ void gpr_set_log_verbosity(gpr_log_severity min_severity_to_print) { void gpr_log_verbosity_init() { char *verbosity = gpr_getenv("GRPC_VERBOSITY"); - if (verbosity == NULL) return; - gpr_atm min_severity_to_print = GPR_LOG_VERBOSITY_UNSET; - if (strcmp(verbosity, "DEBUG") == 0) { - min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_DEBUG; - } else if (strcmp(verbosity, "INFO") == 0) { - min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_INFO; - } else if (strcmp(verbosity, "ERROR") == 0) { - min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_ERROR; + gpr_atm min_severity_to_print = GPR_LOG_SEVERITY_ERROR; + if (verbosity != NULL) { + if (strcmp(verbosity, "DEBUG") == 0) { + min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_DEBUG; + } else if (strcmp(verbosity, "INFO") == 0) { + min_severity_to_print = 
(gpr_atm)GPR_LOG_SEVERITY_INFO; + } else if (strcmp(verbosity, "ERROR") == 0) { + min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_ERROR; + } + gpr_free(verbosity); } - gpr_free(verbosity); if ((gpr_atm_no_barrier_load(&g_min_severity_to_print)) == GPR_LOG_VERBOSITY_UNSET) { gpr_atm_no_barrier_store(&g_min_severity_to_print, min_severity_to_print); diff --git a/src/core/lib/support/slice.c b/src/core/lib/support/slice.c index b9a7c77bda6..8a2c0a90865 100644 --- a/src/core/lib/support/slice.c +++ b/src/core/lib/support/slice.c @@ -94,14 +94,16 @@ static void new_slice_unref(void *p) { } } -gpr_slice gpr_slice_new(void *p, size_t len, void (*destroy)(void *)) { +gpr_slice gpr_slice_new_with_user_data(void *p, size_t len, + void (*destroy)(void *), + void *user_data) { gpr_slice slice; new_slice_refcount *rc = gpr_malloc(sizeof(new_slice_refcount)); gpr_ref_init(&rc->refs, 1); rc->rc.ref = new_slice_ref; rc->rc.unref = new_slice_unref; rc->user_destroy = destroy; - rc->user_data = p; + rc->user_data = user_data; slice.refcount = &rc->rc; slice.data.refcounted.bytes = p; @@ -109,6 +111,11 @@ gpr_slice gpr_slice_new(void *p, size_t len, void (*destroy)(void *)) { return slice; } +gpr_slice gpr_slice_new(void *p, size_t len, void (*destroy)(void *)) { + /* Pass "p" to *destroy when the slice is no longer needed. */ + return gpr_slice_new_with_user_data(p, len, destroy, p); +} + /* gpr_slice_new_with_len support structures - we create a refcount object extended with the user provided data pointer & destroy function */ typedef struct new_with_len_slice_refcount { diff --git a/src/core/lib/support/time.c b/src/core/lib/support/time.c index 57f83311945..5a7d043aed8 100644 --- a/src/core/lib/support/time.c +++ b/src/core/lib/support/time.c @@ -80,103 +80,67 @@ gpr_timespec gpr_inf_past(gpr_clock_type type) { return out; } -/* TODO(ctiller): consider merging _nanos, _micros, _millis into a single - function for maintainability. Similarly for _seconds, _minutes, and _hours */ - -gpr_timespec gpr_time_from_nanos(int64_t ns, gpr_clock_type type) { - gpr_timespec result; - result.clock_type = type; - if (ns == INT64_MAX) { - result = gpr_inf_future(type); - } else if (ns == INT64_MIN) { - result = gpr_inf_past(type); - } else if (ns >= 0) { - result.tv_sec = ns / GPR_NS_PER_SEC; - result.tv_nsec = (int32_t)(ns - result.tv_sec * GPR_NS_PER_SEC); +static gpr_timespec to_seconds_from_sub_second_time(int64_t time_in_units, + int64_t units_per_sec, + gpr_clock_type type) { + gpr_timespec out; + if (time_in_units == INT64_MAX) { + out = gpr_inf_future(type); + } else if (time_in_units == INT64_MIN) { + out = gpr_inf_past(type); } else { - /* Calculation carefully formulated to avoid any possible under/overflow. 
*/ - result.tv_sec = (-(999999999 - (ns + GPR_NS_PER_SEC)) / GPR_NS_PER_SEC) - 1; - result.tv_nsec = (int32_t)(ns - result.tv_sec * GPR_NS_PER_SEC); + if (time_in_units >= 0) { + out.tv_sec = time_in_units / units_per_sec; + } else { + out.tv_sec = (-((units_per_sec - 1) - (time_in_units + units_per_sec)) / + units_per_sec) - + 1; + } + out.tv_nsec = (int32_t)((time_in_units - out.tv_sec * units_per_sec) * + GPR_NS_PER_SEC / units_per_sec); + out.clock_type = type; } - return result; + return out; } -gpr_timespec gpr_time_from_micros(int64_t us, gpr_clock_type type) { - gpr_timespec result; - result.clock_type = type; - if (us == INT64_MAX) { - result = gpr_inf_future(type); - } else if (us == INT64_MIN) { - result = gpr_inf_past(type); - } else if (us >= 0) { - result.tv_sec = us / 1000000; - result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000); +static gpr_timespec to_seconds_from_above_second_time(int64_t time_in_units, + int64_t secs_per_unit, + gpr_clock_type type) { + gpr_timespec out; + if (time_in_units >= INT64_MAX / secs_per_unit) { + out = gpr_inf_future(type); + } else if (time_in_units <= INT64_MIN / secs_per_unit) { + out = gpr_inf_past(type); } else { - /* Calculation carefully formulated to avoid any possible under/overflow. */ - result.tv_sec = (-(999999 - (us + 1000000)) / 1000000) - 1; - result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000); + out.tv_sec = time_in_units * secs_per_unit; + out.tv_nsec = 0; + out.clock_type = type; } - return result; + return out; +} + +gpr_timespec gpr_time_from_nanos(int64_t ns, gpr_clock_type type) { + return to_seconds_from_sub_second_time(ns, GPR_NS_PER_SEC, type); +} + +gpr_timespec gpr_time_from_micros(int64_t us, gpr_clock_type type) { + return to_seconds_from_sub_second_time(us, GPR_US_PER_SEC, type); } gpr_timespec gpr_time_from_millis(int64_t ms, gpr_clock_type type) { - gpr_timespec result; - result.clock_type = type; - if (ms == INT64_MAX) { - result = gpr_inf_future(type); - } else if (ms == INT64_MIN) { - result = gpr_inf_past(type); - } else if (ms >= 0) { - result.tv_sec = ms / 1000; - result.tv_nsec = (int32_t)((ms - result.tv_sec * 1000) * 1000000); - } else { - /* Calculation carefully formulated to avoid any possible under/overflow. 
*/ - result.tv_sec = (-(999 - (ms + 1000)) / 1000) - 1; - result.tv_nsec = (int32_t)((ms - result.tv_sec * 1000) * 1000000); - } - return result; + return to_seconds_from_sub_second_time(ms, GPR_MS_PER_SEC, type); } gpr_timespec gpr_time_from_seconds(int64_t s, gpr_clock_type type) { - gpr_timespec result; - result.clock_type = type; - if (s == INT64_MAX) { - result = gpr_inf_future(type); - } else if (s == INT64_MIN) { - result = gpr_inf_past(type); - } else { - result.tv_sec = s; - result.tv_nsec = 0; - } - return result; + return to_seconds_from_sub_second_time(s, 1, type); } gpr_timespec gpr_time_from_minutes(int64_t m, gpr_clock_type type) { - gpr_timespec result; - result.clock_type = type; - if (m >= INT64_MAX / 60) { - result = gpr_inf_future(type); - } else if (m <= INT64_MIN / 60) { - result = gpr_inf_past(type); - } else { - result.tv_sec = m * 60; - result.tv_nsec = 0; - } - return result; + return to_seconds_from_above_second_time(m, 60, type); } gpr_timespec gpr_time_from_hours(int64_t h, gpr_clock_type type) { - gpr_timespec result; - result.clock_type = type; - if (h >= INT64_MAX / 3600) { - result = gpr_inf_future(type); - } else if (h <= INT64_MIN / 3600) { - result = gpr_inf_past(type); - } else { - result.tv_sec = h * 3600; - result.tv_nsec = 0; - } - return result; + return to_seconds_from_above_second_time(h, 3600, type); } gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) { diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c index 32913ad4e88..a482ba43d8a 100644 --- a/src/core/lib/surface/server.c +++ b/src/core/lib/surface/server.c @@ -73,6 +73,7 @@ typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type; typedef struct requested_call { requested_call_type type; + size_t cq_idx; void *tag; grpc_server *server; grpc_completion_queue *cq_bound_to_call; @@ -206,11 +207,11 @@ struct grpc_server { registered_method *registered_methods; /** one request matcher for unregistered methods */ request_matcher unregistered_request_matcher; - /** free list of available requested_calls indices */ - gpr_stack_lockfree *request_freelist; + /** free list of available requested_calls_per_cq indices */ + gpr_stack_lockfree **request_freelist_per_cq; /** requested call backing data */ - requested_call *requested_calls; - size_t max_requested_calls; + requested_call **requested_calls_per_cq; + int max_requested_calls_per_cq; gpr_atm shutdown_flag; uint8_t shutdown_published; @@ -357,7 +358,8 @@ static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx, for (size_t i = 0; i < server->cq_count; i++) { while ((request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[i])) != -1) { - fail_call(exec_ctx, server, i, &server->requested_calls[request_id], + fail_call(exec_ctx, server, i, + &server->requested_calls_per_cq[i][request_id], GRPC_ERROR_REF(error)); } } @@ -392,12 +394,16 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) { } for (i = 0; i < server->cq_count; i++) { GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server"); + if (server->started) { + gpr_stack_lockfree_destroy(server->request_freelist_per_cq[i]); + gpr_free(server->requested_calls_per_cq[i]); + } } - gpr_stack_lockfree_destroy(server->request_freelist); + gpr_free(server->request_freelist_per_cq); + gpr_free(server->requested_calls_per_cq); gpr_free(server->cqs); gpr_free(server->pollsets); gpr_free(server->shutdown_tags); - gpr_free(server->requested_calls); gpr_free(server); } @@ -460,11 +466,13 @@ static void done_request_event(grpc_exec_ctx *exec_ctx, 
void *req, requested_call *rc = req; grpc_server *server = rc->server; - if (rc >= server->requested_calls && - rc < server->requested_calls + server->max_requested_calls) { - GPR_ASSERT(rc - server->requested_calls <= INT_MAX); - gpr_stack_lockfree_push(server->request_freelist, - (int)(rc - server->requested_calls)); + if (rc >= server->requested_calls_per_cq[rc->cq_idx] && + rc < server->requested_calls_per_cq[rc->cq_idx] + + server->max_requested_calls_per_cq) { + GPR_ASSERT(rc - server->requested_calls_per_cq[rc->cq_idx] <= INT_MAX); + gpr_stack_lockfree_push( + server->request_freelist_per_cq[rc->cq_idx], + (int)(rc - server->requested_calls_per_cq[rc->cq_idx])); } else { gpr_free(req); } @@ -540,7 +548,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg, calld->state = ACTIVATED; gpr_mu_unlock(&calld->mu_state); publish_call(exec_ctx, server, calld, cq_idx, - &server->requested_calls[request_id]); + &server->requested_calls_per_cq[cq_idx][request_id]); return; /* early out */ } } @@ -980,8 +988,6 @@ void grpc_server_register_non_listening_completion_queue( } grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) { - size_t i; - GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved)); grpc_server *server = gpr_malloc(sizeof(grpc_server)); @@ -999,15 +1005,7 @@ grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) { &server->root_channel_data; /* TODO(ctiller): expose a channel_arg for this */ - server->max_requested_calls = 32768; - server->request_freelist = - gpr_stack_lockfree_create(server->max_requested_calls); - for (i = 0; i < (size_t)server->max_requested_calls; i++) { - gpr_stack_lockfree_push(server->request_freelist, (int)i); - } - server->requested_calls = gpr_malloc(server->max_requested_calls * - sizeof(*server->requested_calls)); - + server->max_requested_calls_per_cq = 32768; server->channel_args = grpc_channel_args_copy(args); return server; @@ -1067,16 +1065,28 @@ void grpc_server_start(grpc_server *server) { server->started = true; size_t pollset_count = 0; server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count); + server->request_freelist_per_cq = + gpr_malloc(sizeof(*server->request_freelist_per_cq) * server->cq_count); + server->requested_calls_per_cq = + gpr_malloc(sizeof(*server->requested_calls_per_cq) * server->cq_count); for (i = 0; i < server->cq_count; i++) { if (!grpc_cq_is_non_listening_server_cq(server->cqs[i])) { server->pollsets[pollset_count++] = grpc_cq_pollset(server->cqs[i]); } + server->request_freelist_per_cq[i] = + gpr_stack_lockfree_create((size_t)server->max_requested_calls_per_cq); + for (int j = 0; j < server->max_requested_calls_per_cq; j++) { + gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j); + } + server->requested_calls_per_cq[i] = + gpr_malloc((size_t)server->max_requested_calls_per_cq * + sizeof(*server->requested_calls_per_cq[i])); } request_matcher_init(&server->unregistered_request_matcher, - server->max_requested_calls, server); + (size_t)server->max_requested_calls_per_cq, server); for (registered_method *rm = server->registered_methods; rm; rm = rm->next) { - request_matcher_init(&rm->request_matcher, server->max_requested_calls, - server); + request_matcher_init(&rm->request_matcher, + (size_t)server->max_requested_calls_per_cq, server); } for (l = server->listeners; l; l = l->next) { @@ -1308,11 +1318,13 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx, GRPC_ERROR_CREATE("Server Shutdown")); return 
GRPC_CALL_OK; } - request_id = gpr_stack_lockfree_pop(server->request_freelist); + request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]); if (request_id == -1) { /* out of request ids: just fail this one */ fail_call(exec_ctx, server, cq_idx, rc, - GRPC_ERROR_CREATE("Server Shutdown")); + grpc_error_set_int(GRPC_ERROR_CREATE("Out of request ids"), + GRPC_ERROR_INT_LIMIT, + server->max_requested_calls_per_cq)); return GRPC_CALL_OK; } switch (rc->type) { @@ -1323,7 +1335,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx, rm = &rc->data.registered.registered_method->request_matcher; break; } - server->requested_calls[request_id] = *rc; + server->requested_calls_per_cq[cq_idx][request_id] = *rc; gpr_free(rc); if (gpr_stack_lockfree_push(rm->requests_per_cq[cq_idx], request_id)) { /* this was the first queued request: we need to lock and start @@ -1347,7 +1359,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx, calld->state = ACTIVATED; gpr_mu_unlock(&calld->mu_state); publish_call(exec_ctx, server, calld, cq_idx, - &server->requested_calls[request_id]); + &server->requested_calls_per_cq[cq_idx][request_id]); } gpr_mu_lock(&server->mu_call); } @@ -1383,6 +1395,7 @@ grpc_call_error grpc_server_request_call( } grpc_cq_begin_op(cq_for_notification, tag); details->reserved = NULL; + rc->cq_idx = cq_idx; rc->type = BATCH_CALL; rc->server = server; rc->tag = tag; @@ -1431,6 +1444,7 @@ grpc_call_error grpc_server_request_registered_call( goto done; } grpc_cq_begin_op(cq_for_notification, tag); + rc->cq_idx = cq_idx; rc->type = REGISTERED_CALL; rc->server = server; rc->tag = tag; diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c index 53f3c438541..19420750545 100644 --- a/src/core/lib/surface/version.c +++ b/src/core/lib/surface/version.c @@ -36,4 +36,4 @@ #include -const char *grpc_version_string(void) { return "0.16.0-dev"; } +const char *grpc_version_string(void) { return "1.1.0-dev"; } diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c index 054f112127b..68d05e3a858 100644 --- a/src/core/lib/transport/connectivity_state.c +++ b/src/core/lib/transport/connectivity_state.c @@ -179,6 +179,9 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, while ((w = tracker->watchers) != NULL) { *w->current = tracker->current_state; tracker->watchers = w->next; + if (grpc_connectivity_state_trace) { + gpr_log(GPR_DEBUG, "NOTIFY: %p", w->notify); + } grpc_exec_ctx_sched(exec_ctx, w->notify, GRPC_ERROR_REF(tracker->current_error), NULL); gpr_free(w); diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h index 08c0a237c97..e33fc5c761f 100644 --- a/src/core/lib/transport/transport.h +++ b/src/core/lib/transport/transport.h @@ -138,7 +138,7 @@ typedef struct grpc_transport_stream_op { /** If != GRPC_ERROR_NONE, cancel this stream */ grpc_error *cancel_error; - /** If != GRPC_ERROR, send grpc-status, grpc-message, and close this + /** If != GRPC_ERROR_NONE, send grpc-status, grpc-message, and close this stream for both reading and writing */ grpc_error *close_error; diff --git a/src/cpp/ext/proto_server_reflection.h b/src/cpp/ext/proto_server_reflection.h index 23c130513d1..f66f3c2c9ae 100644 --- a/src/cpp/ext/proto_server_reflection.h +++ b/src/cpp/ext/proto_server_reflection.h @@ -30,13 +30,31 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
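The server.c hunks above replace the single global requested_calls array and freelist with one array and one freelist per completion queue, indexed by rc->cq_idx; when a queue's freelist is empty the request now fails with GRPC_ERROR_INT_LIMIT instead of a generic shutdown error. A toy C sketch of that per-queue index-recycling idea (invented names, plain array stack; the patch itself uses gpr_stack_lockfree, so this is orientation only):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int *free_ids;  /* stack of available slot indices for one queue */
  int free_count;
  int capacity;
} slot_freelist;

static void freelist_init(slot_freelist *fl, int capacity) {
  fl->free_ids = malloc(sizeof(int) * (size_t)capacity);
  fl->capacity = capacity;
  fl->free_count = 0;
  for (int i = 0; i < capacity; i++) fl->free_ids[fl->free_count++] = i;
}

/* Returns a slot index, or -1 when this queue is out of request ids
   (the case the patch now reports as GRPC_ERROR_INT_LIMIT). */
static int freelist_pop(slot_freelist *fl) {
  return fl->free_count > 0 ? fl->free_ids[--fl->free_count] : -1;
}

static void freelist_push(slot_freelist *fl, int id) {
  fl->free_ids[fl->free_count++] = id;
}

static void freelist_destroy(slot_freelist *fl) { free(fl->free_ids); }

int main(void) {
  slot_freelist fl;
  freelist_init(&fl, 2);
  int a = freelist_pop(&fl), b = freelist_pop(&fl);
  assert(a != -1 && b != -1);
  assert(freelist_pop(&fl) == -1); /* out of request ids for this queue */
  freelist_push(&fl, a);           /* completing a call recycles its slot */
  assert(freelist_pop(&fl) == a);
  freelist_destroy(&fl);
  printf("per-queue freelist sketch ok\n");
  return 0;
}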
* */ + +/* + - If the generated header `grpc++/ext/reflection.grpc.pb.h` needs to be + installed, target `grpc++_reflection` in `build.yaml` should use the + filegroup `grpc++_reflection_proto`, and GRPC_NO_GENERATED_CODE should not + be defined. + - If the server reflection library needs to generate `reflection.grpc.pb.h` + from `reflection.proto` at compile time, the generated header + `grpc++/ext/reflection.grpc.pb.h` should not be installed. In this case, + target `grpc++_reflection` should depend on `grpc++_reflection_codegen`, and + GRPC_NO_GENERATED_CODE should be defined. +*/ + #ifndef GRPC_INTERNAL_CPP_EXT_PROTO_SERVER_REFLECTION_H #define GRPC_INTERNAL_CPP_EXT_PROTO_SERVER_REFLECTION_H #include #include +// GRPC_NO_GENERATED_CODE indicates generated pb files should not be used +#ifdef GRPC_NO_GENERATED_CODE +#include "src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h" +#else #include +#endif // GRPC_NO_GENERATED_CODE #include namespace grpc { diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc index fb4c68ebe49..af04fd4ca64 100644 --- a/src/cpp/server/server.cc +++ b/src/cpp/server/server.cc @@ -281,6 +281,7 @@ Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned, : max_message_size_(max_message_size), started_(false), shutdown_(false), + shutdown_notified_(false), num_running_cb_(0), sync_methods_(new std::list), has_generic_service_(false), @@ -462,13 +463,16 @@ void Server::ShutdownInternal(gpr_timespec deadline) { while (num_running_cb_ != 0) { callback_cv_.wait(lock); } + + shutdown_notified_ = true; + shutdown_cv_.notify_all(); } } void Server::Wait() { grpc::unique_lock lock(mu_); - while (num_running_cb_ != 0) { - callback_cv_.wait(lock); + while (started_ && !shutdown_notified_) { + shutdown_cv_.wait(lock); } } diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc index 43117fd1e95..1ca6a2b906e 100644 --- a/src/cpp/server/server_context.cc +++ b/src/cpp/server/server_context.cc @@ -129,7 +129,8 @@ ServerContext::ServerContext() deadline_(gpr_inf_future(GPR_CLOCK_REALTIME)), call_(nullptr), cq_(nullptr), - sent_initial_metadata_(false) {} + sent_initial_metadata_(false), + compression_level_set_(false) {} ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata* metadata, size_t metadata_count) @@ -139,7 +140,8 @@ ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata* metadata, deadline_(deadline), call_(nullptr), cq_(nullptr), - sent_initial_metadata_(false) { + sent_initial_metadata_(false), + compression_level_set_(false) { for (size_t i = 0; i < metadata_count; i++) { client_metadata_.insert(std::pair( metadata[i].key, @@ -194,15 +196,6 @@ bool ServerContext::IsCancelled() const { } } -void ServerContext::set_compression_level(grpc_compression_level level) { - // TODO(dgq): get rid of grpc_call_compression_for_level and propagate the - // compression level by adding a new argument to - // CallOpSendInitialMetadata::SendInitialMetadata. 
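The Server::Wait() change above now blocks on shutdown_cv_ until ShutdownInternal() sets shutdown_notified_, rather than waiting for the running-callback count to drop to zero. The same wait-until-notified pattern, sketched with plain pthreads (not the grpc::mutex/condition_variable wrappers the C++ code actually uses; names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mu;
  pthread_cond_t shutdown_cv;
  bool started;
  bool shutdown_notified;
} server_state;

/* Shutdown path: runs once all in-flight callbacks have drained. */
static void notify_shutdown(server_state *s) {
  pthread_mutex_lock(&s->mu);
  s->shutdown_notified = true;
  pthread_cond_broadcast(&s->shutdown_cv);
  pthread_mutex_unlock(&s->mu);
}

/* Wait(): blocks until shutdown is notified; returns at once if never started. */
static void wait_for_shutdown(server_state *s) {
  pthread_mutex_lock(&s->mu);
  while (s->started && !s->shutdown_notified) {
    pthread_cond_wait(&s->shutdown_cv, &s->mu);
  }
  pthread_mutex_unlock(&s->mu);
}

int main(void) {
  static server_state s = {PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                           true, false};
  notify_shutdown(&s);   /* normally called from the shutdown thread */
  wait_for_shutdown(&s); /* returns immediately once notified */
  printf("wait-for-shutdown sketch ok\n");
  return 0;
}

The predicate loop around the condition wait is what makes the notification safe against spurious wakeups and against Wait() being entered after shutdown already completed.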
- const grpc_compression_algorithm algorithm_for_level = - grpc_call_compression_for_level(call_, level); - set_compression_algorithm(algorithm_for_level); -} - void ServerContext::set_compression_algorithm( grpc_compression_algorithm algorithm) { char* algorithm_name = NULL; diff --git a/src/csharp/.nuget/packages.config b/src/csharp/.nuget/packages.config deleted file mode 100644 index 6154b3561f4..00000000000 --- a/src/csharp/.nuget/packages.config +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/src/csharp/Grpc.Auth/project.json b/src/csharp/Grpc.Auth/project.json index 72c258a91ac..08429f1d466 100644 --- a/src/csharp/Grpc.Auth/project.json +++ b/src/csharp/Grpc.Auth/project.json @@ -1,5 +1,5 @@ { - "version": "0.16.0-dev", + "version": "1.1.0-dev", "title": "gRPC C# Auth", "authors": [ "Google Inc." ], "copyright": "Copyright 2015, Google Inc.", @@ -22,7 +22,7 @@ } }, "dependencies": { - "Grpc.Core": "0.16.0-dev", + "Grpc.Core": "1.1.0-dev", "Google.Apis.Auth": "1.11.1" }, "frameworks": { diff --git a/src/csharp/Grpc.Core.Tests/SanityTest.cs b/src/csharp/Grpc.Core.Tests/SanityTest.cs index 501992c5695..9d069fa432a 100644 --- a/src/csharp/Grpc.Core.Tests/SanityTest.cs +++ b/src/csharp/Grpc.Core.Tests/SanityTest.cs @@ -58,10 +58,11 @@ namespace Grpc.Core.Tests [Test] public void TestsJsonUpToDate() { - var discoveredTests = DiscoverAllTestClasses(); - string discoveredTestsJson = JsonConvert.SerializeObject(discoveredTests, Formatting.Indented); + Dictionary> discoveredTests = DiscoverAllTestClasses(); + Dictionary> testsFromFile + = JsonConvert.DeserializeObject>>(ReadTestsJson()); - Assert.AreEqual(discoveredTestsJson, ReadTestsJson()); + Assert.AreEqual(discoveredTests, testsFromFile); } /// diff --git a/src/csharp/Grpc.Core.Tests/packages.config b/src/csharp/Grpc.Core.Tests/packages.config index aa7d951fdc7..6a930c17ee6 100644 --- a/src/csharp/Grpc.Core.Tests/packages.config +++ b/src/csharp/Grpc.Core.Tests/packages.config @@ -4,4 +4,7 @@ - \ No newline at end of file + + + + diff --git a/src/csharp/Grpc.Core.Tests/project.json b/src/csharp/Grpc.Core.Tests/project.json index f58bcbb5159..4a682d927e4 100644 --- a/src/csharp/Grpc.Core.Tests/project.json +++ b/src/csharp/Grpc.Core.Tests/project.json @@ -14,10 +14,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -33,10 +33,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": 
"../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } @@ -54,7 +54,10 @@ }, "Newtonsoft.Json": "8.0.3", "NUnit": "3.2.0", - "NUnitLite": "3.2.0-*" + "NUnitLite": "3.2.0-*", + "NUnit.ConsoleRunner": "3.2.0", + "OpenCover": "4.6.519", + "ReportGenerator": "2.4.4.0" }, "frameworks": { "net45": { }, diff --git a/src/csharp/Grpc.Core/Grpc.Core.nuspec b/src/csharp/Grpc.Core/Grpc.Core.nuspec index 47593f787b1..543549eb2d2 100644 --- a/src/csharp/Grpc.Core/Grpc.Core.nuspec +++ b/src/csharp/Grpc.Core/Grpc.Core.nuspec @@ -25,11 +25,11 @@ - - - - - - + + + + + + diff --git a/src/csharp/Grpc.Core/Grpc.Core.targets b/src/csharp/Grpc.Core/Grpc.Core.targets index 501fc505482..3367d51a80e 100644 --- a/src/csharp/Grpc.Core/Grpc.Core.targets +++ b/src/csharp/Grpc.Core/Grpc.Core.targets @@ -1,29 +1,29 @@  - + PreserveNewest - nativelibs\windows_x86\grpc_csharp_ext.dll + grpc_csharp_ext.x86.dll - + PreserveNewest - nativelibs\windows_x64\grpc_csharp_ext.dll + grpc_csharp_ext.x64.dll - + PreserveNewest - nativelibs\linux_x86\libgrpc_csharp_ext.so + libgrpc_csharp_ext.x86.so - + PreserveNewest - nativelibs\linux_x64\libgrpc_csharp_ext.so + libgrpc_csharp_ext.x64.so - + PreserveNewest - nativelibs\macosx_x86\libgrpc_csharp_ext.dylib + libgrpc_csharp_ext.x86.dylib - + PreserveNewest - nativelibs\macosx_x64\libgrpc_csharp_ext.dylib + libgrpc_csharp_ext.x64.dylib \ No newline at end of file diff --git a/src/csharp/Grpc.Core/GrpcEnvironment.cs b/src/csharp/Grpc.Core/GrpcEnvironment.cs index eeed6997123..574e900a0e7 100644 --- a/src/csharp/Grpc.Core/GrpcEnvironment.cs +++ b/src/csharp/Grpc.Core/GrpcEnvironment.cs @@ -47,7 +47,6 @@ namespace Grpc.Core /// public class GrpcEnvironment { - const LogLevel DefaultLogLevel = LogLevel.Info; const int MinDefaultThreadPoolSize = 4; static object staticLock = new object(); @@ -58,7 +57,7 @@ namespace Grpc.Core static readonly HashSet registeredChannels = new HashSet(); static readonly HashSet registeredServers = new HashSet(); - static ILogger logger = new LogLevelFilterLogger(new ConsoleLogger(), DefaultLogLevel); + static ILogger logger = new NullLogger(); readonly object myLock = new object(); readonly GrpcThreadPool threadPool; diff --git a/src/csharp/Grpc.Core/Internal/NativeExtension.cs b/src/csharp/Grpc.Core/Internal/NativeExtension.cs index a6d79258162..509baf7cb1f 100644 --- a/src/csharp/Grpc.Core/Internal/NativeExtension.cs +++ b/src/csharp/Grpc.Core/Internal/NativeExtension.cs @@ -44,9 +44,6 @@ namespace Grpc.Core.Internal /// internal sealed class NativeExtension { - const string NativeLibrariesDir = "nativelibs"; - const string DnxStyleNativeLibrariesDir = "../../build/native/bin/"; - static readonly ILogger Logger = GrpcEnvironment.Logger.ForType(); static readonly object staticLock = new object(); static volatile NativeExtension instance; @@ -98,20 +95,25 @@ namespace Grpc.Core.Internal private static UnmanagedLibrary Load() { // TODO: allow customizing path to native extension (possibly through exposing a GrpcEnvironment property). - - var libraryFlavor = string.Format("{0}_{1}", GetPlatformString(), GetArchitectureString()); - + // See https://github.com/grpc/grpc/pull/7303 for one option. 
var assemblyDirectory = Path.GetDirectoryName(GetAssemblyPath()); // With old-style VS projects, the native libraries get copied using a .targets rule to the build output folder // alongside the compiled assembly. - var classicPath = Path.Combine(assemblyDirectory, NativeLibrariesDir, libraryFlavor, GetNativeLibraryFilename()); + // With dotnet cli projects, the native libraries (just the required ones) are similarly copied to the built output folder, + // through the magic of Microsoft.NETCore.Platforms. + var classicPath = Path.Combine(assemblyDirectory, GetNativeLibraryFilename()); // DNX-style project.json projects will use Grpc.Core assembly directly in the location where it got restored // by nuget. We locate the native libraries based on known structure of Grpc.Core nuget package. - var dnxStylePath = Path.Combine(assemblyDirectory, DnxStyleNativeLibrariesDir, libraryFlavor, GetNativeLibraryFilename()); - return new UnmanagedLibrary(new string[] {classicPath, dnxStylePath}); + // TODO: Support .NET Core applications, which act slightly differently. We may be okay if "dotnet publish" + // is used, but "dotnet run" leaves the native libraries in-package, while copying assemblies. + string platform = GetPlatformString(); + string relativeDirectory = string.Format("../../runtimes/{0}/native", platform); + var dnxStylePath = Path.Combine(assemblyDirectory, relativeDirectory, GetNativeLibraryFilename()); + string[] paths = new[] { classicPath, dnxStylePath }; + return new UnmanagedLibrary(paths); } private static string GetAssemblyPath() @@ -147,7 +149,7 @@ namespace Grpc.Core.Internal { if (PlatformApis.IsWindows) { - return "windows"; + return "win"; } if (PlatformApis.IsLinux) { @@ -155,7 +157,7 @@ namespace Grpc.Core.Internal } if (PlatformApis.IsMacOSX) { - return "macosx"; + return "osx"; } throw new InvalidOperationException("Unsupported platform."); } @@ -176,17 +178,18 @@ namespace Grpc.Core.Internal // platform specific file name of the extension library private static string GetNativeLibraryFilename() { + string architecture = GetArchitectureString(); if (PlatformApis.IsWindows) { - return "grpc_csharp_ext.dll"; + return string.Format("grpc_csharp_ext.{0}.dll", architecture); } if (PlatformApis.IsLinux) { - return "libgrpc_csharp_ext.so"; + return string.Format("libgrpc_csharp_ext.{0}.so", architecture); } if (PlatformApis.IsMacOSX) { - return "libgrpc_csharp_ext.dylib"; + return string.Format("libgrpc_csharp_ext.{0}.dylib", architecture); } throw new InvalidOperationException("Unsupported platform."); } diff --git a/src/csharp/Grpc.Core/Internal/NativeMethods.cs b/src/csharp/Grpc.Core/Internal/NativeMethods.cs index 65607ed1204..f457c9dbf1e 100644 --- a/src/csharp/Grpc.Core/Internal/NativeMethods.cs +++ b/src/csharp/Grpc.Core/Internal/NativeMethods.cs @@ -159,215 +159,107 @@ namespace Grpc.Core.Internal public NativeMethods(UnmanagedLibrary library) { - if (PlatformApis.IsLinux || PlatformApis.IsMacOSX) - { - this.grpcsharp_init = GetMethodDelegate(library); - this.grpcsharp_shutdown = GetMethodDelegate(library); - this.grpcsharp_version_string = GetMethodDelegate(library); - - this.grpcsharp_batch_context_create = GetMethodDelegate(library); - this.grpcsharp_batch_context_recv_initial_metadata = GetMethodDelegate(library); - this.grpcsharp_batch_context_recv_message_length = GetMethodDelegate(library); - this.grpcsharp_batch_context_recv_message_to_buffer = GetMethodDelegate(library); - this.grpcsharp_batch_context_recv_status_on_client_status = GetMethodDelegate(library); - 
this.grpcsharp_batch_context_recv_status_on_client_details = GetMethodDelegate(library); - this.grpcsharp_batch_context_recv_status_on_client_trailing_metadata = GetMethodDelegate(library); - this.grpcsharp_batch_context_server_rpc_new_call = GetMethodDelegate(library); - this.grpcsharp_batch_context_server_rpc_new_method = GetMethodDelegate(library); - this.grpcsharp_batch_context_server_rpc_new_host = GetMethodDelegate(library); - this.grpcsharp_batch_context_server_rpc_new_deadline = GetMethodDelegate(library); - this.grpcsharp_batch_context_server_rpc_new_request_metadata = GetMethodDelegate(library); - this.grpcsharp_batch_context_recv_close_on_server_cancelled = GetMethodDelegate(library); - this.grpcsharp_batch_context_destroy = GetMethodDelegate(library); - - this.grpcsharp_composite_call_credentials_create = GetMethodDelegate(library); - this.grpcsharp_call_credentials_release = GetMethodDelegate(library); - - this.grpcsharp_call_cancel = GetMethodDelegate(library); - this.grpcsharp_call_cancel_with_status = GetMethodDelegate(library); - this.grpcsharp_call_start_unary = GetMethodDelegate(library); - this.grpcsharp_call_start_client_streaming = GetMethodDelegate(library); - this.grpcsharp_call_start_server_streaming = GetMethodDelegate(library); - this.grpcsharp_call_start_duplex_streaming = GetMethodDelegate(library); - this.grpcsharp_call_send_message = GetMethodDelegate(library); - this.grpcsharp_call_send_close_from_client = GetMethodDelegate(library); - this.grpcsharp_call_send_status_from_server = GetMethodDelegate(library); - this.grpcsharp_call_recv_message = GetMethodDelegate(library); - this.grpcsharp_call_recv_initial_metadata = GetMethodDelegate(library); - this.grpcsharp_call_start_serverside = GetMethodDelegate(library); - this.grpcsharp_call_send_initial_metadata = GetMethodDelegate(library); - this.grpcsharp_call_set_credentials = GetMethodDelegate(library); - this.grpcsharp_call_get_peer = GetMethodDelegate(library); - this.grpcsharp_call_destroy = GetMethodDelegate(library); - - this.grpcsharp_channel_args_create = GetMethodDelegate(library); - this.grpcsharp_channel_args_set_string = GetMethodDelegate(library); - this.grpcsharp_channel_args_set_integer = GetMethodDelegate(library); - this.grpcsharp_channel_args_destroy = GetMethodDelegate(library); - - this.grpcsharp_override_default_ssl_roots = GetMethodDelegate(library); - this.grpcsharp_ssl_credentials_create = GetMethodDelegate(library); - this.grpcsharp_composite_channel_credentials_create = GetMethodDelegate(library); - this.grpcsharp_channel_credentials_release = GetMethodDelegate(library); - - this.grpcsharp_insecure_channel_create = GetMethodDelegate(library); - this.grpcsharp_secure_channel_create = GetMethodDelegate(library); - this.grpcsharp_channel_create_call = GetMethodDelegate(library); - this.grpcsharp_channel_check_connectivity_state = GetMethodDelegate(library); - this.grpcsharp_channel_watch_connectivity_state = GetMethodDelegate(library); - this.grpcsharp_channel_get_target = GetMethodDelegate(library); - this.grpcsharp_channel_destroy = GetMethodDelegate(library); - - this.grpcsharp_sizeof_grpc_event = GetMethodDelegate(library); - - this.grpcsharp_completion_queue_create = GetMethodDelegate(library); - this.grpcsharp_completion_queue_shutdown = GetMethodDelegate(library); - this.grpcsharp_completion_queue_next = GetMethodDelegate(library); - this.grpcsharp_completion_queue_pluck = GetMethodDelegate(library); - this.grpcsharp_completion_queue_destroy = GetMethodDelegate(library); - - 
this.gprsharp_free = GetMethodDelegate(library); - - this.grpcsharp_metadata_array_create = GetMethodDelegate(library); - this.grpcsharp_metadata_array_add = GetMethodDelegate(library); - this.grpcsharp_metadata_array_count = GetMethodDelegate(library); - this.grpcsharp_metadata_array_get_key = GetMethodDelegate(library); - this.grpcsharp_metadata_array_get_value = GetMethodDelegate(library); - this.grpcsharp_metadata_array_get_value_length = GetMethodDelegate(library); - this.grpcsharp_metadata_array_destroy_full = GetMethodDelegate(library); - - this.grpcsharp_redirect_log = GetMethodDelegate(library); - - this.grpcsharp_metadata_credentials_create_from_plugin = GetMethodDelegate(library); - this.grpcsharp_metadata_credentials_notify_from_plugin = GetMethodDelegate(library); - - this.grpcsharp_ssl_server_credentials_create = GetMethodDelegate(library); - this.grpcsharp_server_credentials_release = GetMethodDelegate(library); - - this.grpcsharp_server_create = GetMethodDelegate(library); - this.grpcsharp_server_register_completion_queue = GetMethodDelegate(library); - this.grpcsharp_server_add_insecure_http2_port = GetMethodDelegate(library); - this.grpcsharp_server_add_secure_http2_port = GetMethodDelegate(library); - this.grpcsharp_server_start = GetMethodDelegate(library); - this.grpcsharp_server_request_call = GetMethodDelegate(library); - this.grpcsharp_server_cancel_all_calls = GetMethodDelegate(library); - this.grpcsharp_server_shutdown_and_notify_callback = GetMethodDelegate(library); - this.grpcsharp_server_destroy = GetMethodDelegate(library); - - this.gprsharp_now = GetMethodDelegate(library); - this.gprsharp_inf_future = GetMethodDelegate(library); - this.gprsharp_inf_past = GetMethodDelegate(library); - this.gprsharp_convert_clock_type = GetMethodDelegate(library); - this.gprsharp_sizeof_timespec = GetMethodDelegate(library); - - this.grpcsharp_test_callback = GetMethodDelegate(library); - this.grpcsharp_test_nop = GetMethodDelegate(library); - } - else - { - // Windows or fallback - this.grpcsharp_init = PInvokeMethods.grpcsharp_init; - this.grpcsharp_shutdown = PInvokeMethods.grpcsharp_shutdown; - this.grpcsharp_version_string = PInvokeMethods.grpcsharp_version_string; - - this.grpcsharp_batch_context_create = PInvokeMethods.grpcsharp_batch_context_create; - this.grpcsharp_batch_context_recv_initial_metadata = PInvokeMethods.grpcsharp_batch_context_recv_initial_metadata; - this.grpcsharp_batch_context_recv_message_length = PInvokeMethods.grpcsharp_batch_context_recv_message_length; - this.grpcsharp_batch_context_recv_message_to_buffer = PInvokeMethods.grpcsharp_batch_context_recv_message_to_buffer; - this.grpcsharp_batch_context_recv_status_on_client_status = PInvokeMethods.grpcsharp_batch_context_recv_status_on_client_status; - this.grpcsharp_batch_context_recv_status_on_client_details = PInvokeMethods.grpcsharp_batch_context_recv_status_on_client_details; - this.grpcsharp_batch_context_recv_status_on_client_trailing_metadata = PInvokeMethods.grpcsharp_batch_context_recv_status_on_client_trailing_metadata; - this.grpcsharp_batch_context_server_rpc_new_call = PInvokeMethods.grpcsharp_batch_context_server_rpc_new_call; - this.grpcsharp_batch_context_server_rpc_new_method = PInvokeMethods.grpcsharp_batch_context_server_rpc_new_method; - this.grpcsharp_batch_context_server_rpc_new_host = PInvokeMethods.grpcsharp_batch_context_server_rpc_new_host; - this.grpcsharp_batch_context_server_rpc_new_deadline = PInvokeMethods.grpcsharp_batch_context_server_rpc_new_deadline; - 
this.grpcsharp_batch_context_server_rpc_new_request_metadata = PInvokeMethods.grpcsharp_batch_context_server_rpc_new_request_metadata; - this.grpcsharp_batch_context_recv_close_on_server_cancelled = PInvokeMethods.grpcsharp_batch_context_recv_close_on_server_cancelled; - this.grpcsharp_batch_context_destroy = PInvokeMethods.grpcsharp_batch_context_destroy; - - this.grpcsharp_composite_call_credentials_create = PInvokeMethods.grpcsharp_composite_call_credentials_create; - this.grpcsharp_call_credentials_release = PInvokeMethods.grpcsharp_call_credentials_release; - - this.grpcsharp_call_cancel = PInvokeMethods.grpcsharp_call_cancel; - this.grpcsharp_call_cancel_with_status = PInvokeMethods.grpcsharp_call_cancel_with_status; - this.grpcsharp_call_start_unary = PInvokeMethods.grpcsharp_call_start_unary; - this.grpcsharp_call_start_client_streaming = PInvokeMethods.grpcsharp_call_start_client_streaming; - this.grpcsharp_call_start_server_streaming = PInvokeMethods.grpcsharp_call_start_server_streaming; - this.grpcsharp_call_start_duplex_streaming = PInvokeMethods.grpcsharp_call_start_duplex_streaming; - this.grpcsharp_call_send_message = PInvokeMethods.grpcsharp_call_send_message; - this.grpcsharp_call_send_close_from_client = PInvokeMethods.grpcsharp_call_send_close_from_client; - this.grpcsharp_call_send_status_from_server = PInvokeMethods.grpcsharp_call_send_status_from_server; - this.grpcsharp_call_recv_message = PInvokeMethods.grpcsharp_call_recv_message; - this.grpcsharp_call_recv_initial_metadata = PInvokeMethods.grpcsharp_call_recv_initial_metadata; - this.grpcsharp_call_start_serverside = PInvokeMethods.grpcsharp_call_start_serverside; - this.grpcsharp_call_send_initial_metadata = PInvokeMethods.grpcsharp_call_send_initial_metadata; - this.grpcsharp_call_set_credentials = PInvokeMethods.grpcsharp_call_set_credentials; - this.grpcsharp_call_get_peer = PInvokeMethods.grpcsharp_call_get_peer; - this.grpcsharp_call_destroy = PInvokeMethods.grpcsharp_call_destroy; - - this.grpcsharp_channel_args_create = PInvokeMethods.grpcsharp_channel_args_create; - this.grpcsharp_channel_args_set_string = PInvokeMethods.grpcsharp_channel_args_set_string; - this.grpcsharp_channel_args_set_integer = PInvokeMethods.grpcsharp_channel_args_set_integer; - this.grpcsharp_channel_args_destroy = PInvokeMethods.grpcsharp_channel_args_destroy; - - this.grpcsharp_override_default_ssl_roots = PInvokeMethods.grpcsharp_override_default_ssl_roots; - this.grpcsharp_ssl_credentials_create = PInvokeMethods.grpcsharp_ssl_credentials_create; - this.grpcsharp_composite_channel_credentials_create = PInvokeMethods.grpcsharp_composite_channel_credentials_create; - this.grpcsharp_channel_credentials_release = PInvokeMethods.grpcsharp_channel_credentials_release; - - this.grpcsharp_insecure_channel_create = PInvokeMethods.grpcsharp_insecure_channel_create; - this.grpcsharp_secure_channel_create = PInvokeMethods.grpcsharp_secure_channel_create; - this.grpcsharp_channel_create_call = PInvokeMethods.grpcsharp_channel_create_call; - this.grpcsharp_channel_check_connectivity_state = PInvokeMethods.grpcsharp_channel_check_connectivity_state; - this.grpcsharp_channel_watch_connectivity_state = PInvokeMethods.grpcsharp_channel_watch_connectivity_state; - this.grpcsharp_channel_get_target = PInvokeMethods.grpcsharp_channel_get_target; - this.grpcsharp_channel_destroy = PInvokeMethods.grpcsharp_channel_destroy; - - this.grpcsharp_sizeof_grpc_event = PInvokeMethods.grpcsharp_sizeof_grpc_event; - - this.grpcsharp_completion_queue_create = 
PInvokeMethods.grpcsharp_completion_queue_create; - this.grpcsharp_completion_queue_shutdown = PInvokeMethods.grpcsharp_completion_queue_shutdown; - this.grpcsharp_completion_queue_next = PInvokeMethods.grpcsharp_completion_queue_next; - this.grpcsharp_completion_queue_pluck = PInvokeMethods.grpcsharp_completion_queue_pluck; - this.grpcsharp_completion_queue_destroy = PInvokeMethods.grpcsharp_completion_queue_destroy; - - this.gprsharp_free = PInvokeMethods.gprsharp_free; - - this.grpcsharp_metadata_array_create = PInvokeMethods.grpcsharp_metadata_array_create; - this.grpcsharp_metadata_array_add = PInvokeMethods.grpcsharp_metadata_array_add; - this.grpcsharp_metadata_array_count = PInvokeMethods.grpcsharp_metadata_array_count; - this.grpcsharp_metadata_array_get_key = PInvokeMethods.grpcsharp_metadata_array_get_key; - this.grpcsharp_metadata_array_get_value = PInvokeMethods.grpcsharp_metadata_array_get_value; - this.grpcsharp_metadata_array_get_value_length = PInvokeMethods.grpcsharp_metadata_array_get_value_length; - this.grpcsharp_metadata_array_destroy_full = PInvokeMethods.grpcsharp_metadata_array_destroy_full; - - this.grpcsharp_redirect_log = PInvokeMethods.grpcsharp_redirect_log; - - this.grpcsharp_metadata_credentials_create_from_plugin = PInvokeMethods.grpcsharp_metadata_credentials_create_from_plugin; - this.grpcsharp_metadata_credentials_notify_from_plugin = PInvokeMethods.grpcsharp_metadata_credentials_notify_from_plugin; - - this.grpcsharp_ssl_server_credentials_create = PInvokeMethods.grpcsharp_ssl_server_credentials_create; - this.grpcsharp_server_credentials_release = PInvokeMethods.grpcsharp_server_credentials_release; - - this.grpcsharp_server_create = PInvokeMethods.grpcsharp_server_create; - this.grpcsharp_server_register_completion_queue = PInvokeMethods.grpcsharp_server_register_completion_queue; - this.grpcsharp_server_add_insecure_http2_port = PInvokeMethods.grpcsharp_server_add_insecure_http2_port; - this.grpcsharp_server_add_secure_http2_port = PInvokeMethods.grpcsharp_server_add_secure_http2_port; - this.grpcsharp_server_start = PInvokeMethods.grpcsharp_server_start; - this.grpcsharp_server_request_call = PInvokeMethods.grpcsharp_server_request_call; - this.grpcsharp_server_cancel_all_calls = PInvokeMethods.grpcsharp_server_cancel_all_calls; - this.grpcsharp_server_shutdown_and_notify_callback = PInvokeMethods.grpcsharp_server_shutdown_and_notify_callback; - this.grpcsharp_server_destroy = PInvokeMethods.grpcsharp_server_destroy; - - this.gprsharp_now = PInvokeMethods.gprsharp_now; - this.gprsharp_inf_future = PInvokeMethods.gprsharp_inf_future; - this.gprsharp_inf_past = PInvokeMethods.gprsharp_inf_past; - this.gprsharp_convert_clock_type = PInvokeMethods.gprsharp_convert_clock_type; - this.gprsharp_sizeof_timespec = PInvokeMethods.gprsharp_sizeof_timespec; - - this.grpcsharp_test_callback = PInvokeMethods.grpcsharp_test_callback; - this.grpcsharp_test_nop = PInvokeMethods.grpcsharp_test_nop; - } + this.grpcsharp_init = GetMethodDelegate(library); + this.grpcsharp_shutdown = GetMethodDelegate(library); + this.grpcsharp_version_string = GetMethodDelegate(library); + + this.grpcsharp_batch_context_create = GetMethodDelegate(library); + this.grpcsharp_batch_context_recv_initial_metadata = GetMethodDelegate(library); + this.grpcsharp_batch_context_recv_message_length = GetMethodDelegate(library); + this.grpcsharp_batch_context_recv_message_to_buffer = GetMethodDelegate(library); + this.grpcsharp_batch_context_recv_status_on_client_status = 
GetMethodDelegate(library); + this.grpcsharp_batch_context_recv_status_on_client_details = GetMethodDelegate(library); + this.grpcsharp_batch_context_recv_status_on_client_trailing_metadata = GetMethodDelegate(library); + this.grpcsharp_batch_context_server_rpc_new_call = GetMethodDelegate(library); + this.grpcsharp_batch_context_server_rpc_new_method = GetMethodDelegate(library); + this.grpcsharp_batch_context_server_rpc_new_host = GetMethodDelegate(library); + this.grpcsharp_batch_context_server_rpc_new_deadline = GetMethodDelegate(library); + this.grpcsharp_batch_context_server_rpc_new_request_metadata = GetMethodDelegate(library); + this.grpcsharp_batch_context_recv_close_on_server_cancelled = GetMethodDelegate(library); + this.grpcsharp_batch_context_destroy = GetMethodDelegate(library); + + this.grpcsharp_composite_call_credentials_create = GetMethodDelegate(library); + this.grpcsharp_call_credentials_release = GetMethodDelegate(library); + + this.grpcsharp_call_cancel = GetMethodDelegate(library); + this.grpcsharp_call_cancel_with_status = GetMethodDelegate(library); + this.grpcsharp_call_start_unary = GetMethodDelegate(library); + this.grpcsharp_call_start_client_streaming = GetMethodDelegate(library); + this.grpcsharp_call_start_server_streaming = GetMethodDelegate(library); + this.grpcsharp_call_start_duplex_streaming = GetMethodDelegate(library); + this.grpcsharp_call_send_message = GetMethodDelegate(library); + this.grpcsharp_call_send_close_from_client = GetMethodDelegate(library); + this.grpcsharp_call_send_status_from_server = GetMethodDelegate(library); + this.grpcsharp_call_recv_message = GetMethodDelegate(library); + this.grpcsharp_call_recv_initial_metadata = GetMethodDelegate(library); + this.grpcsharp_call_start_serverside = GetMethodDelegate(library); + this.grpcsharp_call_send_initial_metadata = GetMethodDelegate(library); + this.grpcsharp_call_set_credentials = GetMethodDelegate(library); + this.grpcsharp_call_get_peer = GetMethodDelegate(library); + this.grpcsharp_call_destroy = GetMethodDelegate(library); + + this.grpcsharp_channel_args_create = GetMethodDelegate(library); + this.grpcsharp_channel_args_set_string = GetMethodDelegate(library); + this.grpcsharp_channel_args_set_integer = GetMethodDelegate(library); + this.grpcsharp_channel_args_destroy = GetMethodDelegate(library); + + this.grpcsharp_override_default_ssl_roots = GetMethodDelegate(library); + this.grpcsharp_ssl_credentials_create = GetMethodDelegate(library); + this.grpcsharp_composite_channel_credentials_create = GetMethodDelegate(library); + this.grpcsharp_channel_credentials_release = GetMethodDelegate(library); + + this.grpcsharp_insecure_channel_create = GetMethodDelegate(library); + this.grpcsharp_secure_channel_create = GetMethodDelegate(library); + this.grpcsharp_channel_create_call = GetMethodDelegate(library); + this.grpcsharp_channel_check_connectivity_state = GetMethodDelegate(library); + this.grpcsharp_channel_watch_connectivity_state = GetMethodDelegate(library); + this.grpcsharp_channel_get_target = GetMethodDelegate(library); + this.grpcsharp_channel_destroy = GetMethodDelegate(library); + + this.grpcsharp_sizeof_grpc_event = GetMethodDelegate(library); + + this.grpcsharp_completion_queue_create = GetMethodDelegate(library); + this.grpcsharp_completion_queue_shutdown = GetMethodDelegate(library); + this.grpcsharp_completion_queue_next = GetMethodDelegate(library); + this.grpcsharp_completion_queue_pluck = GetMethodDelegate(library); + this.grpcsharp_completion_queue_destroy = 
GetMethodDelegate(library); + + this.gprsharp_free = GetMethodDelegate(library); + + this.grpcsharp_metadata_array_create = GetMethodDelegate(library); + this.grpcsharp_metadata_array_add = GetMethodDelegate(library); + this.grpcsharp_metadata_array_count = GetMethodDelegate(library); + this.grpcsharp_metadata_array_get_key = GetMethodDelegate(library); + this.grpcsharp_metadata_array_get_value = GetMethodDelegate(library); + this.grpcsharp_metadata_array_get_value_length = GetMethodDelegate(library); + this.grpcsharp_metadata_array_destroy_full = GetMethodDelegate(library); + + this.grpcsharp_redirect_log = GetMethodDelegate(library); + + this.grpcsharp_metadata_credentials_create_from_plugin = GetMethodDelegate(library); + this.grpcsharp_metadata_credentials_notify_from_plugin = GetMethodDelegate(library); + + this.grpcsharp_ssl_server_credentials_create = GetMethodDelegate(library); + this.grpcsharp_server_credentials_release = GetMethodDelegate(library); + + this.grpcsharp_server_create = GetMethodDelegate(library); + this.grpcsharp_server_register_completion_queue = GetMethodDelegate(library); + this.grpcsharp_server_add_insecure_http2_port = GetMethodDelegate(library); + this.grpcsharp_server_add_secure_http2_port = GetMethodDelegate(library); + this.grpcsharp_server_start = GetMethodDelegate(library); + this.grpcsharp_server_request_call = GetMethodDelegate(library); + this.grpcsharp_server_cancel_all_calls = GetMethodDelegate(library); + this.grpcsharp_server_shutdown_and_notify_callback = GetMethodDelegate(library); + this.grpcsharp_server_destroy = GetMethodDelegate(library); + + this.gprsharp_now = GetMethodDelegate(library); + this.gprsharp_inf_future = GetMethodDelegate(library); + this.gprsharp_inf_past = GetMethodDelegate(library); + this.gprsharp_convert_clock_type = GetMethodDelegate(library); + this.gprsharp_sizeof_timespec = GetMethodDelegate(library); + + this.grpcsharp_test_callback = GetMethodDelegate(library); + this.grpcsharp_test_nop = GetMethodDelegate(library); } /// @@ -516,317 +408,5 @@ namespace Grpc.Core.Internal public delegate CallError grpcsharp_test_callback_delegate([MarshalAs(UnmanagedType.FunctionPtr)] OpCompletionDelegate callback); public delegate IntPtr grpcsharp_test_nop_delegate(IntPtr ptr); } - - /// - /// Default PInvoke bindings for native methods that are used on Windows. - /// Alternatively, they can also be used as a fallback on Mono - /// (if libgrpc_csharp_ext is installed on your system, or is made accessible through e.g. LD_LIBRARY_PATH environment variable - /// or using Mono's dllMap feature). 
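With the PInvokeMethods fallback removed above, every platform now binds the grpcsharp_* entry points at runtime from the library that UnmanagedLibrary loaded, via GetMethodDelegate. Under the hood that is ordinary dynamic-symbol lookup; a plain C illustration of the POSIX half of that idea (dlopen/dlsym). The library name, error handling, and the assumption that it is on the loader path are examples only, not part of the patch; build with -ldl on Linux:

#include <dlfcn.h>
#include <stdio.h>

typedef const char *(*version_string_fn)(void);

int main(void) {
  void *lib = dlopen("libgrpc_csharp_ext.x64.so", RTLD_LAZY | RTLD_GLOBAL);
  if (lib == NULL) {
    fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  /* Resolve an exported symbol by name, then call through the pointer;
     the managed code does the equivalent and wraps the pointer in a delegate. */
  version_string_fn version =
      (version_string_fn)dlsym(lib, "grpcsharp_version_string");
  if (version != NULL) {
    printf("native extension version: %s\n", version());
  }
  dlclose(lib);
  return 0;
}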
- /// - private class PInvokeMethods - { - // Environment - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_init(); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_shutdown(); - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_version_string(); // returns not-owned const char* - - // BatchContextSafeHandle - - [DllImport("grpc_csharp_ext.dll")] - public static extern BatchContextSafeHandle grpcsharp_batch_context_create(); - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_batch_context_recv_initial_metadata(BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_batch_context_recv_message_length(BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_batch_context_recv_message_to_buffer(BatchContextSafeHandle ctx, byte[] buffer, UIntPtr bufferLen); - - [DllImport("grpc_csharp_ext.dll")] - public static extern StatusCode grpcsharp_batch_context_recv_status_on_client_status(BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_batch_context_recv_status_on_client_details(BatchContextSafeHandle ctx); // returns const char* - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_batch_context_recv_status_on_client_trailing_metadata(BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallSafeHandle grpcsharp_batch_context_server_rpc_new_call(BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_batch_context_server_rpc_new_method(BatchContextSafeHandle ctx); // returns const char* - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_batch_context_server_rpc_new_host(BatchContextSafeHandle ctx); // returns const char* - - [DllImport("grpc_csharp_ext.dll")] - public static extern Timespec grpcsharp_batch_context_server_rpc_new_deadline(BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_batch_context_server_rpc_new_request_metadata(BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern int grpcsharp_batch_context_recv_close_on_server_cancelled(BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_batch_context_destroy(IntPtr ctx); - - // CallCredentialsSafeHandle - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallCredentialsSafeHandle grpcsharp_composite_call_credentials_create(CallCredentialsSafeHandle creds1, CallCredentialsSafeHandle creds2); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_call_credentials_release(IntPtr credentials); - - // CallSafeHandle - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_cancel(CallSafeHandle call); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_cancel_with_status(CallSafeHandle call, StatusCode status, string description); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_start_unary(CallSafeHandle call, - BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_start_client_streaming(CallSafeHandle 
call, - BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_start_server_streaming(CallSafeHandle call, - BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, - MetadataArraySafeHandle metadataArray, WriteFlags writeFlags); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_start_duplex_streaming(CallSafeHandle call, - BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_send_message(CallSafeHandle call, - BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, bool sendEmptyInitialMetadata); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_send_close_from_client(CallSafeHandle call, - BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_send_status_from_server(CallSafeHandle call, - BatchContextSafeHandle ctx, StatusCode statusCode, string statusMessage, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata, - byte[] optionalSendBuffer, UIntPtr optionalSendBufferLen, WriteFlags writeFlags); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_recv_message(CallSafeHandle call, - BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_recv_initial_metadata(CallSafeHandle call, - BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_start_serverside(CallSafeHandle call, - BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_send_initial_metadata(CallSafeHandle call, - BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_call_set_credentials(CallSafeHandle call, CallCredentialsSafeHandle credentials); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CStringSafeHandle grpcsharp_call_get_peer(CallSafeHandle call); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_call_destroy(IntPtr call); - - // ChannelArgsSafeHandle - - [DllImport("grpc_csharp_ext.dll")] - public static extern ChannelArgsSafeHandle grpcsharp_channel_args_create(UIntPtr numArgs); - - [DllImport("grpc_csharp_ext.dll", CharSet = CharSet.Ansi)] - public static extern void grpcsharp_channel_args_set_string(ChannelArgsSafeHandle args, UIntPtr index, string key, string value); - - [DllImport("grpc_csharp_ext.dll", CharSet = CharSet.Ansi)] - public static extern void grpcsharp_channel_args_set_integer(ChannelArgsSafeHandle args, UIntPtr index, string key, int value); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_channel_args_destroy(IntPtr args); - - // ChannelCredentialsSafeHandle - - [DllImport("grpc_csharp_ext.dll", CharSet = CharSet.Ansi)] - public static extern void grpcsharp_override_default_ssl_roots(string pemRootCerts); - - [DllImport("grpc_csharp_ext.dll", CharSet = CharSet.Ansi)] - public static extern ChannelCredentialsSafeHandle grpcsharp_ssl_credentials_create(string pemRootCerts, string keyCertPairCertChain, string keyCertPairPrivateKey); - - [DllImport("grpc_csharp_ext.dll")] - public static extern ChannelCredentialsSafeHandle 
grpcsharp_composite_channel_credentials_create(ChannelCredentialsSafeHandle channelCreds, CallCredentialsSafeHandle callCreds); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_channel_credentials_release(IntPtr credentials); - - // ChannelSafeHandle - - [DllImport("grpc_csharp_ext.dll")] - public static extern ChannelSafeHandle grpcsharp_insecure_channel_create(string target, ChannelArgsSafeHandle channelArgs); - - [DllImport("grpc_csharp_ext.dll")] - public static extern ChannelSafeHandle grpcsharp_secure_channel_create(ChannelCredentialsSafeHandle credentials, string target, ChannelArgsSafeHandle channelArgs); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallSafeHandle grpcsharp_channel_create_call(ChannelSafeHandle channel, CallSafeHandle parentCall, ContextPropagationFlags propagationMask, CompletionQueueSafeHandle cq, string method, string host, Timespec deadline); - - [DllImport("grpc_csharp_ext.dll")] - public static extern ChannelState grpcsharp_channel_check_connectivity_state(ChannelSafeHandle channel, int tryToConnect); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_channel_watch_connectivity_state(ChannelSafeHandle channel, ChannelState lastObservedState, - Timespec deadline, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CStringSafeHandle grpcsharp_channel_get_target(ChannelSafeHandle call); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_channel_destroy(IntPtr channel); - - // CompletionQueueEvent - - [DllImport("grpc_csharp_ext.dll")] - public static extern int grpcsharp_sizeof_grpc_event(); - - // CompletionQueueSafeHandle - - [DllImport("grpc_csharp_ext.dll")] - public static extern CompletionQueueSafeHandle grpcsharp_completion_queue_create(); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_completion_queue_shutdown(CompletionQueueSafeHandle cq); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CompletionQueueEvent grpcsharp_completion_queue_next(CompletionQueueSafeHandle cq); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CompletionQueueEvent grpcsharp_completion_queue_pluck(CompletionQueueSafeHandle cq, IntPtr tag); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_completion_queue_destroy(IntPtr cq); - - // CStringSafeHandle - - [DllImport("grpc_csharp_ext.dll")] - public static extern void gprsharp_free(IntPtr ptr); - - // MetadataArraySafeHandle - - [DllImport("grpc_csharp_ext.dll")] - public static extern MetadataArraySafeHandle grpcsharp_metadata_array_create(UIntPtr capacity); - - [DllImport("grpc_csharp_ext.dll", CharSet = CharSet.Ansi)] - public static extern void grpcsharp_metadata_array_add(MetadataArraySafeHandle array, string key, byte[] value, UIntPtr valueLength); - - [DllImport("grpc_csharp_ext.dll")] - public static extern UIntPtr grpcsharp_metadata_array_count(IntPtr metadataArray); - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_metadata_array_get_key(IntPtr metadataArray, UIntPtr index); - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_metadata_array_get_value(IntPtr metadataArray, UIntPtr index); - - [DllImport("grpc_csharp_ext.dll")] - public static extern UIntPtr grpcsharp_metadata_array_get_value_length(IntPtr metadataArray, UIntPtr index); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void 
grpcsharp_metadata_array_destroy_full(IntPtr array); - - // NativeLogRedirector - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_redirect_log(GprLogDelegate callback); - - // NativeMetadataCredentialsPlugin - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallCredentialsSafeHandle grpcsharp_metadata_credentials_create_from_plugin(NativeMetadataInterceptor interceptor); - - [DllImport("grpc_csharp_ext.dll", CharSet = CharSet.Ansi)] - public static extern void grpcsharp_metadata_credentials_notify_from_plugin(IntPtr callbackPtr, IntPtr userData, MetadataArraySafeHandle metadataArray, StatusCode statusCode, string errorDetails); - - // ServerCredentialsSafeHandle - - [DllImport("grpc_csharp_ext.dll", CharSet = CharSet.Ansi)] - public static extern ServerCredentialsSafeHandle grpcsharp_ssl_server_credentials_create(string pemRootCerts, string[] keyCertPairCertChainArray, string[] keyCertPairPrivateKeyArray, UIntPtr numKeyCertPairs, bool forceClientAuth); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_server_credentials_release(IntPtr credentials); - - // ServerSafeHandle - - [DllImport("grpc_csharp_ext.dll")] - public static extern ServerSafeHandle grpcsharp_server_create(ChannelArgsSafeHandle args); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_server_register_completion_queue(ServerSafeHandle server, CompletionQueueSafeHandle cq); - - [DllImport("grpc_csharp_ext.dll")] - public static extern int grpcsharp_server_add_insecure_http2_port(ServerSafeHandle server, string addr); - - [DllImport("grpc_csharp_ext.dll")] - public static extern int grpcsharp_server_add_secure_http2_port(ServerSafeHandle server, string addr, ServerCredentialsSafeHandle creds); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_server_start(ServerSafeHandle server); - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_server_request_call(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_server_cancel_all_calls(ServerSafeHandle server); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_server_shutdown_and_notify_callback(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx); - - [DllImport("grpc_csharp_ext.dll")] - public static extern void grpcsharp_server_destroy(IntPtr server); - - // Timespec - - [DllImport("grpc_csharp_ext.dll")] - public static extern Timespec gprsharp_now(ClockType clockType); - - [DllImport("grpc_csharp_ext.dll")] - public static extern Timespec gprsharp_inf_future(ClockType clockType); - - [DllImport("grpc_csharp_ext.dll")] - public static extern Timespec gprsharp_inf_past(ClockType clockType); - - [DllImport("grpc_csharp_ext.dll")] - public static extern Timespec gprsharp_convert_clock_type(Timespec t, ClockType targetClock); - - [DllImport("grpc_csharp_ext.dll")] - public static extern int gprsharp_sizeof_timespec(); - - // Testing - - [DllImport("grpc_csharp_ext.dll")] - public static extern CallError grpcsharp_test_callback([MarshalAs(UnmanagedType.FunctionPtr)] OpCompletionDelegate callback); - - [DllImport("grpc_csharp_ext.dll")] - public static extern IntPtr grpcsharp_test_nop(IntPtr ptr); - } } } diff --git a/src/csharp/Grpc.Core/Internal/UnmanagedLibrary.cs b/src/csharp/Grpc.Core/Internal/UnmanagedLibrary.cs index 5a807461015..dc629bd714f 100644 --- 
a/src/csharp/Grpc.Core/Internal/UnmanagedLibrary.cs +++ b/src/csharp/Grpc.Core/Internal/UnmanagedLibrary.cs @@ -82,6 +82,32 @@ namespace Grpc.Core.Internal /// public IntPtr LoadSymbol(string symbolName) { + if (PlatformApis.IsWindows) + { + // See http://stackoverflow.com/questions/10473310 for background on this. + if (PlatformApis.Is64Bit) + { + return Windows.GetProcAddress(this.handle, symbolName); + } + else + { + // Yes, we could potentially predict the size... but it's a lot simpler to just try + // all the candidates. Most functions have a suffix of @0, @4 or @8 so we won't be trying + // many options - and if it takes a little bit longer to fail if we've really got the wrong + // library, that's not a big problem. This is only called once per function in the native library. + symbolName = "_" + symbolName + "@"; + for (int stackSize = 0; stackSize < 128; stackSize += 4) + { + IntPtr candidate = Windows.GetProcAddress(this.handle, symbolName + stackSize); + if (candidate != IntPtr.Zero) + { + return candidate; + } + } + // Fail. + return IntPtr.Zero; + } + } if (PlatformApis.IsLinux) { if (PlatformApis.IsMono) @@ -142,13 +168,18 @@ namespace Grpc.Core.Internal return path; } } - throw new FileNotFoundException(String.Format("Error loading native library. Not found in any of the possible locations {0}", libraryPathAlternatives)); + throw new FileNotFoundException( + String.Format("Error loading native library. Not found in any of the possible locations: {0}", + string.Join(",", libraryPathAlternatives))); } private static class Windows { [DllImport("kernel32.dll")] internal static extern IntPtr LoadLibrary(string filename); + + [DllImport("kernel32.dll")] + internal static extern IntPtr GetProcAddress(IntPtr hModule, string procName); } private static class Linux diff --git a/src/csharp/Grpc.Core/NativeDeps.Linux.targets b/src/csharp/Grpc.Core/NativeDeps.Linux.targets index a3848c6f2ed..e0c9132b1d5 100644 --- a/src/csharp/Grpc.Core/NativeDeps.Linux.targets +++ b/src/csharp/Grpc.Core/NativeDeps.Linux.targets @@ -3,7 +3,7 @@ PreserveNewest - nativelibs\linux_x64\libgrpc_csharp_ext.so + libgrpc_csharp_ext.x64.so \ No newline at end of file diff --git a/src/csharp/Grpc.Core/NativeDeps.Mac.targets b/src/csharp/Grpc.Core/NativeDeps.Mac.targets index c3c6264fd3f..e22c7384fc5 100644 --- a/src/csharp/Grpc.Core/NativeDeps.Mac.targets +++ b/src/csharp/Grpc.Core/NativeDeps.Mac.targets @@ -3,7 +3,7 @@ PreserveNewest - nativelibs\macosx_x86\libgrpc_csharp_ext.dylib + libgrpc_csharp_ext.x86.dylib \ No newline at end of file diff --git a/src/csharp/Grpc.Core/NativeDeps.Windows.targets b/src/csharp/Grpc.Core/NativeDeps.Windows.targets index f6a3405e29f..93db0935bc1 100644 --- a/src/csharp/Grpc.Core/NativeDeps.Windows.targets +++ b/src/csharp/Grpc.Core/NativeDeps.Windows.targets @@ -3,7 +3,7 @@ PreserveNewest - nativelibs\windows_x86\grpc_csharp_ext.dll + grpc_csharp_ext.x86.dll \ No newline at end of file diff --git a/src/csharp/Grpc.Core/VersionInfo.cs b/src/csharp/Grpc.Core/VersionInfo.cs index cb20967680e..553aeec58ac 100644 --- a/src/csharp/Grpc.Core/VersionInfo.cs +++ b/src/csharp/Grpc.Core/VersionInfo.cs @@ -48,11 +48,11 @@ namespace Grpc.Core /// /// Current AssemblyFileVersion of gRPC C# assemblies /// - public const string CurrentAssemblyFileVersion = "0.16.0.0"; + public const string CurrentAssemblyFileVersion = "1.1.0.0"; /// /// Current version of gRPC C# /// - public const string CurrentVersion = "0.16.0-dev"; + public const string CurrentVersion = "1.1.0-dev"; } } diff --git 
a/src/csharp/Grpc.Core/project.json b/src/csharp/Grpc.Core/project.json index 201e5488014..807508896a1 100644 --- a/src/csharp/Grpc.Core/project.json +++ b/src/csharp/Grpc.Core/project.json @@ -1,5 +1,5 @@ { - "version": "0.16.0-dev", + "version": "1.1.0-dev", "title": "gRPC C# Core", "authors": [ "Google Inc." ], "copyright": "Copyright 2015, Google Inc.", @@ -14,12 +14,12 @@ "files": { "mappings": { "build/net45/": "Grpc.Core.targets", - "build/native/bin/windows_x86/": "../nativelibs/windows_x86/grpc_csharp_ext.dll", - "build/native/bin/windows_x64/": "../nativelibs/windows_x64/grpc_csharp_ext.dll", - "build/native/bin/linux_x86/": "../nativelibs/linux_x86/libgrpc_csharp_ext.so", - "build/native/bin/linux_x64/": "../nativelibs/linux_x64/libgrpc_csharp_ext.so", - "build/native/bin/macosx_x86/": "../nativelibs/macosx_x86/libgrpc_csharp_ext.dylib", - "build/native/bin/macosx_x64/": "../nativelibs/macosx_x64/libgrpc_csharp_ext.dylib" + "runtimes/win/native/grpc_csharp_ext.x86.dll": "../nativelibs/windows_x86/grpc_csharp_ext.dll", + "runtimes/win/native/grpc_csharp_ext.x64.dll": "../nativelibs/windows_x64/grpc_csharp_ext.dll", + "runtimes/linux/native/libgrpc_csharp_ext.x86.so": "../nativelibs/linux_x86/libgrpc_csharp_ext.so", + "runtimes/linux/native/libgrpc_csharp_ext.x64.so": "../nativelibs/linux_x64/libgrpc_csharp_ext.so", + "runtimes/osx/native/libgrpc_csharp_ext.x86.dylib": "../nativelibs/macosx_x86/libgrpc_csharp_ext.dylib", + "runtimes/osx/native/libgrpc_csharp_ext.x64.dylib": "../nativelibs/macosx_x64/libgrpc_csharp_ext.dylib" } } }, diff --git a/src/csharp/Grpc.Examples.MathClient/packages.config b/src/csharp/Grpc.Examples.MathClient/packages.config new file mode 100644 index 00000000000..79ece06bef6 --- /dev/null +++ b/src/csharp/Grpc.Examples.MathClient/packages.config @@ -0,0 +1,3 @@ + + + diff --git a/src/csharp/Grpc.Examples.MathClient/project.json b/src/csharp/Grpc.Examples.MathClient/project.json index b865cd50118..206d6c59824 100644 --- a/src/csharp/Grpc.Examples.MathClient/project.json +++ b/src/csharp/Grpc.Examples.MathClient/project.json @@ -14,10 +14,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -33,10 +33,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + 
"libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/src/csharp/Grpc.Examples.MathServer/packages.config b/src/csharp/Grpc.Examples.MathServer/packages.config new file mode 100644 index 00000000000..79ece06bef6 --- /dev/null +++ b/src/csharp/Grpc.Examples.MathServer/packages.config @@ -0,0 +1,3 @@ + + + diff --git a/src/csharp/Grpc.Examples.MathServer/project.json b/src/csharp/Grpc.Examples.MathServer/project.json index b865cd50118..206d6c59824 100644 --- a/src/csharp/Grpc.Examples.MathServer/project.json +++ b/src/csharp/Grpc.Examples.MathServer/project.json @@ -14,10 +14,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -33,10 +33,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/src/csharp/Grpc.Examples.Tests/project.json b/src/csharp/Grpc.Examples.Tests/project.json index cc518eb6ff7..b4c4c5f6910 100644 --- a/src/csharp/Grpc.Examples.Tests/project.json +++ b/src/csharp/Grpc.Examples.Tests/project.json @@ -14,10 +14,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -33,10 +33,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": 
"../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/src/csharp/Grpc.HealthCheck.Tests/project.json b/src/csharp/Grpc.HealthCheck.Tests/project.json index fbf8d92f04d..f44a3225ae6 100644 --- a/src/csharp/Grpc.HealthCheck.Tests/project.json +++ b/src/csharp/Grpc.HealthCheck.Tests/project.json @@ -14,10 +14,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -33,10 +33,10 @@ }, "copyToOutput": { "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/src/csharp/Grpc.HealthCheck/project.json b/src/csharp/Grpc.HealthCheck/project.json index d9daef720f1..0e03e89d6a7 100644 --- a/src/csharp/Grpc.HealthCheck/project.json +++ b/src/csharp/Grpc.HealthCheck/project.json @@ -1,5 +1,5 @@ { - "version": "0.16.0-dev", + "version": "1.1.0-dev", "title": "gRPC C# Healthchecking", "authors": [ "Google Inc." 
], "copyright": "Copyright 2015, Google Inc.", @@ -22,7 +22,7 @@ } }, "dependencies": { - "Grpc.Core": "0.16.0-dev", + "Grpc.Core": "1.1.0-dev", "Google.Protobuf": "3.0.0-beta3" }, "frameworks": { diff --git a/src/csharp/Grpc.IntegrationTesting.Client/project.json b/src/csharp/Grpc.IntegrationTesting.Client/project.json index 4a2846feea0..6b61a4b76e8 100644 --- a/src/csharp/Grpc.IntegrationTesting.Client/project.json +++ b/src/csharp/Grpc.IntegrationTesting.Client/project.json @@ -15,10 +15,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -35,10 +35,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/packages.config b/src/csharp/Grpc.IntegrationTesting.QpsWorker/packages.config new file mode 100644 index 00000000000..79ece06bef6 --- /dev/null +++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/packages.config @@ -0,0 +1,3 @@ + + + diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json b/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json index 4a2846feea0..6b61a4b76e8 100644 --- a/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json +++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json @@ -15,10 +15,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -35,10 +35,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": 
"../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/src/csharp/Grpc.IntegrationTesting.Server/project.json b/src/csharp/Grpc.IntegrationTesting.Server/project.json index 4a2846feea0..6b61a4b76e8 100644 --- a/src/csharp/Grpc.IntegrationTesting.Server/project.json +++ b/src/csharp/Grpc.IntegrationTesting.Server/project.json @@ -15,10 +15,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -35,10 +35,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/packages.config b/src/csharp/Grpc.IntegrationTesting.StressClient/packages.config new file mode 100644 index 00000000000..79ece06bef6 --- /dev/null +++ b/src/csharp/Grpc.IntegrationTesting.StressClient/packages.config @@ -0,0 +1,3 @@ + + + diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/project.json b/src/csharp/Grpc.IntegrationTesting.StressClient/project.json index 4a2846feea0..6b61a4b76e8 100644 --- a/src/csharp/Grpc.IntegrationTesting.StressClient/project.json +++ b/src/csharp/Grpc.IntegrationTesting.StressClient/project.json @@ -15,10 +15,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": 
"../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -35,10 +35,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/src/csharp/Grpc.IntegrationTesting/project.json b/src/csharp/Grpc.IntegrationTesting/project.json index 6297600ddc3..dcd9ccabd29 100644 --- a/src/csharp/Grpc.IntegrationTesting/project.json +++ b/src/csharp/Grpc.IntegrationTesting/project.json @@ -15,10 +15,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -35,10 +35,10 @@ "copyToOutput": { "include": "data/*", "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/src/csharp/README.md b/src/csharp/README.md index 86394135c84..18d5945a8a1 100644 --- a/src/csharp/README.md +++ b/src/csharp/README.md @@ -23,9 +23,9 @@ HOW TO USE - Open Visual Studio / MonoDevelop / Xamarin Studio and start a new project/solution. -- Add NuGet package `Grpc` as a dependency (Project options -> Manage NuGet Packages). +- Add the [Grpc](https://www.nuget.org/packages/Grpc/) NuGet package as a dependency (Project options -> Manage NuGet Packages). 
-- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add NuGet package `Grpc.Tools` that contains Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin. +- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add the [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) NuGet package that contains Protocol Buffers compiler (_protoc_) and the gRPC _protoc_ plugin. BUILD FROM SOURCE ----------------- diff --git a/src/csharp/build_packages.bat b/src/csharp/build_packages.bat index 272b30f385d..f05c0241b65 100644 --- a/src/csharp/build_packages.bat +++ b/src/csharp/build_packages.bat @@ -30,7 +30,7 @@ @rem Builds gRPC NuGet packages @rem Current package versions -set VERSION=0.16.0-dev +set VERSION=1.1.0-dev set PROTOBUF_VERSION=3.0.0-beta3 @rem Packages that depend on prerelease packages (like Google.Protobuf) need to have prerelease suffix as well. diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c index c670ea65c79..3d0947c03dc 100644 --- a/src/csharp/ext/grpc_csharp_ext.c +++ b/src/csharp/ext/grpc_csharp_ext.c @@ -253,8 +253,9 @@ GPR_EXPORT intptr_t GPR_CALLTYPE grpcsharp_batch_context_recv_message_length( if (!ctx->recv_message) { return -1; } - /* TODO(issue:#7206): check return value of grpc_byte_buffer_reader_init. */ - grpc_byte_buffer_reader_init(&reader, ctx->recv_message); + + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, ctx->recv_message)); + return (intptr_t)grpc_byte_buffer_length(reader.buffer_out); } @@ -268,8 +269,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_recv_message_to_buffer( gpr_slice slice; size_t offset = 0; - /* TODO(issue:#7206): check return value of grpc_byte_buffer_reader_init. */ - grpc_byte_buffer_reader_init(&reader, ctx->recv_message); + GPR_ASSERT(grpc_byte_buffer_reader_init(&reader, ctx->recv_message)); while (grpc_byte_buffer_reader_next(&reader, &slice)) { size_t len = GPR_SLICE_LENGTH(slice); diff --git a/src/node/ext/call_credentials.cc b/src/node/ext/call_credentials.cc index 3c8f0c56da4..81fc552fd10 100644 --- a/src/node/ext/call_credentials.cc +++ b/src/node/ext/call_credentials.cc @@ -68,6 +68,8 @@ using v8::Value; Nan::Callback *CallCredentials::constructor; Persistent CallCredentials::fun_tpl; +static Callback *plugin_callback; + CallCredentials::CallCredentials(grpc_call_credentials *credentials) : wrapped_credentials(credentials) {} @@ -88,6 +90,11 @@ void CallCredentials::Init(Local exports) { Nan::New(CreateFromPlugin)).ToLocalChecked()); Nan::Set(exports, Nan::New("CallCredentials").ToLocalChecked(), ctr); constructor = new Nan::Callback(ctr); + + Local callback_tpl = + Nan::New(PluginCallback); + plugin_callback = new Callback( + Nan::GetFunction(callback_tpl).ToLocalChecked()); } bool CallCredentials::HasInstance(Local val) { @@ -195,23 +202,28 @@ NAN_METHOD(PluginCallback) { return Nan::ThrowTypeError( "The callback's third argument must be an object"); } + if (!info[3]->IsObject()) { + return Nan::ThrowTypeError( + "The callback's fourth argument must be an object"); + } shared_ptr resources(new Resources); grpc_status_code code = static_cast( Nan::To(info[0]).FromJust()); Utf8String details_utf8_str(info[1]); char *details = *details_utf8_str; grpc_metadata_array array; + Local callback_data = Nan::To(info[3]).ToLocalChecked(); if (!CreateMetadataArray(Nan::To(info[2]).ToLocalChecked(), &array, resources)){ return Nan::ThrowError("Failed to parse metadata"); } grpc_credentials_plugin_metadata_cb cb = reinterpret_cast( - 
Nan::Get(info.Callee(), + Nan::Get(callback_data, Nan::New("cb").ToLocalChecked() ).ToLocalChecked().As()->Value()); void *user_data = - Nan::Get(info.Callee(), + Nan::Get(callback_data, Nan::New("user_data").ToLocalChecked() ).ToLocalChecked().As()->Value(); cb(user_data, array.metadata, array.count, code, details); @@ -227,17 +239,17 @@ NAUV_WORK_CB(SendPluginCallback) { while (!callbacks.empty()) { plugin_callback_data *data = callbacks.front(); callbacks.pop_front(); - // Attach cb and user_data to plugin_callback so that it can access them later - v8::Local plugin_callback = Nan::GetFunction( - Nan::New(PluginCallback)).ToLocalChecked(); - Nan::Set(plugin_callback, Nan::New("cb").ToLocalChecked(), + Local callback_data = Nan::New(); + Nan::Set(callback_data, Nan::New("cb").ToLocalChecked(), Nan::New(reinterpret_cast(data->cb))); - Nan::Set(plugin_callback, Nan::New("user_data").ToLocalChecked(), + Nan::Set(callback_data, Nan::New("user_data").ToLocalChecked(), Nan::New(data->user_data)); - const int argc = 2; + const int argc = 3; v8::Local argv[argc] = { Nan::New(data->service_url).ToLocalChecked(), - plugin_callback + callback_data, + // Get Local from Nan::Callback* + **plugin_callback }; Nan::Callback *callback = state->callback; callback->Call(argc, argv); diff --git a/src/node/health_check/package.json b/src/node/health_check/package.json index ad65b319177..40e276055b7 100644 --- a/src/node/health_check/package.json +++ b/src/node/health_check/package.json @@ -1,6 +1,6 @@ { "name": "grpc-health-check", - "version": "0.16.0-dev", + "version": "1.1.0-dev", "author": "Google Inc.", "description": "Health check service for use with gRPC", "repository": { @@ -19,11 +19,11 @@ "lodash": "^3.9.3", "google-protobuf": "^3.0.0-alpha.5" }, - "files": { + "files": [ "LICENSE", "health.js", "v1" - }, + ], "main": "src/node/index.js", "license": "BSD-3-Clause" } diff --git a/src/node/src/credentials.js b/src/node/src/credentials.js index b746d0625df..043df06a669 100644 --- a/src/node/src/credentials.js +++ b/src/node/src/credentials.js @@ -92,7 +92,8 @@ exports.createSsl = ChannelCredentials.createSsl; * @return {CallCredentials} The credentials object */ exports.createFromMetadataGenerator = function(metadata_generator) { - return CallCredentials.createFromPlugin(function(service_url, callback) { + return CallCredentials.createFromPlugin(function(service_url, cb_data, + callback) { metadata_generator({service_url: service_url}, function(error, metadata) { var code = grpc.status.OK; var message = ''; @@ -107,7 +108,7 @@ exports.createFromMetadataGenerator = function(metadata_generator) { metadata = new Metadata(); } } - callback(code, message, metadata._getCoreRepresentation()); + callback(code, message, metadata._getCoreRepresentation(), cb_data); }); }); }; diff --git a/src/node/tools/package.json b/src/node/tools/package.json index 7c256d7ba0d..e5513d78791 100644 --- a/src/node/tools/package.json +++ b/src/node/tools/package.json @@ -1,6 +1,6 @@ { "name": "grpc-tools", - "version": "0.16.0-dev", + "version": "1.1.0-dev", "author": "Google Inc.", "description": "Tools for developing with gRPC on Node.js", "homepage": "http://www.grpc.io/", diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec new file mode 100644 index 00000000000..97f4f586b72 --- /dev/null +++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec @@ -0,0 +1,122 @@ +# CocoaPods podspec for the gRPC Proto Compiler Plugin + +# Copyright 2016, Google Inc. 
+# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Pod::Spec.new do |s| + # This pod is only a utility that will be used by other pods _at install time_ (not at compile + # time). Other pods can access it in their `prepare_command` script, under /. + # Because CocoaPods installs pods in alphabetical order, beginning this pod's name with an + # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed + # before them. + s.name = '!ProtoCompiler-gRPCPlugin' + v = '1.0.0-pre1' + s.version = v + s.summary = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.' + s.description = <<-DESC + This podspec only downloads the gRPC protoc plugin so that local pods generating protos can use + it in their invocation of protoc, as part of their prepare_command. + The generated code will have a dependency on the gRPC Objective-C Proto runtime of the same + version. The runtime can be obtained as the "gRPC-ProtoRPC" pod. + DESC + s.homepage = 'http://www.grpc.io' + s.license = { + :type => 'New BSD', + :text => <<-LICENSE + Copyright 2015, Google Inc. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + LICENSE + } + s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' } + + repo = 'grpc/grpc' + release = "objective-c-v#{v}" + file = "grpc_objective_c_plugin-#{v}-macos-x86_64.zip" + s.source = { + :http => "https://github.com/#{repo}/releases/download/#{release}/#{file}", + # TODO(jcanizales): Add sha1 or sha256 + # :sha1 => '??', + } + + repo_root = '../..' + plugin = 'grpc_objective_c_plugin' + + s.preserve_paths = plugin + + # Restrict the protoc version to the one supported by this plugin. + s.dependency '!ProtoCompiler', '3.0.0-beta-3.1' + # For the Protobuf dependency not to complain: + s.ios.deployment_target = '7.1' + s.osx.deployment_target = '10.9' + # Restrict the gRPC runtime version to the one supported by this plugin. + s.dependency 'gRPC-ProtoRPC', v + + # This is only for local development of the plugin: If the Podfile brings this pod from a local + # directory using `:path`, CocoaPods won't download the zip file and so the plugin won't be + # present in this pod's directory. We use that knowledge to check for the existence of the file + # and, if absent, compile the plugin from the local sources. + s.prepare_command = <<-CMD + if [ ! -f #{plugin} ]; then + cd #{repo_root} + # This will build the plugin and put it in #{repo_root}/bins/opt. + # + # TODO(jcanizales): I reckon make will try to use locally-installed libprotoc (headers and + # library binary) if found, which _we do not want_. Find a way for this to always use the + # sources in the repo. + make #{plugin} + cd - + fi + CMD +end diff --git a/src/objective-c/!ProtoCompiler.podspec b/src/objective-c/!ProtoCompiler.podspec new file mode 100644 index 00000000000..56aacc33305 --- /dev/null +++ b/src/objective-c/!ProtoCompiler.podspec @@ -0,0 +1,135 @@ +# Proto Compiler CocoaPods podspec + +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Pod::Spec.new do |s| + # This pod is only a utility that will be used by other pods _at install time_ (not at compile + # time). Other pods can access it in their `prepare_command` script, under /. + # Because CocoaPods installs pods in alphabetical order, beginning this pod's name with an + # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed + # before them. + s.name = '!ProtoCompiler' + v = '3.0.0-beta-3.1' + s.version = v + s.summary = 'The Protobuf Compiler (protoc) generates Objective-C files from .proto files' + s.description = <<-DESC + This podspec only downloads protoc so that local pods generating protos can execute it as part + of their prepare_command. + The generated code will have a dependency on the Protobuf Objective-C runtime of the same + version. The runtime can be obtained as the "Protobuf" pod. + DESC + s.homepage = 'https://github.com/google/protobuf' + s.license = { + :type => 'New BSD', + :text => <<-LICENSE + This license applies to all parts of Protocol Buffers except the following: + + - Atomicops support for generic gcc, located in + src/google/protobuf/stubs/atomicops_internals_generic_gcc.h. + This file is copyrighted by Red Hat Inc. + + - Atomicops support for AIX/POWER, located in + src/google/protobuf/stubs/atomicops_internals_power.h. + This file is copyrighted by Bloomberg Finance LP. + + Copyright 2014, Google Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + Code generated by the Protocol Buffer compiler is owned by the owner + of the input file used when generating it. This code is not + standalone and requires a support library to be linked with it. This + support library is itself covered by the above license. + LICENSE + } + # "The name and email addresses of the library maintainers, not the Podspec maintainer." + s.authors = { 'The Protocol Buffers contributors' => 'protobuf@googlegroups.com' } + + repo = 'google/protobuf' + file = "protoc-#{v}-osx-x86_64.zip" + s.source = { + :http => "https://github.com/#{repo}/releases/download/v#{v}/#{file}", + # TODO(jcanizales): Add sha1 or sha256 + # :sha1 => '??', + } + + s.preserve_paths = 'protoc', + 'google/**/*.proto' # Well-known protobuf types + + # Restrict the protobuf runtime version to the one supported by this version of protoc. + s.dependency 'Protobuf', v + # For the Protobuf dependency not to complain: + s.ios.deployment_target = '7.1' + s.osx.deployment_target = '10.9' + + # This is only for local development of protoc: If the Podfile brings this pod from a local + # directory using `:path`, CocoaPods won't download the zip file and so the compiler won't be + # present in this pod's directory. We use that knowledge to check for the existence of the file + # and, if absent, build it from the local sources. + repo_root = '../..' + plugin = 'grpc_objective_c_plugin' + s.prepare_command = <<-CMD + if [ ! -f protoc ]; then + cd #{repo_root} + # This will build protoc from the Protobuf submodule of gRPC, and put it in + # #{repo_root}/bins/opt/protobuf. + # + # TODO(jcanizales): Make won't build protoc from sources if one's locally installed, which + # _we do not want_. Find a way for this to always build from source. + make #{plugin} + cd - + fi + CMD + +end diff --git a/src/objective-c/BoringSSL.podspec b/src/objective-c/BoringSSL.podspec index 26a0451f7d3..b759997c115 100644 --- a/src/objective-c/BoringSSL.podspec +++ b/src/objective-c/BoringSSL.podspec @@ -31,7 +31,7 @@ Pod::Spec.new do |s| s.name = 'BoringSSL' - version = '4.0' + version = '5.0' s.version = version s.summary = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.' # Adapted from the homepage: @@ -67,8 +67,11 @@ Pod::Spec.new do |s| # "The name and email addresses of the library maintainers, not the Podspec maintainer." s.authors = 'Adam Langley', 'David Benjamin', 'Matt Braithwaite' - s.source = { :git => 'https://boringssl.googlesource.com/boringssl', - :tag => "version_for_cocoapods_#{version}" } + s.source = { + :git => 'https://boringssl.googlesource.com/boringssl', + :tag => "version_for_cocoapods_#{version}", + # :commit => '8d343b44bbab829d1a28fdef650ca95f7db4412e', + } name = 'openssl' @@ -109,8 +112,6 @@ Pod::Spec.new do |s| s.subspec 'Interface' do |ss| ss.header_mappings_dir = 'include/openssl' ss.source_files = 'include/openssl/*.h' - # Doesn't compile correctly; but doesn't seem to be needed: - ss.exclude_files = 'include/openssl/arm_arch.h' end s.subspec 'Implementation' do |ss| ss.header_mappings_dir = '.' @@ -147,6 +148,11 @@ Pod::Spec.new do |s| #include "ssl.h" #include "crypto.h" #include "aes.h" + /* The following macros are defined by base.h. The latter is the first file included by the + other headers. 
*/ + #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) + # include "arm_arch.h" + #endif #include "asn1.h" #include "asn1_mac.h" #include "asn1t.h" @@ -216,7 +222,7 @@ Pod::Spec.new do |s| * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - /* This file was generated by err_data_generate.go. */ + /* This file was generated by err_data_generate.go. */ #include #include @@ -382,42 +388,42 @@ Pod::Spec.new do |s| 0x28340c19, 0x283480ac, 0x283500ea, - 0x2c322775, - 0x2c32a783, - 0x2c332795, - 0x2c33a7a7, - 0x2c3427bb, - 0x2c34a7cd, - 0x2c3527e8, - 0x2c35a7fa, - 0x2c36280d, + 0x2c3227cb, + 0x2c32a7d9, + 0x2c3327eb, + 0x2c33a7fd, + 0x2c342811, + 0x2c34a823, + 0x2c35283e, + 0x2c35a850, + 0x2c362863, 0x2c36832d, - 0x2c37281a, - 0x2c37a82c, - 0x2c38283f, - 0x2c38a856, - 0x2c392864, - 0x2c39a874, - 0x2c3a2886, - 0x2c3aa89a, - 0x2c3b28ab, - 0x2c3ba8ca, - 0x2c3c28de, - 0x2c3ca8f4, - 0x2c3d290d, - 0x2c3da92a, - 0x2c3e293b, - 0x2c3ea949, - 0x2c3f2961, - 0x2c3fa979, - 0x2c402986, + 0x2c372870, + 0x2c37a882, + 0x2c382895, + 0x2c38a8ac, + 0x2c3928ba, + 0x2c39a8ca, + 0x2c3a28dc, + 0x2c3aa8f0, + 0x2c3b2901, + 0x2c3ba920, + 0x2c3c2934, + 0x2c3ca94a, + 0x2c3d2963, + 0x2c3da980, + 0x2c3e2991, + 0x2c3ea99f, + 0x2c3f29b7, + 0x2c3fa9cf, + 0x2c4029dc, 0x2c4090e7, - 0x2c412997, - 0x2c41a9aa, + 0x2c4129ed, + 0x2c41aa00, 0x2c4210c0, - 0x2c42a9bb, + 0x2c42aa11, 0x2c430720, - 0x2c43a8bc, + 0x2c43a912, 0x30320000, 0x30328015, 0x3033001f, @@ -591,145 +597,148 @@ Pod::Spec.new do |s| 0x404619e8, 0x40469a08, 0x40471a16, - 0x40479a2a, - 0x40481a3f, - 0x40489a58, - 0x40491a6f, - 0x40499a89, - 0x404a1aa0, - 0x404a9abe, - 0x404b1ad6, - 0x404b9aed, - 0x404c1b03, - 0x404c9b15, - 0x404d1b36, - 0x404d9b58, - 0x404e1b6c, - 0x404e9b79, - 0x404f1b90, - 0x404f9ba0, - 0x40501bca, - 0x40509bde, - 0x40511bf9, - 0x40519c09, - 0x40521c20, - 0x40529c32, - 0x40531c4a, - 0x40539c5d, - 0x40541c72, - 0x40549c95, - 0x40551ca3, - 0x40559cc0, - 0x40561ccd, - 0x40569ce6, - 0x40571cfe, - 0x40579d11, - 0x40581d26, - 0x40589d38, - 0x40591d48, - 0x40599d61, - 0x405a1d75, - 0x405a9d85, - 0x405b1d9d, - 0x405b9dae, - 0x405c1dc1, - 0x405c9dd2, - 0x405d1ddf, - 0x405d9df6, - 0x405e1e16, + 0x40479a3d, + 0x40481a52, + 0x40489a6b, + 0x40491a82, + 0x40499a9c, + 0x404a1ab3, + 0x404a9ad1, + 0x404b1ae9, + 0x404b9b00, + 0x404c1b16, + 0x404c9b28, + 0x404d1b49, + 0x404d9b6b, + 0x404e1b7f, + 0x404e9b8c, + 0x404f1ba3, + 0x404f9bb3, + 0x40501bdd, + 0x40509bf1, + 0x40511c0c, + 0x40519c1c, + 0x40521c33, + 0x40529c45, + 0x40531c5d, + 0x40539c70, + 0x40541c85, + 0x40549ca8, + 0x40551cb6, + 0x40559cd3, + 0x40561ce0, + 0x40569cf9, + 0x40571d11, + 0x40579d24, + 0x40581d39, + 0x40589d4b, + 0x40591d7a, + 0x40599d93, + 0x405a1da7, + 0x405a9db7, + 0x405b1dcf, + 0x405b9de0, + 0x405c1df3, + 0x405c9e04, + 0x405d1e11, + 0x405d9e28, + 0x405e1e48, 0x405e8a95, - 0x405f1e37, - 0x405f9e44, - 0x40601e52, - 0x40609e74, - 0x40611e9c, - 0x40619eb1, - 0x40621ec8, - 0x40629ed9, - 0x40631eea, - 0x40639eff, - 0x40641f16, - 0x40649f27, - 0x40651f42, - 0x40659f59, - 0x40661f71, - 0x40669f9b, - 0x40671fc6, - 0x40679fe7, - 0x40681ffa, - 0x4068a01b, - 0x4069204d, - 0x4069a07b, - 0x406a209c, - 0x406aa0bc, - 0x406b2244, - 0x406ba267, - 0x406c227d, - 0x406ca4a9, - 0x406d24d8, - 0x406da500, - 0x406e2519, - 0x406ea531, - 0x406f2550, - 0x406fa565, - 0x40702578, - 0x4070a595, + 0x405f1e69, + 0x405f9e76, + 0x40601e84, + 0x40609ea6, + 0x40611ece, + 0x40619ee3, + 0x40621efa, + 0x40629f0b, + 0x40631f1c, + 0x40639f31, + 
0x40641f48, + 0x40649f59, + 0x40651f74, + 0x40659f8b, + 0x40661fa3, + 0x40669fcd, + 0x40671ff8, + 0x4067a019, + 0x4068202c, + 0x4068a04d, + 0x4069207f, + 0x4069a0ad, + 0x406a20ce, + 0x406aa0ee, + 0x406b2276, + 0x406ba299, + 0x406c22af, + 0x406ca4db, + 0x406d250a, + 0x406da532, + 0x406e254b, + 0x406ea563, + 0x406f2582, + 0x406fa597, + 0x407025aa, + 0x4070a5c7, 0x40710800, - 0x4071a5a7, - 0x407225ba, - 0x4072a5d3, - 0x407325eb, + 0x4071a5d9, + 0x407225ec, + 0x4072a605, + 0x4073261d, 0x4073936d, - 0x407425ff, - 0x4074a619, - 0x4075262a, - 0x4075a63e, - 0x4076264c, + 0x40742631, + 0x4074a64b, + 0x4075265c, + 0x4075a670, + 0x4076267e, 0x407691aa, - 0x40772671, - 0x4077a693, - 0x407826ae, - 0x4078a6c3, - 0x407926da, - 0x4079a6f0, - 0x407a26fc, - 0x407aa70f, - 0x407b2724, - 0x407ba736, - 0x407c274b, - 0x407ca754, - 0x407d2036, - 0x407d9bb0, - 0x41f4216f, - 0x41f92201, - 0x41fe20f4, - 0x41fea2d0, - 0x41ff23c1, - 0x42032188, - 0x420821aa, - 0x4208a1e6, - 0x420920d8, - 0x4209a220, - 0x420a212f, - 0x420aa10f, - 0x420b214f, - 0x420ba1c8, - 0x420c23dd, - 0x420ca29d, - 0x420d22b7, - 0x420da2ee, - 0x42122308, - 0x421723a4, - 0x4217a34a, - 0x421c236c, - 0x421f2327, - 0x422123f4, - 0x42262387, - 0x422b248d, - 0x422ba456, - 0x422c2475, - 0x422ca430, - 0x422d240f, + 0x407726a3, + 0x4077a6c5, + 0x407826e0, + 0x4078a719, + 0x40792730, + 0x4079a746, + 0x407a2752, + 0x407aa765, + 0x407b277a, + 0x407ba78c, + 0x407c27a1, + 0x407ca7aa, + 0x407d2068, + 0x407d9bc3, + 0x407e26f5, + 0x407e9d5b, + 0x407f1a2a, + 0x41f421a1, + 0x41f92233, + 0x41fe2126, + 0x41fea302, + 0x41ff23f3, + 0x420321ba, + 0x420821dc, + 0x4208a218, + 0x4209210a, + 0x4209a252, + 0x420a2161, + 0x420aa141, + 0x420b2181, + 0x420ba1fa, + 0x420c240f, + 0x420ca2cf, + 0x420d22e9, + 0x420da320, + 0x4212233a, + 0x421723d6, + 0x4217a37c, + 0x421c239e, + 0x421f2359, + 0x42212426, + 0x422623b9, + 0x422b24bf, + 0x422ba488, + 0x422c24a7, + 0x422ca462, + 0x422d2441, 0x4432072b, 0x4432873a, 0x44330746, @@ -772,69 +781,69 @@ Pod::Spec.new do |s| 0x4c3d136d, 0x4c3d937c, 0x4c3e1389, - 0x503229cd, - 0x5032a9dc, - 0x503329e7, - 0x5033a9f7, - 0x50342a10, - 0x5034aa2a, - 0x50352a38, - 0x5035aa4e, - 0x50362a60, - 0x5036aa76, - 0x50372a8f, - 0x5037aaa2, - 0x50382aba, - 0x5038aacb, - 0x50392ae0, - 0x5039aaf4, - 0x503a2b14, - 0x503aab2a, - 0x503b2b42, - 0x503bab54, - 0x503c2b70, - 0x503cab87, - 0x503d2ba0, - 0x503dabb6, - 0x503e2bc3, - 0x503eabd9, - 0x503f2beb, + 0x50322a23, + 0x5032aa32, + 0x50332a3d, + 0x5033aa4d, + 0x50342a66, + 0x5034aa80, + 0x50352a8e, + 0x5035aaa4, + 0x50362ab6, + 0x5036aacc, + 0x50372ae5, + 0x5037aaf8, + 0x50382b10, + 0x5038ab21, + 0x50392b36, + 0x5039ab4a, + 0x503a2b6a, + 0x503aab80, + 0x503b2b98, + 0x503babaa, + 0x503c2bc6, + 0x503cabdd, + 0x503d2bf6, + 0x503dac0c, + 0x503e2c19, + 0x503eac2f, + 0x503f2c41, 0x503f8382, - 0x50402bfe, - 0x5040ac0e, - 0x50412c28, - 0x5041ac37, - 0x50422c51, - 0x5042ac6e, - 0x50432c7e, - 0x5043ac8e, - 0x50442c9d, + 0x50402c54, + 0x5040ac64, + 0x50412c7e, + 0x5041ac8d, + 0x50422ca7, + 0x5042acc4, + 0x50432cd4, + 0x5043ace4, + 0x50442cf3, 0x5044843f, - 0x50452cb1, - 0x5045accf, - 0x50462ce2, - 0x5046acf8, - 0x50472d0a, - 0x5047ad1f, - 0x50482d45, - 0x5048ad53, - 0x50492d66, - 0x5049ad7b, - 0x504a2d91, - 0x504aada1, - 0x504b2dc1, - 0x504badd4, - 0x504c2df7, - 0x504cae25, - 0x504d2e37, - 0x504dae54, - 0x504e2e6f, - 0x504eae8b, - 0x504f2e9d, - 0x504faeb4, - 0x50502ec3, + 0x50452d07, + 0x5045ad25, + 0x50462d38, + 0x5046ad4e, + 0x50472d60, + 0x5047ad75, + 0x50482d9b, + 0x5048ada9, + 0x50492dbc, + 0x5049add1, + 0x504a2de7, + 
0x504aadf7, + 0x504b2e17, + 0x504bae2a, + 0x504c2e4d, + 0x504cae7b, + 0x504d2e8d, + 0x504daeaa, + 0x504e2ec5, + 0x504eaee1, + 0x504f2ef3, + 0x504faf0a, + 0x50502f19, 0x505086ef, - 0x50512ed6, + 0x50512f2c, 0x58320ec9, 0x68320e8b, 0x68328c25, @@ -1215,6 +1224,7 @@ Pod::Spec.new do |s| "DH_PUBLIC_VALUE_LENGTH_IS_WRONG\\0" "DH_P_TOO_LONG\\0" "DIGEST_CHECK_FAILED\\0" + "DOWNGRADE_DETECTED\\0" "DTLS_MESSAGE_TOO_BIG\\0" "ECC_CERT_NOT_FOR_SIGNING\\0" "EMS_STATE_INCONSISTENT\\0" @@ -1251,6 +1261,7 @@ Pod::Spec.new do |s| "NO_CIPHERS_AVAILABLE\\0" "NO_CIPHERS_PASSED\\0" "NO_CIPHER_MATCH\\0" + "NO_COMMON_SIGNATURE_ALGORITHMS\\0" "NO_COMPRESSION_SPECIFIED\\0" "NO_METHOD_SPECIFIED\\0" "NO_P256_SUPPORT\\0" @@ -1341,6 +1352,7 @@ Pod::Spec.new do |s| "UNSUPPORTED_COMPRESSION_ALGORITHM\\0" "UNSUPPORTED_ELLIPTIC_CURVE\\0" "UNSUPPORTED_PROTOCOL\\0" + "UNSUPPORTED_PROTOCOL_FOR_CUSTOM_KEY\\0" "WRONG_CERTIFICATE_TYPE\\0" "WRONG_CIPHER_RETURNED\\0" "WRONG_CURVE\\0" diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m index da9473f9a27..05a1d10f6de 100644 --- a/src/objective-c/GRPCClient/GRPCCall.m +++ b/src/objective-c/GRPCClient/GRPCCall.m @@ -208,13 +208,9 @@ NSString * const kGRPCTrailersKey = @"io.grpc.TrailersKey"; // don't want to throw, because the app shouldn't crash for a behavior // that's on the hands of any server to have. Instead we finish and ask // the server to cancel. - // - // TODO(jcanizales): No canonical code is appropriate for this situation - // (because it's just a client problem). Use another domain and an - // appropriately-documented code. [weakSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain - code:GRPCErrorCodeInternal - userInfo:nil]]; + code:GRPCErrorCodeResourceExhausted + userInfo:@{NSLocalizedDescriptionKey: @"Client does not have enough memory to hold the server response."}]]; [weakSelf cancelCall]; return; } diff --git a/src/objective-c/ProtoRPC/ProtoRPC.m b/src/objective-c/ProtoRPC/ProtoRPC.m index e7232f26832..83d1b655e8d 100644 --- a/src/objective-c/ProtoRPC/ProtoRPC.m +++ b/src/objective-c/ProtoRPC/ProtoRPC.m @@ -33,7 +33,11 @@ #import "ProtoRPC.h" -#import +#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS + #import +#else + #import +#endif #import #import diff --git a/src/objective-c/README.md b/src/objective-c/README.md index 736c324ca95..6e917ddd813 100644 --- a/src/objective-c/README.md +++ b/src/objective-c/README.md @@ -1,12 +1,12 @@ [![Cocoapods](https://img.shields.io/cocoapods/v/gRPC.svg)](https://cocoapods.org/pods/gRPC) # gRPC for Objective-C -- [Install protoc with the gRPC plugin](#install) - [Write your API declaration in proto format](#write-protos) - [Integrate a proto library in your project](#cocoapods) - [Use the generated library in your code](#use) - [Use gRPC without Protobuf](#no-proto) -- [Alternative installation methods](#alternatives) +- [Alternatives to the steps above](#alternatives) + - [Install protoc with the gRPC plugin](#install) - [Install protoc and the gRPC plugin without using Homebrew](#no-homebrew) - [Integrate the generated gRPC library without using Cocoapods](#no-cocoapods) @@ -15,18 +15,6 @@ usage and adds some interoperability guarantees. Here we use [Protocol Buffers][ plugin for the Protobuf Compiler (_protoc_) to generate client libraries to communicate with gRPC services. - -## Install protoc with the gRPC plugin - -On Mac OS X, install [homebrew][]. 
- -Run the following command to install _protoc_ and the gRPC _protoc_ plugin: -```sh -$ curl -fsSL https://goo.gl/getgrpc | bash - -``` -This will download and run the [gRPC install script][]. After the command completes, you're ready to -proceed. - ## Write your API declaration in proto format @@ -40,7 +28,8 @@ Install [Cocoapods](https://cocoapods.org/#install). You need to create a Podspec file for your proto library. You may simply copy the following example to the directory where your `.proto` files are located, updating the name, version and license as -necessary: +necessary. You also need to set the `pods_root` variable to the correct value, depending on where +you place this podspec relative to your Podfile. ```ruby Pod::Spec.new do |s| @@ -55,16 +44,44 @@ Pod::Spec.new do |s| s.ios.deployment_target = '7.1' s.osx.deployment_target = '10.9' + # Base directory where the .proto files are. + src = '.' + + # We'll use protoc with the gRPC plugin. + s.dependency '!ProtoCompiler-gRPCPlugin', '~> 1.0.0-pre1' + + # Pods directory corresponding to this app's Podfile, relative to the location of this podspec. + pods_root = '/Pods' + + # Path where Cocoapods downloads protoc and the gRPC plugin. + protoc_dir = "#{pods_root}/!ProtoCompiler" + protoc = "#{protoc_dir}/protoc" + plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin" + + # Directory where you want the generated files to be placed. This is an example. + dir = "#{pods_root}/#{s.name}" + # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. # You can run this command manually if you later change your protos and need to regenerate. - s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto" + # Alternatively, you can advance the version of this podspec and run `pod update`. + s.prepare_command = <<-CMD + mkdir -p #{dir} + #{protoc} \ + --plugin=protoc-gen-grpc=#{plugin} \ + --objc_out=#{dir} \ + --grpc_out=#{dir} \ + -I #{src} \ + -I #{protoc_dir} \ + #{src}/*.proto + CMD # The --objc_out plugin generates a pair of .pbobjc.h/.pbobjc.m files for each .proto file. - s.subspec "Messages" do |ms| - ms.source_files = "*.pbobjc.{h,m}" - ms.header_mappings_dir = "." + s.subspec 'Messages' do |ms| + ms.source_files = "#{dir}/*.pbobjc.{h,m}" + ms.header_mappings_dir = dir ms.requires_arc = false - ms.dependency "Protobuf", "~> 3.0.0-beta-2" + # The generated files depend on the protobuf runtime. + ms.dependency 'Protobuf' # This is needed by all pods that depend on Protobuf: ms.pod_target_xcconfig = { 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', @@ -73,11 +90,12 @@ Pod::Spec.new do |s| # The --objcgrpc_out plugin generates a pair of .pbrpc.h/.pbrpc.m files for each .proto file with # a service defined. - s.subspec "Services" do |ss| - ss.source_files = "*.pbrpc.{h,m}" - ss.header_mappings_dir = "." + s.subspec 'Services' do |ss| + ss.source_files = "#{dir}/*.pbrpc.{h,m}" + ss.header_mappings_dir = dir ss.requires_arc = true - ss.dependency "gRPC-ProtoRPC", "~> 0.14" + # The generated files depend on the gRPC runtime, and on the files generated by `--objc_out`. + ss.dependency 'gRPC-ProtoRPC' ss.dependency "#{s.name}/Messages" end end @@ -89,11 +107,14 @@ Note: If your proto files are in a directory hierarchy, you might want to adjust the sample Podspec above. For example, you could use: ```ruby - s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto" + s.prepare_command = <<-CMD + ... 
+ #{src}/*.proto #{src}/**/*.proto + CMD ... - ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}" + ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}" ... - ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}" + ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}" ``` Once your library has a Podspec, Cocoapods can install it into any XCode project. For that, go into @@ -121,19 +142,33 @@ pod install ## Use the generated library in your code -Please check this [sample app][] for examples of how to use a generated gRPC library. +Please check the [example apps][] for examples of how to use a generated gRPC library. ## Use gRPC without Protobuf -The [sample app][] has an example of how to use the generic gRPC Objective-C client without -generated files. +This [tests file](https://github.com/grpc/grpc/tree/master/src/objective-c/tests/GRPCClientTests.m) +shows how to use the generic gRPC Objective-C client without generated protobuf files. -## Alternative installation methods +## Alternatives to the steps above + + +### Install _protoc_ with the gRPC plugin + +Although it's not recommended (because it can lead to hard-to-solve version conflicts), it is +sometimes more convenient to install _protoc_ and the gRPC plugin on your development machine, +instead of letting Cocoapods download the appropriate versions for you. To do so, on Mac OS X or +later, install [homebrew][]. + +Then run the following command to install _protoc_ and the gRPC _protoc_ plugin: +```sh +$ curl -fsSL https://goo.gl/getgrpc | bash - +``` +This will download and run the [gRPC install script][]. -### Install protoc and the gRPC plugin without using Homebrew +### Install _protoc_ and the gRPC plugin without using Homebrew First install v3 of the Protocol Buffers compiler (_protoc_), by cloning [its Git repository](https://github.com/google/protobuf) and following these cloned. Compile the gRPC plugins for _protoc_: ```sh -make plugins +make grpc_objective_c_plugin ``` Create a symbolic link to the compiled plugin binary somewhere in your `$PATH`: ```sh ln -s `pwd`/bins/opt/grpc_objective_c_plugin /usr/local/bin/protoc-gen-objcgrpc ``` -(Notice that the name of the created link must begin with "protoc-gen-" for _protoc_ to recognize it -as a plugin). +(Notice that the name of the created link must begin with "`protoc-gen-`" for _protoc_ to recognize +it as a plugin). If you don't want to create the symbolic link, you can alternatively copy the binary (with the appropriate name). Or you might prefer instead to specify the plugin's path as a flag when invoking @@ -178,5 +213,5 @@ Objective-C Protobuf runtime library.
[Protocol Buffers]:https://developers.google.com/protocol-buffers/ [homebrew]:http://brew.sh [gRPC install script]:https://raw.githubusercontent.com/grpc/homebrew-grpc/master/scripts/install -[example Podfile]:https://github.com/grpc/grpc/blob/master/src/objective-c/examples/Sample/Podfile -[sample app]: https://github.com/grpc/grpc/tree/master/src/objective-c/examples/Sample +[example Podfile]:https://github.com/grpc/grpc/blob/master/examples/objective-c/helloworld/Podfile +[example apps]: https://github.com/grpc/grpc/tree/master/examples/objective-c diff --git a/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec b/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec index e3b50ddea50..7222a80b887 100644 --- a/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec +++ b/src/objective-c/examples/RemoteTestClient/RemoteTest.podspec @@ -11,15 +11,30 @@ Pod::Spec.new do |s| s.osx.deployment_target = '10.9' # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. + s.dependency "!ProtoCompiler-gRPCPlugin", "~> 1.0.0-pre1" + + repo_root = '../../../..' + bin_dir = "#{repo_root}/bins/$CONFIG" + + protoc = "#{bin_dir}/protobuf/protoc" + well_known_types_dir = "#{repo_root}/third_party/protobuf/src" + plugin = "#{bin_dir}/grpc_objective_c_plugin" + s.prepare_command = <<-CMD - protoc --objc_out=. --objcgrpc_out=. *.proto + #{protoc} \ + --plugin=protoc-gen-grpc=#{plugin} \ + --objc_out=. \ + --grpc_out=. \ + -I . \ + -I #{well_known_types_dir} \ + *.proto CMD s.subspec 'Messages' do |ms| ms.source_files = '*.pbobjc.{h,m}' ms.header_mappings_dir = '.' ms.requires_arc = false - ms.dependency 'Protobuf', '~> 3.0.0-beta-3.1' + ms.dependency 'Protobuf' # This is needed by all pods that depend on Protobuf: ms.pod_target_xcconfig = { 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', @@ -30,7 +45,7 @@ Pod::Spec.new do |s| ss.source_files = '*.pbrpc.{h,m}' ss.header_mappings_dir = '.' ss.requires_arc = true - ss.dependency 'gRPC-ProtoRPC', '~> 0.14' + ss.dependency 'gRPC-ProtoRPC' ss.dependency "#{s.name}/Messages" end end diff --git a/src/objective-c/examples/RemoteTestClient/empty.proto b/src/objective-c/examples/RemoteTestClient/empty.proto deleted file mode 100644 index a678048289e..00000000000 --- a/src/objective-c/examples/RemoteTestClient/empty.proto +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2015, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package grpc.testing; - -option objc_class_prefix = "RMT"; - -// An empty message that you can re-use to avoid defining duplicated empty -// messages in your project. A typical example is to use it as argument or the -// return value of a service API. For instance: -// -// service Foo { -// rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { }; -// }; -// -message Empty {} diff --git a/src/objective-c/examples/RemoteTestClient/test.proto b/src/objective-c/examples/RemoteTestClient/test.proto index 514c3b80955..5c359c5c129 100644 --- a/src/objective-c/examples/RemoteTestClient/test.proto +++ b/src/objective-c/examples/RemoteTestClient/test.proto @@ -31,7 +31,7 @@ // of unary/streaming requests/responses. syntax = "proto3"; -import "empty.proto"; +import "google/protobuf/empty.proto"; import "messages.proto"; package grpc.testing; @@ -42,7 +42,7 @@ option objc_class_prefix = "RMT"; // performance with various types of payload. service TestService { // One empty request followed by one empty response. - rpc EmptyCall(grpc.testing.Empty) returns (grpc.testing.Empty); + rpc EmptyCall(google.protobuf.Empty) returns (google.protobuf.Empty); // One request followed by one response. rpc UnaryCall(SimpleRequest) returns (SimpleResponse); diff --git a/src/objective-c/examples/Sample/Podfile b/src/objective-c/examples/Sample/Podfile index 80ab2c320d4..f6f0c00d5d0 100644 --- a/src/objective-c/examples/Sample/Podfile +++ b/src/objective-c/examples/Sample/Podfile @@ -3,6 +3,8 @@ platform :ios, '8.0' install! 'cocoapods', :deterministic_uuids => false +use_frameworks! if ENV['FRAMEWORKS'] == 'YES' + # Location of gRPC's repo root relative to this file. GRPC_LOCAL_SRC = '../../../..' @@ -12,6 +14,9 @@ target 'Sample' do # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following # lines in your application. 
+ pod '!ProtoCompiler', :path => "#{GRPC_LOCAL_SRC}/src/objective-c" + pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c" + pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" diff --git a/src/objective-c/examples/Sample/Sample.xcodeproj/project.pbxproj b/src/objective-c/examples/Sample/Sample.xcodeproj/project.pbxproj index 5c2a6d14f96..ab7159cda2f 100644 --- a/src/objective-c/examples/Sample/Sample.xcodeproj/project.pbxproj +++ b/src/objective-c/examples/Sample/Sample.xcodeproj/project.pbxproj @@ -7,7 +7,7 @@ objects = { /* Begin PBXBuildFile section */ - 426A5020E0E158A101BCA1D9 /* libPods-Sample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = C20055928615A6F8434E26B4 /* libPods-Sample.a */; }; + 3F035697392F601049D3DDE1 /* Pods_Sample.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = CC1B27EA0C428429B07BC34B /* Pods_Sample.framework */; }; 6369A2701A9322E20015FC5C /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 6369A26F1A9322E20015FC5C /* main.m */; }; 6369A2731A9322E20015FC5C /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 6369A2721A9322E20015FC5C /* AppDelegate.m */; }; 6369A2761A9322E20015FC5C /* ViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = 6369A2751A9322E20015FC5C /* ViewController.m */; }; @@ -26,7 +26,7 @@ 6369A2751A9322E20015FC5C /* ViewController.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ViewController.m; sourceTree = ""; }; 6369A2781A9322E20015FC5C /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; 6369A27A1A9322E20015FC5C /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; - C20055928615A6F8434E26B4 /* libPods-Sample.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-Sample.a"; sourceTree = BUILT_PRODUCTS_DIR; }; + CC1B27EA0C428429B07BC34B /* Pods_Sample.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_Sample.framework; sourceTree = BUILT_PRODUCTS_DIR; }; E3C01DF315C4E7433BCEC6E6 /* Pods-Sample.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Sample.debug.xcconfig"; path = "Pods/Target Support Files/Pods-Sample/Pods-Sample.debug.xcconfig"; sourceTree = ""; }; /* End PBXFileReference section */ @@ -35,7 +35,7 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 426A5020E0E158A101BCA1D9 /* libPods-Sample.a in Frameworks */, + 3F035697392F601049D3DDE1 /* Pods_Sample.framework in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -95,7 +95,7 @@ C4C2C5219053E079C9EFB930 /* Frameworks */ = { isa = PBXGroup; children = ( - C20055928615A6F8434E26B4 /* libPods-Sample.a */, + CC1B27EA0C428429B07BC34B /* Pods_Sample.framework */, ); name = Frameworks; sourceTree = ""; @@ -129,7 +129,7 @@ 6369A2621A9322E20015FC5C /* Project object */ = { isa = PBXProject; attributes = { - LastUpgradeCheck = 0610; + LastUpgradeCheck = 0730; ORGANIZATIONNAME = gRPC; TargetAttributes = { 6369A2691A9322E20015FC5C = { @@ -260,6 +260,7 @@ "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; COPY_PHASE_STRIP = NO; ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; GCC_C_LANGUAGE_STANDARD = gnu99; GCC_DYNAMIC_NO_PIC = NO; 
GCC_OPTIMIZATION_LEVEL = 0; @@ -325,6 +326,7 @@ ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; INFOPLIST_FILE = Sample/Info.plist; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "org.grpc.$(PRODUCT_NAME:rfc1034identifier)"; PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Debug; @@ -336,6 +338,7 @@ ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; INFOPLIST_FILE = Sample/Info.plist; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "org.grpc.$(PRODUCT_NAME:rfc1034identifier)"; PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Release; diff --git a/src/objective-c/examples/Sample/Sample/Info.plist b/src/objective-c/examples/Sample/Sample/Info.plist index 4436635ab4e..2cdf09dc2fc 100644 --- a/src/objective-c/examples/Sample/Sample/Info.plist +++ b/src/objective-c/examples/Sample/Sample/Info.plist @@ -7,7 +7,7 @@ CFBundleExecutable $(EXECUTABLE_NAME) CFBundleIdentifier - org.grpc.$(PRODUCT_NAME:rfc1034identifier) + $(PRODUCT_BUNDLE_IDENTIFIER) CFBundleInfoDictionaryVersion 6.0 CFBundleName diff --git a/src/objective-c/examples/Sample/Sample/ViewController.m b/src/objective-c/examples/Sample/Sample/ViewController.m index 433a8a2ba3b..70244ee62dd 100644 --- a/src/objective-c/examples/Sample/Sample/ViewController.m +++ b/src/objective-c/examples/Sample/Sample/ViewController.m @@ -66,9 +66,9 @@ // Same example call using the generic gRPC client library: - ProtoMethod *method = [[ProtoMethod alloc] initWithPackage:@"grpc.testing" - service:@"TestService" - method:@"UnaryCall"]; + GRPCProtoMethod *method = [[GRPCProtoMethod alloc] initWithPackage:@"grpc.testing" + service:@"TestService" + method:@"UnaryCall"]; GRXWriter *requestsWriter = [GRXWriter writerWithValue:[request data]]; diff --git a/src/objective-c/examples/SwiftSample/Info.plist b/src/objective-c/examples/SwiftSample/Info.plist index 10f0450b346..2cdf09dc2fc 100644 --- a/src/objective-c/examples/SwiftSample/Info.plist +++ b/src/objective-c/examples/SwiftSample/Info.plist @@ -7,7 +7,7 @@ CFBundleExecutable $(EXECUTABLE_NAME) CFBundleIdentifier - io.grpc.$(PRODUCT_NAME:rfc1034identifier) + $(PRODUCT_BUNDLE_IDENTIFIER) CFBundleInfoDictionaryVersion 6.0 CFBundleName diff --git a/src/objective-c/examples/SwiftSample/Podfile b/src/objective-c/examples/SwiftSample/Podfile index b675fd29ef5..b08a346ae2a 100644 --- a/src/objective-c/examples/SwiftSample/Podfile +++ b/src/objective-c/examples/SwiftSample/Podfile @@ -3,6 +3,8 @@ platform :ios, '8.0' install! 'cocoapods', :deterministic_uuids => false +use_frameworks! + # Location of gRPC's repo root relative to this file. GRPC_LOCAL_SRC = '../../../..' @@ -12,6 +14,9 @@ target 'SwiftSample' do # Use the local versions of Protobuf, BoringSSL, and gRPC. You don't need any of the following # lines in your application. 
+ pod '!ProtoCompiler', :path => "#{GRPC_LOCAL_SRC}/src/objective-c" + pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c" + pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf" pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" diff --git a/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/project.pbxproj b/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/project.pbxproj index 2a1b30f2cf9..afc3da71168 100644 --- a/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/project.pbxproj +++ b/src/objective-c/examples/SwiftSample/SwiftSample.xcodeproj/project.pbxproj @@ -7,11 +7,11 @@ objects = { /* Begin PBXBuildFile section */ + 2C0B024CB798839E17F76126 /* Pods_SwiftSample.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = B394F343BDE186C5436DBDB9 /* Pods_SwiftSample.framework */; }; 633BFFC81B950B210007E424 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 633BFFC71B950B210007E424 /* AppDelegate.swift */; }; 633BFFCA1B950B210007E424 /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 633BFFC91B950B210007E424 /* ViewController.swift */; }; 633BFFCD1B950B210007E424 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 633BFFCB1B950B210007E424 /* Main.storyboard */; }; 633BFFCF1B950B210007E424 /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 633BFFCE1B950B210007E424 /* Images.xcassets */; }; - 92EDB1408A1E1E7DDAB25D9C /* libPods-SwiftSample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 69BB5C6CA3C1F97E007AC527 /* libPods-SwiftSample.a */; }; /* End PBXBuildFile section */ /* Begin PBXFileReference section */ @@ -21,9 +21,8 @@ 633BFFC91B950B210007E424 /* ViewController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ViewController.swift; sourceTree = ""; }; 633BFFCC1B950B210007E424 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; 633BFFCE1B950B210007E424 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; - 6367AD231B951655007FD3A4 /* Bridging-Header.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "Bridging-Header.h"; sourceTree = ""; }; - 69BB5C6CA3C1F97E007AC527 /* libPods-SwiftSample.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-SwiftSample.a"; sourceTree = BUILT_PRODUCTS_DIR; }; A7E614A494D89D01BB395761 /* Pods-SwiftSample.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-SwiftSample.debug.xcconfig"; path = "Pods/Target Support Files/Pods-SwiftSample/Pods-SwiftSample.debug.xcconfig"; sourceTree = ""; }; + B394F343BDE186C5436DBDB9 /* Pods_SwiftSample.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_SwiftSample.framework; sourceTree = BUILT_PRODUCTS_DIR; }; C314E3E246AF23AC29B38FCF /* Pods-SwiftSample.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-SwiftSample.release.xcconfig"; path = "Pods/Target Support Files/Pods-SwiftSample/Pods-SwiftSample.release.xcconfig"; sourceTree = ""; }; /* End PBXFileReference section */ @@ -32,7 +31,7 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 92EDB1408A1E1E7DDAB25D9C /* 
libPods-SwiftSample.a in Frameworks */, + 2C0B024CB798839E17F76126 /* Pods_SwiftSample.framework in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -74,7 +73,6 @@ 633BFFCB1B950B210007E424 /* Main.storyboard */, 633BFFCE1B950B210007E424 /* Images.xcassets */, 633BFFC51B950B210007E424 /* Supporting Files */, - 6367AD231B951655007FD3A4 /* Bridging-Header.h */, ); name = SwiftSample; sourceTree = SOURCE_ROOT; @@ -90,7 +88,7 @@ 9D63A7F6423989BA306810CA /* Frameworks */ = { isa = PBXGroup; children = ( - 69BB5C6CA3C1F97E007AC527 /* libPods-SwiftSample.a */, + B394F343BDE186C5436DBDB9 /* Pods_SwiftSample.framework */, ); name = Frameworks; sourceTree = ""; @@ -125,7 +123,7 @@ isa = PBXProject; attributes = { LastSwiftUpdateCheck = 0710; - LastUpgradeCheck = 0640; + LastUpgradeCheck = 0730; ORGANIZATIONNAME = gRPC; TargetAttributes = { 633BFFC11B950B210007E424 = { @@ -256,6 +254,7 @@ COPY_PHASE_STRIP = NO; DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; GCC_C_LANGUAGE_STANDARD = gnu99; GCC_DYNAMIC_NO_PIC = NO; GCC_NO_COMMON_BLOCKS = YES; @@ -325,8 +324,9 @@ ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; INFOPLIST_FILE = Info.plist; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "io.grpc.$(PRODUCT_NAME:rfc1034identifier)"; PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_OBJC_BRIDGING_HEADER = "Bridging-Header.h"; + SWIFT_OBJC_BRIDGING_HEADER = ""; USER_HEADER_SEARCH_PATHS = ""; }; name = Debug; @@ -338,8 +338,9 @@ ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; INFOPLIST_FILE = Info.plist; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "io.grpc.$(PRODUCT_NAME:rfc1034identifier)"; PRODUCT_NAME = "$(TARGET_NAME)"; - SWIFT_OBJC_BRIDGING_HEADER = "Bridging-Header.h"; + SWIFT_OBJC_BRIDGING_HEADER = ""; USER_HEADER_SEARCH_PATHS = ""; }; name = Release; diff --git a/src/objective-c/examples/SwiftSample/ViewController.swift b/src/objective-c/examples/SwiftSample/ViewController.swift index 2a95d2de516..e7bab137620 100644 --- a/src/objective-c/examples/SwiftSample/ViewController.swift +++ b/src/objective-c/examples/SwiftSample/ViewController.swift @@ -33,6 +33,8 @@ import UIKit +import RemoteTest + class ViewController: UIViewController { override func viewDidLoad() { @@ -60,7 +62,7 @@ class ViewController: UIViewController { // Same but manipulating headers: - var RPC : ProtoRPC! // Needed to convince Swift to capture by reference (__block) + var RPC : GRPCProtoCall! // Needed to convince Swift to capture by reference (__block) RPC = service.RPCToUnaryCallWithRequest(request) { response, error in if let response = response { NSLog("2. Finished successfully with response:\n\(response)") @@ -79,7 +81,7 @@ class ViewController: UIViewController { // Same example call using the generic gRPC client library: - let method = ProtoMethod(package: "grpc.testing", service: "TestService", method: "UnaryCall") + let method = GRPCProtoMethod(package: "grpc.testing", service: "TestService", method: "UnaryCall") let requestsWriter = GRXWriter(value: request.data()) diff --git a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m new file mode 100644 index 00000000000..58abb492cea --- /dev/null +++ b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m @@ -0,0 +1,394 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * This test file is derived from fixture h2_ssl.c in core end2end test + * (test/core/end2end/fixture/h2_ssl.c). The structure of the fixture is + * preserved as much as possible + * + * This fixture creates a server full stack using chttp2 and a client + * full stack using Cronet. 
End-to-end tests are run against this + * configuration + * + */ + + +#import +#include "test/core/end2end/end2end_tests.h" + +#include +#include + +#include +#include +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/security/credentials/credentials.h" +#include "src/core/lib/support/env.h" +#include "src/core/lib/support/string.h" +#include "src/core/lib/support/tmpfile.h" +#include "test/core/end2end/data/ssl_test_data.h" +#include "test/core/util/port.h" +#include "test/core/util/test_config.h" + +#include +#import + +typedef struct fullstack_secure_fixture_data { + char *localaddr; +} fullstack_secure_fixture_data; + +static grpc_end2end_test_fixture chttp2_create_fixture_secure_fullstack( + grpc_channel_args *client_args, grpc_channel_args *server_args) { + grpc_end2end_test_fixture f; + int port = grpc_pick_unused_port_or_die(); + fullstack_secure_fixture_data *ffd = + gpr_malloc(sizeof(fullstack_secure_fixture_data)); + memset(&f, 0, sizeof(f)); + + gpr_join_host_port(&ffd->localaddr, "localhost", port); + + f.fixture_data = ffd; + f.cq = grpc_completion_queue_create(NULL); + + return f; +} + +static void process_auth_failure(void *state, grpc_auth_context *ctx, + const grpc_metadata *md, size_t md_count, + grpc_process_auth_metadata_done_cb cb, + void *user_data) { + GPR_ASSERT(state == NULL); + cb(user_data, NULL, 0, NULL, 0, GRPC_STATUS_UNAUTHENTICATED, NULL); +} + +static void cronet_init_client_secure_fullstack( + grpc_end2end_test_fixture *f, grpc_channel_args *client_args, + cronet_engine *cronetEngine) { + fullstack_secure_fixture_data *ffd = f->fixture_data; + f->client = + grpc_cronet_secure_channel_create(cronetEngine, ffd->localaddr, client_args, NULL); + GPR_ASSERT(f->client != NULL); +} + +static void chttp2_init_server_secure_fullstack( + grpc_end2end_test_fixture *f, grpc_channel_args *server_args, + grpc_server_credentials *server_creds) { + fullstack_secure_fixture_data *ffd = f->fixture_data; + if (f->server) { + grpc_server_destroy(f->server); + } + f->server = grpc_server_create(server_args, NULL); + grpc_server_register_completion_queue(f->server, f->cq, NULL); + GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr, + server_creds)); + grpc_server_credentials_release(server_creds); + grpc_server_start(f->server); +} + +static void chttp2_tear_down_secure_fullstack(grpc_end2end_test_fixture *f) { + fullstack_secure_fixture_data *ffd = f->fixture_data; + gpr_free(ffd->localaddr); + gpr_free(ffd); +} + +static void cronet_init_client_simple_ssl_secure_fullstack( + grpc_end2end_test_fixture *f, grpc_channel_args *client_args) { + grpc_arg ssl_name_override = {GRPC_ARG_STRING, + GRPC_SSL_TARGET_NAME_OVERRIDE_ARG, + {"foo.test.google.fr"}}; + + grpc_channel_args *new_client_args = + grpc_channel_args_copy_and_add(client_args, &ssl_name_override, 1); + [Cronet setHttp2Enabled:YES]; + [Cronet start]; + cronet_engine *cronetEngine = [Cronet getGlobalEngine]; + + cronet_init_client_secure_fullstack(f, new_client_args, cronetEngine); + grpc_channel_args_destroy(new_client_args); +} + +static int fail_server_auth_check(grpc_channel_args *server_args) { + size_t i; + if (server_args == NULL) return 0; + for (i = 0; i < server_args->num_args; i++) { + if (strcmp(server_args->args[i].key, FAIL_AUTH_CHECK_SERVER_ARG_NAME) == + 0) { + return 1; + } + } + return 0; +} + +static void chttp2_init_server_simple_ssl_secure_fullstack( + grpc_end2end_test_fixture *f, grpc_channel_args *server_args) { + grpc_ssl_pem_key_cert_pair 
pem_cert_key_pair = {test_server1_key, + test_server1_cert}; + grpc_server_credentials *ssl_creds = + grpc_ssl_server_credentials_create(NULL, &pem_cert_key_pair, 1, 0, NULL); + if (fail_server_auth_check(server_args)) { + grpc_auth_metadata_processor processor = {process_auth_failure, NULL, NULL}; + grpc_server_credentials_set_auth_metadata_processor(ssl_creds, processor); + } + chttp2_init_server_secure_fullstack(f, server_args, ssl_creds); +} + +/* All test configurations */ + +static grpc_end2end_test_config configs[] = { + {"chttp2/simple_ssl_fullstack", + FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION | + FEATURE_MASK_SUPPORTS_PER_CALL_CREDENTIALS, + chttp2_create_fixture_secure_fullstack, + cronet_init_client_simple_ssl_secure_fullstack, + chttp2_init_server_simple_ssl_secure_fullstack, + chttp2_tear_down_secure_fullstack}, +}; + + + +static char *roots_filename; + +@interface CoreCronetEnd2EndTests : XCTestCase + +@end + +@implementation CoreCronetEnd2EndTests + + +// The setUp() function is run before the test cases run and only run once ++ (void)setUp { + [super setUp]; + + FILE *roots_file; + size_t roots_size = strlen(test_root_cert); + + char *argv[] = {"CoreCronetEnd2EndTests"}; + grpc_test_init(1, argv); + grpc_end2end_tests_pre_init(); + + /* Set the SSL roots env var. */ + roots_file = gpr_tmpfile("chttp2_simple_ssl_fullstack_test", &roots_filename); + GPR_ASSERT(roots_filename != NULL); + GPR_ASSERT(roots_file != NULL); + GPR_ASSERT(fwrite(test_root_cert, 1, roots_size, roots_file) == roots_size); + fclose(roots_file); + gpr_setenv(GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR, roots_filename); + + grpc_init(); + +} + +// The tearDown() function is run after all test cases finish running ++ (void)tearDown { + grpc_shutdown(); + + /* Cleanup. 
*/ + remove(roots_filename); + gpr_free(roots_filename); + + [super tearDown]; +} + +- (void)testIndividualCase:(char*)test_case { + char *argv[] = {"h2_ssl", test_case}; + + for (int i = 0; i < sizeof(configs) / sizeof(*configs); i++) { + grpc_end2end_tests(sizeof(argv) / sizeof(argv[0]), argv, configs[i]); + } +} + +// TODO(mxyan): Use NSStringFromSelector(_cmd) to acquire test name from the +// test case method name, so that bodies of test cases can stay identical +- (void)testBadHostname { + [self testIndividualCase:"bad_hostname"]; +} + +- (void)testBinaryMetadata { + [self testIndividualCase:"binary_metadata"]; +} + +- (void)testCallCreds { + [self testIndividualCase:"call_creds"]; +} + +- (void)testCancelAfterAccept { + [self testIndividualCase:"cancel_after_accept"]; +} + +- (void)testCancelAfterClientDone { + [self testIndividualCase:"cancel_after_client_done"]; +} + +- (void)testCancelAfterInvoke { + [self testIndividualCase:"cancel_after_invoke"]; +} + +- (void)testCancelBeforeInvoke { + [self testIndividualCase:"cancel_before_invoke"]; +} + +- (void)testCancelInAVacuum { + [self testIndividualCase:"cancel_in_a_vacuum"]; +} + +- (void)testCancelWithStatus { + [self testIndividualCase:"cancel_with_status"]; +} + +- (void)testCompressedPayload { + [self testIndividualCase:"compressed_payload"]; +} + +- (void)testConnectivity { + [self testIndividualCase:"connectivity"]; +} + +- (void)testDefaultHost { + [self testIndividualCase:"default_host"]; +} + +- (void)testDisappearingServer { + [self testIndividualCase:"disappearing_server"]; +} + +- (void)testEmptyBatch { + [self testIndividualCase:"empty_batch"]; +} + +- (void)testFilterCausesClose { + [self testIndividualCase:"filter_causes_close"]; +} + +- (void)testGracefulServerShutdown { + [self testIndividualCase:"graceful_server_shutdown"]; +} + +- (void)testHighInitialSeqno { + [self testIndividualCase:"high_initial_seqno"]; +} + +- (void)testHpackSize { + [self testIndividualCase:"hpack_size"]; +} + +- (void)testIdempotentRequest { + [self testIndividualCase:"idempotent_request"]; +} + +- (void)testInvokeLargeRequest { + [self testIndividualCase:"invoke_large_request"]; +} + +- (void)testLargeMetadata { + [self testIndividualCase:"large_metadata"]; +} + +- (void)testMaxConcurrentStreams { + [self testIndividualCase:"max_concurrent_streams"]; +} + +- (void)testMaxMessageLength { + [self testIndividualCase:"max_message_length"]; +} + +- (void)testNegativeDeadline { + [self testIndividualCase:"negative_deadline"]; +} + +- (void)testNetworkStatusChange { + [self testIndividualCase:"network_status_change"]; +} + +- (void)testNoOp { + [self testIndividualCase:"no_op"]; +} + +- (void)testPayload { + [self testIndividualCase:"payload"]; +} + +- (void)testPing { + [self testIndividualCase:"ping"]; +} + +- (void)testPingPongStreaming { + [self testIndividualCase:"ping_pong_streaming"]; +} + +- (void)testRegisteredCall { + [self testIndividualCase:"registered_call"]; +} + +- (void)testRequestWithFlags { + [self testIndividualCase:"request_with_flags"]; +} + +- (void)testRequestWithPayload { + [self testIndividualCase:"request_with_payload"]; +} + +- (void)testServerFinishesRequest { + [self testIndividualCase:"server_finishes_request"]; +} + +- (void)testShutdownFinishesCalls { + [self testIndividualCase:"shutdown_finishes_calls"]; +} + +- (void)testShutdownFinishesTags { + [self testIndividualCase:"shutdown_finishes_tags"]; +} + +- (void)testSimpleDelayedRequest { + [self testIndividualCase:"simple_delayed_request"]; +} + +- 
(void)testSimpleMetadata { + [self testIndividualCase:"simple_metadata"]; +} + +- (void)testSimpleRequest { + [self testIndividualCase:"simple_request"]; +} + +- (void)testStreamingErrorResponse { + [self testIndividualCase:"streaming_error_response"]; +} + +- (void)testTrailingMetadata { + [self testIndividualCase:"trailing_metadata"]; +} + +@end diff --git a/src/objective-c/tests/CoreCronetEnd2EndTests/Info.plist b/src/objective-c/tests/CoreCronetEnd2EndTests/Info.plist new file mode 100644 index 00000000000..fbeeb96ba6c --- /dev/null +++ b/src/objective-c/tests/CoreCronetEnd2EndTests/Info.plist @@ -0,0 +1,24 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + gRPC.$(PRODUCT_NAME:rfc1034identifier) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? + CFBundleVersion + 1 + + diff --git a/src/objective-c/tests/InteropTestsRemoteWithCronet/Info.plist b/src/objective-c/tests/InteropTestsRemoteWithCronet/Info.plist new file mode 100644 index 00000000000..ba72822e872 --- /dev/null +++ b/src/objective-c/tests/InteropTestsRemoteWithCronet/Info.plist @@ -0,0 +1,24 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? + CFBundleVersion + 1 + + diff --git a/src/objective-c/examples/SwiftSample/Bridging-Header.h b/src/objective-c/tests/InteropTestsRemoteWithCronet/InteropTestsRemoteWithCronet.m similarity index 80% rename from src/objective-c/examples/SwiftSample/Bridging-Header.h rename to src/objective-c/tests/InteropTestsRemoteWithCronet/InteropTestsRemoteWithCronet.m index 65f768a760b..fab8ad8d25f 100644 --- a/src/objective-c/examples/SwiftSample/Bridging-Header.h +++ b/src/objective-c/tests/InteropTestsRemoteWithCronet/InteropTestsRemoteWithCronet.m @@ -31,15 +31,20 @@ * */ -#ifndef SwiftSample_Bridging_Header_h -#define SwiftSample_Bridging_Header_h +#import -#import -#import -#import -#import -#import -#import -#import +#import "InteropTests.h" -#endif +static NSString * const kRemoteSSLHost = @"grpc-test.sandbox.googleapis.com"; + +/** Tests in InteropTests.m, sending the RPCs to a remote SSL server. */ +@interface InteropTestsRemoteWithCronet : InteropTests +@end + +@implementation InteropTestsRemoteWithCronet + ++ (NSString *)host { + return kRemoteSSLHost; +} + +@end diff --git a/src/objective-c/tests/Podfile b/src/objective-c/tests/Podfile index 30a34260d40..3d0664a04f5 100644 --- a/src/objective-c/tests/Podfile +++ b/src/objective-c/tests/Podfile @@ -17,16 +17,44 @@ GRPC_LOCAL_SRC = '../../..' 
).each do |target_name| target target_name do pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf", :inhibit_warnings => true - pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true - pod 'CronetFramework', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" - pod 'gRPC', :path => GRPC_LOCAL_SRC - pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + + pod '!ProtoCompiler', :path => "#{GRPC_LOCAL_SRC}/src/objective-c" + pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c" + + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true + + pod 'gRPC', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core', :path => GRPC_LOCAL_SRC pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC pod 'RemoteTest', :path => "RemoteTestClient" end end +target 'CoreCronetEnd2EndTests' do + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true + pod 'CronetFramework', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" + pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core/Cronet-Interface', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core/Cronet-Tests', :path => GRPC_LOCAL_SRC +end + +target 'InteropTestsRemoteWithCronet' do + pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf", :inhibit_warnings => true + + pod '!ProtoCompiler', :path => "#{GRPC_LOCAL_SRC}/src/objective-c" + pod '!ProtoCompiler-gRPCPlugin', :path => "#{GRPC_LOCAL_SRC}/src/objective-c" + + pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true + pod 'CronetFramework', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c" + + pod 'gRPC', :path => GRPC_LOCAL_SRC + pod 'gRPC-Core', :path => GRPC_LOCAL_SRC + pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC + pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC + pod 'RemoteTest', :path => "RemoteTestClient" +end + # gRPC-Core.podspec needs to be modified to be successfully used for local development. A Podfile's # pre_install hook lets us do that. The block passed to it runs after the podspecs are downloaded # and before they are installed in the user project. @@ -63,9 +91,9 @@ post_install do |installer| target.build_configurations.each do |config| config.build_settings['GCC_TREAT_WARNINGS_AS_ERRORS'] = 'YES' end - if target.name == 'gRPC-Core' + if target.name == 'gRPC-Core' or target.name == 'gRPC-Core.default-Cronet-Interface-Cronet-Tests' target.build_configurations.each do |config| - # TODO(zyc) Remove this setting after the issue is resolved + # TODO(zyc): Remove this setting after the issue is resolved # GPR_UNREACHABLE_CODE causes "Control may reach end of non-void # function" warning config.build_settings['GCC_WARN_ABOUT_RETURN_TYPE'] = 'NO' diff --git a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec index 25c9c7f8418..53ba1019131 100644 --- a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec +++ b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec @@ -11,20 +11,30 @@ Pod::Spec.new do |s| s.osx.deployment_target = '10.9' # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. + s.dependency "!ProtoCompiler-gRPCPlugin", "~> 1.0.0-pre1" + + repo_root = '../../../..' 
+ bin_dir = "#{repo_root}/bins/$CONFIG" + + protoc = "#{bin_dir}/protobuf/protoc" + well_known_types_dir = "#{repo_root}/third_party/protobuf/src" + plugin = "#{bin_dir}/grpc_objective_c_plugin" + s.prepare_command = <<-CMD - BINDIR=../../../../bins/$CONFIG - PROTOC=$BINDIR/protobuf/protoc - PLUGIN=$BINDIR/grpc_objective_c_plugin - # we use this path to locate well-known proto files - PROTO_SRC=../../../../third_party/protobuf/src - $PROTOC --plugin=protoc-gen-grpc=$PLUGIN --objc_out=. --grpc_out=. *.proto -I $PROTO_SRC -I . + #{protoc} \ + --plugin=protoc-gen-grpc=#{plugin} \ + --objc_out=. \ + --grpc_out=. \ + -I . \ + -I #{well_known_types_dir} \ + *.proto CMD s.subspec "Messages" do |ms| ms.source_files = "*.pbobjc.{h,m}" ms.header_mappings_dir = "." ms.requires_arc = false - ms.dependency "Protobuf", "~> 3.0.0-beta-3.1" + ms.dependency "Protobuf" # This is needed by all pods that depend on Protobuf: ms.pod_target_xcconfig = { 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', @@ -35,7 +45,7 @@ Pod::Spec.new do |s| ss.source_files = "*.pbrpc.{h,m}" ss.header_mappings_dir = "." ss.requires_arc = true - ss.dependency "gRPC-ProtoRPC", "~> 0.14" + ss.dependency "gRPC-ProtoRPC" ss.dependency "#{s.name}/Messages" end end diff --git a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj index f9389a4977f..1d903478fdb 100644 --- a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj +++ b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj @@ -7,11 +7,18 @@ objects = { /* Begin PBXBuildFile section */ + 09B76D9585ACE7403804D9DC /* libPods-InteropTestsRemoteWithCronet.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 9E9444C764F0FFF64A7EB58E /* libPods-InteropTestsRemoteWithCronet.a */; }; 0F9232F984C08643FD40C34F /* libPods-InteropTestsRemote.a in Frameworks */ = {isa = PBXBuildFile; fileRef = DBE059B4AC7A51919467EEC0 /* libPods-InteropTestsRemote.a */; }; 16A9E77B6E336B3C0B9BA6E0 /* libPods-InteropTestsLocalSSL.a in Frameworks */ = {isa = PBXBuildFile; fileRef = DBEDE45BDA60DF1E1C8950C0 /* libPods-InteropTestsLocalSSL.a */; }; 20DFDF829DD993A4A00D5662 /* libPods-RxLibraryUnitTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = A58BE6DF1C62D1739EBB2C78 /* libPods-RxLibraryUnitTests.a */; }; 333E8FC01C8285B7C547D799 /* libPods-InteropTestsLocalCleartext.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FD346DB2C23F676C4842F3FF /* libPods-InteropTestsLocalCleartext.a */; }; 3D7C85F6AA68C4A205E3BA16 /* libPods-Tests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 20DFF2F3C97EF098FE5A3171 /* libPods-Tests.a */; }; + 5E8A5DA71D3840B4000F8BC4 /* CoreCronetEnd2EndTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 5E8A5DA61D3840B4000F8BC4 /* CoreCronetEnd2EndTests.m */; }; + 5E8A5DA91D3840B4000F8BC4 /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; }; + 5EE84BF41D4717E40050C6CC /* InteropTestsRemoteWithCronet.m in Sources */ = {isa = PBXBuildFile; fileRef = 5EE84BF31D4717E40050C6CC /* InteropTestsRemoteWithCronet.m */; }; + 5EE84BF61D4717E40050C6CC /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; }; + 5EE84BFE1D471D400050C6CC /* InteropTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635ED2EB1B1A3BC400FDE5C3 /* InteropTests.m */; }; + 60D2A57ED559F34428C2EEC5 /* libPods-CoreCronetEnd2EndTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBD98AC417B9882D32B19F28 /* 
libPods-CoreCronetEnd2EndTests.a */; }; 6312AE4E1B1BF49B00341DEE /* GRPCClientTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 6312AE4D1B1BF49B00341DEE /* GRPCClientTests.m */; }; 63423F4A1B150A5F006CF63C /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; }; 635697CD1B14FC11007A7283 /* Tests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635697CC1B14FC11007A7283 /* Tests.m */; }; @@ -38,6 +45,20 @@ /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ + 5E8A5DAA1D3840B4000F8BC4 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 635697BF1B14FC11007A7283 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 635697C61B14FC11007A7283; + remoteInfo = Tests; + }; + 5EE84BF71D4717E40050C6CC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 635697BF1B14FC11007A7283 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 635697C61B14FC11007A7283; + remoteInfo = Tests; + }; 63423F4B1B150A5F006CF63C /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 635697BF1B14FC11007A7283 /* Project object */; @@ -91,12 +112,20 @@ 060EF32D7EC0DF67ED617507 /* Pods-Tests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Tests.debug.xcconfig"; path = "Pods/Target Support Files/Pods-Tests/Pods-Tests.debug.xcconfig"; sourceTree = ""; }; 07D10A965323BEA7FE59A74B /* Pods-RxLibraryUnitTests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-RxLibraryUnitTests.debug.xcconfig"; path = "Pods/Target Support Files/Pods-RxLibraryUnitTests/Pods-RxLibraryUnitTests.debug.xcconfig"; sourceTree = ""; }; 0A4F89D9C90E9C30990218F0 /* Pods.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.release.xcconfig; path = "Pods/Target Support Files/Pods/Pods.release.xcconfig"; sourceTree = ""; }; + 0D2284C3DF7E57F0ED504E39 /* Pods-CoreCronetEnd2EndTests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-CoreCronetEnd2EndTests.debug.xcconfig"; path = "Pods/Target Support Files/Pods-CoreCronetEnd2EndTests/Pods-CoreCronetEnd2EndTests.debug.xcconfig"; sourceTree = ""; }; + 17F60BF2871F6AF85FB3FA12 /* Pods-InteropTestsRemoteWithCronet.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsRemoteWithCronet.debug.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsRemoteWithCronet/Pods-InteropTestsRemoteWithCronet.debug.xcconfig"; sourceTree = ""; }; 20DFF2F3C97EF098FE5A3171 /* libPods-Tests.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-Tests.a"; sourceTree = BUILT_PRODUCTS_DIR; }; 35F2B6BF3BAE8F0DC4AFD76E /* libPods.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libPods.a; sourceTree = BUILT_PRODUCTS_DIR; }; 3B0861FC805389C52DB260D4 /* Pods-RxLibraryUnitTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-RxLibraryUnitTests.release.xcconfig"; path = "Pods/Target Support Files/Pods-RxLibraryUnitTests/Pods-RxLibraryUnitTests.release.xcconfig"; sourceTree = ""; }; + 4AD97096D13D7416DC91A72A /* Pods-CoreCronetEnd2EndTests.release.xcconfig */ = {isa = PBXFileReference; 
includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-CoreCronetEnd2EndTests.release.xcconfig"; path = "Pods/Target Support Files/Pods-CoreCronetEnd2EndTests/Pods-CoreCronetEnd2EndTests.release.xcconfig"; sourceTree = ""; }; 51A275E86C141416ED63FF76 /* Pods-InteropTestsLocalCleartext.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsLocalCleartext.release.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsLocalCleartext/Pods-InteropTestsLocalCleartext.release.xcconfig"; sourceTree = ""; }; 553BBBED24E4162D1F769D65 /* Pods-InteropTestsLocalSSL.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsLocalSSL.debug.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL.debug.xcconfig"; sourceTree = ""; }; 5761E98978DDDF136A58CB7E /* Pods-AllTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-AllTests.release.xcconfig"; path = "Pods/Target Support Files/Pods-AllTests/Pods-AllTests.release.xcconfig"; sourceTree = ""; }; + 5E8A5DA41D3840B4000F8BC4 /* CoreCronetEnd2EndTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = CoreCronetEnd2EndTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + 5E8A5DA61D3840B4000F8BC4 /* CoreCronetEnd2EndTests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = CoreCronetEnd2EndTests.m; sourceTree = ""; }; + 5EE84BF11D4717E40050C6CC /* InteropTestsRemoteWithCronet.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = InteropTestsRemoteWithCronet.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + 5EE84BF31D4717E40050C6CC /* InteropTestsRemoteWithCronet.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = InteropTestsRemoteWithCronet.m; sourceTree = ""; }; + 5EE84BF51D4717E40050C6CC /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 6312AE4D1B1BF49B00341DEE /* GRPCClientTests.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GRPCClientTests.m; sourceTree = ""; }; 63423F441B150A5F006CF63C /* AllTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = AllTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; 63423F501B151B77006CF63C /* RxLibraryUnitTests.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RxLibraryUnitTests.m; sourceTree = ""; }; @@ -114,7 +143,9 @@ 63E240CD1B6C4E2B005F3B0E /* InteropTestsLocalSSL.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = InteropTestsLocalSSL.m; sourceTree = ""; }; 63E240CF1B6C63DC005F3B0E /* TestCertificates.bundle */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.plug-in"; path = TestCertificates.bundle; sourceTree = ""; }; 7A2E97E3F469CC2A758D77DE /* Pods-InteropTestsLocalSSL.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsLocalSSL.release.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL.release.xcconfig"; sourceTree = ""; }; + 9E9444C764F0FFF64A7EB58E /* libPods-InteropTestsRemoteWithCronet.a */ = {isa = PBXFileReference; 
explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-InteropTestsRemoteWithCronet.a"; sourceTree = BUILT_PRODUCTS_DIR; }; A58BE6DF1C62D1739EBB2C78 /* libPods-RxLibraryUnitTests.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-RxLibraryUnitTests.a"; sourceTree = BUILT_PRODUCTS_DIR; }; + AC414EF7A6BF76ED02B6E480 /* Pods-InteropTestsRemoteWithCronet.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsRemoteWithCronet.release.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsRemoteWithCronet/Pods-InteropTestsRemoteWithCronet.release.xcconfig"; sourceTree = ""; }; B94C27C06733CF98CE1B2757 /* Pods-AllTests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-AllTests.debug.xcconfig"; path = "Pods/Target Support Files/Pods-AllTests/Pods-AllTests.debug.xcconfig"; sourceTree = ""; }; CAE086D5B470DA367D415AB0 /* libPods-AllTests.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-AllTests.a"; sourceTree = BUILT_PRODUCTS_DIR; }; DBE059B4AC7A51919467EEC0 /* libPods-InteropTestsRemote.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-InteropTestsRemote.a"; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -123,11 +154,30 @@ E1486220285AF123EB124008 /* Pods-InteropTestsLocalCleartext.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsLocalCleartext.debug.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsLocalCleartext/Pods-InteropTestsLocalCleartext.debug.xcconfig"; sourceTree = ""; }; E4275A759BDBDF143B9B438F /* Pods-InteropTestsRemote.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsRemote.release.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsRemote/Pods-InteropTestsRemote.release.xcconfig"; sourceTree = ""; }; E6733B838B28453434B556E2 /* Pods-Tests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Tests.release.xcconfig"; path = "Pods/Target Support Files/Pods-Tests/Pods-Tests.release.xcconfig"; sourceTree = ""; }; + FBD98AC417B9882D32B19F28 /* libPods-CoreCronetEnd2EndTests.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-CoreCronetEnd2EndTests.a"; sourceTree = BUILT_PRODUCTS_DIR; }; FD346DB2C23F676C4842F3FF /* libPods-InteropTestsLocalCleartext.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-InteropTestsLocalCleartext.a"; sourceTree = BUILT_PRODUCTS_DIR; }; FF7B5489BCFE40111D768DD0 /* Pods.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.debug.xcconfig; path = "Pods/Target Support Files/Pods/Pods.debug.xcconfig"; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ + 5E8A5DA11D3840B4000F8BC4 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 5E8A5DA91D3840B4000F8BC4 /* libTests.a in Frameworks */, + 60D2A57ED559F34428C2EEC5 /* libPods-CoreCronetEnd2EndTests.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 5EE84BEE1D4717E40050C6CC /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + 
buildActionMask = 2147483647; + files = ( + 5EE84BF61D4717E40050C6CC /* libTests.a in Frameworks */, + 09B76D9585ACE7403804D9DC /* libPods-InteropTestsRemoteWithCronet.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; 63423F411B150A5F006CF63C /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; @@ -194,6 +244,8 @@ DBE059B4AC7A51919467EEC0 /* libPods-InteropTestsRemote.a */, A58BE6DF1C62D1739EBB2C78 /* libPods-RxLibraryUnitTests.a */, 20DFF2F3C97EF098FE5A3171 /* libPods-Tests.a */, + FBD98AC417B9882D32B19F28 /* libPods-CoreCronetEnd2EndTests.a */, + 9E9444C764F0FFF64A7EB58E /* libPods-InteropTestsRemoteWithCronet.a */, ); name = Frameworks; sourceTree = ""; @@ -215,15 +267,38 @@ 3B0861FC805389C52DB260D4 /* Pods-RxLibraryUnitTests.release.xcconfig */, 060EF32D7EC0DF67ED617507 /* Pods-Tests.debug.xcconfig */, E6733B838B28453434B556E2 /* Pods-Tests.release.xcconfig */, + 0D2284C3DF7E57F0ED504E39 /* Pods-CoreCronetEnd2EndTests.debug.xcconfig */, + 4AD97096D13D7416DC91A72A /* Pods-CoreCronetEnd2EndTests.release.xcconfig */, + 17F60BF2871F6AF85FB3FA12 /* Pods-InteropTestsRemoteWithCronet.debug.xcconfig */, + AC414EF7A6BF76ED02B6E480 /* Pods-InteropTestsRemoteWithCronet.release.xcconfig */, ); name = Pods; sourceTree = ""; }; + 5E8A5DA51D3840B4000F8BC4 /* CoreCronetEnd2EndTests */ = { + isa = PBXGroup; + children = ( + 5E8A5DA61D3840B4000F8BC4 /* CoreCronetEnd2EndTests.m */, + ); + path = CoreCronetEnd2EndTests; + sourceTree = ""; + }; + 5EE84BF21D4717E40050C6CC /* InteropTestsRemoteWithCronet */ = { + isa = PBXGroup; + children = ( + 5EE84BF31D4717E40050C6CC /* InteropTestsRemoteWithCronet.m */, + 5EE84BF51D4717E40050C6CC /* Info.plist */, + ); + path = InteropTestsRemoteWithCronet; + sourceTree = ""; + }; 635697BE1B14FC11007A7283 = { isa = PBXGroup; children = ( 635697C91B14FC11007A7283 /* Tests */, 63E240CF1B6C63DC005F3B0E /* TestCertificates.bundle */, + 5E8A5DA51D3840B4000F8BC4 /* CoreCronetEnd2EndTests */, + 5EE84BF21D4717E40050C6CC /* InteropTestsRemoteWithCronet */, 635697C81B14FC11007A7283 /* Products */, 51E4650F34F854F41FF053B3 /* Pods */, 136D535E19727099B941D7B1 /* Frameworks */, @@ -239,6 +314,8 @@ 63DC84231BE15267000708E8 /* InteropTestsRemote.xctest */, 63DC84341BE15294000708E8 /* InteropTestsLocalSSL.xctest */, 63DC84431BE152B5000708E8 /* InteropTestsLocalCleartext.xctest */, + 5E8A5DA41D3840B4000F8BC4 /* CoreCronetEnd2EndTests.xctest */, + 5EE84BF11D4717E40050C6CC /* InteropTestsRemoteWithCronet.xctest */, ); name = Products; sourceTree = ""; @@ -270,6 +347,48 @@ /* End PBXGroup section */ /* Begin PBXNativeTarget section */ + 5E8A5DA31D3840B4000F8BC4 /* CoreCronetEnd2EndTests */ = { + isa = PBXNativeTarget; + buildConfigurationList = 5E8A5DAE1D3840B4000F8BC4 /* Build configuration list for PBXNativeTarget "CoreCronetEnd2EndTests" */; + buildPhases = ( + F58F17E425446B15028B9F74 /* [CP] Check Pods Manifest.lock */, + 5E8A5DA01D3840B4000F8BC4 /* Sources */, + 5E8A5DA11D3840B4000F8BC4 /* Frameworks */, + 5E8A5DA21D3840B4000F8BC4 /* Resources */, + E63468C760D0724F18861822 /* [CP] Embed Pods Frameworks */, + 6DFE9E77CAB5760196D79E0F /* [CP] Copy Pods Resources */, + ); + buildRules = ( + ); + dependencies = ( + 5E8A5DAB1D3840B4000F8BC4 /* PBXTargetDependency */, + ); + name = CoreCronetEnd2EndTests; + productName = CoreCronetEnd2EndTests; + productReference = 5E8A5DA41D3840B4000F8BC4 /* CoreCronetEnd2EndTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; + 5EE84BF01D4717E40050C6CC /* 
InteropTestsRemoteWithCronet */ = { + isa = PBXNativeTarget; + buildConfigurationList = 5EE84BFB1D4717E40050C6CC /* Build configuration list for PBXNativeTarget "InteropTestsRemoteWithCronet" */; + buildPhases = ( + C0F7B1FF6F88CC5FBF362F4C /* [CP] Check Pods Manifest.lock */, + 5EE84BED1D4717E40050C6CC /* Sources */, + 5EE84BEE1D4717E40050C6CC /* Frameworks */, + 5EE84BEF1D4717E40050C6CC /* Resources */, + 31F8D1C407195CBF0C02929B /* [CP] Embed Pods Frameworks */, + DB4D0E73C229F2FF3B364AB3 /* [CP] Copy Pods Resources */, + ); + buildRules = ( + ); + dependencies = ( + 5EE84BF81D4717E40050C6CC /* PBXTargetDependency */, + ); + name = InteropTestsRemoteWithCronet; + productName = InteropTestsRemoteWithCronet; + productReference = 5EE84BF11D4717E40050C6CC /* InteropTestsRemoteWithCronet.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; 63423F431B150A5F006CF63C /* AllTests */ = { isa = PBXNativeTarget; buildConfigurationList = 63423F4D1B150A5F006CF63C /* Build configuration list for PBXNativeTarget "AllTests" */; @@ -403,6 +522,12 @@ LastUpgradeCheck = 0630; ORGANIZATIONNAME = gRPC; TargetAttributes = { + 5E8A5DA31D3840B4000F8BC4 = { + CreatedOnToolsVersion = 7.3.1; + }; + 5EE84BF01D4717E40050C6CC = { + CreatedOnToolsVersion = 7.3.1; + }; 63423F431B150A5F006CF63C = { CreatedOnToolsVersion = 6.3.1; }; @@ -441,11 +566,27 @@ 63DC84221BE15267000708E8 /* InteropTestsRemote */, 63DC84331BE15294000708E8 /* InteropTestsLocalSSL */, 63DC84421BE152B5000708E8 /* InteropTestsLocalCleartext */, + 5E8A5DA31D3840B4000F8BC4 /* CoreCronetEnd2EndTests */, + 5EE84BF01D4717E40050C6CC /* InteropTestsRemoteWithCronet */, ); }; /* End PBXProject section */ /* Begin PBXResourcesBuildPhase section */ + 5E8A5DA21D3840B4000F8BC4 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 5EE84BEF1D4717E40050C6CC /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; 63423F421B150A5F006CF63C /* Resources */ = { isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; @@ -486,6 +627,21 @@ /* End PBXResourcesBuildPhase section */ /* Begin PBXShellScriptBuildPhase section */ + 31F8D1C407195CBF0C02929B /* [CP] Embed Pods Frameworks */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Embed Pods Frameworks"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsRemoteWithCronet/Pods-InteropTestsRemoteWithCronet-frameworks.sh\"\n"; + showEnvVarsInLog = 0; + }; 4C406327D3907A5E5FBA8AC9 /* [CP] Check Pods Manifest.lock */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; @@ -561,6 +717,21 @@ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL-resources.sh\"\n"; showEnvVarsInLog = 0; }; + 6DFE9E77CAB5760196D79E0F /* [CP] Copy Pods Resources */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Copy Pods Resources"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-CoreCronetEnd2EndTests/Pods-CoreCronetEnd2EndTests-resources.sh\"\n"; + showEnvVarsInLog = 0; + }; 7418AC7B3844B29E48D24FC7 /* [CP] Check Pods 
Manifest.lock */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; @@ -696,6 +867,21 @@ shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; showEnvVarsInLog = 0; }; + C0F7B1FF6F88CC5FBF362F4C /* [CP] Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Check Pods Manifest.lock"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; + showEnvVarsInLog = 0; + }; C2E09DC4BD239F71160F0CC1 /* [CP] Copy Pods Resources */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; @@ -741,9 +927,71 @@ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-RxLibraryUnitTests/Pods-RxLibraryUnitTests-resources.sh\"\n"; showEnvVarsInLog = 0; }; + DB4D0E73C229F2FF3B364AB3 /* [CP] Copy Pods Resources */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Copy Pods Resources"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsRemoteWithCronet/Pods-InteropTestsRemoteWithCronet-resources.sh\"\n"; + showEnvVarsInLog = 0; + }; + E63468C760D0724F18861822 /* [CP] Embed Pods Frameworks */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Embed Pods Frameworks"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-CoreCronetEnd2EndTests/Pods-CoreCronetEnd2EndTests-frameworks.sh\"\n"; + showEnvVarsInLog = 0; + }; + F58F17E425446B15028B9F74 /* [CP] Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "[CP] Check Pods Manifest.lock"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n"; + showEnvVarsInLog = 0; + }; /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ + 5E8A5DA01D3840B4000F8BC4 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 5E8A5DA71D3840B4000F8BC4 /* CoreCronetEnd2EndTests.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 5EE84BED1D4717E40050C6CC /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 5EE84BFE1D471D400050C6CC /* InteropTests.m in Sources */, + 5EE84BF41D4717E40050C6CC /* InteropTestsRemoteWithCronet.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; 63423F401B150A5F006CF63C /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -804,6 +1052,16 @@ /* End PBXSourcesBuildPhase section */ /* Begin PBXTargetDependency section */ + 5E8A5DAB1D3840B4000F8BC4 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 635697C61B14FC11007A7283 /* Tests */; + targetProxy = 5E8A5DAA1D3840B4000F8BC4 /* PBXContainerItemProxy */; + }; + 5EE84BF81D4717E40050C6CC /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 635697C61B14FC11007A7283 /* Tests */; + targetProxy = 5EE84BF71D4717E40050C6CC /* PBXContainerItemProxy */; + }; 63423F4C1B150A5F006CF63C /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = 635697C61B14FC11007A7283 /* Tests */; @@ -832,6 +1090,82 @@ /* End PBXTargetDependency section */ /* Begin XCBuildConfiguration section */ + 5E8A5DAC1D3840B4000F8BC4 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 0D2284C3DF7E57F0ED504E39 /* Pods-CoreCronetEnd2EndTests.debug.xcconfig */; + buildSettings = { + CLANG_ANALYZER_NONNULL = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_TESTABILITY = YES; + INFOPLIST_FILE = CoreCronetEnd2EndTests/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 9.3; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = io.grpc.CoreCronetEnd2EndTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + USER_HEADER_SEARCH_PATHS = "$(inherited) \"${PODS_ROOT}/../../../..\""; + }; + name = Debug; + }; + 5E8A5DAD1D3840B4000F8BC4 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 4AD97096D13D7416DC91A72A /* Pods-CoreCronetEnd2EndTests.release.xcconfig */; + buildSettings = { + CLANG_ANALYZER_NONNULL = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + INFOPLIST_FILE = CoreCronetEnd2EndTests/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 9.3; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = io.grpc.CoreCronetEnd2EndTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + USER_HEADER_SEARCH_PATHS = "$(inherited) \"${PODS_ROOT}/../../../..\""; + }; + name = Release; + }; + 5EE84BF91D4717E40050C6CC /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 17F60BF2871F6AF85FB3FA12 /* Pods-InteropTestsRemoteWithCronet.debug.xcconfig */; + buildSettings = { + CLANG_ANALYZER_NONNULL = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_TESTABILITY = YES; + GCC_PREPROCESSOR_DEFINITIONS = ( + "$(inherited)", + "COCOAPODS=1", + "$(inherited)", + "GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1", + "GRPC_COMPILE_WITH_CRONET=1", + ); + INFOPLIST_FILE 
= InteropTestsRemoteWithCronet/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 9.3; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = io.grpc.InteropTestsRemoteWithCronet; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 5EE84BFA1D4717E40050C6CC /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = AC414EF7A6BF76ED02B6E480 /* Pods-InteropTestsRemoteWithCronet.release.xcconfig */; + buildSettings = { + CLANG_ANALYZER_NONNULL = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + GCC_PREPROCESSOR_DEFINITIONS = ( + "$(inherited)", + "COCOAPODS=1", + "$(inherited)", + "GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1", + "GRPC_COMPILE_WITH_CRONET=1", + ); + INFOPLIST_FILE = InteropTestsRemoteWithCronet/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 9.3; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = io.grpc.InteropTestsRemoteWithCronet; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; 63423F4E1B150A5F006CF63C /* Debug */ = { isa = XCBuildConfiguration; baseConfigurationReference = B94C27C06733CF98CE1B2757 /* Pods-AllTests.debug.xcconfig */; @@ -1071,6 +1405,24 @@ /* End XCBuildConfiguration section */ /* Begin XCConfigurationList section */ + 5E8A5DAE1D3840B4000F8BC4 /* Build configuration list for PBXNativeTarget "CoreCronetEnd2EndTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 5E8A5DAC1D3840B4000F8BC4 /* Debug */, + 5E8A5DAD1D3840B4000F8BC4 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 5EE84BFB1D4717E40050C6CC /* Build configuration list for PBXNativeTarget "InteropTestsRemoteWithCronet" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 5EE84BF91D4717E40050C6CC /* Debug */, + 5EE84BFA1D4717E40050C6CC /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; 63423F4D1B150A5F006CF63C /* Build configuration list for PBXNativeTarget "AllTests" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/CoreCronetEnd2EndTests.xcscheme b/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/CoreCronetEnd2EndTests.xcscheme new file mode 100644 index 00000000000..a1da2e0c97c --- /dev/null +++ b/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/CoreCronetEnd2EndTests.xcscheme @@ -0,0 +1,99 @@ + [99 lines of xcscheme XML defining the shared CoreCronetEnd2EndTests scheme; the markup was stripped in this capture] diff --git a/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/InteropTestsRemoteWithCronet.xcscheme b/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/InteropTestsRemoteWithCronet.xcscheme new file mode 100644 index 00000000000..6d92be8b3de --- /dev/null +++ b/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/InteropTestsRemoteWithCronet.xcscheme @@ -0,0 +1,104 @@ + [104 lines of xcscheme XML defining the shared InteropTestsRemoteWithCronet scheme; the markup was stripped in this capture] diff --git a/src/objective-c/tests/build_example_test.sh b/src/objective-c/tests/build_example_test.sh index 5c3766b4c0b..ae75941ec61 100755 --- a/src/objective-c/tests/build_example_test.sh +++ b/src/objective-c/tests/build_example_test.sh @@ -31,44 +31,33 @@ # Don't run this script standalone.
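An aside on the build settings just above: the InteropTestsRemoteWithCronet configurations inject `GRPC_COMPILE_WITH_CRONET=1` (alongside `GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1`) through `GCC_PREPROCESSOR_DEFINITIONS`, so the transport choice is baked in when the bundle is compiled. A hypothetical sketch of how such a flag is typically consumed; the function names below are invented for illustration and are not part of this patch:

```c
#include <stdio.h>

/* Hypothetical sketch: a macro injected via GCC_PREPROCESSOR_DEFINITIONS
 * (e.g. GRPC_COMPILE_WITH_CRONET=1) picks a code path at compile time;
 * nothing is decided at runtime. */
#ifdef GRPC_COMPILE_WITH_CRONET
static const char *selected_transport(void) { return "Cronet"; }
#else
static const char *selected_transport(void) { return "default gRPC core"; }
#endif

int main(void) {
  printf("transport compiled in: %s\n", selected_transport());
  return 0;
}
```

Both the Debug and Release configurations of that target define the flag, so for these test bundles the Cronet branch is always the one compiled in.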
Instead, run from the repository root: # ./tools/run_tests/run_tests.py -l objc -set -eo pipefail +set -evo pipefail cd `dirname $0` -BINDIR=`pwd`/../../../bins/$CONFIG -TMP_PATH=$PATH - -# If `protoc` is not found, add bins/$CONFIG/protobuf/protoc to the search path -hash protoc 2>/dev/null || TMP_PATH=$BINDIR/protobuf:$TMP_PATH - -# If `protoc-gen-objcgrpc` is not found, make a symlink from -# bins/$CONGIF/grpc_objective_c_plugin and add it to the search path -PATH=$TMP_PATH hash protoc-gen-objcgrpc 2>/dev/null || { - ln -sf $BINDIR/grpc_objective_c_plugin $BINDIR/protoc-gen-objcgrpc - TMP_PATH=$BINDIR:$TMP_PATH -} - SCHEME=HelloWorld \ EXAMPLE_PATH=examples/objective-c/helloworld \ - PATH=$TMP_PATH \ ./build_one_example.sh SCHEME=RouteGuideClient \ EXAMPLE_PATH=examples/objective-c/route_guide \ - PATH=$TMP_PATH \ ./build_one_example.sh SCHEME=AuthSample \ EXAMPLE_PATH=examples/objective-c/auth_sample \ - PATH=$TMP_PATH \ + ./build_one_example.sh + +rm -f ../examples/RemoteTestClient/*.{h,m} + +SCHEME=Sample \ + EXAMPLE_PATH=src/objective-c/examples/Sample \ ./build_one_example.sh SCHEME=Sample \ EXAMPLE_PATH=src/objective-c/examples/Sample \ - PATH=$TMP_PATH \ + FRAMEWORKS=YES \ ./build_one_example.sh SCHEME=SwiftSample \ EXAMPLE_PATH=src/objective-c/examples/SwiftSample \ - PATH=$TMP_PATH \ ./build_one_example.sh diff --git a/src/objective-c/tests/build_one_example.sh b/src/objective-c/tests/build_one_example.sh index 24fb8b7bab4..9fef6582a38 100755 --- a/src/objective-c/tests/build_one_example.sh +++ b/src/objective-c/tests/build_one_example.sh @@ -31,7 +31,7 @@ # Don't run this script standalone. Instead, run from the repository root: # ./tools/run_tests/run_tests.py -l objc -set -e +set -ev # Params: # EXAMPLE_PATH - directory of the example @@ -54,7 +54,7 @@ pod install set -o pipefail XCODEBUILD_FILTER='(^===|^\*\*|\bfatal\b|\berror\b|\bwarning\b|\bfail)' xcodebuild \ - clean build \ + build \ -workspace *.xcworkspace \ -scheme $SCHEME \ -destination name="iPhone 6" \ diff --git a/src/objective-c/tests/build_tests.sh b/src/objective-c/tests/build_tests.sh index 8547bfd3a83..bc5bc044943 100755 --- a/src/objective-c/tests/build_tests.sh +++ b/src/objective-c/tests/build_tests.sh @@ -44,26 +44,10 @@ hash xcodebuild 2>/dev/null || { exit 1 } -BINDIR=../../../bins/$CONFIG - -if [ ! -f $BINDIR/protobuf/protoc ]; then - hash protoc 2>/dev/null || { - echo >&2 "Can't find protoc. Make sure run_tests.py is making" \ - "grpc_objective_c_plugin before calling this script." - exit 1 - } - # When protoc is already installed, make doesn't compile one. Put a link - # there so the podspecs can do codegen using that path. - mkdir -p $BINDIR/protobuf - ln -s `which protoc` $BINDIR/protobuf/protoc -fi - -[ -f $BINDIR/interop_server ] || { - echo >&2 "Can't find the test server. Make sure run_tests.py is making" \ - "interop_server before calling this script. It needs to be done" \ - "before because pod install of gRPC renames some C gRPC files" \ - "and not the server's code references to them." - exit 1 -} +# clean the directory +rm -rf Pods +rm -rf Tests.xcworkspace +rm -f Podfile.lock +rm -f RemoteTestClient/*.{h,m} pod install diff --git a/src/objective-c/tests/run_tests.sh b/src/objective-c/tests/run_tests.sh index c4fc5644f28..a265149f488 100755 --- a/src/objective-c/tests/run_tests.sh +++ b/src/objective-c/tests/run_tests.sh @@ -31,13 +31,21 @@ # Don't run this script standalone. 
Instead, run from the repository root: # ./tools/run_tests/run_tests.py -l objc -set -e +set -ev cd $(dirname $0) # Run the tests server. -../../../bins/$CONFIG/interop_server --port=5050 & -../../../bins/$CONFIG/interop_server --port=5051 --use_tls & + +BINDIR=../../../bins/$CONFIG + +[ -f $BINDIR/interop_server ] || { + echo >&2 "Can't find the test server. Make sure run_tests.py is making" \ + "interop_server before calling this script." + exit 1 +} +$BINDIR/interop_server --port=5050 & +$BINDIR/interop_server --port=5051 --use_tls & # Kill them when this script exits. trap 'kill -9 `jobs -p`' EXIT diff --git a/src/php/README.md b/src/php/README.md index 6cc1ba4d462..7e9819b256d 100644 --- a/src/php/README.md +++ b/src/php/README.md @@ -5,27 +5,16 @@ This directory contains source code for PHP implementation of gRPC layered on sh #Status -Beta +GA ## Environment -Prerequisite: `php` >=5.5, `phpize`, `pecl`, `phpunit` - -**Linux (Debian):** - -```sh -$ sudo apt-get install php5 php5-dev php-pear -``` - -**Linux (CentOS):** - -```sh -$ yum install php55w -$ yum --enablerepo=remi,remi-php55 install php-devel php-pear -``` - -**Mac OS X:** +Prerequisite: +* `php` 5.5 or above, 7.0 or above +* `pear` and `pecl` +* `phpunit` +**PEAR:** ```sh $ curl -O http://pear.php.net/go-pear.phar $ sudo php -d detect_unicode=0 go-pear.phar @@ -43,7 +32,7 @@ $ sudo mv phpunit-old.phar /usr/bin/phpunit Install the gRPC PHP extension ```sh -sudo pecl install grpc-beta +sudo pecl install grpc ``` This will compile and install the gRPC PHP extension into the standard PHP extension directory. You should be able to run the [unit tests](#unit-tests), with the PHP extension installed. @@ -72,13 +61,7 @@ $ sudo make install ### gRPC PHP extension -Install the gRPC PHP extension from PECL - -```sh -$ sudo pecl install grpc-beta -``` - -Or, compile from source +Compile the gRPC PHP extension ```sh $ cd grpc/src/php/ext/grpc @@ -148,12 +131,8 @@ Alternatively, you can download `protoc` binaries from [the protocol buffers Git You need to install `protoc-gen-php` to generate stub class `.php` files from service definition `.proto` files. ```sh -$ cd grpc/src/php/vendor/datto/protobuf-php # if you had run `composer install` in the previous step - -OR - -$ git clone https://github.com/stanley-cheung/Protobuf-PHP # clone from github repo - +$ git clone https://github.com/stanley-cheung/Protobuf-PHP +$ cd Protobuf-PHP $ gem install rake ronn $ rake pear:package version=1.0 $ sudo pear install Protobuf-1.0.tgz @@ -175,7 +154,7 @@ Run a local server serving the math services. Please see [Node][] for how to run ```sh $ cd grpc $ npm install -$ nodejs src/node/test/math/math_server.js +$ node src/node/test/math/math_server.js ``` ### Run test client @@ -212,7 +191,7 @@ Make sure the Node math server is still running, as above. ```sh $ cd grpc $ npm install -$ nodejs src/node/test/math/math_server.js +$ node src/node/test/math/math_server.js ``` Make sure you have run `composer install` to generate the `vendor/autoload.php` file @@ -282,7 +261,7 @@ Make sure the Node math server is still running, as above. 
```sh $ cd grpc $ npm install -$ nodejs src/node/test/math/math_server.js +$ node src/node/test/math/math_server.js ``` Make sure you have run `composer install` to generate the `vendor/autoload.php` file diff --git a/src/php/bin/determine_extension_dir.sh b/src/php/bin/determine_extension_dir.sh index b4342ac89fa..a59882506f9 100755 --- a/src/php/bin/determine_extension_dir.sh +++ b/src/php/bin/determine_extension_dir.sh @@ -29,11 +29,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. set -e default_extension_dir=$(php-config --extension-dir) -if command -v brew > /dev/null && \ - brew ls --versions | grep php5[56]-grpc > /dev/null; then - # the grpc php extension was installed by homebrew - : -elif [ ! -e $default_extension_dir/grpc.so ]; then +if [ ! -e $default_extension_dir/grpc.so ]; then # the grpc extension is not found in the default PHP extension dir # try the source modules directory module_dir=../ext/grpc/modules diff --git a/src/php/composer.json b/src/php/composer.json index 2ad73223c65..1eacc643a22 100644 --- a/src/php/composer.json +++ b/src/php/composer.json @@ -5,16 +5,13 @@ "keywords": ["rpc"], "homepage": "http://grpc.io", "license": "BSD-3-Clause", - "repositories": [ - { - "type": "vcs", - "url": "https://github.com/stanley-cheung/Protobuf-PHP" - } - ], + "version": "1.0.0", "require": { "php": ">=5.5.0", - "datto/protobuf-php": "dev-master", - "google/auth": "v0.7" + "stanley-cheung/protobuf-php": "v0.6" + }, + "require-dev": { + "google/auth": "v0.9" }, "autoload": { "psr-4": { diff --git a/src/php/ext/grpc/call.c b/src/php/ext/grpc/call.c index 2cd45f10dc4..66ca1513ed7 100644 --- a/src/php/ext/grpc/call.c +++ b/src/php/ext/grpc/call.c @@ -58,63 +58,44 @@ #include "byte_buffer.h" zend_class_entry *grpc_ce_call; +#if PHP_MAJOR_VERSION >= 7 +static zend_object_handlers call_ce_handlers; +#endif /* Frees and destroys an instance of wrapped_grpc_call */ -void free_wrapped_grpc_call(void *object TSRMLS_DC) { - wrapped_grpc_call *call = (wrapped_grpc_call *)object; - if (call->owned && call->wrapped != NULL) { - grpc_call_destroy(call->wrapped); +PHP_GRPC_FREE_WRAPPED_FUNC_START(wrapped_grpc_call) + if (p->owned && p->wrapped != NULL) { + grpc_call_destroy(p->wrapped); } - efree(call); -} +PHP_GRPC_FREE_WRAPPED_FUNC_END() /* Initializes an instance of wrapped_grpc_call to be associated with an object * of a class specified by class_type */ -zend_object_value create_wrapped_grpc_call(zend_class_entry *class_type - TSRMLS_DC) { - zend_object_value retval; - wrapped_grpc_call *intern; - - intern = (wrapped_grpc_call *)emalloc(sizeof(wrapped_grpc_call)); - memset(intern, 0, sizeof(wrapped_grpc_call)); - +php_grpc_zend_object create_wrapped_grpc_call(zend_class_entry *class_type + TSRMLS_DC) { + PHP_GRPC_ALLOC_CLASS_OBJECT(wrapped_grpc_call); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); - retval.handle = zend_objects_store_put( - intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, - free_wrapped_grpc_call, NULL TSRMLS_CC); - retval.handlers = zend_get_std_object_handlers(); - return retval; -} - -/* Wraps a grpc_call struct in a PHP object. 
Owned indicates whether the struct - should be destroyed at the end of the object's lifecycle */ -zval *grpc_php_wrap_call(grpc_call *wrapped, bool owned TSRMLS_DC) { - zval *call_object; - MAKE_STD_ZVAL(call_object); - object_init_ex(call_object, grpc_ce_call); - wrapped_grpc_call *call = - (wrapped_grpc_call *)zend_object_store_get_object(call_object TSRMLS_CC); - call->wrapped = wrapped; - call->owned = owned; - return call_object; + PHP_GRPC_FREE_CLASS_OBJECT(wrapped_grpc_call, call_ce_handlers); } /* Creates and returns a PHP array object with the data in a * grpc_metadata_array. Returns NULL on failure */ -zval *grpc_parse_metadata_array(grpc_metadata_array *metadata_array TSRMLS_DC) { +zval *grpc_parse_metadata_array(grpc_metadata_array + *metadata_array TSRMLS_DC) { int count = metadata_array->count; grpc_metadata *elements = metadata_array->metadata; - int i; zval *array; - zval **data = NULL; + PHP_GRPC_MAKE_STD_ZVAL(array); + array_init(array); + int i; HashTable *array_hash; zval *inner_array; char *str_key; char *str_val; size_t key_len; - MAKE_STD_ZVAL(array); - array_init(array); + zval *data = NULL; + array_hash = Z_ARRVAL_P(array); grpc_metadata *elem; for (i = 0; i < count; i++) { @@ -124,9 +105,9 @@ zval *grpc_parse_metadata_array(grpc_metadata_array *metadata_array TSRMLS_DC) { memcpy(str_key, elem->key, key_len); str_val = ecalloc(elem->value_length + 1, sizeof(char)); memcpy(str_val, elem->value, elem->value_length); - if (zend_hash_find(array_hash, str_key, key_len, (void **)data) == - SUCCESS) { - if (Z_TYPE_P(*data) != IS_ARRAY) { + if (php_grpc_zend_hash_find(array_hash, str_key, key_len, (void **)&data) + == SUCCESS) { + if (Z_TYPE_P(data) != IS_ARRAY) { zend_throw_exception(zend_exception_get_default(TSRMLS_C), "Metadata hash somehow contains wrong types.", 1 TSRMLS_CC); @@ -134,11 +115,13 @@ zval *grpc_parse_metadata_array(grpc_metadata_array *metadata_array TSRMLS_DC) { efree(str_val); return NULL; } - add_next_index_stringl(*data, str_val, elem->value_length, false); + php_grpc_add_next_index_stringl(data, str_val, elem->value_length, + false); } else { - MAKE_STD_ZVAL(inner_array); + PHP_GRPC_MAKE_STD_ZVAL(inner_array); array_init(inner_array); - add_next_index_stringl(inner_array, str_val, elem->value_length, false); + php_grpc_add_next_index_stringl(inner_array, str_val, + elem->value_length, false); add_assoc_zval(array, str_key, inner_array); } } @@ -148,61 +131,65 @@ zval *grpc_parse_metadata_array(grpc_metadata_array *metadata_array TSRMLS_DC) { /* Populates a grpc_metadata_array with the data in a PHP array object. 
Returns true on success and false on failure */ bool create_metadata_array(zval *array, grpc_metadata_array *metadata) { - zval **inner_array; - zval **value; HashTable *array_hash; - HashPosition array_pointer; HashTable *inner_array_hash; - HashPosition inner_array_pointer; - char *key; - uint key_len; - ulong index; + zval *value; + zval *inner_array; if (Z_TYPE_P(array) != IS_ARRAY) { return false; } grpc_metadata_array_init(metadata); array_hash = Z_ARRVAL_P(array); - for (zend_hash_internal_pointer_reset_ex(array_hash, &array_pointer); - zend_hash_get_current_data_ex(array_hash, (void**)&inner_array, - &array_pointer) == SUCCESS; - zend_hash_move_forward_ex(array_hash, &array_pointer)) { - if (zend_hash_get_current_key_ex(array_hash, &key, &key_len, &index, 0, - &array_pointer) != HASH_KEY_IS_STRING) { + + char *key; + int key_type; + PHP_GRPC_HASH_FOREACH_STR_KEY_VAL_START(array_hash, key, key_type, + inner_array) + if (key_type != HASH_KEY_IS_STRING || key == NULL) { return false; } - if (Z_TYPE_P(*inner_array) != IS_ARRAY) { + if (Z_TYPE_P(inner_array) != IS_ARRAY) { return false; } - inner_array_hash = Z_ARRVAL_P(*inner_array); + inner_array_hash = Z_ARRVAL_P(inner_array); metadata->capacity += zend_hash_num_elements(inner_array_hash); - } + PHP_GRPC_HASH_FOREACH_END() + metadata->metadata = gpr_malloc(metadata->capacity * sizeof(grpc_metadata)); - for (zend_hash_internal_pointer_reset_ex(array_hash, &array_pointer); - zend_hash_get_current_data_ex(array_hash, (void**)&inner_array, - &array_pointer) == SUCCESS; - zend_hash_move_forward_ex(array_hash, &array_pointer)) { - if (zend_hash_get_current_key_ex(array_hash, &key, &key_len, &index, 0, - &array_pointer) != HASH_KEY_IS_STRING) { + + char *key1 = NULL; + int key_type1; + PHP_GRPC_HASH_FOREACH_STR_KEY_VAL_START(array_hash, key1, key_type1, + inner_array) + if (key_type1 != HASH_KEY_IS_STRING) { return false; } - inner_array_hash = Z_ARRVAL_P(*inner_array); - for (zend_hash_internal_pointer_reset_ex(inner_array_hash, - &inner_array_pointer); - zend_hash_get_current_data_ex(inner_array_hash, (void**)&value, - &inner_array_pointer) == SUCCESS; - zend_hash_move_forward_ex(inner_array_hash, &inner_array_pointer)) { - if (Z_TYPE_P(*value) != IS_STRING) { + inner_array_hash = Z_ARRVAL_P(inner_array); + PHP_GRPC_HASH_FOREACH_VAL_START(inner_array_hash, value) + if (Z_TYPE_P(value) != IS_STRING) { return false; } - metadata->metadata[metadata->count].key = key; - metadata->metadata[metadata->count].value = Z_STRVAL_P(*value); - metadata->metadata[metadata->count].value_length = Z_STRLEN_P(*value); + metadata->metadata[metadata->count].key = key1; + metadata->metadata[metadata->count].value = Z_STRVAL_P(value); + metadata->metadata[metadata->count].value_length = Z_STRLEN_P(value); metadata->count += 1; - } - } + PHP_GRPC_HASH_FOREACH_END() + PHP_GRPC_HASH_FOREACH_END() return true; } +/* Wraps a grpc_call struct in a PHP object. Owned indicates whether the + struct should be destroyed at the end of the object's lifecycle */ +zval *grpc_php_wrap_call(grpc_call *wrapped, bool owned TSRMLS_DC) { + zval *call_object; + PHP_GRPC_MAKE_STD_ZVAL(call_object); + object_init_ex(call_object, grpc_ce_call); + wrapped_grpc_call *call = Z_WRAPPED_GRPC_CALL_P(call_object); + call->wrapped = wrapped; + call->owned = owned; + return call_object; +} + /** * Constructs a new instance of the Call class. * @param Channel $channel The channel to associate the call with. 
Must not be @@ -211,30 +198,25 @@ bool create_metadata_array(zval *array, grpc_metadata_array *metadata) { * @param Timeval $absolute_deadline The deadline for completing the call */ PHP_METHOD(Call, __construct) { - wrapped_grpc_call *call = - (wrapped_grpc_call *)zend_object_store_get_object(getThis() TSRMLS_CC); zval *channel_obj; char *method; - int method_len; + php_grpc_int method_len; zval *deadline_obj; char *host_override = NULL; - int host_override_len = 0; + php_grpc_int host_override_len = 0; + wrapped_grpc_call *call = Z_WRAPPED_GRPC_CALL_P(getThis()); + /* "OsO|s" == 1 Object, 1 string, 1 Object, 1 optional string */ - if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "OsO|s", - &channel_obj, grpc_ce_channel, - &method, &method_len, - &deadline_obj, grpc_ce_timeval, - &host_override, &host_override_len) - == FAILURE) { - zend_throw_exception( - spl_ce_InvalidArgumentException, - "Call expects a Channel, a String, a Timeval and an optional String", - 1 TSRMLS_CC); + if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "OsO|s", &channel_obj, + grpc_ce_channel, &method, &method_len, + &deadline_obj, grpc_ce_timeval, &host_override, + &host_override_len) == FAILURE) { + zend_throw_exception(spl_ce_InvalidArgumentException, + "Call expects a Channel, a String, a Timeval and " + "an optional String", 1 TSRMLS_CC); return; } - wrapped_grpc_channel *channel = - (wrapped_grpc_channel *)zend_object_store_get_object( - channel_obj TSRMLS_CC); + wrapped_grpc_channel *channel = Z_WRAPPED_GRPC_CHANNEL_P(channel_obj); if (channel->wrapped == NULL) { zend_throw_exception(spl_ce_InvalidArgumentException, "Call cannot be constructed from a closed Channel", @@ -242,12 +224,11 @@ PHP_METHOD(Call, __construct) { return; } add_property_zval(getThis(), "channel", channel_obj); - wrapped_grpc_timeval *deadline = - (wrapped_grpc_timeval *)zend_object_store_get_object( - deadline_obj TSRMLS_CC); - call->wrapped = grpc_channel_create_call( - channel->wrapped, NULL, GRPC_PROPAGATE_DEFAULTS, completion_queue, method, - host_override, deadline->wrapped, NULL); + wrapped_grpc_timeval *deadline = Z_WRAPPED_GRPC_TIMEVAL_P(deadline_obj); + call->wrapped = + grpc_channel_create_call(channel->wrapped, NULL, GRPC_PROPAGATE_DEFAULTS, + completion_queue, method, host_override, + deadline->wrapped, NULL); call->owned = true; } @@ -257,22 +238,26 @@ PHP_METHOD(Call, __construct) { * @return object Object with results of all actions */ PHP_METHOD(Call, startBatch) { - wrapped_grpc_call *call = - (wrapped_grpc_call *)zend_object_store_get_object(getThis() TSRMLS_CC); + zval *result; + PHP_GRPC_MAKE_STD_ZVAL(result); + object_init(result); + php_grpc_ulong index; + zval *recv_status; + PHP_GRPC_MAKE_STD_ZVAL(recv_status); + object_init(recv_status); + zval *value; + zval *inner_value; + zval *message_value; + zval *message_flags; + wrapped_grpc_call *call = Z_WRAPPED_GRPC_CALL_P(getThis()); + grpc_op ops[8]; size_t op_num = 0; zval *array; - zval **value; - zval **inner_value; HashTable *array_hash; - HashPosition array_pointer; HashTable *status_hash; HashTable *message_hash; - zval **message_value; - zval **message_flags; - char *key; - uint key_len; - ulong index; + grpc_metadata_array metadata; grpc_metadata_array trailing_metadata; grpc_metadata_array recv_metadata; @@ -283,17 +268,15 @@ PHP_METHOD(Call, startBatch) { grpc_byte_buffer *message; int cancelled; grpc_call_error error; - zval *result; char *message_str; size_t message_len; - zval *recv_status; + grpc_metadata_array_init(&metadata); 
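Both `create_metadata_array` above and the batch-parsing loop that follows now iterate through the `PHP_GRPC_HASH_FOREACH_*` wrappers rather than the Zend 5.x cursor calls that were removed. A condensed sketch of the two iteration styles such a wrapper has to reconcile, using only stock Zend APIs; this is not the wrapper's actual definition, which lives elsewhere in the extension:

```c
#include "php.h"

/* Sketch: count the string keys of a HashTable under both Zend generations.
 * PHP 7 provides ZEND_HASH_FOREACH_STR_KEY_VAL; PHP 5 walks an explicit
 * HashPosition cursor, much like the lines removed in this hunk. */
static int count_string_keys(HashTable *ht) {
  int n = 0;
#if PHP_MAJOR_VERSION >= 7
  zend_string *key;
  zval *val;
  ZEND_HASH_FOREACH_STR_KEY_VAL(ht, key, val) {
    (void)val;
    if (key != NULL) n++;            /* a NULL key means a numeric index */
  } ZEND_HASH_FOREACH_END();
#else
  HashPosition pos;
  zval **val;
  char *key;
  uint key_len;
  ulong idx;
  for (zend_hash_internal_pointer_reset_ex(ht, &pos);
       zend_hash_get_current_data_ex(ht, (void **)&val, &pos) == SUCCESS;
       zend_hash_move_forward_ex(ht, &pos)) {
    if (zend_hash_get_current_key_ex(ht, &key, &key_len, &idx, 0, &pos) ==
        HASH_KEY_IS_STRING) {
      n++;
    }
  }
#endif
  return n;
}
```

Compiled as part of a PHP extension, the same call site works against either engine, which is the property the wrapper macros buy for the batch loop below.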
grpc_metadata_array_init(&trailing_metadata); grpc_metadata_array_init(&recv_metadata); grpc_metadata_array_init(&recv_trailing_metadata); - MAKE_STD_ZVAL(result); - object_init(result); memset(ops, 0, sizeof(ops)); + /* "a" == 1 array */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { @@ -301,136 +284,136 @@ PHP_METHOD(Call, startBatch) { "start_batch expects an array", 1 TSRMLS_CC); goto cleanup; } + array_hash = Z_ARRVAL_P(array); - for (zend_hash_internal_pointer_reset_ex(array_hash, &array_pointer); - zend_hash_get_current_data_ex(array_hash, (void**)&value, - &array_pointer) == SUCCESS; - zend_hash_move_forward_ex(array_hash, &array_pointer)) { - if (zend_hash_get_current_key_ex(array_hash, &key, &key_len, &index, 0, - &array_pointer) != HASH_KEY_IS_LONG) { + + char *key = NULL; + int key_type; + PHP_GRPC_HASH_FOREACH_LONG_KEY_VAL_START(array_hash, key, key_type, index, + value) + if (key_type != HASH_KEY_IS_LONG || key != NULL) { zend_throw_exception(spl_ce_InvalidArgumentException, "batch keys must be integers", 1 TSRMLS_CC); goto cleanup; } switch(index) { - case GRPC_OP_SEND_INITIAL_METADATA: - if (!create_metadata_array(*value, &metadata)) { - zend_throw_exception(spl_ce_InvalidArgumentException, - "Bad metadata value given", 1 TSRMLS_CC); - goto cleanup; - } - ops[op_num].data.send_initial_metadata.count = - metadata.count; - ops[op_num].data.send_initial_metadata.metadata = - metadata.metadata; - break; - case GRPC_OP_SEND_MESSAGE: - if (Z_TYPE_PP(value) != IS_ARRAY) { + case GRPC_OP_SEND_INITIAL_METADATA: + if (!create_metadata_array(value, &metadata)) { + zend_throw_exception(spl_ce_InvalidArgumentException, + "Bad metadata value given", 1 TSRMLS_CC); + goto cleanup; + } + ops[op_num].data.send_initial_metadata.count = metadata.count; + ops[op_num].data.send_initial_metadata.metadata = metadata.metadata; + break; + case GRPC_OP_SEND_MESSAGE: + if (Z_TYPE_P(value) != IS_ARRAY) { + zend_throw_exception(spl_ce_InvalidArgumentException, + "Expected an array for send message", + 1 TSRMLS_CC); + goto cleanup; + } + message_hash = Z_ARRVAL_P(value); + if (php_grpc_zend_hash_find(message_hash, "flags", sizeof("flags"), + (void **)&message_flags) == SUCCESS) { + if (Z_TYPE_P(message_flags) != IS_LONG) { zend_throw_exception(spl_ce_InvalidArgumentException, - "Expected an array for send message", + "Expected an int for message flags", 1 TSRMLS_CC); - goto cleanup; } - message_hash = Z_ARRVAL_PP(value); - if (zend_hash_find(message_hash, "flags", sizeof("flags"), - (void **)&message_flags) == SUCCESS) { - if (Z_TYPE_PP(message_flags) != IS_LONG) { - zend_throw_exception(spl_ce_InvalidArgumentException, - "Expected an int for message flags", - 1 TSRMLS_CC); - } - ops[op_num].flags = Z_LVAL_PP(message_flags) & GRPC_WRITE_USED_MASK; - } - if (zend_hash_find(message_hash, "message", sizeof("message"), - (void **)&message_value) != SUCCESS || - Z_TYPE_PP(message_value) != IS_STRING) { + ops[op_num].flags = Z_LVAL_P(message_flags) & GRPC_WRITE_USED_MASK; + } + if (php_grpc_zend_hash_find(message_hash, "message", sizeof("message"), + (void **)&message_value) != SUCCESS || + Z_TYPE_P(message_value) != IS_STRING) { + zend_throw_exception(spl_ce_InvalidArgumentException, + "Expected a string for send message", + 1 TSRMLS_CC); + goto cleanup; + } + ops[op_num].data.send_message = + string_to_byte_buffer(Z_STRVAL_P(message_value), + Z_STRLEN_P(message_value)); + break; + case GRPC_OP_SEND_CLOSE_FROM_CLIENT: + break; + case GRPC_OP_SEND_STATUS_FROM_SERVER: + 
status_hash = Z_ARRVAL_P(value); + if (php_grpc_zend_hash_find(status_hash, "metadata", sizeof("metadata"), + (void **)&inner_value) == SUCCESS) { + if (!create_metadata_array(inner_value, &trailing_metadata)) { zend_throw_exception(spl_ce_InvalidArgumentException, - "Expected a string for send message", + "Bad trailing metadata value given", 1 TSRMLS_CC); goto cleanup; } - ops[op_num].data.send_message = - string_to_byte_buffer(Z_STRVAL_PP(message_value), - Z_STRLEN_PP(message_value)); - break; - case GRPC_OP_SEND_CLOSE_FROM_CLIENT: - break; - case GRPC_OP_SEND_STATUS_FROM_SERVER: - status_hash = Z_ARRVAL_PP(value); - if (zend_hash_find(status_hash, "metadata", sizeof("metadata"), - (void **)&inner_value) == SUCCESS) { - if (!create_metadata_array(*inner_value, &trailing_metadata)) { - zend_throw_exception(spl_ce_InvalidArgumentException, - "Bad trailing metadata value given", - 1 TSRMLS_CC); - goto cleanup; - } - ops[op_num].data.send_status_from_server.trailing_metadata = - trailing_metadata.metadata; - ops[op_num].data.send_status_from_server.trailing_metadata_count = - trailing_metadata.count; - } - if (zend_hash_find(status_hash, "code", sizeof("code"), - (void**)&inner_value) == SUCCESS) { - if (Z_TYPE_PP(inner_value) != IS_LONG) { - zend_throw_exception(spl_ce_InvalidArgumentException, - "Status code must be an integer", - 1 TSRMLS_CC); - goto cleanup; - } - ops[op_num].data.send_status_from_server.status = - Z_LVAL_PP(inner_value); - } else { + ops[op_num].data.send_status_from_server.trailing_metadata = + trailing_metadata.metadata; + ops[op_num].data.send_status_from_server.trailing_metadata_count = + trailing_metadata.count; + } + if (php_grpc_zend_hash_find(status_hash, "code", sizeof("code"), + (void**)&inner_value) == SUCCESS) { + if (Z_TYPE_P(inner_value) != IS_LONG) { zend_throw_exception(spl_ce_InvalidArgumentException, - "Integer status code is required", + "Status code must be an integer", 1 TSRMLS_CC); goto cleanup; } - if (zend_hash_find(status_hash, "details", sizeof("details"), - (void**)&inner_value) == SUCCESS) { - if (Z_TYPE_PP(inner_value) != IS_STRING) { - zend_throw_exception(spl_ce_InvalidArgumentException, - "Status details must be a string", - 1 TSRMLS_CC); - goto cleanup; - } - ops[op_num].data.send_status_from_server.status_details = - Z_STRVAL_PP(inner_value); - } else { + ops[op_num].data.send_status_from_server.status = + Z_LVAL_P(inner_value); + } else { + zend_throw_exception(spl_ce_InvalidArgumentException, + "Integer status code is required", + 1 TSRMLS_CC); + goto cleanup; + } + if (php_grpc_zend_hash_find(status_hash, "details", sizeof("details"), + (void**)&inner_value) == SUCCESS) { + if (Z_TYPE_P(inner_value) != IS_STRING) { zend_throw_exception(spl_ce_InvalidArgumentException, - "String status details is required", + "Status details must be a string", 1 TSRMLS_CC); goto cleanup; } - break; - case GRPC_OP_RECV_INITIAL_METADATA: - ops[op_num].data.recv_initial_metadata = &recv_metadata; - break; - case GRPC_OP_RECV_MESSAGE: - ops[op_num].data.recv_message = &message; - break; - case GRPC_OP_RECV_STATUS_ON_CLIENT: - ops[op_num].data.recv_status_on_client.trailing_metadata = - &recv_trailing_metadata; - ops[op_num].data.recv_status_on_client.status = &status; - ops[op_num].data.recv_status_on_client.status_details = - &status_details; - ops[op_num].data.recv_status_on_client.status_details_capacity = - &status_details_capacity; - break; - case GRPC_OP_RECV_CLOSE_ON_SERVER: - ops[op_num].data.recv_close_on_server.cancelled = &cancelled; - break; - 
default: + ops[op_num].data.send_status_from_server.status_details = + Z_STRVAL_P(inner_value); + } else { zend_throw_exception(spl_ce_InvalidArgumentException, - "Unrecognized key in batch", 1 TSRMLS_CC); + "String status details is required", + 1 TSRMLS_CC); goto cleanup; + } + break; + case GRPC_OP_RECV_INITIAL_METADATA: + ops[op_num].data.recv_initial_metadata = &recv_metadata; + break; + case GRPC_OP_RECV_MESSAGE: + ops[op_num].data.recv_message = &message; + break; + case GRPC_OP_RECV_STATUS_ON_CLIENT: + ops[op_num].data.recv_status_on_client.trailing_metadata = + &recv_trailing_metadata; + ops[op_num].data.recv_status_on_client.status = &status; + ops[op_num].data.recv_status_on_client.status_details = + &status_details; + ops[op_num].data.recv_status_on_client.status_details_capacity = + &status_details_capacity; + break; + case GRPC_OP_RECV_CLOSE_ON_SERVER: + ops[op_num].data.recv_close_on_server.cancelled = &cancelled; + break; + default: + zend_throw_exception(spl_ce_InvalidArgumentException, + "Unrecognized key in batch", 1 TSRMLS_CC); + goto cleanup; } ops[op_num].op = (grpc_op_type)index; ops[op_num].flags = 0; ops[op_num].reserved = NULL; op_num++; - } + PHP_GRPC_HASH_FOREACH_END() + error = grpc_call_start_batch(call->wrapped, ops, op_num, call->wrapped, NULL); if (error != GRPC_CALL_OK) { @@ -441,52 +424,65 @@ PHP_METHOD(Call, startBatch) { } grpc_completion_queue_pluck(completion_queue, call->wrapped, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); +#if PHP_MAJOR_VERSION >= 7 + zval recv_md; +#endif for (int i = 0; i < op_num; i++) { switch(ops[i].op) { - case GRPC_OP_SEND_INITIAL_METADATA: - add_property_bool(result, "send_metadata", true); - break; - case GRPC_OP_SEND_MESSAGE: - add_property_bool(result, "send_message", true); - break; - case GRPC_OP_SEND_CLOSE_FROM_CLIENT: - add_property_bool(result, "send_close", true); - break; - case GRPC_OP_SEND_STATUS_FROM_SERVER: - add_property_bool(result, "send_status", true); - break; - case GRPC_OP_RECV_INITIAL_METADATA: - array = grpc_parse_metadata_array(&recv_metadata TSRMLS_CC); - add_property_zval(result, "metadata", array); - Z_DELREF_P(array); - break; - case GRPC_OP_RECV_MESSAGE: - byte_buffer_to_string(message, &message_str, &message_len); - if (message_str == NULL) { - add_property_null(result, "message"); - } else { - add_property_stringl(result, "message", message_str, message_len, - false); - } - break; - case GRPC_OP_RECV_STATUS_ON_CLIENT: - MAKE_STD_ZVAL(recv_status); - object_init(recv_status); - array = grpc_parse_metadata_array(&recv_trailing_metadata TSRMLS_CC); - add_property_zval(recv_status, "metadata", array); - Z_DELREF_P(array); - add_property_long(recv_status, "code", status); - add_property_string(recv_status, "details", status_details, true); - add_property_zval(result, "status", recv_status); - Z_DELREF_P(recv_status); - break; - case GRPC_OP_RECV_CLOSE_ON_SERVER: - add_property_bool(result, "cancelled", cancelled); - break; - default: - break; + case GRPC_OP_SEND_INITIAL_METADATA: + add_property_bool(result, "send_metadata", true); + break; + case GRPC_OP_SEND_MESSAGE: + add_property_bool(result, "send_message", true); + break; + case GRPC_OP_SEND_CLOSE_FROM_CLIENT: + add_property_bool(result, "send_close", true); + break; + case GRPC_OP_SEND_STATUS_FROM_SERVER: + add_property_bool(result, "send_status", true); + break; + case GRPC_OP_RECV_INITIAL_METADATA: +#if PHP_MAJOR_VERSION < 7 + array = grpc_parse_metadata_array(&recv_metadata TSRMLS_CC); + add_property_zval(result, "metadata", array); +#else + 
recv_md = *grpc_parse_metadata_array(&recv_metadata); + add_property_zval(result, "metadata", &recv_md); +#endif + PHP_GRPC_DELREF(array); + break; + case GRPC_OP_RECV_MESSAGE: + byte_buffer_to_string(message, &message_str, &message_len); + if (message_str == NULL) { + add_property_null(result, "message"); + } else { + php_grpc_add_property_stringl(result, "message", message_str, + message_len, false); + } + break; + case GRPC_OP_RECV_STATUS_ON_CLIENT: +#if PHP_MAJOR_VERSION < 7 + array = grpc_parse_metadata_array(&recv_trailing_metadata TSRMLS_CC); + add_property_zval(recv_status, "metadata", array); +#else + recv_md = *grpc_parse_metadata_array(&recv_trailing_metadata); + add_property_zval(recv_status, "metadata", &recv_md); +#endif + PHP_GRPC_DELREF(array); + add_property_long(recv_status, "code", status); + php_grpc_add_property_string(recv_status, "details", status_details, + true); + add_property_zval(result, "status", recv_status); + PHP_GRPC_DELREF(recv_status); + break; + case GRPC_OP_RECV_CLOSE_ON_SERVER: + add_property_bool(result, "cancelled", cancelled); + break; + default: + break; } } + cleanup: grpc_metadata_array_destroy(&metadata); grpc_metadata_array_destroy(&trailing_metadata); @@ -511,9 +507,8 @@ cleanup: * @return string The URI of the endpoint */ PHP_METHOD(Call, getPeer) { - wrapped_grpc_call *call = - (wrapped_grpc_call *)zend_object_store_get_object(getThis() TSRMLS_CC); - RETURN_STRING(grpc_call_get_peer(call->wrapped), 1); + wrapped_grpc_call *call = Z_WRAPPED_GRPC_CALL_P(getThis()); + PHP_GRPC_RETURN_STRING(grpc_call_get_peer(call->wrapped), 1); } /** @@ -521,8 +516,7 @@ PHP_METHOD(Call, getPeer) { * has not already ended with another status. */ PHP_METHOD(Call, cancel) { - wrapped_grpc_call *call = - (wrapped_grpc_call *)zend_object_store_get_object(getThis() TSRMLS_CC); + wrapped_grpc_call *call = Z_WRAPPED_GRPC_CALL_P(getThis()); grpc_call_cancel(call->wrapped, NULL); } @@ -544,11 +538,8 @@ PHP_METHOD(Call, setCredentials) { } wrapped_grpc_call_credentials *creds = - (wrapped_grpc_call_credentials *)zend_object_store_get_object( - creds_obj TSRMLS_CC); - - wrapped_grpc_call *call = - (wrapped_grpc_call *)zend_object_store_get_object(getThis() TSRMLS_CC); + Z_WRAPPED_GRPC_CALL_CREDS_P(creds_obj); + wrapped_grpc_call *call = Z_WRAPPED_GRPC_CALL_P(getThis()); grpc_call_error error = GRPC_CALL_ERROR; error = grpc_call_set_credentials(call->wrapped, creds->wrapped); @@ -556,16 +547,18 @@ PHP_METHOD(Call, setCredentials) { } static zend_function_entry call_methods[] = { - PHP_ME(Call, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) - PHP_ME(Call, startBatch, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Call, getPeer, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Call, cancel, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Call, setCredentials, NULL, ZEND_ACC_PUBLIC) - PHP_FE_END}; + PHP_ME(Call, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) + PHP_ME(Call, startBatch, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Call, getPeer, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Call, cancel, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Call, setCredentials, NULL, ZEND_ACC_PUBLIC) + PHP_FE_END +}; void grpc_init_call(TSRMLS_D) { zend_class_entry ce; INIT_CLASS_ENTRY(ce, "Grpc\\Call", call_methods); ce.create_object = create_wrapped_grpc_call; grpc_ce_call = zend_register_internal_class(&ce TSRMLS_CC); + PHP_GRPC_INIT_HANDLER(wrapped_grpc_call, call_ce_handlers); } diff --git a/src/php/ext/grpc/call.h b/src/php/ext/grpc/call.h index 36c5f2d2724..e49f9b382ae 100644 --- a/src/php/ext/grpc/call.h +++ b/src/php/ext/grpc/call.h @@ -49,23 +49,38 @@ 
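The call.h hunk that follows moves the wrapper struct to the PHP 7 object layout: the gRPC fields come first and the engine-managed `zend_object` is embedded as the final member, recovered with `XtOffsetOf`. The diff shows the accessor side but not what a creator macro such as `PHP_GRPC_ALLOC_CLASS_OBJECT` expands to; a plausible PHP 7 sketch, not the project's actual macro, looks like this:

```c
#include "php.h"
#include <stdbool.h>
#include <grpc/grpc.h>

/* Sketch of the PHP 7 custom-object pattern assumed by the hunk below:
 * user fields first, zend_object last, allocated with room for declared
 * properties, and only the embedded zend_object handed to the engine. */
typedef struct wrapped_grpc_call {
  bool owned;
  grpc_call *wrapped;
  zend_object std;                 /* must remain the last member */
} wrapped_grpc_call;

static zend_object_handlers call_ce_handlers;  /* offset + free_obj set once */

static zend_object *create_wrapped_grpc_call_sketch(zend_class_entry *ce) {
  wrapped_grpc_call *intern =
      ecalloc(1, sizeof(*intern) + zend_object_properties_size(ce));
  zend_object_std_init(&intern->std, ce);
  object_properties_init(&intern->std, ce);
  intern->std.handlers = &call_ce_handlers;
  return &intern->std;
}
```

With the handlers' `offset` set to `XtOffsetOf(wrapped_grpc_call, std)`, presumably what `PHP_GRPC_INIT_HANDLER` arranges, the engine can step back from the embedded `zend_object` to the start of the struct, which is the same arithmetic `Z_WRAPPED_GRPC_CALL_P` performs in the header below.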
extern zend_class_entry *grpc_ce_call; /* Wrapper struct for grpc_call that can be associated with a PHP object */ -typedef struct wrapped_grpc_call { - zend_object std; - +PHP_GRPC_WRAP_OBJECT_START(wrapped_grpc_call) bool owned; grpc_call *wrapped; -} wrapped_grpc_call; +PHP_GRPC_WRAP_OBJECT_END(wrapped_grpc_call) -/* Initializes the Call PHP class */ -void grpc_init_call(TSRMLS_D); +#if PHP_MAJOR_VERSION < 7 -/* Creates a Call object that wraps the given grpc_call struct */ -zval *grpc_php_wrap_call(grpc_call *wrapped, bool owned TSRMLS_DC); +#define Z_WRAPPED_GRPC_CALL_P(zv) \ + (wrapped_grpc_call *)zend_object_store_get_object(zv TSRMLS_CC) + +#else + +static inline wrapped_grpc_call +*wrapped_grpc_call_from_obj(zend_object *obj) { + return (wrapped_grpc_call*)((char*)(obj) - + XtOffsetOf(wrapped_grpc_call, std)); +} + +#define Z_WRAPPED_GRPC_CALL_P(zv) wrapped_grpc_call_from_obj(Z_OBJ_P((zv))) + +#endif /* PHP_MAJOR_VERSION */ /* Creates and returns a PHP associative array of metadata from a C array of * call metadata */ zval *grpc_parse_metadata_array(grpc_metadata_array *metadata_array TSRMLS_DC); +/* Creates a Call object that wraps the given grpc_call struct */ +zval *grpc_php_wrap_call(grpc_call *wrapped, bool owned TSRMLS_DC); + +/* Initializes the Call PHP class */ +void grpc_init_call(TSRMLS_D); + /* Populates a grpc_metadata_array with the data in a PHP array object. Returns true on success and false on failure */ bool create_metadata_array(zval *array, grpc_metadata_array *metadata); diff --git a/src/php/ext/grpc/call_credentials.c b/src/php/ext/grpc/call_credentials.c index ec0e6b91813..6921a5df17b 100644 --- a/src/php/ext/grpc/call_credentials.c +++ b/src/php/ext/grpc/call_credentials.c @@ -52,44 +52,35 @@ #include zend_class_entry *grpc_ce_call_credentials; +#if PHP_MAJOR_VERSION >= 7 +static zend_object_handlers call_credentials_ce_handlers; +#endif /* Frees and destroys an instance of wrapped_grpc_call_credentials */ -void free_wrapped_grpc_call_credentials(void *object TSRMLS_DC) { - wrapped_grpc_call_credentials *creds = - (wrapped_grpc_call_credentials *)object; - if (creds->wrapped != NULL) { - grpc_call_credentials_release(creds->wrapped); +PHP_GRPC_FREE_WRAPPED_FUNC_START(wrapped_grpc_call_credentials) + if (p->wrapped != NULL) { + grpc_call_credentials_release(p->wrapped); } - efree(creds); -} +PHP_GRPC_FREE_WRAPPED_FUNC_END() /* Initializes an instance of wrapped_grpc_call_credentials to be * associated with an object of a class specified by class_type */ -zend_object_value create_wrapped_grpc_call_credentials( +php_grpc_zend_object create_wrapped_grpc_call_credentials( zend_class_entry *class_type TSRMLS_DC) { - zend_object_value retval; - wrapped_grpc_call_credentials *intern; - - intern = (wrapped_grpc_call_credentials *)emalloc( - sizeof(wrapped_grpc_call_credentials)); - memset(intern, 0, sizeof(wrapped_grpc_call_credentials)); - + PHP_GRPC_ALLOC_CLASS_OBJECT(wrapped_grpc_call_credentials); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); - retval.handle = zend_objects_store_put( - intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, - free_wrapped_grpc_call_credentials, NULL TSRMLS_CC); - retval.handlers = zend_get_std_object_handlers(); - return retval; + PHP_GRPC_FREE_CLASS_OBJECT(wrapped_grpc_call_credentials, + call_credentials_ce_handlers); } -zval *grpc_php_wrap_call_credentials(grpc_call_credentials *wrapped TSRMLS_DC) { +zval *grpc_php_wrap_call_credentials(grpc_call_credentials 
+ *wrapped TSRMLS_DC) { zval *credentials_object; - MAKE_STD_ZVAL(credentials_object); + PHP_GRPC_MAKE_STD_ZVAL(credentials_object); object_init_ex(credentials_object, grpc_ce_call_credentials); wrapped_grpc_call_credentials *credentials = - (wrapped_grpc_call_credentials *)zend_object_store_get_object( - credentials_object TSRMLS_CC); + Z_WRAPPED_GRPC_CALL_CREDS_P(credentials_object); credentials->wrapped = wrapped; return credentials_object; } @@ -114,15 +105,15 @@ PHP_METHOD(CallCredentials, createComposite) { return; } wrapped_grpc_call_credentials *cred1 = - (wrapped_grpc_call_credentials *)zend_object_store_get_object( - cred1_obj TSRMLS_CC); + Z_WRAPPED_GRPC_CALL_CREDS_P(cred1_obj); wrapped_grpc_call_credentials *cred2 = - (wrapped_grpc_call_credentials *)zend_object_store_get_object( - cred2_obj TSRMLS_CC); + Z_WRAPPED_GRPC_CALL_CREDS_P(cred2_obj); grpc_call_credentials *creds = grpc_composite_call_credentials_create(cred1->wrapped, cred2->wrapped, NULL); - zval *creds_object = grpc_php_wrap_call_credentials(creds TSRMLS_CC); + zval *creds_object; + PHP_GRPC_MAKE_STD_ZVAL(creds_object); + creds_object = grpc_php_wrap_call_credentials(creds TSRMLS_CC); RETURN_DESTROY_ZVAL(creds_object); } @@ -141,13 +132,10 @@ PHP_METHOD(CallCredentials, createFromPlugin) { memset(fci_cache, 0, sizeof(zend_fcall_info_cache)); /* "f" == 1 function */ - if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "f", fci, - fci_cache, - fci->params, - fci->param_count) == FAILURE) { + if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "f*", fci, fci_cache, + fci->params, fci->param_count) == FAILURE) { zend_throw_exception(spl_ce_InvalidArgumentException, - "createFromPlugin expects 1 callback", - 1 TSRMLS_CC); + "createFromPlugin expects 1 callback", 1 TSRMLS_CC); return; } @@ -165,9 +153,11 @@ PHP_METHOD(CallCredentials, createFromPlugin) { plugin.state = (void *)state; plugin.type = ""; - grpc_call_credentials *creds = grpc_metadata_credentials_create_from_plugin( - plugin, NULL); - zval *creds_object = grpc_php_wrap_call_credentials(creds TSRMLS_CC); + grpc_call_credentials *creds = + grpc_metadata_credentials_create_from_plugin(plugin, NULL); + zval *creds_object; + PHP_GRPC_MAKE_STD_ZVAL(creds_object); + creds_object = grpc_php_wrap_call_credentials(creds TSRMLS_CC); RETURN_DESTROY_ZVAL(creds_object); } @@ -181,17 +171,23 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context, /* prepare to call the user callback function with info from the * grpc_auth_metadata_context */ - zval **params[1]; zval *arg; - zval *retval; - MAKE_STD_ZVAL(arg); + PHP_GRPC_MAKE_STD_ZVAL(arg); object_init(arg); - add_property_string(arg, "service_url", context.service_url, true); - add_property_string(arg, "method_name", context.method_name, true); + php_grpc_add_property_string(arg, "service_url", context.service_url, true); + php_grpc_add_property_string(arg, "method_name", context.method_name, true); + zval *retval; + PHP_GRPC_MAKE_STD_ZVAL(retval); +#if PHP_MAJOR_VERSION < 7 + zval **params[1]; params[0] = &arg; - state->fci->param_count = 1; state->fci->params = params; state->fci->retval_ptr_ptr = &retval; +#else + state->fci->params = arg; + state->fci->retval = retval; +#endif + state->fci->param_count = 1; /* call the user callback function */ zend_call_function(state->fci, state->fci_cache TSRMLS_CC); @@ -200,6 +196,7 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context, zend_throw_exception(spl_ce_InvalidArgumentException, "plugin callback must return metadata array", 1 
TSRMLS_CC); + return; } grpc_metadata_array metadata; @@ -207,6 +204,7 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context, zend_throw_exception(spl_ce_InvalidArgumentException, "invalid metadata", 1 TSRMLS_CC); grpc_metadata_array_destroy(&metadata); + return; } /* TODO: handle error */ @@ -229,11 +227,14 @@ static zend_function_entry call_credentials_methods[] = { ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(CallCredentials, createFromPlugin, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) - PHP_FE_END}; + PHP_FE_END +}; void grpc_init_call_credentials(TSRMLS_D) { zend_class_entry ce; INIT_CLASS_ENTRY(ce, "Grpc\\CallCredentials", call_credentials_methods); ce.create_object = create_wrapped_grpc_call_credentials; grpc_ce_call_credentials = zend_register_internal_class(&ce TSRMLS_CC); + PHP_GRPC_INIT_HANDLER(wrapped_grpc_call_credentials, + call_credentials_ce_handlers); } diff --git a/src/php/ext/grpc/call_credentials.h b/src/php/ext/grpc/call_credentials.h index d2f6a92449b..c1d85c0fb28 100755 --- a/src/php/ext/grpc/call_credentials.h +++ b/src/php/ext/grpc/call_credentials.h @@ -51,11 +51,27 @@ extern zend_class_entry *grpc_ce_call_credentials; /* Wrapper struct for grpc_call_credentials that can be associated * with a PHP object */ -typedef struct wrapped_grpc_call_credentials { - zend_object std; - +PHP_GRPC_WRAP_OBJECT_START(wrapped_grpc_call_credentials) grpc_call_credentials *wrapped; -} wrapped_grpc_call_credentials; +PHP_GRPC_WRAP_OBJECT_END(wrapped_grpc_call_credentials) + +#if PHP_MAJOR_VERSION < 7 + +#define Z_WRAPPED_GRPC_CALL_CREDS_P(zv) \ + (wrapped_grpc_call_credentials *)zend_object_store_get_object(zv TSRMLS_CC) + +#else + +static inline wrapped_grpc_call_credentials +*wrapped_grpc_call_credentials_from_obj(zend_object *obj) { + return (wrapped_grpc_call_credentials*)( + (char*)(obj) - XtOffsetOf(wrapped_grpc_call_credentials, std)); +} + +#define Z_WRAPPED_GRPC_CALL_CREDS_P(zv) \ + wrapped_grpc_call_credentials_from_obj(Z_OBJ_P((zv))) + +#endif /* PHP_MAJOR_VERSION */ /* Struct to hold callback function for plugin creds API */ typedef struct plugin_state { diff --git a/src/php/ext/grpc/channel.c b/src/php/ext/grpc/channel.c index 8d94c59683e..b5a2c9f6baf 100644 --- a/src/php/ext/grpc/channel.c +++ b/src/php/ext/grpc/channel.c @@ -56,72 +56,68 @@ #include "timeval.h" zend_class_entry *grpc_ce_channel; +#if PHP_MAJOR_VERSION >= 7 +static zend_object_handlers channel_ce_handlers; +#endif /* Frees and destroys an instance of wrapped_grpc_channel */ -void free_wrapped_grpc_channel(void *object TSRMLS_DC) { - wrapped_grpc_channel *channel = (wrapped_grpc_channel *)object; - if (channel->wrapped != NULL) { - grpc_channel_destroy(channel->wrapped); +PHP_GRPC_FREE_WRAPPED_FUNC_START(wrapped_grpc_channel) + if (p->wrapped != NULL) { + grpc_channel_destroy(p->wrapped); } - efree(channel); -} +PHP_GRPC_FREE_WRAPPED_FUNC_END() /* Initializes an instance of wrapped_grpc_channel to be associated with an * object of a class specified by class_type */ -zend_object_value create_wrapped_grpc_channel(zend_class_entry *class_type - TSRMLS_DC) { - zend_object_value retval; - wrapped_grpc_channel *intern; - intern = (wrapped_grpc_channel *)emalloc(sizeof(wrapped_grpc_channel)); - memset(intern, 0, sizeof(wrapped_grpc_channel)); +php_grpc_zend_object create_wrapped_grpc_channel(zend_class_entry *class_type + TSRMLS_DC) { + PHP_GRPC_ALLOC_CLASS_OBJECT(wrapped_grpc_channel); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); - 
retval.handle = zend_objects_store_put( - intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, - free_wrapped_grpc_channel, NULL TSRMLS_CC); - retval.handlers = zend_get_std_object_handlers(); - return retval; + PHP_GRPC_FREE_CLASS_OBJECT(wrapped_grpc_channel, channel_ce_handlers); } -void php_grpc_read_args_array(zval *args_array, grpc_channel_args *args TSRMLS_DC) { +void php_grpc_read_args_array(zval *args_array, + grpc_channel_args *args TSRMLS_DC) { HashTable *array_hash; - HashPosition array_pointer; int args_index; - zval **data; - char *key; - uint key_len; - ulong index; array_hash = Z_ARRVAL_P(args_array); + if (!array_hash) { + zend_throw_exception(spl_ce_InvalidArgumentException, + "array_hash is NULL", 1 TSRMLS_CC); + return; + } args->num_args = zend_hash_num_elements(array_hash); args->args = ecalloc(args->num_args, sizeof(grpc_arg)); args_index = 0; - for (zend_hash_internal_pointer_reset_ex(array_hash, &array_pointer); - zend_hash_get_current_data_ex(array_hash, (void **)&data, - &array_pointer) == SUCCESS; - zend_hash_move_forward_ex(array_hash, &array_pointer)) { - if (zend_hash_get_current_key_ex(array_hash, &key, &key_len, &index, 0, - &array_pointer) != HASH_KEY_IS_STRING) { + + char *key = NULL; + zval *data; + int key_type; + + PHP_GRPC_HASH_FOREACH_STR_KEY_VAL_START(array_hash, key, key_type, data) + if (key_type != HASH_KEY_IS_STRING) { zend_throw_exception(spl_ce_InvalidArgumentException, "args keys must be strings", 1 TSRMLS_CC); return; } args->args[args_index].key = key; - switch (Z_TYPE_P(*data)) { - case IS_LONG: - args->args[args_index].value.integer = (int)Z_LVAL_P(*data); - args->args[args_index].type = GRPC_ARG_INTEGER; - break; - case IS_STRING: - args->args[args_index].value.string = Z_STRVAL_P(*data); - args->args[args_index].type = GRPC_ARG_STRING; - break; - default: - zend_throw_exception(spl_ce_InvalidArgumentException, - "args values must be int or string", 1 TSRMLS_CC); - return; + switch (Z_TYPE_P(data)) { + case IS_LONG: + args->args[args_index].value.integer = (int)Z_LVAL_P(data); + args->args[args_index].type = GRPC_ARG_INTEGER; + break; + case IS_STRING: + args->args[args_index].value.string = Z_STRVAL_P(data); + args->args[args_index].type = GRPC_ARG_STRING; + break; + default: + zend_throw_exception(spl_ce_InvalidArgumentException, + "args values must be int or string", 1 TSRMLS_CC); + return; } args_index++; - } + PHP_GRPC_HASH_FOREACH_END() } /** @@ -132,16 +128,15 @@ void php_grpc_read_args_array(zval *args_array, grpc_channel_args *args TSRMLS_D * @param array $args The arguments to pass to the Channel (optional) */ PHP_METHOD(Channel, __construct) { - wrapped_grpc_channel *channel = - (wrapped_grpc_channel *)zend_object_store_get_object( - getThis() TSRMLS_CC); + wrapped_grpc_channel *channel = Z_WRAPPED_GRPC_CHANNEL_P(getThis()); + zval *creds_obj = NULL; char *target; - int target_length; + php_grpc_int target_length; zval *args_array = NULL; grpc_channel_args args; HashTable *array_hash; - zval **creds_obj = NULL; wrapped_grpc_channel_credentials *creds = NULL; + /* "sa" == 1 string, 1 array */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sa", &target, &target_length, &args_array) == FAILURE) { @@ -150,21 +145,20 @@ PHP_METHOD(Channel, __construct) { return; } array_hash = Z_ARRVAL_P(args_array); - if (zend_hash_find(array_hash, "credentials", sizeof("credentials"), + if (php_grpc_zend_hash_find(array_hash, "credentials", sizeof("credentials"), (void **)&creds_obj) == SUCCESS) { - if (Z_TYPE_P(*creds_obj) == 
IS_NULL) { + if (Z_TYPE_P(creds_obj) == IS_NULL) { creds = NULL; - zend_hash_del(array_hash, "credentials", 12); - } else if (zend_get_class_entry(*creds_obj TSRMLS_CC) != - grpc_ce_channel_credentials) { + php_grpc_zend_hash_del(array_hash, "credentials", sizeof("credentials")); + } else if (PHP_GRPC_GET_CLASS_ENTRY(creds_obj) != + grpc_ce_channel_credentials) { zend_throw_exception(spl_ce_InvalidArgumentException, "credentials must be a ChannelCredentials object", 1 TSRMLS_CC); return; } else { - creds = (wrapped_grpc_channel_credentials *)zend_object_store_get_object( - *creds_obj TSRMLS_CC); - zend_hash_del(array_hash, "credentials", 12); + creds = Z_WRAPPED_GRPC_CHANNEL_CREDS_P(creds_obj); + php_grpc_zend_hash_del(array_hash, "credentials", sizeof("credentials")); } } php_grpc_read_args_array(args_array, &args TSRMLS_CC); @@ -182,9 +176,8 @@ PHP_METHOD(Channel, __construct) { * @return string The URI of the endpoint */ PHP_METHOD(Channel, getTarget) { - wrapped_grpc_channel *channel = - (wrapped_grpc_channel *)zend_object_store_get_object(getThis() TSRMLS_CC); - RETURN_STRING(grpc_channel_get_target(channel->wrapped), 1); + wrapped_grpc_channel *channel = Z_WRAPPED_GRPC_CHANNEL_P(getThis()); + PHP_GRPC_RETURN_STRING(grpc_channel_get_target(channel->wrapped), 1); } /** @@ -193,12 +186,12 @@ PHP_METHOD(Channel, getTarget) { * @return long The grpc connectivity state */ PHP_METHOD(Channel, getConnectivityState) { - wrapped_grpc_channel *channel = - (wrapped_grpc_channel *)zend_object_store_get_object(getThis() TSRMLS_CC); - bool try_to_connect; + wrapped_grpc_channel *channel = Z_WRAPPED_GRPC_CHANNEL_P(getThis()); + bool try_to_connect = false; + /* "|b" == 1 optional bool */ - if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|b", &try_to_connect) == - FAILURE) { + if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|b", &try_to_connect) + == FAILURE) { zend_throw_exception(spl_ce_InvalidArgumentException, "getConnectivityState expects a bool", 1 TSRMLS_CC); return; @@ -215,28 +208,26 @@ PHP_METHOD(Channel, getConnectivityState) { * before deadline */ PHP_METHOD(Channel, watchConnectivityState) { - wrapped_grpc_channel *channel = - (wrapped_grpc_channel *)zend_object_store_get_object(getThis() TSRMLS_CC); - long last_state; + wrapped_grpc_channel *channel = Z_WRAPPED_GRPC_CHANNEL_P(getThis()); + php_grpc_long last_state; zval *deadline_obj; + /* "lO" == 1 long 1 object */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "lO", &last_state, &deadline_obj, grpc_ce_timeval) == FAILURE) { zend_throw_exception(spl_ce_InvalidArgumentException, - "watchConnectivityState expects 1 long 1 timeval", - 1 TSRMLS_CC); + "watchConnectivityState expects 1 long 1 timeval", 1 TSRMLS_CC); return; } - wrapped_grpc_timeval *deadline = - (wrapped_grpc_timeval *)zend_object_store_get_object( - deadline_obj TSRMLS_CC); - grpc_channel_watch_connectivity_state( - channel->wrapped, (grpc_connectivity_state)last_state, - deadline->wrapped, completion_queue, NULL); - grpc_event event = grpc_completion_queue_pluck( - completion_queue, NULL, - gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + wrapped_grpc_timeval *deadline = Z_WRAPPED_GRPC_TIMEVAL_P(deadline_obj); + grpc_channel_watch_connectivity_state(channel->wrapped, + (grpc_connectivity_state)last_state, + deadline->wrapped, completion_queue, + NULL); + grpc_event event = + grpc_completion_queue_pluck(completion_queue, NULL, + gpr_inf_future(GPR_CLOCK_REALTIME), NULL); RETURN_BOOL(event.success); } @@ -244,8 +235,7 @@ PHP_METHOD(Channel, 
watchConnectivityState) { * Close the channel */ PHP_METHOD(Channel, close) { - wrapped_grpc_channel *channel = - (wrapped_grpc_channel *)zend_object_store_get_object(getThis() TSRMLS_CC); + wrapped_grpc_channel *channel = Z_WRAPPED_GRPC_CHANNEL_P(getThis()); if (channel->wrapped != NULL) { grpc_channel_destroy(channel->wrapped); channel->wrapped = NULL; @@ -253,16 +243,18 @@ PHP_METHOD(Channel, close) { } static zend_function_entry channel_methods[] = { - PHP_ME(Channel, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) - PHP_ME(Channel, getTarget, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Channel, getConnectivityState, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Channel, watchConnectivityState, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Channel, close, NULL, ZEND_ACC_PUBLIC) - PHP_FE_END}; + PHP_ME(Channel, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) + PHP_ME(Channel, getTarget, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Channel, getConnectivityState, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Channel, watchConnectivityState, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Channel, close, NULL, ZEND_ACC_PUBLIC) + PHP_FE_END +}; void grpc_init_channel(TSRMLS_D) { zend_class_entry ce; INIT_CLASS_ENTRY(ce, "Grpc\\Channel", channel_methods); ce.create_object = create_wrapped_grpc_channel; grpc_ce_channel = zend_register_internal_class(&ce TSRMLS_CC); + PHP_GRPC_INIT_HANDLER(wrapped_grpc_channel, channel_ce_handlers); } diff --git a/src/php/ext/grpc/channel.h b/src/php/ext/grpc/channel.h index cc5823ee7f7..0b815657d33 100755 --- a/src/php/ext/grpc/channel.h +++ b/src/php/ext/grpc/channel.h @@ -49,16 +49,33 @@ extern zend_class_entry *grpc_ce_channel; /* Wrapper struct for grpc_channel that can be associated with a PHP object */ -typedef struct wrapped_grpc_channel { - zend_object std; - +PHP_GRPC_WRAP_OBJECT_START(wrapped_grpc_channel) grpc_channel *wrapped; -} wrapped_grpc_channel; +PHP_GRPC_WRAP_OBJECT_END(wrapped_grpc_channel) + +#if PHP_MAJOR_VERSION < 7 + +#define Z_WRAPPED_GRPC_CHANNEL_P(zv) \ + (wrapped_grpc_channel *)zend_object_store_get_object(zv TSRMLS_CC) + +#else + +static inline wrapped_grpc_channel +*wrapped_grpc_channel_from_obj(zend_object *obj) { + return (wrapped_grpc_channel*)((char*)(obj) - + XtOffsetOf(wrapped_grpc_channel, std)); +} + +#define Z_WRAPPED_GRPC_CHANNEL_P(zv) \ + wrapped_grpc_channel_from_obj(Z_OBJ_P((zv))) + +#endif /* PHP_MAJOR_VERSION */ /* Initializes the Channel class */ void grpc_init_channel(TSRMLS_D); /* Iterates through a PHP array and populates args with the contents */ -void php_grpc_read_args_array(zval *args_array, grpc_channel_args *args TSRMLS_DC); +void php_grpc_read_args_array(zval *args_array, grpc_channel_args *args + TSRMLS_DC); #endif /* NET_GRPC_PHP_GRPC_CHANNEL_H_ */ diff --git a/src/php/ext/grpc/channel_credentials.c b/src/php/ext/grpc/channel_credentials.c index b76fb105f36..0b356aa25fc 100644 --- a/src/php/ext/grpc/channel_credentials.c +++ b/src/php/ext/grpc/channel_credentials.c @@ -52,7 +52,9 @@ #include zend_class_entry *grpc_ce_channel_credentials; - +#if PHP_MAJOR_VERSION >= 7 +static zend_object_handlers channel_credentials_ce_handlers; +#endif static char *default_pem_root_certs = NULL; static grpc_ssl_roots_override_result get_ssl_roots_override( @@ -65,42 +67,30 @@ static grpc_ssl_roots_override_result get_ssl_roots_override( } /* Frees and destroys an instance of wrapped_grpc_channel_credentials */ -void free_wrapped_grpc_channel_credentials(void *object TSRMLS_DC) { - wrapped_grpc_channel_credentials *creds = - (wrapped_grpc_channel_credentials *)object; - if (creds->wrapped != 
NULL) { - grpc_channel_credentials_release(creds->wrapped); +PHP_GRPC_FREE_WRAPPED_FUNC_START(wrapped_grpc_channel_credentials) + if (p->wrapped != NULL) { + grpc_channel_credentials_release(p->wrapped); } - efree(creds); -} +PHP_GRPC_FREE_WRAPPED_FUNC_END() /* Initializes an instance of wrapped_grpc_channel_credentials to be * associated with an object of a class specified by class_type */ -zend_object_value create_wrapped_grpc_channel_credentials( +php_grpc_zend_object create_wrapped_grpc_channel_credentials( zend_class_entry *class_type TSRMLS_DC) { - zend_object_value retval; - wrapped_grpc_channel_credentials *intern; - - intern = (wrapped_grpc_channel_credentials *)emalloc( - sizeof(wrapped_grpc_channel_credentials)); - memset(intern, 0, sizeof(wrapped_grpc_channel_credentials)); - + PHP_GRPC_ALLOC_CLASS_OBJECT(wrapped_grpc_channel_credentials); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); - retval.handle = zend_objects_store_put( - intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, - free_wrapped_grpc_channel_credentials, NULL TSRMLS_CC); - retval.handlers = zend_get_std_object_handlers(); - return retval; + PHP_GRPC_FREE_CLASS_OBJECT(wrapped_grpc_channel_credentials, + channel_credentials_ce_handlers); } -zval *grpc_php_wrap_channel_credentials(grpc_channel_credentials *wrapped TSRMLS_DC) { +zval *grpc_php_wrap_channel_credentials(grpc_channel_credentials + *wrapped TSRMLS_DC) { zval *credentials_object; - MAKE_STD_ZVAL(credentials_object); + PHP_GRPC_MAKE_STD_ZVAL(credentials_object); object_init_ex(credentials_object, grpc_ce_channel_credentials); wrapped_grpc_channel_credentials *credentials = - (wrapped_grpc_channel_credentials *)zend_object_store_get_object( - credentials_object TSRMLS_CC); + Z_WRAPPED_GRPC_CHANNEL_CREDS_P(credentials_object); credentials->wrapped = wrapped; return credentials_object; } @@ -112,7 +102,9 @@ zval *grpc_php_wrap_channel_credentials(grpc_channel_credentials *wrapped TSRMLS */ PHP_METHOD(ChannelCredentials, setDefaultRootsPem) { char *pem_roots; - int pem_roots_length; + php_grpc_int pem_roots_length; + + /* "s" == 1 string */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &pem_roots, &pem_roots_length) == FAILURE) { zend_throw_exception(spl_ce_InvalidArgumentException, @@ -129,7 +121,9 @@ PHP_METHOD(ChannelCredentials, setDefaultRootsPem) { */ PHP_METHOD(ChannelCredentials, createDefault) { grpc_channel_credentials *creds = grpc_google_default_credentials_create(); - zval *creds_object = grpc_php_wrap_channel_credentials(creds TSRMLS_CC); + zval *creds_object; + PHP_GRPC_MAKE_STD_ZVAL(creds_object); + creds_object = grpc_php_wrap_channel_credentials(creds TSRMLS_CC); RETURN_DESTROY_ZVAL(creds_object); } @@ -146,11 +140,13 @@ PHP_METHOD(ChannelCredentials, createSsl) { char *pem_root_certs = NULL; grpc_ssl_pem_key_cert_pair pem_key_cert_pair; - int root_certs_length = 0, private_key_length = 0, cert_chain_length = 0; + php_grpc_int root_certs_length = 0; + php_grpc_int private_key_length = 0; + php_grpc_int cert_chain_length = 0; pem_key_cert_pair.private_key = pem_key_cert_pair.cert_chain = NULL; - /* "|s!s!s! == 3 optional nullable strings */ + /* "|s!s!s!" 
== 3 optional nullable strings */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s!s!s!", &pem_root_certs, &root_certs_length, &pem_key_cert_pair.private_key, @@ -164,7 +160,9 @@ PHP_METHOD(ChannelCredentials, createSsl) { grpc_channel_credentials *creds = grpc_ssl_credentials_create( pem_root_certs, pem_key_cert_pair.private_key == NULL ? NULL : &pem_key_cert_pair, NULL); - zval *creds_object = grpc_php_wrap_channel_credentials(creds TSRMLS_CC); + zval *creds_object; + PHP_GRPC_MAKE_STD_ZVAL(creds_object); + creds_object = grpc_php_wrap_channel_credentials(creds TSRMLS_CC); RETURN_DESTROY_ZVAL(creds_object); } @@ -178,7 +176,7 @@ PHP_METHOD(ChannelCredentials, createComposite) { zval *cred1_obj; zval *cred2_obj; - /* "OO" == 3 Objects */ + /* "OO" == 2 Objects */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "OO", &cred1_obj, grpc_ce_channel_credentials, &cred2_obj, grpc_ce_call_credentials) == FAILURE) { @@ -187,15 +185,15 @@ PHP_METHOD(ChannelCredentials, createComposite) { return; } wrapped_grpc_channel_credentials *cred1 = - (wrapped_grpc_channel_credentials *)zend_object_store_get_object( - cred1_obj TSRMLS_CC); + Z_WRAPPED_GRPC_CHANNEL_CREDS_P(cred1_obj); wrapped_grpc_call_credentials *cred2 = - (wrapped_grpc_call_credentials *)zend_object_store_get_object( - cred2_obj TSRMLS_CC); + Z_WRAPPED_GRPC_CALL_CREDS_P(cred2_obj); grpc_channel_credentials *creds = grpc_composite_channel_credentials_create(cred1->wrapped, cred2->wrapped, NULL); - zval *creds_object = grpc_php_wrap_channel_credentials(creds TSRMLS_CC); + zval *creds_object; + PHP_GRPC_MAKE_STD_ZVAL(creds_object); + creds_object = grpc_php_wrap_channel_credentials(creds TSRMLS_CC); RETURN_DESTROY_ZVAL(creds_object); } @@ -218,7 +216,8 @@ static zend_function_entry channel_credentials_methods[] = { ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(ChannelCredentials, createInsecure, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) - PHP_FE_END}; + PHP_FE_END +}; void grpc_init_channel_credentials(TSRMLS_D) { zend_class_entry ce; @@ -227,4 +226,6 @@ void grpc_init_channel_credentials(TSRMLS_D) { grpc_set_ssl_roots_override_callback(get_ssl_roots_override); ce.create_object = create_wrapped_grpc_channel_credentials; grpc_ce_channel_credentials = zend_register_internal_class(&ce TSRMLS_CC); + PHP_GRPC_INIT_HANDLER(wrapped_grpc_channel_credentials, + channel_credentials_ce_handlers); } diff --git a/src/php/ext/grpc/channel_credentials.h b/src/php/ext/grpc/channel_credentials.h index d89984ce604..b043d91fa69 100755 --- a/src/php/ext/grpc/channel_credentials.h +++ b/src/php/ext/grpc/channel_credentials.h @@ -51,11 +51,27 @@ extern zend_class_entry *grpc_ce_channel_credentials; /* Wrapper struct for grpc_channel_credentials that can be associated * with a PHP object */ -typedef struct wrapped_grpc_channel_credentials { - zend_object std; - +PHP_GRPC_WRAP_OBJECT_START(wrapped_grpc_channel_credentials) grpc_channel_credentials *wrapped; -} wrapped_grpc_channel_credentials; +PHP_GRPC_WRAP_OBJECT_END(wrapped_grpc_channel_credentials) + +#if PHP_MAJOR_VERSION < 7 + +#define Z_WRAPPED_GRPC_CHANNEL_CREDS_P(zv) \ + (wrapped_grpc_channel_credentials *)zend_object_store_get_object(zv TSRMLS_CC) + +#else + +static inline wrapped_grpc_channel_credentials +*wrapped_grpc_channel_credentials_from_obj(zend_object *obj) { + return (wrapped_grpc_channel_credentials *)( + (char*)(obj) - XtOffsetOf(wrapped_grpc_channel_credentials, std)); +} + +#define Z_WRAPPED_GRPC_CHANNEL_CREDS_P(zv) \ + wrapped_grpc_channel_credentials_from_obj(Z_OBJ_P((zv))) + +#endif 
/* PHP_MAJOR_VERSION */ /* Initializes the ChannelCredentials PHP class */ void grpc_init_channel_credentials(TSRMLS_D); diff --git a/src/php/ext/grpc/package.xml b/src/php/ext/grpc/package.xml deleted file mode 100644 index daf2ee5a53d..00000000000 --- a/src/php/ext/grpc/package.xml +++ /dev/null @@ -1,156 +0,0 @@ - - - grpc - pecl.php.net - A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. - Remote Procedure Calls (RPCs) provide a useful abstraction for building distributed applications and services. The libraries in this repository provide a concrete implementation of the gRPC protocol, layered over HTTP/2. These libraries enable communication between clients and servers using any combination of the supported languages. - - Stanley Cheung - stanleycheung - grpc-packages@google.com - yes - - 2016-01-13 - - - 0.7.0 - 0.7.0 - - - beta - beta - - BSD - -- Breaking change to Credentials class (removed) #3765 -- Replaced by ChannelCredentials and CallCredentials class #3765 -- New plugin based metadata auth API #4394 -- Explicit ChannelCredentials::createInsecure() call - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 5.5.0 - - - 1.4.0 - - - - grpc - - - - - 0.5.0 - 0.5.0 - - - alpha - alpha - - 2015-06-16 - BSD - -First alpha release - - - - - 0.5.1 - 0.5.1 - - - alpha - alpha - - 2015-07-09 - BSD - -Update to wrap gRPC C Core version 0.10.0 - - - - - 0.6.0 - 0.6.0 - - - beta - beta - - 2015-09-24 - BSD - -- support per message compression disable -- expose per-call host override option -- expose connectivity API -- expose channel target and call peer -- add user-agent -- update to wrap gRPC C core library beta version 0.11.0 - - - - - 0.6.1 - 0.6.0 - - - beta - beta - - 2015-10-21 - BSD - -- fixed undefined constant fatal error when run with apache/nginx #2275 - - - - - 0.7.0 - 0.7.0 - - - beta - beta - - 2016-01-13 - BSD - -- Breaking change to Credentials class (removed) #3765 -- Replaced by ChannelCredentials and CallCredentials class #3765 -- New plugin based metadata auth API #4394 -- Explicit ChannelCredentials::createInsecure() call - - - - diff --git a/src/php/ext/grpc/php7_wrapper.h b/src/php/ext/grpc/php7_wrapper.h new file mode 100644 index 00000000000..fd8d35636f0 --- /dev/null +++ b/src/php/ext/grpc/php7_wrapper.h @@ -0,0 +1,218 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +#ifndef PHP7_WRAPPER_GRPC_H +#define PHP7_WRAPPER_GRPC_H + +#if PHP_MAJOR_VERSION < 7 + +#define php_grpc_int int +#define php_grpc_long long +#define php_grpc_ulong ulong +#define php_grpc_zend_object zend_object_value +#define php_grpc_add_property_string(arg, name, context, b) \ + add_property_string(arg, name, context, b) +#define php_grpc_add_property_stringl(res, name, str, len, b) \ + add_property_stringl(res, name, str, len, b) +#define php_grpc_add_next_index_stringl(data, str, len, b) \ + add_next_index_stringl(data, str, len, b) + +#define PHP_GRPC_RETURN_STRING(val, dup) RETURN_STRING(val, dup) +#define PHP_GRPC_MAKE_STD_ZVAL(pzv) MAKE_STD_ZVAL(pzv) +#define PHP_GRPC_DELREF(zv) Z_DELREF_P(zv) + +#define PHP_GRPC_WRAP_OBJECT_START(name) \ + typedef struct name { \ + zend_object std; +#define PHP_GRPC_WRAP_OBJECT_END(name) \ + } name; + +#define PHP_GRPC_FREE_WRAPPED_FUNC_START(class_object) \ + void free_##class_object(void *object TSRMLS_DC) { \ + class_object *p = (class_object *)object; +#define PHP_GRPC_FREE_WRAPPED_FUNC_END() \ + zend_object_std_dtor(&p->std TSRMLS_CC); \ + efree(p); \ + } + +#define PHP_GRPC_ALLOC_CLASS_OBJECT(class_object) \ + class_object *intern; \ + zend_object_value retval; \ + intern = (class_object *)emalloc(sizeof(class_object)); \ + memset(intern, 0, sizeof(class_object)); + +#define PHP_GRPC_FREE_CLASS_OBJECT(class_object, handler) \ + retval.handle = zend_objects_store_put( \ + intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, \ + free_##class_object, NULL TSRMLS_CC); \ + retval.handlers = zend_get_std_object_handlers(); \ + return retval; + +#define PHP_GRPC_HASH_FOREACH_VAL_START(ht, data) \ + zval **tmp_data = NULL; \ + for (zend_hash_internal_pointer_reset(ht); \ + zend_hash_get_current_data(ht, (void**)&tmp_data) == SUCCESS; \ + zend_hash_move_forward(ht)) { \ + data = *tmp_data; + +#define PHP_GRPC_HASH_FOREACH_STR_KEY_VAL_START(ht, key, key_type, data) \ + zval **tmp##key = NULL; \ + ulong index##key; \ + uint len##key; \ + for (zend_hash_internal_pointer_reset(ht); \ + zend_hash_get_current_data(ht, (void**)&tmp##key) == SUCCESS; \ + zend_hash_move_forward(ht)) { \ + key_type = zend_hash_get_current_key_ex(ht, &key, &len##key, &index##key,\ + 0, NULL); \ + data = *tmp##key; + +#define PHP_GRPC_HASH_FOREACH_LONG_KEY_VAL_START(ht, key, key_type, index,\ + data) \ + zval **tmp##key = NULL; \ + uint len##key; \ + for (zend_hash_internal_pointer_reset(ht); \ + zend_hash_get_current_data(ht, (void**)&tmp##key) == SUCCESS; \ + zend_hash_move_forward(ht)) { \ + key_type = zend_hash_get_current_key_ex(ht, &key, &len##key, &index,\ + 0, NULL); \ + data = *tmp##key; + +#define PHP_GRPC_HASH_FOREACH_END() } + +static inline int php_grpc_zend_hash_find(HashTable *ht, char *key, int len, + void **value) { + zval **data = NULL; + if (zend_hash_find(ht, key, len, (void **)&data) == SUCCESS) { + *value = *data; + return SUCCESS; + } else { + *value = NULL; + return FAILURE; + } +} + +#define 
php_grpc_zend_hash_del zend_hash_del + +#define PHP_GRPC_GET_CLASS_ENTRY(object) zend_get_class_entry(object TSRMLS_CC) + +#define PHP_GRPC_INIT_HANDLER(class_object, handler_name) + +#else + +#define php_grpc_int size_t +#define php_grpc_long zend_long +#define php_grpc_ulong zend_ulong +#define php_grpc_zend_object zend_object* +#define php_grpc_add_property_string(arg, name, context, b) \ + add_property_string(arg, name, context) +#define php_grpc_add_property_stringl(res, name, str, len, b) \ + add_property_stringl(res, name, str, len) +#define php_grpc_add_next_index_stringl(data, str, len, b) \ + add_next_index_stringl(data, str, len) + +#define PHP_GRPC_RETURN_STRING(val, dup) RETURN_STRING(val) +#define PHP_GRPC_MAKE_STD_ZVAL(pzv) \ + zval _stack_zval_##pzv; \ + pzv = &(_stack_zval_##pzv) +#define PHP_GRPC_DELREF(zv) + +#define PHP_GRPC_WRAP_OBJECT_START(name) \ + typedef struct name { +#define PHP_GRPC_WRAP_OBJECT_END(name) \ + zend_object std; \ + } name; + +#define WRAPPED_OBJECT_FROM_OBJ(class_object, obj) \ + class_object##_from_obj(obj); + +#define PHP_GRPC_FREE_WRAPPED_FUNC_START(class_object) \ + static void free_##class_object(zend_object *object) { \ + class_object *p = WRAPPED_OBJECT_FROM_OBJ(class_object, object) +#define PHP_GRPC_FREE_WRAPPED_FUNC_END() \ + zend_object_std_dtor(&p->std); \ + } + +#define PHP_GRPC_ALLOC_CLASS_OBJECT(class_object) \ + class_object *intern; \ + intern = ecalloc(1, sizeof(class_object) + \ + zend_object_properties_size(class_type)); + +#define PHP_GRPC_FREE_CLASS_OBJECT(class_object, handler) \ + intern->std.handlers = &handler; \ + return &intern->std; + +#define PHP_GRPC_HASH_FOREACH_VAL_START(ht, data) \ + ZEND_HASH_FOREACH_VAL(ht, data) { + +#define PHP_GRPC_HASH_FOREACH_STR_KEY_VAL_START(ht, key, key_type, data) \ + zend_string *(zs_##key); \ + ZEND_HASH_FOREACH_STR_KEY_VAL(ht, (zs_##key), data) { \ + if ((zs_##key) == NULL) {key = NULL; key_type = HASH_KEY_IS_LONG;} \ + else {key = (zs_##key)->val; key_type = HASH_KEY_IS_STRING;} + +#define PHP_GRPC_HASH_FOREACH_LONG_KEY_VAL_START(ht, key, key_type, index, \ + data) \ + zend_string *(zs_##key); \ + ZEND_HASH_FOREACH_KEY_VAL(ht, index, zs_##key, data) { \ + if ((zs_##key) == NULL) {key = NULL; key_type = HASH_KEY_IS_LONG;} \ + else {key = (zs_##key)->val; key_type = HASH_KEY_IS_STRING;} + +#define PHP_GRPC_HASH_FOREACH_END() } ZEND_HASH_FOREACH_END(); + +static inline int php_grpc_zend_hash_find(HashTable *ht, char *key, int len, + void **value) { + zval *value_tmp = zend_hash_str_find(ht, key, len -1); + if (value_tmp == NULL) { + return FAILURE; + } else { + *value = (void *)value_tmp; + return SUCCESS; + } +} + +static inline int php_grpc_zend_hash_del(HashTable *ht, char *key, int len) { + return zend_hash_str_del(ht, key, len - 1); +} + +#define PHP_GRPC_GET_CLASS_ENTRY(object) Z_OBJ_P(object)->ce + +#define PHP_GRPC_INIT_HANDLER(class_object, handler_name) \ + memcpy(&handler_name, zend_get_std_object_handlers(), \ + sizeof(zend_object_handlers)); \ + handler_name.offset = XtOffsetOf(class_object, std); \ + handler_name.free_obj = free_##class_object + +#endif /* PHP_MAJOR_VERSION */ + +#endif /* PHP7_WRAPPER_GRPC_H */ diff --git a/src/php/ext/grpc/php_grpc.c b/src/php/ext/grpc/php_grpc.c index 449ba3cd47b..5edfa2da7df 100644 --- a/src/php/ext/grpc/php_grpc.c +++ b/src/php/ext/grpc/php_grpc.c @@ -64,15 +64,19 @@ const zend_function_entry grpc_functions[] = { */ zend_module_entry grpc_module_entry = { #if ZEND_MODULE_API_NO >= 20010901 - STANDARD_MODULE_HEADER, + 
STANDARD_MODULE_HEADER, #endif - "grpc", grpc_functions, PHP_MINIT(grpc), - PHP_MSHUTDOWN(grpc), NULL, NULL, - PHP_MINFO(grpc), + "grpc", + grpc_functions, + PHP_MINIT(grpc), + PHP_MSHUTDOWN(grpc), + NULL, + NULL, + PHP_MINFO(grpc), #if ZEND_MODULE_API_NO >= 20010901 - PHP_GRPC_VERSION, + PHP_GRPC_VERSION, #endif - STANDARD_MODULE_PROPERTIES}; + STANDARD_MODULE_PROPERTIES}; /* }}} */ #ifdef COMPILE_DL_GRPC @@ -82,23 +86,24 @@ ZEND_GET_MODULE(grpc) /* {{{ PHP_INI */ /* Remove comments and fill if you need to have entries in php.ini -PHP_INI_BEGIN() - STD_PHP_INI_ENTRY("grpc.global_value", "42", PHP_INI_ALL, OnUpdateLong, -global_value, zend_grpc_globals, grpc_globals) - STD_PHP_INI_ENTRY("grpc.global_string", "foobar", PHP_INI_ALL, -OnUpdateString, global_string, zend_grpc_globals, grpc_globals) -PHP_INI_END() + PHP_INI_BEGIN() + STD_PHP_INI_ENTRY("grpc.global_value", "42", PHP_INI_ALL, OnUpdateLong, + global_value, zend_grpc_globals, grpc_globals) + STD_PHP_INI_ENTRY("grpc.global_string", "foobar", PHP_INI_ALL, + OnUpdateString, global_string, zend_grpc_globals, + grpc_globals) + PHP_INI_END() */ /* }}} */ /* {{{ php_grpc_init_globals */ /* Uncomment this function if you have INI entries -static void php_grpc_init_globals(zend_grpc_globals *grpc_globals) -{ - grpc_globals->global_value = 0; - grpc_globals->global_string = NULL; -} + static void php_grpc_init_globals(zend_grpc_globals *grpc_globals) + { + grpc_globals->global_value = 0; + grpc_globals->global_string = NULL; + } */ /* }}} */ @@ -106,7 +111,7 @@ static void php_grpc_init_globals(zend_grpc_globals *grpc_globals) */ PHP_MINIT_FUNCTION(grpc) { /* If you have INI entries, uncomment these lines - REGISTER_INI_ENTRIES(); + REGISTER_INI_ENTRIES(); */ /* Register call error constants */ grpc_init(); @@ -246,7 +251,7 @@ PHP_MINIT_FUNCTION(grpc) { */ PHP_MSHUTDOWN_FUNCTION(grpc) { /* uncomment this line if you have INI entries - UNREGISTER_INI_ENTRIES(); + UNREGISTER_INI_ENTRIES(); */ // WARNING: This function IS being called by PHP when the extension // is unloaded but the logs were somehow suppressed. @@ -265,7 +270,7 @@ PHP_MINFO_FUNCTION(grpc) { php_info_print_table_end(); /* Remove comments if you have entries in php.ini - DISPLAY_INI_ENTRIES(); + DISPLAY_INI_ENTRIES(); */ } /* }}} */ @@ -274,12 +279,3 @@ PHP_MINFO_FUNCTION(grpc) { function definition, where the functions purpose is also documented. Please follow this convention for the convenience of others editing your code. 
*/ - -/* - * Local variables: - * tab-width: 4 - * c-basic-offset: 4 - * End: - * vim600: noet sw=4 ts=4 fdm=marker - * vim<600: noet sw=4 ts=4 - */ diff --git a/src/php/ext/grpc/php_grpc.h b/src/php/ext/grpc/php_grpc.h index 1d4834c50fa..e57a06545e6 100644 --- a/src/php/ext/grpc/php_grpc.h +++ b/src/php/ext/grpc/php_grpc.h @@ -57,6 +57,8 @@ extern zend_module_entry grpc_module_entry; #include "php.h" +#include "php7_wrapper.h" + #include "grpc/grpc.h" #define RETURN_DESTROY_ZVAL(val) \ @@ -72,8 +74,8 @@ PHP_MSHUTDOWN_FUNCTION(grpc); PHP_MINFO_FUNCTION(grpc); /* - Declare any global variables you may need between the BEGIN - and END macros here: + Declare any global variables you may need between the BEGIN + and END macros here: ZEND_BEGIN_MODULE_GLOBALS(grpc) ZEND_END_MODULE_GLOBALS(grpc) diff --git a/src/php/ext/grpc/server.c b/src/php/ext/grpc/server.c index c13e7cd1f92..fc20c42b169 100644 --- a/src/php/ext/grpc/server.c +++ b/src/php/ext/grpc/server.c @@ -57,37 +57,29 @@ #include "timeval.h" zend_class_entry *grpc_ce_server; +#if PHP_MAJOR_VERSION >= 7 +static zend_object_handlers server_ce_handlers; +#endif /* Frees and destroys an instance of wrapped_grpc_server */ -void free_wrapped_grpc_server(void *object TSRMLS_DC) { - wrapped_grpc_server *server = (wrapped_grpc_server *)object; - if (server->wrapped != NULL) { - grpc_server_shutdown_and_notify(server->wrapped, completion_queue, NULL); - grpc_server_cancel_all_calls(server->wrapped); +PHP_GRPC_FREE_WRAPPED_FUNC_START(wrapped_grpc_server) + if (p->wrapped != NULL) { + grpc_server_shutdown_and_notify(p->wrapped, completion_queue, NULL); + grpc_server_cancel_all_calls(p->wrapped); grpc_completion_queue_pluck(completion_queue, NULL, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); - grpc_server_destroy(server->wrapped); + grpc_server_destroy(p->wrapped); } - efree(server); -} +PHP_GRPC_FREE_WRAPPED_FUNC_END() /* Initializes an instance of wrapped_grpc_call to be associated with an object * of a class specified by class_type */ -zend_object_value create_wrapped_grpc_server(zend_class_entry *class_type - TSRMLS_DC) { - zend_object_value retval; - wrapped_grpc_server *intern; - - intern = (wrapped_grpc_server *)emalloc(sizeof(wrapped_grpc_server)); - memset(intern, 0, sizeof(wrapped_grpc_server)); - +php_grpc_zend_object create_wrapped_grpc_server(zend_class_entry *class_type + TSRMLS_DC) { + PHP_GRPC_ALLOC_CLASS_OBJECT(wrapped_grpc_server); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); - retval.handle = zend_objects_store_put( - intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, - free_wrapped_grpc_server, NULL TSRMLS_CC); - retval.handlers = zend_get_std_object_handlers(); - return retval; + PHP_GRPC_FREE_CLASS_OBJECT(wrapped_grpc_server, server_ce_handlers); } /** @@ -95,10 +87,10 @@ zend_object_value create_wrapped_grpc_server(zend_class_entry *class_type * @param array $args The arguments to pass to the server (optional) */ PHP_METHOD(Server, __construct) { - wrapped_grpc_server *server = - (wrapped_grpc_server *)zend_object_store_get_object(getThis() TSRMLS_CC); + wrapped_grpc_server *server = Z_WRAPPED_GRPC_SERVER_P(getThis()); zval *args_array = NULL; grpc_channel_args args; + /* "|a" == 1 optional array */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|a", &args_array) == FAILURE) { @@ -110,6 +102,8 @@ PHP_METHOD(Server, __construct) { if (args_array == NULL) { server->wrapped = grpc_server_create(NULL, NULL); } else { + //TODO(thinkerou): deal it if 
key of array is long, crash now on php7 + // and update unit test case php_grpc_read_args_array(args_array, &args TSRMLS_CC); server->wrapped = grpc_server_create(&args, NULL); efree(args.args); @@ -126,15 +120,16 @@ PHP_METHOD(Server, __construct) { */ PHP_METHOD(Server, requestCall) { grpc_call_error error_code; - wrapped_grpc_server *server = - (wrapped_grpc_server *)zend_object_store_get_object(getThis() TSRMLS_CC); grpc_call *call; grpc_call_details details; grpc_metadata_array metadata; - zval *result; grpc_event event; - MAKE_STD_ZVAL(result); + + wrapped_grpc_server *server = Z_WRAPPED_GRPC_SERVER_P(getThis()); + zval *result; + PHP_GRPC_MAKE_STD_ZVAL(result); object_init(result); + grpc_call_details_init(&details); grpc_metadata_array_init(&metadata); error_code = @@ -146,20 +141,37 @@ PHP_METHOD(Server, requestCall) { goto cleanup; } event = grpc_completion_queue_pluck(completion_queue, NULL, - gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + gpr_inf_future(GPR_CLOCK_REALTIME), + NULL); if (!event.success) { zend_throw_exception(spl_ce_LogicException, "Failed to request a call for some reason", 1 TSRMLS_CC); goto cleanup; } + php_grpc_add_property_string(result, "method", details.method, true); + php_grpc_add_property_string(result, "host", details.host, true); +#if PHP_MAJOR_VERSION < 7 add_property_zval(result, "call", grpc_php_wrap_call(call, true TSRMLS_CC)); - add_property_string(result, "method", details.method, true); - add_property_string(result, "host", details.host, true); add_property_zval(result, "absolute_deadline", grpc_php_wrap_timeval(details.deadline TSRMLS_CC)); - add_property_zval(result, "metadata", grpc_parse_metadata_array(&metadata TSRMLS_CC)); -cleanup: + add_property_zval(result, "metadata", grpc_parse_metadata_array(&metadata + TSRMLS_CC)); +#else + zval zv_call; + zval zv_timeval; + zval zv_md; + //TODO(thinkerou): why use zval* to unit test error? 
+ zv_call = *grpc_php_wrap_call(call, true); + zv_timeval = *grpc_php_wrap_timeval(details.deadline); + zv_md = *grpc_parse_metadata_array(&metadata); + + add_property_zval(result, "call", &zv_call); + add_property_zval(result, "absolute_deadline", &zv_timeval); + add_property_zval(result, "metadata", &zv_md); +#endif + + cleanup: grpc_call_details_destroy(&details); grpc_metadata_array_destroy(&metadata); RETURN_DESTROY_ZVAL(result); @@ -171,13 +183,13 @@ cleanup: * @return true on success, false on failure */ PHP_METHOD(Server, addHttp2Port) { - wrapped_grpc_server *server = - (wrapped_grpc_server *)zend_object_store_get_object(getThis() TSRMLS_CC); const char *addr; - int addr_len; + php_grpc_int addr_len; + wrapped_grpc_server *server = Z_WRAPPED_GRPC_SERVER_P(getThis()); + /* "s" == 1 string */ - if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &addr, &addr_len) == - FAILURE) { + if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &addr, &addr_len) + == FAILURE) { zend_throw_exception(spl_ce_InvalidArgumentException, "add_http2_port expects a string", 1 TSRMLS_CC); return; @@ -186,11 +198,11 @@ PHP_METHOD(Server, addHttp2Port) { } PHP_METHOD(Server, addSecureHttp2Port) { - wrapped_grpc_server *server = - (wrapped_grpc_server *)zend_object_store_get_object(getThis() TSRMLS_CC); const char *addr; - int addr_len; + php_grpc_int addr_len; zval *creds_obj; + wrapped_grpc_server *server = Z_WRAPPED_GRPC_SERVER_P(getThis()); + /* "sO" == 1 string, 1 object */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO", &addr, &addr_len, &creds_obj, grpc_ce_server_credentials) == @@ -201,8 +213,7 @@ PHP_METHOD(Server, addSecureHttp2Port) { return; } wrapped_grpc_server_credentials *creds = - (wrapped_grpc_server_credentials *)zend_object_store_get_object( - creds_obj TSRMLS_CC); + Z_WRAPPED_GRPC_SERVER_CREDS_P(creds_obj); RETURN_LONG(grpc_server_add_secure_http2_port(server->wrapped, addr, creds->wrapped)); } @@ -212,21 +223,23 @@ PHP_METHOD(Server, addSecureHttp2Port) { * @return Void */ PHP_METHOD(Server, start) { - wrapped_grpc_server *server = - (wrapped_grpc_server *)zend_object_store_get_object(getThis() TSRMLS_CC); + wrapped_grpc_server *server = Z_WRAPPED_GRPC_SERVER_P(getThis()); grpc_server_start(server->wrapped); } static zend_function_entry server_methods[] = { - PHP_ME(Server, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) - PHP_ME(Server, requestCall, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Server, addHttp2Port, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Server, addSecureHttp2Port, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Server, start, NULL, ZEND_ACC_PUBLIC) PHP_FE_END}; + PHP_ME(Server, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) + PHP_ME(Server, requestCall, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Server, addHttp2Port, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Server, addSecureHttp2Port, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Server, start, NULL, ZEND_ACC_PUBLIC) + PHP_FE_END +}; void grpc_init_server(TSRMLS_D) { zend_class_entry ce; INIT_CLASS_ENTRY(ce, "Grpc\\Server", server_methods); ce.create_object = create_wrapped_grpc_server; grpc_ce_server = zend_register_internal_class(&ce TSRMLS_CC); + PHP_GRPC_INIT_HANDLER(wrapped_grpc_server, server_ce_handlers); } diff --git a/src/php/ext/grpc/server.h b/src/php/ext/grpc/server.h index 022257f37c3..a635bc11df9 100755 --- a/src/php/ext/grpc/server.h +++ b/src/php/ext/grpc/server.h @@ -49,11 +49,26 @@ extern zend_class_entry *grpc_ce_server; /* Wrapper struct for grpc_server that can be associated with a PHP object */ -typedef struct wrapped_grpc_server { - 
zend_object std; - +PHP_GRPC_WRAP_OBJECT_START(wrapped_grpc_server) grpc_server *wrapped; -} wrapped_grpc_server; +PHP_GRPC_WRAP_OBJECT_END(wrapped_grpc_server) + +#if PHP_MAJOR_VERSION < 7 + +#define Z_WRAPPED_GRPC_SERVER_P(zv) \ + (wrapped_grpc_server *)zend_object_store_get_object(zv TSRMLS_CC) + +#else + +static inline wrapped_grpc_server +*wrapped_grpc_server_from_obj(zend_object *obj) { + return (wrapped_grpc_server*)((char*)(obj) - + XtOffsetOf(wrapped_grpc_server, std)); +} + +#define Z_WRAPPED_GRPC_SERVER_P(zv) wrapped_grpc_server_from_obj(Z_OBJ_P((zv))) + +#endif /* PHP_MAJOR_VERSION */ /* Initializes the Server class */ void grpc_init_server(TSRMLS_D); diff --git a/src/php/ext/grpc/server_credentials.c b/src/php/ext/grpc/server_credentials.c index 505da10a282..b05896af4af 100644 --- a/src/php/ext/grpc/server_credentials.c +++ b/src/php/ext/grpc/server_credentials.c @@ -50,44 +50,35 @@ #include zend_class_entry *grpc_ce_server_credentials; +#if PHP_MAJOR_VERSION >= 7 +static zend_object_handlers server_credentials_ce_handlers; +#endif /* Frees and destroys an instace of wrapped_grpc_server_credentials */ -void free_wrapped_grpc_server_credentials(void *object TSRMLS_DC) { - wrapped_grpc_server_credentials *creds = - (wrapped_grpc_server_credentials *)object; - if (creds->wrapped != NULL) { - grpc_server_credentials_release(creds->wrapped); +PHP_GRPC_FREE_WRAPPED_FUNC_START(wrapped_grpc_server_credentials) + if (p->wrapped != NULL) { + grpc_server_credentials_release(p->wrapped); } - efree(creds); -} +PHP_GRPC_FREE_WRAPPED_FUNC_END() /* Initializes an instace of wrapped_grpc_server_credentials to be associated * with an object of a class specified by class_type */ -zend_object_value create_wrapped_grpc_server_credentials( +php_grpc_zend_object create_wrapped_grpc_server_credentials( zend_class_entry *class_type TSRMLS_DC) { - zend_object_value retval; - wrapped_grpc_server_credentials *intern; - - intern = (wrapped_grpc_server_credentials *)emalloc( - sizeof(wrapped_grpc_server_credentials)); - memset(intern, 0, sizeof(wrapped_grpc_server_credentials)); - + PHP_GRPC_ALLOC_CLASS_OBJECT(wrapped_grpc_server_credentials); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); - retval.handle = zend_objects_store_put( - intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, - free_wrapped_grpc_server_credentials, NULL TSRMLS_CC); - retval.handlers = zend_get_std_object_handlers(); - return retval; + PHP_GRPC_FREE_CLASS_OBJECT(wrapped_grpc_server_credentials, + server_credentials_ce_handlers); } -zval *grpc_php_wrap_server_credentials(grpc_server_credentials *wrapped TSRMLS_DC) { +zval *grpc_php_wrap_server_credentials(grpc_server_credentials + *wrapped TSRMLS_DC) { zval *server_credentials_object; - MAKE_STD_ZVAL(server_credentials_object); + PHP_GRPC_MAKE_STD_ZVAL(server_credentials_object); object_init_ex(server_credentials_object, grpc_ce_server_credentials); wrapped_grpc_server_credentials *server_credentials = - (wrapped_grpc_server_credentials *)zend_object_store_get_object( - server_credentials_object TSRMLS_CC); + Z_WRAPPED_GRPC_SERVER_CREDS_P(server_credentials_object); server_credentials->wrapped = wrapped; return server_credentials_object; } @@ -103,7 +94,9 @@ PHP_METHOD(ServerCredentials, createSsl) { char *pem_root_certs = 0; grpc_ssl_pem_key_cert_pair pem_key_cert_pair; - int root_certs_length = 0, private_key_length, cert_chain_length; + php_grpc_int root_certs_length = 0; + php_grpc_int private_key_length; + 
php_grpc_int cert_chain_length; /* "s!ss" == 1 nullable string, 2 strings */ /* TODO: support multiple key cert pairs. */ @@ -120,17 +113,23 @@ PHP_METHOD(ServerCredentials, createSsl) { grpc_server_credentials *creds = grpc_ssl_server_credentials_create_ex( pem_root_certs, &pem_key_cert_pair, 1, GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, NULL); - zval *creds_object = grpc_php_wrap_server_credentials(creds TSRMLS_CC); + zval *creds_object; + PHP_GRPC_MAKE_STD_ZVAL(creds_object); + creds_object = grpc_php_wrap_server_credentials(creds TSRMLS_CC); RETURN_DESTROY_ZVAL(creds_object); } static zend_function_entry server_credentials_methods[] = { - PHP_ME(ServerCredentials, createSsl, NULL, - ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_FE_END}; + PHP_ME(ServerCredentials, createSsl, NULL, + ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) + PHP_FE_END + }; void grpc_init_server_credentials(TSRMLS_D) { zend_class_entry ce; INIT_CLASS_ENTRY(ce, "Grpc\\ServerCredentials", server_credentials_methods); ce.create_object = create_wrapped_grpc_server_credentials; grpc_ce_server_credentials = zend_register_internal_class(&ce TSRMLS_CC); + PHP_GRPC_INIT_HANDLER(wrapped_grpc_server_credentials, + server_credentials_ce_handlers); } diff --git a/src/php/ext/grpc/server_credentials.h b/src/php/ext/grpc/server_credentials.h index 7101d650008..6781a614b17 100755 --- a/src/php/ext/grpc/server_credentials.h +++ b/src/php/ext/grpc/server_credentials.h @@ -51,11 +51,27 @@ extern zend_class_entry *grpc_ce_server_credentials; /* Wrapper struct for grpc_server_credentials that can be associated with a PHP * object */ -typedef struct wrapped_grpc_server_credentials { - zend_object std; - +PHP_GRPC_WRAP_OBJECT_START(wrapped_grpc_server_credentials) grpc_server_credentials *wrapped; -} wrapped_grpc_server_credentials; +PHP_GRPC_WRAP_OBJECT_END(wrapped_grpc_server_credentials) + +#if PHP_MAJOR_VERSION < 7 + +#define Z_WRAPPED_GRPC_SERVER_CREDS_P(zv) \ + (wrapped_grpc_server_credentials *)zend_object_store_get_object(zv TSRMLS_CC) + +#else + +static inline wrapped_grpc_server_credentials +*wrapped_grpc_server_credentials_from_obj(zend_object *obj) { + return (wrapped_grpc_server_credentials*)( + (char*)(obj) - XtOffsetOf(wrapped_grpc_server_credentials, std)); +} + +#define Z_WRAPPED_GRPC_SERVER_CREDS_P(zv) \ + wrapped_grpc_server_credentials_from_obj(Z_OBJ_P((zv))) + +#endif /* PHP_MAJOR_VERSION */ /* Initializes the Server_Credentials PHP class */ void grpc_init_server_credentials(TSRMLS_D); diff --git a/src/php/ext/grpc/timeval.c b/src/php/ext/grpc/timeval.c index 5e242162a8c..e145d967726 100644 --- a/src/php/ext/grpc/timeval.c +++ b/src/php/ext/grpc/timeval.c @@ -51,34 +51,29 @@ #include zend_class_entry *grpc_ce_timeval; +#if PHP_MAJOR_VERSION >= 7 +static zend_object_handlers timeval_ce_handlers; +#endif /* Frees and destroys an instance of wrapped_grpc_call */ -void free_wrapped_grpc_timeval(void *object TSRMLS_DC) { efree(object); } +PHP_GRPC_FREE_WRAPPED_FUNC_START(wrapped_grpc_timeval) +PHP_GRPC_FREE_WRAPPED_FUNC_END() /* Initializes an instance of wrapped_grpc_timeval to be associated with an * object of a class specified by class_type */ -zend_object_value create_wrapped_grpc_timeval(zend_class_entry *class_type - TSRMLS_DC) { - zend_object_value retval; - wrapped_grpc_timeval *intern; - intern = (wrapped_grpc_timeval *)emalloc(sizeof(wrapped_grpc_timeval)); - memset(intern, 0, sizeof(wrapped_grpc_timeval)); +php_grpc_zend_object create_wrapped_grpc_timeval(zend_class_entry *class_type + TSRMLS_DC) { + 
PHP_GRPC_ALLOC_CLASS_OBJECT(wrapped_grpc_timeval); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); - retval.handle = zend_objects_store_put( - intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, - free_wrapped_grpc_timeval, NULL TSRMLS_CC); - retval.handlers = zend_get_std_object_handlers(); - return retval; + PHP_GRPC_FREE_CLASS_OBJECT(wrapped_grpc_timeval, timeval_ce_handlers); } zval *grpc_php_wrap_timeval(gpr_timespec wrapped TSRMLS_DC) { zval *timeval_object; - MAKE_STD_ZVAL(timeval_object); + PHP_GRPC_MAKE_STD_ZVAL(timeval_object); object_init_ex(timeval_object, grpc_ce_timeval); - wrapped_grpc_timeval *timeval = - (wrapped_grpc_timeval *)zend_object_store_get_object( - timeval_object TSRMLS_CC); + wrapped_grpc_timeval *timeval = Z_WRAPPED_GRPC_TIMEVAL_P(timeval_object); memcpy(&timeval->wrapped, &wrapped, sizeof(gpr_timespec)); return timeval_object; } @@ -88,9 +83,9 @@ zval *grpc_php_wrap_timeval(gpr_timespec wrapped TSRMLS_DC) { * @param long $usec The number of microseconds in the interval */ PHP_METHOD(Timeval, __construct) { - wrapped_grpc_timeval *timeval = - (wrapped_grpc_timeval *)zend_object_store_get_object(getThis() TSRMLS_CC); - long microseconds; + wrapped_grpc_timeval *timeval = Z_WRAPPED_GRPC_TIMEVAL_P(getThis()); + php_grpc_long microseconds; + /* "l" == 1 long */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", µseconds) == FAILURE) { @@ -110,6 +105,7 @@ PHP_METHOD(Timeval, __construct) { */ PHP_METHOD(Timeval, add) { zval *other_obj; + /* "O" == 1 Object */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &other_obj, grpc_ce_timeval) == FAILURE) { @@ -117,12 +113,13 @@ PHP_METHOD(Timeval, add) { "add expects a Timeval", 1 TSRMLS_CC); return; } - wrapped_grpc_timeval *self = - (wrapped_grpc_timeval *)zend_object_store_get_object(getThis() TSRMLS_CC); - wrapped_grpc_timeval *other = - (wrapped_grpc_timeval *)zend_object_store_get_object(other_obj TSRMLS_CC); - zval *sum = - grpc_php_wrap_timeval(gpr_time_add(self->wrapped, other->wrapped) TSRMLS_CC); + wrapped_grpc_timeval *self = Z_WRAPPED_GRPC_TIMEVAL_P(getThis()); + wrapped_grpc_timeval *other = Z_WRAPPED_GRPC_TIMEVAL_P(other_obj); + zval *sum; + PHP_GRPC_MAKE_STD_ZVAL(sum); + sum = + grpc_php_wrap_timeval(gpr_time_add(self->wrapped, other->wrapped) + TSRMLS_CC); RETURN_DESTROY_ZVAL(sum); } @@ -134,6 +131,7 @@ PHP_METHOD(Timeval, add) { */ PHP_METHOD(Timeval, subtract) { zval *other_obj; + /* "O" == 1 Object */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &other_obj, grpc_ce_timeval) == FAILURE) { @@ -141,12 +139,13 @@ PHP_METHOD(Timeval, subtract) { "subtract expects a Timeval", 1 TSRMLS_CC); return; } - wrapped_grpc_timeval *self = - (wrapped_grpc_timeval *)zend_object_store_get_object(getThis() TSRMLS_CC); - wrapped_grpc_timeval *other = - (wrapped_grpc_timeval *)zend_object_store_get_object(other_obj TSRMLS_CC); - zval *diff = - grpc_php_wrap_timeval(gpr_time_sub(self->wrapped, other->wrapped) TSRMLS_CC); + wrapped_grpc_timeval *self = Z_WRAPPED_GRPC_TIMEVAL_P(getThis()); + wrapped_grpc_timeval *other = Z_WRAPPED_GRPC_TIMEVAL_P(other_obj); + zval *diff; + PHP_GRPC_MAKE_STD_ZVAL(diff); + diff = + grpc_php_wrap_timeval(gpr_time_sub(self->wrapped, other->wrapped) + TSRMLS_CC); RETURN_DESTROY_ZVAL(diff); } @@ -158,7 +157,9 @@ PHP_METHOD(Timeval, subtract) { * @return long */ PHP_METHOD(Timeval, compare) { - zval *a_obj, *b_obj; + zval *a_obj; + zval *b_obj; + /* "OO" == 2 Objects */ if 
(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "OO", &a_obj, grpc_ce_timeval, &b_obj, @@ -167,10 +168,8 @@ PHP_METHOD(Timeval, compare) { "compare expects two Timevals", 1 TSRMLS_CC); return; } - wrapped_grpc_timeval *a = - (wrapped_grpc_timeval *)zend_object_store_get_object(a_obj TSRMLS_CC); - wrapped_grpc_timeval *b = - (wrapped_grpc_timeval *)zend_object_store_get_object(b_obj TSRMLS_CC); + wrapped_grpc_timeval *a = Z_WRAPPED_GRPC_TIMEVAL_P(a_obj); + wrapped_grpc_timeval *b = Z_WRAPPED_GRPC_TIMEVAL_P(b_obj); long result = gpr_time_cmp(a->wrapped, b->wrapped); RETURN_LONG(result); } @@ -183,7 +182,10 @@ PHP_METHOD(Timeval, compare) { * @return bool True if $a and $b are within $threshold, False otherwise */ PHP_METHOD(Timeval, similar) { - zval *a_obj, *b_obj, *thresh_obj; + zval *a_obj; + zval *b_obj; + zval *thresh_obj; + /* "OOO" == 3 Objects */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "OOO", &a_obj, grpc_ce_timeval, &b_obj, grpc_ce_timeval, @@ -192,13 +194,9 @@ PHP_METHOD(Timeval, similar) { "compare expects three Timevals", 1 TSRMLS_CC); return; } - wrapped_grpc_timeval *a = - (wrapped_grpc_timeval *)zend_object_store_get_object(a_obj TSRMLS_CC); - wrapped_grpc_timeval *b = - (wrapped_grpc_timeval *)zend_object_store_get_object(b_obj TSRMLS_CC); - wrapped_grpc_timeval *thresh = - (wrapped_grpc_timeval *)zend_object_store_get_object( - thresh_obj TSRMLS_CC); + wrapped_grpc_timeval *a = Z_WRAPPED_GRPC_TIMEVAL_P(a_obj); + wrapped_grpc_timeval *b = Z_WRAPPED_GRPC_TIMEVAL_P(b_obj); + wrapped_grpc_timeval *thresh = Z_WRAPPED_GRPC_TIMEVAL_P(thresh_obj); int result = gpr_time_similar(a->wrapped, b->wrapped, thresh->wrapped); RETURN_BOOL(result); } @@ -208,7 +206,9 @@ PHP_METHOD(Timeval, similar) { * @return Timeval The current time */ PHP_METHOD(Timeval, now) { - zval *now = grpc_php_wrap_timeval(gpr_now(GPR_CLOCK_REALTIME) TSRMLS_CC); + zval *now; + PHP_GRPC_MAKE_STD_ZVAL(now); + now = grpc_php_wrap_timeval(gpr_now(GPR_CLOCK_REALTIME) TSRMLS_CC); RETURN_DESTROY_ZVAL(now); } @@ -217,7 +217,9 @@ PHP_METHOD(Timeval, now) { * @return Timeval Zero length time interval */ PHP_METHOD(Timeval, zero) { - zval *grpc_php_timeval_zero = + zval *grpc_php_timeval_zero; + PHP_GRPC_MAKE_STD_ZVAL(grpc_php_timeval_zero); + grpc_php_timeval_zero = grpc_php_wrap_timeval(gpr_time_0(GPR_CLOCK_REALTIME) TSRMLS_CC); RETURN_ZVAL(grpc_php_timeval_zero, false, /* Copy original before returning? 
*/ @@ -229,7 +231,9 @@ PHP_METHOD(Timeval, zero) { * @return Timeval Infinite future time value */ PHP_METHOD(Timeval, infFuture) { - zval *grpc_php_timeval_inf_future = + zval *grpc_php_timeval_inf_future; + PHP_GRPC_MAKE_STD_ZVAL(grpc_php_timeval_inf_future); + grpc_php_timeval_inf_future = grpc_php_wrap_timeval(gpr_inf_future(GPR_CLOCK_REALTIME) TSRMLS_CC); RETURN_DESTROY_ZVAL(grpc_php_timeval_inf_future); } @@ -239,7 +243,9 @@ PHP_METHOD(Timeval, infFuture) { * @return Timeval Infinite past time value */ PHP_METHOD(Timeval, infPast) { - zval *grpc_php_timeval_inf_past = + zval *grpc_php_timeval_inf_past; + PHP_GRPC_MAKE_STD_ZVAL(grpc_php_timeval_inf_past); + grpc_php_timeval_inf_past = grpc_php_wrap_timeval(gpr_inf_past(GPR_CLOCK_REALTIME) TSRMLS_CC); RETURN_DESTROY_ZVAL(grpc_php_timeval_inf_past); } @@ -249,28 +255,30 @@ PHP_METHOD(Timeval, infPast) { * @return void */ PHP_METHOD(Timeval, sleepUntil) { - wrapped_grpc_timeval *this = - (wrapped_grpc_timeval *)zend_object_store_get_object(getThis() TSRMLS_CC); + wrapped_grpc_timeval *this = Z_WRAPPED_GRPC_TIMEVAL_P(getThis()); gpr_sleep_until(this->wrapped); } static zend_function_entry timeval_methods[] = { - PHP_ME(Timeval, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) - PHP_ME(Timeval, add, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Timeval, compare, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) - PHP_ME(Timeval, infFuture, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) - PHP_ME(Timeval, infPast, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) - PHP_ME(Timeval, now, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) - PHP_ME(Timeval, similar, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) - PHP_ME(Timeval, sleepUntil, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Timeval, subtract, NULL, ZEND_ACC_PUBLIC) - PHP_ME(Timeval, zero, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_FE_END}; + PHP_ME(Timeval, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) + PHP_ME(Timeval, add, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Timeval, compare, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) + PHP_ME(Timeval, infFuture, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) + PHP_ME(Timeval, infPast, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) + PHP_ME(Timeval, now, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) + PHP_ME(Timeval, similar, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) + PHP_ME(Timeval, sleepUntil, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Timeval, subtract, NULL, ZEND_ACC_PUBLIC) + PHP_ME(Timeval, zero, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) + PHP_FE_END +}; void grpc_init_timeval(TSRMLS_D) { zend_class_entry ce; INIT_CLASS_ENTRY(ce, "Grpc\\Timeval", timeval_methods); ce.create_object = create_wrapped_grpc_timeval; grpc_ce_timeval = zend_register_internal_class(&ce TSRMLS_CC); + PHP_GRPC_INIT_HANDLER(wrapped_grpc_timeval, timeval_ce_handlers); } void grpc_shutdown_timeval(TSRMLS_D) {} diff --git a/src/php/ext/grpc/timeval.h b/src/php/ext/grpc/timeval.h index 7456eb6d58c..63a1d702f37 100755 --- a/src/php/ext/grpc/timeval.h +++ b/src/php/ext/grpc/timeval.h @@ -50,11 +50,27 @@ extern zend_class_entry *grpc_ce_timeval; /* Wrapper struct for timeval that can be associated with a PHP object */ -typedef struct wrapped_grpc_timeval { - zend_object std; - +PHP_GRPC_WRAP_OBJECT_START(wrapped_grpc_timeval) gpr_timespec wrapped; -} wrapped_grpc_timeval; +PHP_GRPC_WRAP_OBJECT_END(wrapped_grpc_timeval) + +#if PHP_MAJOR_VERSION < 7 + +#define Z_WRAPPED_GRPC_TIMEVAL_P(zv) \ + (wrapped_grpc_timeval *)zend_object_store_get_object(zv TSRMLS_CC) + +#else + +static inline wrapped_grpc_timeval +*wrapped_grpc_timeval_from_obj(zend_object *obj) { + return 
(wrapped_grpc_timeval*)((char*)(obj) - + XtOffsetOf(wrapped_grpc_timeval, std)); +} + +#define Z_WRAPPED_GRPC_TIMEVAL_P(zv) \ + wrapped_grpc_timeval_from_obj(Z_OBJ_P((zv))) + +#endif /* PHP_MAJOR_VERSION */ /* Initialize the Timeval PHP class */ void grpc_init_timeval(TSRMLS_D); diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php old mode 100755 new mode 100644 index df3fe85d447..2fec1bd9cc4 --- a/src/php/lib/Grpc/BaseStub.php +++ b/src/php/lib/Grpc/BaseStub.php @@ -84,8 +84,8 @@ class BaseStub } if ($channel) { if (!is_a($channel, 'Channel')) { - throw new \Exception("The channel argument is not a". - "Channel object"); + throw new \Exception('The channel argument is not a'. + 'Channel object'); } $this->channel = $channel; } else { diff --git a/src/php/lib/Grpc/BidiStreamingCall.php b/src/php/lib/Grpc/BidiStreamingCall.php index 95e51c5088d..c2fdb94b867 100644 --- a/src/php/lib/Grpc/BidiStreamingCall.php +++ b/src/php/lib/Grpc/BidiStreamingCall.php @@ -113,6 +113,7 @@ class BidiStreamingCall extends AbstractCall ]); $this->trailing_metadata = $status_event->status->metadata; + return $status_event->status; } } diff --git a/src/php/lib/Grpc/ClientStreamingCall.php b/src/php/lib/Grpc/ClientStreamingCall.php index 315a406735f..4050f7ed06d 100644 --- a/src/php/lib/Grpc/ClientStreamingCall.php +++ b/src/php/lib/Grpc/ClientStreamingCall.php @@ -88,6 +88,7 @@ class ClientStreamingCall extends AbstractCall $status = $event->status; $this->trailing_metadata = $status->metadata; + return [$this->deserializeResponse($event->message), $status]; } } diff --git a/src/php/lib/Grpc/ServerStreamingCall.php b/src/php/lib/Grpc/ServerStreamingCall.php index 53599fe4ae8..ba89d9f9721 100644 --- a/src/php/lib/Grpc/ServerStreamingCall.php +++ b/src/php/lib/Grpc/ServerStreamingCall.php @@ -92,6 +92,7 @@ class ServerStreamingCall extends AbstractCall ]); $this->trailing_metadata = $status_event->status->metadata; + return $status_event->status; } } diff --git a/src/php/lib/Grpc/UnaryCall.php b/src/php/lib/Grpc/UnaryCall.php index b114b771b84..a71b05dc93d 100644 --- a/src/php/lib/Grpc/UnaryCall.php +++ b/src/php/lib/Grpc/UnaryCall.php @@ -77,6 +77,7 @@ class UnaryCall extends AbstractCall $status = $event->status; $this->trailing_metadata = $status->metadata; + return [$this->deserializeResponse($event->message), $status]; } } diff --git a/src/php/tests/interop/interop_client.php b/src/php/tests/interop/interop_client.php index 43b3199d92c..bf40549a04f 100755 --- a/src/php/tests/interop/interop_client.php +++ b/src/php/tests/interop/interop_client.php @@ -450,7 +450,7 @@ function statusCodeAndMessage($stub) { $echo_status = new grpc\testing\EchoStatus(); $echo_status->setCode(2); - $echo_status->setMessage("test status message"); + $echo_status->setMessage('test status message'); $request = new grpc\testing\SimpleRequest(); $request->setResponseStatus($echo_status); @@ -460,7 +460,7 @@ function statusCodeAndMessage($stub) hardAssert($status->code === 2, 'Received unexpected status code'); - hardAssert($status->details === "test status message", + hardAssert($status->details === 'test status message', 'Received unexpected status details'); $streaming_call = $stub->FullDuplexCall(); @@ -473,7 +473,7 @@ function statusCodeAndMessage($stub) $status = $streaming_call->getStatus(); hardAssert($status->code === 2, 'Received unexpected status code'); - hardAssert($status->details === "test status message", + hardAssert($status->details === 'test status message', 'Received unexpected status 
details'); } @@ -570,9 +570,9 @@ function _makeStub($args) } if ($test_case == 'unimplemented_method') { - $stub = new grpc\testing\UnimplementedServiceClient($server_address, $opts); + $stub = new grpc\testing\UnimplementedServiceClient($server_address, $opts); } else { - $stub = new grpc\testing\TestServiceClient($server_address, $opts); + $stub = new grpc\testing\TestServiceClient($server_address, $opts); } return $stub; diff --git a/src/php/tests/unit_tests/CallCredentialsTest.php b/src/php/tests/unit_tests/CallCredentialsTest.php index 5fec06cd133..1994c8afe56 100644 --- a/src/php/tests/unit_tests/CallCredentialsTest.php +++ b/src/php/tests/unit_tests/CallCredentialsTest.php @@ -148,7 +148,8 @@ class CallCredentialsTest extends PHPUnit_Framework_TestCase $this->call_credentials, $call_credentials2 ); - $this->assertSame('Grpc\CallCredentials', get_class($call_credentials3)); + $this->assertSame('Grpc\CallCredentials', + get_class($call_credentials3)); } /** diff --git a/src/php/tests/unit_tests/CallTest.php b/src/php/tests/unit_tests/CallTest.php old mode 100755 new mode 100644 index fa026f09350..d736f515460 --- a/src/php/tests/unit_tests/CallTest.php +++ b/src/php/tests/unit_tests/CallTest.php @@ -50,6 +50,18 @@ class CallTest extends PHPUnit_Framework_TestCase Grpc\Timeval::infFuture()); } + public function tearDown() + { + unset($this->call); + unset($this->channel); + } + + public function testConstructor() + { + $this->assertSame('Grpc\Call', get_class($this->call)); + $this->assertObjectHasAttribute('channel', $this->call); + } + public function testAddEmptyMetadata() { $batch = [ @@ -81,7 +93,8 @@ class CallTest extends PHPUnit_Framework_TestCase { $batch = [ Grpc\OP_SEND_INITIAL_METADATA => ['key1' => ['value1'], - 'key2' => ['value2', 'value3'], ], + 'key2' => ['value2', + 'value3'], ], ]; $result = $this->call->startBatch($batch); $this->assertTrue($result->send_metadata); @@ -118,4 +131,38 @@ class CallTest extends PHPUnit_Framework_TestCase ]; $result = $this->call->startBatch($batch); } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidConstuctor() + { + $this->call = new Grpc\Call(); + $this->assertNull($this->call); + } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidConstuctor2() + { + $this->call = new Grpc\Call('hi', 'hi', 'hi'); + $this->assertNull($this->call); + } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidSetCredentials() + { + $this->call->setCredentials('hi'); + } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidSetCredentials2() + { + $this->call->setCredentials([]); + } } diff --git a/src/php/tests/unit_tests/ChannelCredentialsTest.php b/src/php/tests/unit_tests/ChannelCredentialsTest.php index 56c1d8f006d..e822929ccda 100644 --- a/src/php/tests/unit_tests/ChannelCredentialsTest.php +++ b/src/php/tests/unit_tests/ChannelCredentialsTest.php @@ -42,10 +42,23 @@ class ChanellCredentialsTest extends PHPUnit_Framework_TestCase { } - public function testCreateDefault() + public function testCreateSslWith3Null() { - $channel_credentials = Grpc\ChannelCredentials::createDefault(); - $this->assertSame('Grpc\ChannelCredentials', get_class($channel_credentials)); + $channel_credentials = Grpc\ChannelCredentials::createSsl(null, null, + null); + $this->assertNotNull($channel_credentials); + } + + public function testCreateSslWith3NullString() + { + $channel_credentials = 
Grpc\ChannelCredentials::createSsl('', '', ''); + $this->assertNotNull($channel_credentials); + } + + public function testCreateInsecure() + { + $channel_credentials = Grpc\ChannelCredentials::createInsecure(); + $this->assertNull($channel_credentials); } /** @@ -64,10 +77,4 @@ class ChanellCredentialsTest extends PHPUnit_Framework_TestCase $channel_credentials = Grpc\ChannelCredentials::createComposite( 'something', 'something'); } - - public function testCreateInsecure() - { - $channel_credentials = Grpc\ChannelCredentials::createInsecure(); - $this->assertNull($channel_credentials); - } } diff --git a/src/php/tests/unit_tests/ChannelTest.php b/src/php/tests/unit_tests/ChannelTest.php index a1f9053c398..4b35b1a28cc 100644 --- a/src/php/tests/unit_tests/ChannelTest.php +++ b/src/php/tests/unit_tests/ChannelTest.php @@ -40,6 +40,7 @@ class ChannelTest extends PHPUnit_Framework_TestCase public function tearDown() { + unset($this->channel); } public function testInsecureCredentials() @@ -53,6 +54,82 @@ class ChannelTest extends PHPUnit_Framework_TestCase $this->assertSame('Grpc\Channel', get_class($this->channel)); } + public function testGetConnectivityState() + { + $this->channel = new Grpc\Channel('localhost:0', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $state = $this->channel->getConnectivityState(); + $this->assertEquals(0, $state); + } + + public function testGetConnectivityStateWithInt() + { + $this->channel = new Grpc\Channel('localhost:0', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $state = $this->channel->getConnectivityState(123); + $this->assertEquals(0, $state); + } + + public function testGetConnectivityStateWithString() + { + $this->channel = new Grpc\Channel('localhost:0', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $state = $this->channel->getConnectivityState('hello'); + $this->assertEquals(0, $state); + } + + public function testGetConnectivityStateWithBool() + { + $this->channel = new Grpc\Channel('localhost:0', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $state = $this->channel->getConnectivityState(true); + $this->assertEquals(0, $state); + } + + public function testGetTarget() + { + $this->channel = new Grpc\Channel('localhost:8888', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $target = $this->channel->getTarget(); + $this->assertTrue(is_string($target)); + } + + public function testWatchConnectivityState() + { + $this->channel = new Grpc\Channel('localhost:0', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $time = new Grpc\Timeval(1000); + $state = $this->channel->watchConnectivityState(123, $time); + $this->assertTrue($state); + unset($time); + } + + public function testClose() + { + $this->channel = new Grpc\Channel('localhost:0', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $this->assertNotNull($this->channel); + $this->channel->close(); + } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidConstructorWithNull() + { + $this->channel = new Grpc\Channel(); + $this->assertNull($this->channel); + } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidConstructorWith() + { + $this->channel = new Grpc\Channel('localhost', 'invalid'); + $this->assertNull($this->channel); + } + /** * @expectedException InvalidArgumentException */ @@ -78,4 +155,34 @@ class ChannelTest extends PHPUnit_Framework_TestCase ] ); } + + /** + * @expectedException 
InvalidArgumentException + */ + public function testInvalidGetConnectivityStateWithArray() + { + $this->channel = new Grpc\Channel('localhost:0', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $this->channel->getConnectivityState([]); + } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidWatchConnectivityState() + { + $this->channel = new Grpc\Channel('localhost:0', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $this->channel->watchConnectivityState([]); + } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidWatchConnectivityState2() + { + $this->channel = new Grpc\Channel('localhost:0', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + $this->channel->watchConnectivityState(1, 'hi'); + } } diff --git a/src/php/tests/unit_tests/EndToEndTest.php b/src/php/tests/unit_tests/EndToEndTest.php old mode 100755 new mode 100644 index 2b09f9d112a..09364580c01 --- a/src/php/tests/unit_tests/EndToEndTest.php +++ b/src/php/tests/unit_tests/EndToEndTest.php @@ -521,7 +521,8 @@ class EndToEndTest extends PHPUnit_Framework_TestCase public function testGetConnectivityState() { - $this->assertTrue($this->channel->getConnectivityState() == Grpc\CHANNEL_IDLE); + $this->assertTrue($this->channel->getConnectivityState() == + Grpc\CHANNEL_IDLE); } public function testWatchConnectivityStateFailed() diff --git a/src/php/tests/unit_tests/SecureEndToEndTest.php b/src/php/tests/unit_tests/SecureEndToEndTest.php old mode 100755 new mode 100644 diff --git a/src/php/tests/unit_tests/ServerTest.php b/src/php/tests/unit_tests/ServerTest.php index 76aaa069704..f2346ab113e 100644 --- a/src/php/tests/unit_tests/ServerTest.php +++ b/src/php/tests/unit_tests/ServerTest.php @@ -36,10 +36,70 @@ class ServerTest extends PHPUnit_Framework_TestCase { public function setUp() { + $this->server = null; } public function tearDown() { + unset($this->server); + } + + public function testConstructorWithNull() + { + $this->server = new Grpc\Server(); + $this->assertNotNull($this->server); + } + + public function testConstructorWithNullArray() + { + $this->server = new Grpc\Server([]); + $this->assertNotNull($this->server); + } + + public function testConstructorWithArray() + { + // key of array must be string + $this->server = new Grpc\Server(['ip' => '127.0.0.1', + 'port' => '8080', ]); + $this->assertNotNull($this->server); + } + + public function testRequestCall() + { + $this->server = new Grpc\Server(); + $port = $this->server->addHttp2Port('0.0.0.0:8888'); + $this->server->start(); + $channel = new Grpc\Channel('localhost:8888', + ['credentials' => Grpc\ChannelCredentials::createInsecure()]); + + $deadline = Grpc\Timeval::infFuture(); + $call = new Grpc\Call($channel, 'dummy_method', $deadline); + + $event = $call->startBatch([Grpc\OP_SEND_INITIAL_METADATA => [], + Grpc\OP_SEND_CLOSE_FROM_CLIENT => true, + ]); + + $c = $this->server->requestCall(); + $this->assertObjectHasAttribute('call', $c); + $this->assertObjectHasAttribute('method', $c); + $this->assertSame('dummy_method', $c->method); + $this->assertObjectHasAttribute('host', $c); + $this->assertTrue(is_string($c->host)); + $this->assertObjectHasAttribute('absolute_deadline', $c); + $this->assertObjectHasAttribute('metadata', $c); + + unset($call); + unset($channel); + } + + private function createSslObj() + { + $server_credentials = Grpc\ServerCredentials::createSsl( + null, + file_get_contents(dirname(__FILE__).'/../data/server1.key'), + 
file_get_contents(dirname(__FILE__).'/../data/server1.pem')); + + return $server_credentials; } /** @@ -47,7 +107,8 @@ class ServerTest extends PHPUnit_Framework_TestCase */ public function testInvalidConstructor() { - $server = new Grpc\Server('invalid_host'); + $this->server = new Grpc\Server('invalid_host'); + $this->assertNull($this->server); } /** @@ -56,7 +117,7 @@ class ServerTest extends PHPUnit_Framework_TestCase public function testInvalidAddHttp2Port() { $this->server = new Grpc\Server([]); - $this->port = $this->server->addHttp2Port(['0.0.0.0:0']); + $port = $this->server->addHttp2Port(['0.0.0.0:0']); } /** @@ -65,6 +126,24 @@ class ServerTest extends PHPUnit_Framework_TestCase public function testInvalidAddSecureHttp2Port() { $this->server = new Grpc\Server([]); - $this->port = $this->server->addSecureHttp2Port(['0.0.0.0:0']); + $port = $this->server->addSecureHttp2Port(['0.0.0.0:0']); + } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidAddSecureHttp2Port2() + { + $this->server = new Grpc\Server(); + $port = $this->server->addSecureHttp2Port('0.0.0.0:0'); + } + + /** + * @expectedException InvalidArgumentException + */ + public function testInvalidAddSecureHttp2Port3() + { + $this->server = new Grpc\Server(); + $port = $this->server->addSecureHttp2Port('0.0.0.0:0', 'invalid'); } } diff --git a/src/php/tests/unit_tests/TimevalTest.php b/src/php/tests/unit_tests/TimevalTest.php old mode 100755 new mode 100644 index a3dbce079f4..2d19f64c799 --- a/src/php/tests/unit_tests/TimevalTest.php +++ b/src/php/tests/unit_tests/TimevalTest.php @@ -33,6 +33,57 @@ */ class TimevalTest extends PHPUnit_Framework_TestCase { + public function setUp() + { + } + + public function tearDown() + { + unset($this->time); + } + + public function testConstructorWithInt() + { + $this->time = new Grpc\Timeval(1234); + $this->assertNotNull($this->time); + $this->assertSame('Grpc\Timeval', get_class($this->time)); + } + + public function testConstructorWithNegative() + { + $this->time = new Grpc\Timeval(-123); + $this->assertNotNull($this->time); + $this->assertSame('Grpc\Timeval', get_class($this->time)); + } + + public function testConstructorWithZero() + { + $this->time = new Grpc\Timeval(0); + $this->assertNotNull($this->time); + $this->assertSame('Grpc\Timeval', get_class($this->time)); + } + + public function testConstructorWithOct() + { + $this->time = new Grpc\Timeval(0123); + $this->assertNotNull($this->time); + $this->assertSame('Grpc\Timeval', get_class($this->time)); + } + + public function testConstructorWithHex() + { + $this->time = new Grpc\Timeval(0x1A); + $this->assertNotNull($this->time); + $this->assertSame('Grpc\Timeval', get_class($this->time)); + } + + public function testConstructorWithFloat() + { + $this->time = new Grpc\Timeval(123.456); + $this->assertNotNull($this->time); + $this->assertSame('Grpc\Timeval', get_class($this->time)); + } + public function testCompareSame() { $zero = Grpc\Timeval::zero(); @@ -70,6 +121,7 @@ class TimevalTest extends PHPUnit_Framework_TestCase public function testNowAndAdd() { $now = Grpc\Timeval::now(); + $this->assertNotNull($now); $delta = new Grpc\Timeval(1000); $deadline = $now->add($delta); $this->assertGreaterThan(0, Grpc\Timeval::compare($deadline, $now)); @@ -154,5 +206,6 @@ class TimevalTest extends PHPUnit_Framework_TestCase public function testSimilarInvalidParam() { $a = Grpc\Timeval::similar(1000, 1100, 1200); + $this->assertNull($delta); } } diff --git a/src/proto/grpc/testing/control.proto 
b/src/proto/grpc/testing/control.proto index 20496a8116b..ece69108158 100644 --- a/src/proto/grpc/testing/control.proto +++ b/src/proto/grpc/testing/control.proto @@ -229,4 +229,7 @@ message ScenarioResult { repeated int32 server_cores = 5; // An after-the-fact computed summary ScenarioResultSummary summary = 6; + // Information on success or failure of each worker + repeated bool client_success = 7; + repeated bool server_success = 8; } diff --git a/src/python/grpcio/.gitignore b/src/python/grpcio/.gitignore index 7cd8fab2735..3309795948c 100644 --- a/src/python/grpcio/.gitignore +++ b/src/python/grpcio/.gitignore @@ -14,3 +14,4 @@ doc/ _grpcio_metadata.py htmlcov/ grpc/_cython/_credentials +poison.c diff --git a/src/python/grpcio/_unixccompiler_patch.py b/src/python/grpcio/_unixccompiler_patch.py index 9a697989b30..894c3ef395a 100644 --- a/src/python/grpcio/_unixccompiler_patch.py +++ b/src/python/grpcio/_unixccompiler_patch.py @@ -34,88 +34,44 @@ from distutils import errors from distutils import unixccompiler import os import os.path +import shlex import shutil import sys import tempfile +def _unix_commandfile_spawn(self, command): + """Wrapper around distutils.util.spawn that attempts to use command files. -def _unix_piecemeal_link( - self, target_desc, objects, output_filename, output_dir=None, - libraries=None, library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, - build_temp=None, target_lang=None): - """`link` externalized method taken almost verbatim from UnixCCompiler. + Meant to replace the CCompiler method `spawn` on UnixCCompiler and its + derivatives (e.g. the MinGW32 compiler). - Modifies the link command for unix-like compilers by using a command file so - that long command line argument strings don't break the command shell's - ARG_MAX character limit. + Some commands like `gcc` (and friends like `clang`) support command files to + work around shell command length limits. """ - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = self._fix_lib_args( - libraries, library_dirs, runtime_library_dirs) - # filter out standard library paths, which are not explicitely needed - # for linking - library_dirs = [dir for dir in library_dirs - if not dir in ('/lib', '/lib64', '/usr/lib', '/usr/lib64')] - runtime_library_dirs = [dir for dir in runtime_library_dirs - if not dir in ('/lib', '/lib64', '/usr/lib', '/usr/lib64')] - lib_opts = ccompiler.gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if (not (isinstance(output_dir, str) or isinstance(output_dir, bytes)) - and output_dir is not None): - raise TypeError("'output_dir' must be a string or None") - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - ld_args = (objects + self.objects + - lib_opts + ['-o', output_filename]) - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - try: - if target_desc == ccompiler.CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - if target_lang == "c++" and self.compiler_cxx: - # skip over environment variable settings if /usr/bin/env - # is used to set up the linker's environment. - # This is needed on OSX. 
Note: this assumes that the - # normal and C++ compiler have the same environment - # settings. - i = 0 - if os.path.basename(linker[0]) == "env": - i = 1 - while '=' in linker[i]: - i = i + 1 - - linker[i] = self.compiler_cxx[i] - - if sys.platform == 'darwin': - import _osx_support - linker = _osx_support.compiler_fixup(linker, ld_args) - - temporary_directory = tempfile.mkdtemp() - command_filename = os.path.abspath( - os.path.join(temporary_directory, 'command')) - with open(command_filename, 'w') as command_file: - escaped_ld_args = [arg.replace('\\', '\\\\') for arg in ld_args] - command_file.write(' '.join(escaped_ld_args)) - self.spawn(linker + ['@{}'.format(command_filename)]) - except errors.DistutilsExecError: - raise ccompiler.LinkError + # Sometimes distutils embeds the executables as full strings including some + # hard-coded flags rather than as lists. + command = list(shlex.split(command[0])) + list(command[1:]) + command_base = os.path.basename(command[0].strip()) + if command_base == 'ccache': + command_base = command[:2] + command_args = command[2:] + elif command_base.startswith('ccache') or command_base in ['gcc', 'clang', 'clang++', 'g++']: + command_base = command[:1] + command_args = command[1:] else: - log.debug("skipping %s (up-to-date)", output_filename) + return ccompiler.CCompiler.spawn(self, command) + temporary_directory = tempfile.mkdtemp() + command_filename = os.path.abspath(os.path.join(temporary_directory, 'command')) + with open(command_filename, 'w') as command_file: + escaped_args = [arg.replace('\\', '\\\\') for arg in command_args] + command_file.write(' '.join(escaped_args)) + modified_command = command_base + ['@{}'.format(command_filename)] + result = ccompiler.CCompiler.spawn(self, modified_command) + shutil.rmtree(temporary_directory) + return result + -# TODO(atash) try replacing this monkeypatch of the compiler harness' link -# operation with a monkeypatch of the distutils `spawn` that applies -# command-argument-file hacks where it can. Might be cleaner. def monkeypatch_unix_compiler(): """Monkeypatching is dumb, but it's either that or we become maintainers of something much, much bigger.""" - unixccompiler.UnixCCompiler.link = _unix_piecemeal_link + unixccompiler.UnixCCompiler.spawn = _unix_commandfile_spawn diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py index 3f91954d5f0..d36ac233050 100644 --- a/src/python/grpcio/commands.py +++ b/src/python/grpcio/commands.py @@ -58,10 +58,31 @@ CONF_PY_ADDENDUM = """ extensions.append('sphinx.ext.napoleon') napoleon_google_docstring = True napoleon_numpy_docstring = True +napoleon_include_special_with_doc = True html_theme = 'sphinx_rtd_theme' """ +API_GLOSSARY = """ + +Glossary +================ + +.. glossary:: + + metadatum + A key-value pair included in the HTTP header. It is a + 2-tuple where the first entry is the key and the + second is the value, i.e. (key, value). The metadata key is an ASCII str, + and must be a valid HTTP header name. The metadata value can be + either a valid HTTP ASCII str, or bytes. If bytes are provided, + the key must end with '-bin', i.e. + ``('binary-metadata-bin', b'\\x00\\xFF')`` + + metadata + A sequence of metadatum. 
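The replacement spawn wrapper introduced above works around shell ARG_MAX limits by writing the compiler's argument list to a file and invoking the driver with '@file'. A minimal standalone sketch of that technique, assuming a gcc/clang-style driver that understands '@file'; the helper name below is illustrative and not part of the patch:

    import os
    import shutil
    import subprocess
    import tempfile

    def spawn_with_command_file(driver, args):
        """Run `driver`, passing `args` via an @-file instead of the command line."""
        temporary_directory = tempfile.mkdtemp()
        command_filename = os.path.join(temporary_directory, 'command')
        with open(command_filename, 'w') as command_file:
            # Escape backslashes because the driver re-tokenizes the file contents.
            command_file.write(' '.join(arg.replace('\\', '\\\\') for arg in args))
        try:
            subprocess.check_call([driver, '@{}'.format(command_filename)])
        finally:
            shutil.rmtree(temporary_directory)
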
+""" + class CommandError(Exception): """Simple exception class for GRPC custom commands.""" @@ -131,6 +152,9 @@ class SphinxDocumentation(setuptools.Command): conf_filepath = os.path.join('doc', 'src', 'conf.py') with open(conf_filepath, 'a') as conf_file: conf_file.write(CONF_PY_ADDENDUM) + glossary_filepath = os.path.join('doc', 'src', 'grpc.rst') + with open(glossary_filepath, 'a') as glossary_filepath: + glossary_filepath.write(API_GLOSSARY) sphinx.main(['', os.path.join('doc', 'src'), os.path.join('doc', 'build')]) @@ -160,6 +184,71 @@ class BuildPy(build_py.build_py): build_py.build_py.run(self) +def _poison_extensions(extensions, message): + """Includes a file that will always fail to compile in all extensions.""" + poison_filename = os.path.join(PYTHON_STEM, 'poison.c') + with open(poison_filename, 'w') as poison: + poison.write('#error {}'.format(message)) + for extension in extensions: + extension.sources = [poison_filename] + +def check_and_update_cythonization(extensions): + """Replace .pyx files with their generated counterparts and return whether or + not cythonization still needs to occur.""" + for extension in extensions: + generated_pyx_sources = [] + other_sources = [] + for source in extension.sources: + base, file_ext = os.path.splitext(source) + if file_ext == '.pyx': + generated_pyx_source = next( + (base + gen_ext for gen_ext in ('.c', '.cpp',) + if os.path.isfile(base + gen_ext)), None) + if generated_pyx_source: + generated_pyx_sources.append(generated_pyx_source) + else: + sys.stderr.write('Cython-generated files are missing...\n') + return False + else: + other_sources.append(source) + extension.sources = generated_pyx_sources + other_sources + sys.stderr.write('Found cython-generated files...\n') + return True + +def try_cythonize(extensions, linetracing=False, mandatory=True): + """Attempt to cythonize the extensions. + + Args: + extensions: A list of `distutils.extension.Extension`. + linetracing: A bool indicating whether or not to enable linetracing. + mandatory: Whether or not having Cython-generated files is mandatory. If it + is, extensions will be poisoned when they can't be fully generated. + """ + try: + # Break import style to ensure we have access to Cython post-setup_requires + import Cython.Build + except ImportError: + if mandatory: + sys.stderr.write( + "This package needs to generate C files with Cython but it cannot. 
" + "Poisoning extension sources to disallow extension commands...") + _poison_extensions( + extensions, + "Extensions have been poisoned due to missing Cython-generated code.") + return extensions + cython_compiler_directives = {} + if linetracing: + additional_define_macros = [('CYTHON_TRACE_NOGIL', '1')] + cython_compiler_directives['linetrace'] = True + return Cython.Build.cythonize( + extensions, + include_path=[ + include_dir for extension in extensions for include_dir in extension.include_dirs + ], + compiler_directives=cython_compiler_directives + ) + + class BuildExt(build_ext.build_ext): """Custom build_ext command to enable compiler-specific flags.""" @@ -177,6 +266,8 @@ class BuildExt(build_ext.build_ext): if compiler in BuildExt.LINK_OPTIONS: for extension in self.extensions: extension.extra_link_args += list(BuildExt.LINK_OPTIONS[compiler]) + if not check_and_update_cythonization(self.extensions): + self.extensions = try_cythonize(self.extensions) try: build_ext.build_ext.build_extensions(self) except Exception as error: diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py index afd7db1f75d..513839df7d9 100644 --- a/src/python/grpcio/grpc/__init__.py +++ b/src/python/grpcio/grpc/__init__.py @@ -312,7 +312,7 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)): This method blocks until the value is available. Returns: - The initial metadata as a sequence of pairs of bytes. + The initial :term:`metadata`. """ raise NotImplementedError() @@ -323,7 +323,7 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)): This method blocks until the value is available. Returns: - The trailing metadata as a sequence of pairs of bytes. + The trailing :term:`metadata`. """ raise NotImplementedError() @@ -345,7 +345,7 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)): This method blocks until the value is available. Returns: - The bytes of the details of the RPC. + The details string of the RPC. """ raise NotImplementedError() @@ -394,8 +394,7 @@ class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)): """Inform the gRPC runtime of the metadata to construct a CallCredentials. Args: - metadata: An iterable of 2-sequences (e.g. tuples) of metadata key/value - pairs. + metadata: The :term:`metadata` used to construct the CallCredentials. error: An Exception to indicate error or None to indicate success. """ raise NotImplementedError() @@ -442,7 +441,7 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request: The request value for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -463,7 +462,7 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request: The request value for the RPC. timeout: An optional durating of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -484,7 +483,7 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request: The request value for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. 
- metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -507,7 +506,7 @@ class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request: The request value for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: An optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -530,7 +529,7 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request_iterator: An iterator that yields request values for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -553,7 +552,7 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request_iterator: An iterator that yields request values for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -575,7 +574,7 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request_iterator: An iterator that yields request values for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -599,7 +598,7 @@ class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request_iterator: An iterator that yields request values for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -707,7 +706,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): """Accesses the metadata from the invocation-side of the RPC. Returns: - The invocation metadata object as a sequence of pairs of bytes. + The invocation :term:`metadata`. """ raise NotImplementedError() @@ -728,8 +727,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): service-side initial metadata to transmit. Args: - initial_metadata: The initial metadata of the RPC as a sequence of pairs - of bytes. + initial_metadata: The initial :term:`metadata`. """ raise NotImplementedError() @@ -741,8 +739,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): service-side trailing metadata to transmit. Args: - trailing_metadata: The trailing metadata of the RPC as a sequence of pairs - of bytes. + trailing_metadata: The trailing :term:`metadata`. """ raise NotImplementedError() @@ -767,7 +764,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): details to transmit. 
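The BuildExt change earlier in commands.py prefers Cython-generated .c/.cpp files when they sit next to each .pyx source and only falls back to cythonize (or a poisoned build) when they are missing. A rough sketch of that source-resolution step, assuming a distutils Extension-like object; this illustrates the flow rather than reproducing the grpcio helpers:

    import os

    def resolve_pyx_sources(extension):
        """Swap each .pyx source for a pre-generated .c/.cpp file when one exists.

        Returns True when every .pyx source had a generated counterpart, i.e.
        Cython itself is not needed for this build.
        """
        resolved = []
        fully_generated = True
        for source in extension.sources:
            base, ext = os.path.splitext(source)
            if ext != '.pyx':
                resolved.append(source)
                continue
            generated = next(
                (base + gen_ext for gen_ext in ('.c', '.cpp')
                 if os.path.isfile(base + gen_ext)), None)
            if generated is None:
                fully_generated = False  # fall back to cythonize (or poison)
                resolved.append(source)
            else:
                resolved.append(generated)
        extension.sources = resolved
        return fully_generated
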
Args: - details: The details bytes of the RPC to be transmitted to + details: The details string of the RPC to be transmitted to the invocation side of the RPC. """ raise NotImplementedError() @@ -815,7 +812,7 @@ class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)): """Describes an RPC that has just arrived for service. Attributes: method: The method name of the RPC. - invocation_metadata: The metadata from the invocation side of the RPC. + invocation_metadata: The :term:`metadata` from the invocation side of the RPC. """ @@ -1219,9 +1216,9 @@ def server(thread_pool, handlers=None): to service RPCs. handlers: An optional sequence of GenericRpcHandlers to be used to service RPCs after the returned Server is started. These handlers need not be the - only handlers the returned Server will use to service RPCs; other - handlers may later be added to the returned Server by calling its - add_generic_rpc_handlers method any time before it is started. + only handlers the server will use to service RPCs; other handlers may + later be added by calling add_generic_rpc_handlers any time before the + returned Server is started. Returns: A Server with which RPCs can be serviced. diff --git a/src/python/grpcio/grpc/_adapter/.gitignore b/src/python/grpcio/grpc/_adapter/.gitignore deleted file mode 100644 index a6f96cd6dbb..00000000000 --- a/src/python/grpcio/grpc/_adapter/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.a -*.so -*.dll -*.pyc -*.pyd diff --git a/src/python/grpcio/grpc/_adapter/_common.py b/src/python/grpcio/grpc/_adapter/_common.py deleted file mode 100644 index 492849f4cbb..00000000000 --- a/src/python/grpcio/grpc/_adapter/_common.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State used by both invocation-side and service-side code.""" - -import enum - - -@enum.unique -class HighWrite(enum.Enum): - """The possible categories of high-level write state.""" - - OPEN = 'OPEN' - CLOSED = 'CLOSED' - - -class WriteState(object): - """A description of the state of writing to an RPC. 
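The :term:`metadata` wording adopted throughout these docstrings refers to a sequence of key/value 2-tuples, where keys ending in '-bin' carry bytes values. A small illustrative snippet; the channel target and method path below are placeholders, not part of the patch:

    import grpc

    # Invocation metadata: ASCII keys with str values, or '-bin' keys with bytes.
    metadata = (
        ('initial-metadata-1', 'one value'),
        ('binary-metadata-bin', b'\x00\xff'),
    )

    channel = grpc.insecure_channel('localhost:50051')  # placeholder target
    multi_callable = channel.unary_unary('/example.Service/Method')  # placeholder method
    # The sequence is passed per call via the `metadata` keyword argument:
    # response = multi_callable(request, metadata=metadata)
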
- - Attributes: - low: A side-specific value describing the low-level state of writing. - high: A HighWrite value describing the high-level state of writing. - pending: A list of bytestrings for the RPC waiting to be written to the - other side of the RPC. - """ - - def __init__(self, low, high, pending): - self.low = low - self.high = high - self.pending = pending - - -class CommonRPCState(object): - """A description of an RPC's state. - - Attributes: - write: A WriteState describing the state of writing to the RPC. - sequence_number: The lowest-unused sequence number for use in generating - tickets locally describing the progress of the RPC. - deserializer: The behavior to be used to deserialize payload bytestreams - taken off the wire. - serializer: The behavior to be used to serialize payloads to be sent on the - wire. - """ - - def __init__(self, write, sequence_number, deserializer, serializer): - self.write = write - self.sequence_number = sequence_number - self.deserializer = deserializer - self.serializer = serializer diff --git a/src/python/grpcio/grpc/_adapter/_intermediary_low.py b/src/python/grpcio/grpc/_adapter/_intermediary_low.py deleted file mode 100644 index 9698ffeabf1..00000000000 --- a/src/python/grpcio/grpc/_adapter/_intermediary_low.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Temporary old _low-like layer. - -Eases refactoring burden while we overhaul the Python framework. - -Plan: - The layers used to look like: - ... # outside _adapter - fore.py + rear.py # visible outside _adapter - _low - _c - The layers currently look like: - ... # outside _adapter - fore.py + rear.py # visible outside _adapter - _low_intermediary # adapter for new '_low' to old '_low' - _low # new '_low' - _c # new '_c' - We will later remove _low_intermediary after refactoring of fore.py and - rear.py according to the ticket system refactoring and get: - ... 
# outside _adapter, refactored - fore.py + rear.py # visible outside _adapter, refactored - _low # new '_low' - _c # new '_c' -""" - -import collections -import enum - -from grpc._adapter import _low -from grpc._adapter import _types - -_IGNORE_ME_TAG = object() -Code = _types.StatusCode -WriteFlags = _types.OpWriteFlags - - -class Status(collections.namedtuple('Status', ['code', 'details'])): - """Describes an RPC's overall status.""" - - -class ServiceAcceptance( - collections.namedtuple( - 'ServiceAcceptance', ['call', 'method', 'host', 'deadline'])): - """Describes an RPC on the service side at the start of service.""" - - -class Event( - collections.namedtuple( - 'Event', - ['kind', 'tag', 'write_accepted', 'complete_accepted', - 'service_acceptance', 'bytes', 'status', 'metadata'])): - """Describes an event emitted from a completion queue.""" - - @enum.unique - class Kind(enum.Enum): - """Describes the kind of an event.""" - - STOP = object() - WRITE_ACCEPTED = object() - COMPLETE_ACCEPTED = object() - SERVICE_ACCEPTED = object() - READ_ACCEPTED = object() - METADATA_ACCEPTED = object() - FINISH = object() - - -class _TagAdapter(collections.namedtuple('_TagAdapter', [ - 'user_tag', - 'kind' - ])): - pass - - -class Call(object): - """Adapter from old _low.Call interface to new _low.Call.""" - - def __init__(self, channel, completion_queue, method, host, deadline): - self._internal = channel._internal.create_call( - completion_queue._internal, method, host, deadline) - self._metadata = [] - - @staticmethod - def _from_internal(internal): - call = Call.__new__(Call) - call._internal = internal - call._metadata = [] - return call - - def invoke(self, completion_queue, metadata_tag, finish_tag): - err = self._internal.start_batch([ - _types.OpArgs.send_initial_metadata(self._metadata) - ], _IGNORE_ME_TAG) - if err != _types.CallError.OK: - return err - err = self._internal.start_batch([ - _types.OpArgs.recv_initial_metadata() - ], _TagAdapter(metadata_tag, Event.Kind.METADATA_ACCEPTED)) - if err != _types.CallError.OK: - return err - err = self._internal.start_batch([ - _types.OpArgs.recv_status_on_client() - ], _TagAdapter(finish_tag, Event.Kind.FINISH)) - return err - - def write(self, message, tag, flags): - return self._internal.start_batch([ - _types.OpArgs.send_message(message, flags) - ], _TagAdapter(tag, Event.Kind.WRITE_ACCEPTED)) - - def complete(self, tag): - return self._internal.start_batch([ - _types.OpArgs.send_close_from_client() - ], _TagAdapter(tag, Event.Kind.COMPLETE_ACCEPTED)) - - def accept(self, completion_queue, tag): - return self._internal.start_batch([ - _types.OpArgs.recv_close_on_server() - ], _TagAdapter(tag, Event.Kind.FINISH)) - - def add_metadata(self, key, value): - self._metadata.append((key, value)) - - def premetadata(self): - result = self._internal.start_batch([ - _types.OpArgs.send_initial_metadata(self._metadata) - ], _IGNORE_ME_TAG) - self._metadata = [] - return result - - def read(self, tag): - return self._internal.start_batch([ - _types.OpArgs.recv_message() - ], _TagAdapter(tag, Event.Kind.READ_ACCEPTED)) - - def status(self, status, tag): - return self._internal.start_batch([ - _types.OpArgs.send_status_from_server( - self._metadata, status.code, status.details) - ], _TagAdapter(tag, Event.Kind.COMPLETE_ACCEPTED)) - - def cancel(self): - return self._internal.cancel() - - def peer(self): - return self._internal.peer() - - def set_credentials(self, creds): - return self._internal.set_credentials(creds) - - -class Channel(object): - 
"""Adapter from old _low.Channel interface to new _low.Channel.""" - - def __init__(self, hostport, channel_credentials, server_host_override=None): - args = [] - if server_host_override: - args.append((_types.GrpcChannelArgumentKeys.SSL_TARGET_NAME_OVERRIDE.value, server_host_override)) - self._internal = _low.Channel(hostport, args, channel_credentials) - - -class CompletionQueue(object): - """Adapter from old _low.CompletionQueue interface to new _low.CompletionQueue.""" - - def __init__(self): - self._internal = _low.CompletionQueue() - - def get(self, deadline=None): - if deadline is None: - ev = self._internal.next(float('+inf')) - else: - ev = self._internal.next(deadline) - if ev is None: - return None - elif ev.tag is _IGNORE_ME_TAG: - return self.get(deadline) - elif ev.type == _types.EventType.QUEUE_SHUTDOWN: - kind = Event.Kind.STOP - tag = None - write_accepted = None - complete_accepted = None - service_acceptance = None - message_bytes = None - status = None - metadata = None - elif ev.type == _types.EventType.OP_COMPLETE: - kind = ev.tag.kind - tag = ev.tag.user_tag - write_accepted = ev.success if kind == Event.Kind.WRITE_ACCEPTED else None - complete_accepted = ev.success if kind == Event.Kind.COMPLETE_ACCEPTED else None - service_acceptance = ServiceAcceptance(Call._from_internal(ev.call), ev.call_details.method, ev.call_details.host, ev.call_details.deadline) if kind == Event.Kind.SERVICE_ACCEPTED else None - message_bytes = ev.results[0].message if kind == Event.Kind.READ_ACCEPTED else None - status = Status(ev.results[0].status.code, ev.results[0].status.details) if (kind == Event.Kind.FINISH and ev.results[0].status) else Status(_types.StatusCode.CANCELLED if ev.results[0].cancelled else _types.StatusCode.OK, '') if len(ev.results) > 0 and ev.results[0].cancelled is not None else None - metadata = ev.results[0].initial_metadata if (kind in [Event.Kind.SERVICE_ACCEPTED, Event.Kind.METADATA_ACCEPTED]) else (ev.results[0].trailing_metadata if kind == Event.Kind.FINISH else None) - else: - raise RuntimeError('unknown event') - result_ev = Event(kind=kind, tag=tag, write_accepted=write_accepted, complete_accepted=complete_accepted, service_acceptance=service_acceptance, bytes=message_bytes, status=status, metadata=metadata) - return result_ev - - def stop(self): - self._internal.shutdown() - - -class Server(object): - """Adapter from old _low.Server interface to new _low.Server.""" - - def __init__(self, completion_queue): - self._internal = _low.Server(completion_queue._internal, []) - self._internal_cq = completion_queue._internal - - def add_http2_addr(self, addr): - return self._internal.add_http2_port(addr) - - def add_secure_http2_addr(self, addr, server_credentials): - if server_credentials is None: - return self._internal.add_http2_port(addr, None) - else: - return self._internal.add_http2_port(addr, server_credentials) - - def start(self): - return self._internal.start() - - def service(self, tag): - return self._internal.request_call(self._internal_cq, _TagAdapter(tag, Event.Kind.SERVICE_ACCEPTED)) - - def cancel_all_calls(self): - self._internal.cancel_all_calls() - - def stop(self): - return self._internal.shutdown(_TagAdapter(None, Event.Kind.STOP)) - diff --git a/src/python/grpcio/grpc/_adapter/_low.py b/src/python/grpcio/grpc/_adapter/_low.py deleted file mode 100644 index 48410167a07..00000000000 --- a/src/python/grpcio/grpc/_adapter/_low.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import threading - -from grpc import _grpcio_metadata -from grpc import _plugin_wrapping -from grpc._cython import cygrpc -from grpc._adapter import _types - -_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__) - -ChannelCredentials = cygrpc.ChannelCredentials -CallCredentials = cygrpc.CallCredentials -ServerCredentials = cygrpc.ServerCredentials - -channel_credentials_composite = cygrpc.channel_credentials_composite -call_credentials_composite = cygrpc.call_credentials_composite - -def server_credentials_ssl(root_credentials, pair_sequence, force_client_auth): - return cygrpc.server_credentials_ssl( - root_credentials, - [cygrpc.SslPemKeyCertPair(key, pem) for key, pem in pair_sequence], - force_client_auth) - -def channel_credentials_ssl( - root_certificates, private_key, certificate_chain): - pair = None - if private_key is not None or certificate_chain is not None: - pair = cygrpc.SslPemKeyCertPair(private_key, certificate_chain) - return cygrpc.channel_credentials_ssl(root_certificates, pair) - - -call_credentials_metadata_plugin = ( - _plugin_wrapping.call_credentials_metadata_plugin) - - -class CompletionQueue(_types.CompletionQueue): - - def __init__(self): - self.completion_queue = cygrpc.CompletionQueue() - - def next(self, deadline=float('+inf')): - raw_event = self.completion_queue.poll(cygrpc.Timespec(deadline)) - if raw_event.type == cygrpc.CompletionType.queue_timeout: - return None - event_type = raw_event.type - event_tag = raw_event.tag - event_call = Call(raw_event.operation_call) - if raw_event.request_call_details: - event_call_details = _types.CallDetails( - raw_event.request_call_details.method, - raw_event.request_call_details.host, - float(raw_event.request_call_details.deadline)) - else: - event_call_details = None - event_success = raw_event.success - event_results = [] - if raw_event.is_new_request: - event_results.append(_types.OpResult( - _types.OpType.RECV_INITIAL_METADATA, raw_event.request_metadata, - None, None, None, None)) - else: - if raw_event.batch_operations: - for 
operation in raw_event.batch_operations: - result_type = operation.type - result_initial_metadata = operation.received_metadata_or_none - result_trailing_metadata = operation.received_metadata_or_none - result_message = operation.received_message_or_none - if result_message is not None: - result_message = result_message.bytes() - result_cancelled = operation.received_cancelled_or_none - if operation.has_status: - result_status = _types.Status( - operation.received_status_code_or_none, - operation.received_status_details_or_none) - else: - result_status = None - event_results.append( - _types.OpResult(result_type, result_initial_metadata, - result_trailing_metadata, result_message, - result_status, result_cancelled)) - return _types.Event(event_type, event_tag, event_call, event_call_details, - event_results, event_success) - - def shutdown(self): - self.completion_queue.shutdown() - - -class Call(_types.Call): - - def __init__(self, call): - self.call = call - - def start_batch(self, ops, tag): - translated_ops = [] - for op in ops: - if op.type == _types.OpType.SEND_INITIAL_METADATA: - translated_op = cygrpc.operation_send_initial_metadata( - cygrpc.Metadata( - cygrpc.Metadatum(key, value) - for key, value in op.initial_metadata), - op.flags) - elif op.type == _types.OpType.SEND_MESSAGE: - translated_op = cygrpc.operation_send_message(op.message, op.flags) - elif op.type == _types.OpType.SEND_CLOSE_FROM_CLIENT: - translated_op = cygrpc.operation_send_close_from_client(op.flags) - elif op.type == _types.OpType.SEND_STATUS_FROM_SERVER: - translated_op = cygrpc.operation_send_status_from_server( - cygrpc.Metadata( - cygrpc.Metadatum(key, value) - for key, value in op.trailing_metadata), - op.status.code, - op.status.details, - op.flags) - elif op.type == _types.OpType.RECV_INITIAL_METADATA: - translated_op = cygrpc.operation_receive_initial_metadata( - op.flags) - elif op.type == _types.OpType.RECV_MESSAGE: - translated_op = cygrpc.operation_receive_message(op.flags) - elif op.type == _types.OpType.RECV_STATUS_ON_CLIENT: - translated_op = cygrpc.operation_receive_status_on_client( - op.flags) - elif op.type == _types.OpType.RECV_CLOSE_ON_SERVER: - translated_op = cygrpc.operation_receive_close_on_server(op.flags) - else: - raise ValueError('unexpected operation type {}'.format(op.type)) - translated_ops.append(translated_op) - return self.call.start_batch(cygrpc.Operations(translated_ops), tag) - - def cancel(self, code=None, details=None): - if code is None and details is None: - return self.call.cancel() - else: - return self.call.cancel(code, details) - - def peer(self): - return self.call.peer() - - def set_credentials(self, creds): - return self.call.set_credentials(creds) - - -class Channel(_types.Channel): - - def __init__(self, target, args, creds=None): - args = list(args) + [ - (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)] - args = cygrpc.ChannelArgs( - cygrpc.ChannelArg(key, value) for key, value in args) - if creds is None: - self.channel = cygrpc.Channel(target, args) - else: - self.channel = cygrpc.Channel(target, args, creds) - - def create_call(self, completion_queue, method, host, deadline=None): - internal_call = self.channel.create_call( - None, 0, completion_queue.completion_queue, method, host, - cygrpc.Timespec(deadline)) - return Call(internal_call) - - def check_connectivity_state(self, try_to_connect): - return self.channel.check_connectivity_state(try_to_connect) - - def watch_connectivity_state(self, last_observed_state, deadline, - 
completion_queue, tag): - self.channel.watch_connectivity_state( - last_observed_state, cygrpc.Timespec(deadline), - completion_queue.completion_queue, tag) - - def target(self): - return self.channel.target() - - -_NO_TAG = object() - -class Server(_types.Server): - - def __init__(self, completion_queue, args): - args = cygrpc.ChannelArgs( - cygrpc.ChannelArg(key, value) for key, value in args) - self.server = cygrpc.Server(args) - self.server.register_completion_queue(completion_queue.completion_queue) - self.server_queue = completion_queue - - def add_http2_port(self, addr, creds=None): - if creds is None: - return self.server.add_http2_port(addr) - else: - return self.server.add_http2_port(addr, creds) - - def start(self): - return self.server.start() - - def shutdown(self, tag=None): - return self.server.shutdown(self.server_queue.completion_queue, tag) - - def request_call(self, completion_queue, tag): - return self.server.request_call(completion_queue.completion_queue, - self.server_queue.completion_queue, tag) - - def cancel_all_calls(self): - return self.server.cancel_all_calls() diff --git a/src/python/grpcio/grpc/_adapter/_types.py b/src/python/grpcio/grpc/_adapter/_types.py deleted file mode 100644 index b7cc6fbbb59..00000000000 --- a/src/python/grpcio/grpc/_adapter/_types.py +++ /dev/null @@ -1,446 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import abc -import collections -import enum - -import six - -from grpc._cython import cygrpc - - -class GrpcChannelArgumentKeys(enum.Enum): - """Mirrors keys used in grpc_channel_args for GRPC-specific arguments.""" - SSL_TARGET_NAME_OVERRIDE = 'grpc.ssl_target_name_override' - - -@enum.unique -class CallError(enum.IntEnum): - """Mirrors grpc_call_error in the C core.""" - OK = cygrpc.CallError.ok - ERROR = cygrpc.CallError.error - ERROR_NOT_ON_SERVER = cygrpc.CallError.not_on_server - ERROR_NOT_ON_CLIENT = cygrpc.CallError.not_on_client - ERROR_ALREADY_ACCEPTED = cygrpc.CallError.already_accepted - ERROR_ALREADY_INVOKED = cygrpc.CallError.already_invoked - ERROR_NOT_INVOKED = cygrpc.CallError.not_invoked - ERROR_ALREADY_FINISHED = cygrpc.CallError.already_finished - ERROR_TOO_MANY_OPERATIONS = cygrpc.CallError.too_many_operations - ERROR_INVALID_FLAGS = cygrpc.CallError.invalid_flags - ERROR_INVALID_METADATA = cygrpc.CallError.invalid_metadata - - -@enum.unique -class StatusCode(enum.IntEnum): - """Mirrors grpc_status_code in the C core.""" - OK = cygrpc.StatusCode.ok - CANCELLED = cygrpc.StatusCode.cancelled - UNKNOWN = cygrpc.StatusCode.unknown - INVALID_ARGUMENT = cygrpc.StatusCode.invalid_argument - DEADLINE_EXCEEDED = cygrpc.StatusCode.deadline_exceeded - NOT_FOUND = cygrpc.StatusCode.not_found - ALREADY_EXISTS = cygrpc.StatusCode.already_exists - PERMISSION_DENIED = cygrpc.StatusCode.permission_denied - RESOURCE_EXHAUSTED = cygrpc.StatusCode.resource_exhausted - FAILED_PRECONDITION = cygrpc.StatusCode.failed_precondition - ABORTED = cygrpc.StatusCode.aborted - OUT_OF_RANGE = cygrpc.StatusCode.out_of_range - UNIMPLEMENTED = cygrpc.StatusCode.unimplemented - INTERNAL = cygrpc.StatusCode.internal - UNAVAILABLE = cygrpc.StatusCode.unavailable - DATA_LOSS = cygrpc.StatusCode.data_loss - UNAUTHENTICATED = cygrpc.StatusCode.unauthenticated - - -@enum.unique -class OpWriteFlags(enum.IntEnum): - """Mirrors defined write-flag constants in the C core.""" - WRITE_BUFFER_HINT = cygrpc.WriteFlag.buffer_hint - WRITE_NO_COMPRESS = cygrpc.WriteFlag.no_compress - - -@enum.unique -class OpType(enum.IntEnum): - """Mirrors grpc_op_type in the C core.""" - SEND_INITIAL_METADATA = cygrpc.OperationType.send_initial_metadata - SEND_MESSAGE = cygrpc.OperationType.send_message - SEND_CLOSE_FROM_CLIENT = cygrpc.OperationType.send_close_from_client - SEND_STATUS_FROM_SERVER = cygrpc.OperationType.send_status_from_server - RECV_INITIAL_METADATA = cygrpc.OperationType.receive_initial_metadata - RECV_MESSAGE = cygrpc.OperationType.receive_message - RECV_STATUS_ON_CLIENT = cygrpc.OperationType.receive_status_on_client - RECV_CLOSE_ON_SERVER = cygrpc.OperationType.receive_close_on_server - - -@enum.unique -class EventType(enum.IntEnum): - """Mirrors grpc_completion_type in the C core.""" - QUEUE_SHUTDOWN = cygrpc.CompletionType.queue_shutdown - QUEUE_TIMEOUT = cygrpc.CompletionType.queue_timeout - OP_COMPLETE = cygrpc.CompletionType.operation_complete - - -@enum.unique -class ConnectivityState(enum.IntEnum): - """Mirrors grpc_connectivity_state in the C core.""" - IDLE = cygrpc.ConnectivityState.idle - CONNECTING = cygrpc.ConnectivityState.connecting - READY = cygrpc.ConnectivityState.ready - TRANSIENT_FAILURE = cygrpc.ConnectivityState.transient_failure - FATAL_FAILURE = cygrpc.ConnectivityState.shutdown - - -class Status(collections.namedtuple( - 'Status', [ - 'code', - 'details', - ])): - """The end status of a GRPC call. - - Attributes: - code (StatusCode): ... - details (str): ... 
- """ - - -class CallDetails(collections.namedtuple( - 'CallDetails', [ - 'method', - 'host', - 'deadline', - ])): - """Provides information to the server about the client's call. - - Attributes: - method (str): ... - host (str): ... - deadline (float): ... - """ - - -class OpArgs(collections.namedtuple( - 'OpArgs', [ - 'type', - 'initial_metadata', - 'trailing_metadata', - 'message', - 'status', - 'flags', - ])): - """Arguments passed into a GRPC operation. - - Attributes: - type (OpType): ... - initial_metadata (sequence of 2-sequence of str): Only valid if type == - OpType.SEND_INITIAL_METADATA, else is None. - trailing_metadata (sequence of 2-sequence of str): Only valid if type == - OpType.SEND_STATUS_FROM_SERVER, else is None. - message (bytes): Only valid if type == OpType.SEND_MESSAGE, else is None. - status (Status): Only valid if type == OpType.SEND_STATUS_FROM_SERVER, else - is None. - flags (int): a bitwise OR'ing of 0 or more OpWriteFlags values. - """ - - @staticmethod - def send_initial_metadata(initial_metadata): - return OpArgs(OpType.SEND_INITIAL_METADATA, initial_metadata, None, None, None, 0) - - @staticmethod - def send_message(message, flags): - return OpArgs(OpType.SEND_MESSAGE, None, None, message, None, flags) - - @staticmethod - def send_close_from_client(): - return OpArgs(OpType.SEND_CLOSE_FROM_CLIENT, None, None, None, None, 0) - - @staticmethod - def send_status_from_server(trailing_metadata, status_code, status_details): - return OpArgs(OpType.SEND_STATUS_FROM_SERVER, None, trailing_metadata, None, Status(status_code, status_details), 0) - - @staticmethod - def recv_initial_metadata(): - return OpArgs(OpType.RECV_INITIAL_METADATA, None, None, None, None, 0); - - @staticmethod - def recv_message(): - return OpArgs(OpType.RECV_MESSAGE, None, None, None, None, 0) - - @staticmethod - def recv_status_on_client(): - return OpArgs(OpType.RECV_STATUS_ON_CLIENT, None, None, None, None, 0) - - @staticmethod - def recv_close_on_server(): - return OpArgs(OpType.RECV_CLOSE_ON_SERVER, None, None, None, None, 0) - - -class OpResult(collections.namedtuple( - 'OpResult', [ - 'type', - 'initial_metadata', - 'trailing_metadata', - 'message', - 'status', - 'cancelled', - ])): - """Results received from a GRPC operation. - - Attributes: - type (OpType): ... - initial_metadata (sequence of 2-sequence of str): Only valid if type == - OpType.RECV_INITIAL_METADATA, else is None. - trailing_metadata (sequence of 2-sequence of str): Only valid if type == - OpType.RECV_STATUS_ON_CLIENT, else is None. - message (bytes): Only valid if type == OpType.RECV_MESSAGE, else is None. - status (Status): Only valid if type == OpType.RECV_STATUS_ON_CLIENT, else - is None. - cancelled (bool): Only valid if type == OpType.RECV_CLOSE_ON_SERVER, else - is None. - """ - - -class Event(collections.namedtuple( - 'Event', [ - 'type', - 'tag', - 'call', - 'call_details', - 'results', - 'success', - ])): - """An event received from a GRPC completion queue. - - Attributes: - type (EventType): ... - tag (object): ... - call (Call): The Call object associated with this event (if there is one, - else None). - call_details (CallDetails): The call details associated with the - server-side call (if there is such information, else None). - results (list of OpResult): ... - success (bool): ... - """ - - -class CompletionQueue(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def __init__(self): - pass - - def __iter__(self): - """This class may be iterated over. 
- - This is the equivalent of calling next() repeatedly with an absolute - deadline of None (i.e. no deadline). - """ - return self - - def __next__(self): - return self.next() - - @abc.abstractmethod - def next(self, deadline=float('+inf')): - """Get the next event on this completion queue. - - Args: - deadline (float): absolute deadline in seconds from the Python epoch, or - None for no deadline. - - Returns: - Event: ... - """ - pass - - @abc.abstractmethod - def shutdown(self): - """Begin the shutdown process of this completion queue. - - Note that this does not immediately destroy the completion queue. - Nevertheless, user code should not pass it around after invoking this. - """ - return None - - -class Call(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def start_batch(self, ops, tag): - """Start a batch of operations. - - Args: - ops (sequence of OpArgs): ... - tag (object): ... - - Returns: - CallError: ... - """ - return CallError.ERROR - - @abc.abstractmethod - def cancel(self, code=None, details=None): - """Cancel the call. - - Args: - code (int): Status code to cancel with (on the server side). If - specified, so must `details`. - details (str): Status details to cancel with (on the server side). If - specified, so must `code`. - - Returns: - CallError: ... - """ - return CallError.ERROR - - @abc.abstractmethod - def peer(self): - """Get the peer of this call. - - Returns: - str: the peer of this call. - """ - return None - - def set_credentials(self, creds): - """Set per-call credentials. - - Args: - creds (CallCredentials): Credentials to be set for this call. - """ - return None - - -class Channel(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def __init__(self, target, args, credentials=None): - """Initialize a Channel. - - Args: - target (str): ... - args (sequence of 2-sequence of str, (str|integer)): ... - credentials (ChannelCredentials): If None, create an insecure channel, - else create a secure channel using the client credentials. - """ - - @abc.abstractmethod - def create_call(self, completion_queue, method, host, deadline=float('+inf')): - """Create a call from this channel. - - Args: - completion_queue (CompletionQueue): ... - method (str): ... - host (str): ... - deadline (float): absolute deadline in seconds from the Python epoch, or - None for no deadline. - - Returns: - Call: call object associated with this Channel and passed parameters. - """ - return None - - @abc.abstractmethod - def check_connectivity_state(self, try_to_connect): - """Check and optionally repair the connectivity state of the channel. - - Args: - try_to_connect (bool): whether or not to try to connect the channel if - disconnected. - - Returns: - ConnectivityState: state of the channel at the time of this invocation. - """ - return None - - @abc.abstractmethod - def watch_connectivity_state(self, last_observed_state, deadline, - completion_queue, tag): - """Watch for connectivity state changes from the last_observed_state. - - Args: - last_observed_state (ConnectivityState): ... - deadline (float): ... - completion_queue (CompletionQueue): ... - tag (object) ... - """ - - @abc.abstractmethod - def target(self): - """Get the target of this channel. - - Returns: - str: the target of this channel. - """ - return None - - -class Server(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def __init__(self, completion_queue, args): - """Initialize a server. - - Args: - completion_queue (CompletionQueue): ... 
- args (sequence of 2-sequence of str, (str|integer)): ... - """ - - @abc.abstractmethod - def add_http2_port(self, address, credentials=None): - """Adds an HTTP/2 address+port to the server. - - Args: - address (str): ... - credentials (ServerCredentials): If None, create an insecure port, else - create a secure port using the server credentials. - """ - - @abc.abstractmethod - def start(self): - """Starts the server.""" - - @abc.abstractmethod - def shutdown(self, tag=None): - """Shuts down the server. Does not immediately destroy the server. - - Args: - tag (object): if not None, have the server place an event on its - completion queue notifying it when this server has completely shut down. - """ - - @abc.abstractmethod - def request_call(self, completion_queue, tag): - """Requests a call from the server on the server's completion queue. - - Args: - completion_queue (CompletionQueue): Completion queue for the call. May be - the same as the server's completion queue. - tag (object) ... - """ diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py index 29dbc3a668b..3117dd1cb31 100644 --- a/src/python/grpcio/grpc/_channel.py +++ b/src/python/grpcio/grpc/_channel.py @@ -353,12 +353,12 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): else: return max(self._deadline - time.time(), 0) - def add_cancellation_callback(self, callback): + def add_callback(self, callback): with self._state.condition: if self._state.callbacks is None: return False else: - self._state.callbacks.append(lambda unused_future: callback()) + self._state.callbacks.append(lambda: callback()) return True def initial_metadata(self): diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi index 7714504d1bb..42fced65457 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi @@ -30,19 +30,29 @@ cimport libc.time -cdef extern from "grpc/_cython/loader.h": +# Typedef types with approximately the same semantics to provide their names to +# Cython +ctypedef int int32_t +ctypedef unsigned uint32_t +ctypedef long int64_t - ctypedef int int32_t - ctypedef unsigned uint32_t - ctypedef long int64_t - int pygrpc_load_core(char*) - int pygrpc_initialize_core() +cdef extern from "grpc/support/alloc.h": void *gpr_malloc(size_t size) nogil void gpr_free(void *ptr) nogil void *gpr_realloc(void *p, size_t size) nogil + +cdef extern from "grpc/byte_buffer_reader.h": + + struct grpc_byte_buffer_reader: + # We don't care about the internals + pass + + +cdef extern from "grpc/grpc.h": + ctypedef struct gpr_slice: # don't worry about writing out the members of gpr_slice; we never access # them directly. @@ -86,7 +96,22 @@ cdef extern from "grpc/_cython/loader.h": gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) nogil int gpr_time_cmp(gpr_timespec a, gpr_timespec b) nogil - + + ctypedef struct grpc_byte_buffer: + # We don't care about the internals. 
+ pass + + grpc_byte_buffer *grpc_raw_byte_buffer_create(gpr_slice *slices, + size_t nslices) nogil + size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) nogil + void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer) nogil + + int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, + grpc_byte_buffer *buffer) nogil + int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader, + gpr_slice *slice) nogil + void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) nogil + ctypedef enum grpc_status_code: GRPC_STATUS_OK GRPC_STATUS_CANCELLED @@ -107,37 +132,6 @@ cdef extern from "grpc/_cython/loader.h": GRPC_STATUS_DATA_LOSS GRPC_STATUS__DO_NOT_USE - ctypedef enum grpc_ssl_roots_override_result: - GRPC_SSL_ROOTS_OVERRIDE_OK - GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY - GRPC_SSL_ROOTS_OVERRIDE_FAILED - - ctypedef enum grpc_ssl_client_certificate_request_type: - GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, - GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY - GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY - GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY - GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY - - struct grpc_byte_buffer_reader: - # We don't care about the internals - pass - - ctypedef struct grpc_byte_buffer: - # We don't care about the internals. - pass - - grpc_byte_buffer *grpc_raw_byte_buffer_create(gpr_slice *slices, - size_t nslices) nogil - size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) nogil - void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer) nogil - - int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, - grpc_byte_buffer *buffer) nogil - int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader, - gpr_slice *slice) nogil - void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) nogil - const char *GRPC_ARG_PRIMARY_USER_AGENT_STRING const char *GRPC_ARG_ENABLE_CENSUS const char *GRPC_ARG_MAX_CONCURRENT_STREAMS @@ -353,6 +347,21 @@ cdef extern from "grpc/_cython/loader.h": void grpc_server_cancel_all_calls(grpc_server *server) nogil void grpc_server_destroy(grpc_server *server) nogil + +cdef extern from "grpc/grpc_security.h": + + ctypedef enum grpc_ssl_roots_override_result: + GRPC_SSL_ROOTS_OVERRIDE_OK + GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY + GRPC_SSL_ROOTS_OVERRIDE_FAILED + + ctypedef enum grpc_ssl_client_certificate_request_type: + GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, + GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY + GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY + GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY + GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY + ctypedef struct grpc_ssl_pem_key_cert_pair: const char *private_key const char *certificate_chain "cert_chain" @@ -438,6 +447,9 @@ cdef extern from "grpc/_cython/loader.h": grpc_call_credentials *grpc_metadata_credentials_create_from_plugin( grpc_metadata_credentials_plugin plugin, void *reserved) nogil + +cdef extern from "grpc/compression.h": + ctypedef enum grpc_compression_algorithm: GRPC_COMPRESS_NONE GRPC_COMPRESS_DEFLATE @@ -472,3 +484,4 @@ cdef extern from "grpc/_cython/loader.h": int grpc_compression_options_is_algorithm_enabled( const grpc_compression_options *opts, grpc_compression_algorithm algorithm) nogil + diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pyx b/src/python/grpcio/grpc/_cython/cygrpc.pyx index e055d321bc7..a9520b9c0fa 100644 --- a/src/python/grpcio/grpc/_cython/cygrpc.pyx +++ b/src/python/grpcio/grpc/_cython/cygrpc.pyx @@ -49,11 
+49,18 @@ include "grpc/_cython/_cygrpc/server.pyx.pxi" # -def _initialize(): - if not pygrpc_initialize_core(): - raise ImportError('failed to initialize core gRPC library') +cdef extern from "Python.h": + + int Py_AtExit(void(*func)()) + +def _initialize(): + grpc_init() grpc_set_ssl_roots_override_callback( ssl_roots_override_callback) + if Py_AtExit(grpc_shutdown) != 0: + raise ImportError('failed to register gRPC library shutdown callbacks') + + _initialize() diff --git a/src/python/grpcio/grpc/_cython/imports.generated.h b/src/python/grpcio/grpc/_cython/imports.generated.h deleted file mode 100644 index 8e5c9a8ce2b..00000000000 --- a/src/python/grpcio/grpc/_cython/imports.generated.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -/* TODO(atash) remove cruft */ -#ifndef PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ -#define PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#endif diff --git a/src/python/grpcio/grpc/_cython/loader.c b/src/python/grpcio/grpc/_cython/loader.c deleted file mode 100644 index 34bd8975495..00000000000 --- a/src/python/grpcio/grpc/_cython/loader.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include "loader.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cpluslus */ - -int pygrpc_load_core(char *path) { return 1; } - -// Cython doesn't have Py_AtExit bindings, so we call the C_API directly -int pygrpc_initialize_core(void) { - grpc_init(); - return Py_AtExit(grpc_shutdown) < 0 ? 0 : 1; -} - -#ifdef __cplusplus -} -#endif /* __cpluslus */ - diff --git a/src/python/grpcio/grpc/_cython/loader.h b/src/python/grpcio/grpc/_cython/loader.h deleted file mode 100644 index 62fd2252047..00000000000 --- a/src/python/grpcio/grpc/_cython/loader.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef PYGRPC_LOADER_H_ -#define PYGRPC_LOADER_H_ - -#include "imports.generated.h" - -/* Additional inclusions not covered by "imports.generated.h" */ -#include - -/* TODO(atash) remove cruft */ - -#ifdef __cplusplus -extern "C" { -#endif /* __cpluslus */ - -/* Attempts to load the core if necessary, and return non-zero upon succes. 
*/ -int pygrpc_load_core(char *path); - -/* Initializes grpc and registers grpc_shutdown() to be called right before - * interpreter exit. Returns non-zero upon success. - */ -int pygrpc_initialize_core(void); - -#ifdef __cplusplus -} -#endif /* __cpluslus */ - -#endif /* GRPC_RB_BYTE_BUFFER_H_ */ - diff --git a/src/python/grpcio/grpc/_links/_constants.py b/src/python/grpcio/grpc/_links/_constants.py deleted file mode 100644 index 117fc5a6395..00000000000 --- a/src/python/grpcio/grpc/_links/_constants.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Constants for use within this package.""" - -from grpc._adapter import _intermediary_low -from grpc.beta import interfaces as beta_interfaces - -LOW_STATUS_CODE_TO_HIGH_STATUS_CODE = { - low: high for low, high in zip( - _intermediary_low.Code, beta_interfaces.StatusCode) -} - -HIGH_STATUS_CODE_TO_LOW_STATUS_CODE = { - high: low for low, high in LOW_STATUS_CODE_TO_HIGH_STATUS_CODE.items() -} diff --git a/src/python/grpcio/grpc/_links/invocation.py b/src/python/grpcio/grpc/_links/invocation.py deleted file mode 100644 index 003653e1c84..00000000000 --- a/src/python/grpcio/grpc/_links/invocation.py +++ /dev/null @@ -1,453 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""The RPC-invocation-side bridge between RPC Framework and GRPC-on-the-wire.""" - -import abc -import enum -import logging -import threading -import time - -import six - -from grpc._adapter import _intermediary_low -from grpc._links import _constants -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.foundation import activated -from grpc.framework.foundation import logging_pool -from grpc.framework.foundation import relay -from grpc.framework.interfaces.links import links - -_IDENTITY = lambda x: x - -_STOP = _intermediary_low.Event.Kind.STOP -_WRITE = _intermediary_low.Event.Kind.WRITE_ACCEPTED -_COMPLETE = _intermediary_low.Event.Kind.COMPLETE_ACCEPTED -_READ = _intermediary_low.Event.Kind.READ_ACCEPTED -_METADATA = _intermediary_low.Event.Kind.METADATA_ACCEPTED -_FINISH = _intermediary_low.Event.Kind.FINISH - - -@enum.unique -class _Read(enum.Enum): - AWAITING_METADATA = 'awaiting metadata' - READING = 'reading' - AWAITING_ALLOWANCE = 'awaiting allowance' - CLOSED = 'closed' - - -@enum.unique -class _HighWrite(enum.Enum): - OPEN = 'open' - CLOSED = 'closed' - - -@enum.unique -class _LowWrite(enum.Enum): - OPEN = 'OPEN' - ACTIVE = 'ACTIVE' - CLOSED = 'CLOSED' - - -class _Context(beta_interfaces.GRPCInvocationContext): - - def __init__(self): - self._lock = threading.Lock() - self._disable_next_compression = False - - def disable_next_request_compression(self): - with self._lock: - self._disable_next_compression = True - - def next_compression_disabled(self): - with self._lock: - disabled = self._disable_next_compression - self._disable_next_compression = False - return disabled - - -class _RPCState(object): - - def __init__( - self, call, request_serializer, response_deserializer, sequence_number, - read, allowance, high_write, low_write, due, context): - self.call = call - self.request_serializer = request_serializer - self.response_deserializer = response_deserializer - self.sequence_number = sequence_number - self.read = read - self.allowance = allowance - self.high_write = high_write - self.low_write = low_write - self.due = due - self.context = context - - -def _no_longer_due(kind, rpc_state, key, rpc_states): - rpc_state.due.remove(kind) - if not rpc_state.due: - del rpc_states[key] - - -class _Kernel(object): - - def __init__( - self, channel, host, metadata_transformer, request_serializers, - response_deserializers, ticket_relay): - self._lock = threading.Lock() - self._channel = channel - self._host = host - self._metadata_transformer = metadata_transformer - self._request_serializers = request_serializers - self._response_deserializers = response_deserializers - self._relay = ticket_relay - - self._completion_queue = None - self._rpc_states = {} - self._pool = None - - def 
_on_write_event(self, operation_id, unused_event, rpc_state): - if rpc_state.high_write is _HighWrite.CLOSED: - rpc_state.call.complete(operation_id) - rpc_state.due.add(_COMPLETE) - rpc_state.due.remove(_WRITE) - rpc_state.low_write = _LowWrite.CLOSED - else: - ticket = links.Ticket( - operation_id, rpc_state.sequence_number, None, None, None, None, 1, - None, None, None, None, None, None, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - rpc_state.low_write = _LowWrite.OPEN - _no_longer_due(_WRITE, rpc_state, operation_id, self._rpc_states) - - def _on_read_event(self, operation_id, event, rpc_state): - if event.bytes is None or _FINISH not in rpc_state.due: - rpc_state.read = _Read.CLOSED - _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states) - else: - if 0 < rpc_state.allowance: - rpc_state.allowance -= 1 - rpc_state.call.read(operation_id) - else: - rpc_state.read = _Read.AWAITING_ALLOWANCE - _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states) - ticket = links.Ticket( - operation_id, rpc_state.sequence_number, None, None, None, None, None, - None, rpc_state.response_deserializer(event.bytes), None, None, None, - None, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - - def _on_metadata_event(self, operation_id, event, rpc_state): - if _FINISH in rpc_state.due: - rpc_state.allowance -= 1 - rpc_state.call.read(operation_id) - rpc_state.read = _Read.READING - rpc_state.due.add(_READ) - rpc_state.due.remove(_METADATA) - ticket = links.Ticket( - operation_id, rpc_state.sequence_number, None, None, - links.Ticket.Subscription.FULL, None, None, event.metadata, None, - None, None, None, None, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - else: - _no_longer_due(_METADATA, rpc_state, operation_id, self._rpc_states) - - def _on_finish_event(self, operation_id, event, rpc_state): - _no_longer_due(_FINISH, rpc_state, operation_id, self._rpc_states) - if event.status.code == _intermediary_low.Code.OK: - termination = links.Ticket.Termination.COMPLETION - elif event.status.code == _intermediary_low.Code.CANCELLED: - termination = links.Ticket.Termination.CANCELLATION - elif event.status.code == _intermediary_low.Code.DEADLINE_EXCEEDED: - termination = links.Ticket.Termination.EXPIRATION - elif event.status.code == _intermediary_low.Code.UNIMPLEMENTED: - termination = links.Ticket.Termination.REMOTE_FAILURE - elif event.status.code == _intermediary_low.Code.UNKNOWN: - termination = links.Ticket.Termination.LOCAL_FAILURE - else: - termination = links.Ticket.Termination.TRANSMISSION_FAILURE - code = _constants.LOW_STATUS_CODE_TO_HIGH_STATUS_CODE[event.status.code] - ticket = links.Ticket( - operation_id, rpc_state.sequence_number, None, None, None, None, None, - None, None, event.metadata, code, event.status.details, termination, - None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - - def _spin(self, completion_queue): - while True: - event = completion_queue.get(None) - with self._lock: - rpc_state = self._rpc_states.get(event.tag, None) - if event.kind is _STOP: - pass - elif event.kind is _WRITE: - self._on_write_event(event.tag, event, rpc_state) - elif event.kind is _METADATA: - self._on_metadata_event(event.tag, event, rpc_state) - elif event.kind is _READ: - self._on_read_event(event.tag, event, rpc_state) - elif event.kind is _FINISH: - self._on_finish_event(event.tag, event, rpc_state) - elif event.kind is _COMPLETE: - _no_longer_due(_COMPLETE, rpc_state, event.tag, 
self._rpc_states) - else: - logging.error('Illegal RPC event! %s', (event,)) - - if self._completion_queue is None and not self._rpc_states: - completion_queue.stop() - return - - def _invoke( - self, operation_id, group, method, initial_metadata, payload, termination, - timeout, allowance, options): - """Invoke an RPC. - - Args: - operation_id: Any object to be used as an operation ID for the RPC. - group: The group to which the RPC method belongs. - method: The RPC method name. - initial_metadata: The initial metadata object for the RPC. - payload: A payload object for the RPC or None if no payload was given at - invocation-time. - termination: A links.Ticket.Termination value or None indicated whether or - not more writes will follow from this side of the RPC. - timeout: A duration of time in seconds to allow for the RPC. - allowance: The number of payloads (beyond the free first one) that the - local ticket exchange mate has granted permission to be read. - options: A beta_interfaces.GRPCCallOptions value or None. - """ - if termination is links.Ticket.Termination.COMPLETION: - high_write = _HighWrite.CLOSED - elif termination is None: - high_write = _HighWrite.OPEN - else: - return - - transformed_initial_metadata = self._metadata_transformer(initial_metadata) - request_serializer = self._request_serializers.get( - (group, method), _IDENTITY) - response_deserializer = self._response_deserializers.get( - (group, method), _IDENTITY) - - call = _intermediary_low.Call( - self._channel, self._completion_queue, '/%s/%s' % (group, method), - self._host, time.time() + timeout) - if options is not None and options.credentials is not None: - call.set_credentials(options.credentials._low_credentials) - if transformed_initial_metadata is not None: - for metadata_key, metadata_value in transformed_initial_metadata: - call.add_metadata(metadata_key, metadata_value) - call.invoke(self._completion_queue, operation_id, operation_id) - if payload is None: - if high_write is _HighWrite.CLOSED: - call.complete(operation_id) - low_write = _LowWrite.CLOSED - due = set((_METADATA, _COMPLETE, _FINISH,)) - else: - low_write = _LowWrite.OPEN - due = set((_METADATA, _FINISH,)) - else: - if options is not None and options.disable_compression: - flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS - else: - flags = 0 - call.write(request_serializer(payload), operation_id, flags) - low_write = _LowWrite.ACTIVE - due = set((_WRITE, _METADATA, _FINISH,)) - context = _Context() - self._rpc_states[operation_id] = _RPCState( - call, request_serializer, response_deserializer, 1, - _Read.AWAITING_METADATA, 1 if allowance is None else (1 + allowance), - high_write, low_write, due, context) - protocol = links.Protocol(links.Protocol.Kind.INVOCATION_CONTEXT, context) - ticket = links.Ticket( - operation_id, 0, None, None, None, None, None, None, None, None, None, - None, None, protocol) - self._relay.add_value(ticket) - - def _advance(self, operation_id, rpc_state, payload, termination, allowance): - if payload is not None: - disable_compression = rpc_state.context.next_compression_disabled() - if disable_compression: - flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS - else: - flags = 0 - rpc_state.call.write( - rpc_state.request_serializer(payload), operation_id, flags) - rpc_state.low_write = _LowWrite.ACTIVE - rpc_state.due.add(_WRITE) - - if allowance is not None: - if rpc_state.read is _Read.AWAITING_ALLOWANCE: - rpc_state.allowance += allowance - 1 - rpc_state.call.read(operation_id) - rpc_state.read = 
_Read.READING - rpc_state.due.add(_READ) - else: - rpc_state.allowance += allowance - - if termination is links.Ticket.Termination.COMPLETION: - rpc_state.high_write = _HighWrite.CLOSED - if rpc_state.low_write is _LowWrite.OPEN: - rpc_state.call.complete(operation_id) - rpc_state.due.add(_COMPLETE) - rpc_state.low_write = _LowWrite.CLOSED - elif termination is not None: - rpc_state.call.cancel() - - def add_ticket(self, ticket): - with self._lock: - if ticket.sequence_number == 0: - if self._completion_queue is None: - logging.error('Received invocation ticket %s after stop!', ticket) - else: - if (ticket.protocol is not None and - ticket.protocol.kind is links.Protocol.Kind.CALL_OPTION): - grpc_call_options = ticket.protocol.value - else: - grpc_call_options = None - self._invoke( - ticket.operation_id, ticket.group, ticket.method, - ticket.initial_metadata, ticket.payload, ticket.termination, - ticket.timeout, ticket.allowance, grpc_call_options) - else: - rpc_state = self._rpc_states.get(ticket.operation_id) - if rpc_state is not None: - self._advance( - ticket.operation_id, rpc_state, ticket.payload, - ticket.termination, ticket.allowance) - - def start(self): - """Starts this object. - - This method must be called before attempting to exchange tickets with this - object. - """ - with self._lock: - self._completion_queue = _intermediary_low.CompletionQueue() - self._pool = logging_pool.pool(1) - self._pool.submit(self._spin, self._completion_queue) - - def stop(self): - """Stops this object. - - This method must be called for proper termination of this object, and no - attempts to exchange tickets with this object may be made after this method - has been called. - """ - with self._lock: - if not self._rpc_states: - self._completion_queue.stop() - self._completion_queue = None - pool = self._pool - pool.shutdown(wait=True) - - -class InvocationLink(six.with_metaclass(abc.ABCMeta, links.Link, activated.Activated)): - """A links.Link for use on the invocation-side of a gRPC connection. - - Implementations of this interface are only valid for use when activated. - """ - - -class _InvocationLink(InvocationLink): - - def __init__( - self, channel, host, metadata_transformer, request_serializers, - response_deserializers): - self._relay = relay.relay(None) - self._kernel = _Kernel( - channel, host, - _IDENTITY if metadata_transformer is None else metadata_transformer, - {} if request_serializers is None else request_serializers, - {} if response_deserializers is None else response_deserializers, - self._relay) - - def _start(self): - self._relay.start() - self._kernel.start() - return self - - def _stop(self): - self._kernel.stop() - self._relay.stop() - - def accept_ticket(self, ticket): - """See links.Link.accept_ticket for specification.""" - self._kernel.add_ticket(ticket) - - def join_link(self, link): - """See links.Link.join_link for specification.""" - self._relay.set_behavior(link.accept_ticket) - - def __enter__(self): - """See activated.Activated.__enter__ for specification.""" - return self._start() - - def __exit__(self, exc_type, exc_val, exc_tb): - """See activated.Activated.__exit__ for specification.""" - self._stop() - return False - - def start(self): - """See activated.Activated.start for specification.""" - return self._start() - - def stop(self): - """See activated.Activated.stop for specification.""" - self._stop() - - -def invocation_link( - channel, host, metadata_transformer, request_serializers, - response_deserializers): - """Creates an InvocationLink. 
- - Args: - channel: An _intermediary_low.Channel for use by the link. - host: The host to specify when invoking RPCs. - metadata_transformer: A callable that takes an invocation-side initial - metadata value and returns another metadata value to send in its place. - May be None. - request_serializers: A dict from group-method pair to request object - serialization behavior. - response_deserializers: A dict from group-method pair to response object - deserialization behavior. - - Returns: - An InvocationLink. - """ - return _InvocationLink( - channel, host, metadata_transformer, request_serializers, - response_deserializers) diff --git a/src/python/grpcio/grpc/_links/service.py b/src/python/grpcio/grpc/_links/service.py deleted file mode 100644 index 5fc4994ca0e..00000000000 --- a/src/python/grpcio/grpc/_links/service.py +++ /dev/null @@ -1,509 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""The RPC-service-side bridge between RPC Framework and GRPC-on-the-wire.""" - -import abc -import enum -import logging -import threading -import six -import time - -from grpc._adapter import _intermediary_low -from grpc._links import _constants -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.foundation import logging_pool -from grpc.framework.foundation import relay -from grpc.framework.interfaces.links import links - -_IDENTITY = lambda x: x - -_TERMINATION_KIND_TO_CODE = { - links.Ticket.Termination.COMPLETION: _intermediary_low.Code.OK, - links.Ticket.Termination.CANCELLATION: _intermediary_low.Code.CANCELLED, - links.Ticket.Termination.EXPIRATION: - _intermediary_low.Code.DEADLINE_EXCEEDED, - links.Ticket.Termination.SHUTDOWN: _intermediary_low.Code.UNAVAILABLE, - links.Ticket.Termination.RECEPTION_FAILURE: _intermediary_low.Code.INTERNAL, - links.Ticket.Termination.TRANSMISSION_FAILURE: - _intermediary_low.Code.INTERNAL, - links.Ticket.Termination.LOCAL_FAILURE: _intermediary_low.Code.UNKNOWN, - links.Ticket.Termination.REMOTE_FAILURE: _intermediary_low.Code.UNKNOWN, -} - -_STOP = _intermediary_low.Event.Kind.STOP -_WRITE = _intermediary_low.Event.Kind.WRITE_ACCEPTED -_COMPLETE = _intermediary_low.Event.Kind.COMPLETE_ACCEPTED -_SERVICE = _intermediary_low.Event.Kind.SERVICE_ACCEPTED -_READ = _intermediary_low.Event.Kind.READ_ACCEPTED -_FINISH = _intermediary_low.Event.Kind.FINISH - - -@enum.unique -class _Read(enum.Enum): - READING = 'reading' - # TODO(issue 2916): This state will again be necessary after eliminating the - # "early_read" field of _RPCState and going back to only reading when granted - # allowance to read. - # AWAITING_ALLOWANCE = 'awaiting allowance' - CLOSED = 'closed' - - -@enum.unique -class _HighWrite(enum.Enum): - OPEN = 'open' - CLOSED = 'closed' - - -@enum.unique -class _LowWrite(enum.Enum): - """The possible categories of low-level write state.""" - - OPEN = 'OPEN' - ACTIVE = 'ACTIVE' - CLOSED = 'CLOSED' - - -class _Context(beta_interfaces.GRPCServicerContext): - - def __init__(self, call): - self._lock = threading.Lock() - self._call = call - self._disable_next_compression = False - - def peer(self): - with self._lock: - return self._call.peer() - - def disable_next_response_compression(self): - with self._lock: - self._disable_next_compression = True - - def next_compression_disabled(self): - with self._lock: - disabled = self._disable_next_compression - self._disable_next_compression = False - return disabled - - -class _RPCState(object): - - def __init__( - self, request_deserializer, response_serializer, sequence_number, read, - early_read, allowance, high_write, low_write, premetadataed, - terminal_metadata, code, message, due, context): - self.request_deserializer = request_deserializer - self.response_serializer = response_serializer - self.sequence_number = sequence_number - self.read = read - # TODO(issue 2916): Eliminate this by eliminating the necessity of calling - # call.read just to advance the RPC. - self.early_read = early_read # A raw (not deserialized) read. 
- self.allowance = allowance - self.high_write = high_write - self.low_write = low_write - self.premetadataed = premetadataed - self.terminal_metadata = terminal_metadata - self.code = code - self.message = message - self.due = due - self.context = context - - -def _no_longer_due(kind, rpc_state, key, rpc_states): - rpc_state.due.remove(kind) - if not rpc_state.due: - del rpc_states[key] - - -def _metadatafy(call, metadata): - for metadata_key, metadata_value in metadata: - call.add_metadata(metadata_key, metadata_value) - - -def _status(termination_kind, high_code, details): - low_details = b'' if details is None else details - if high_code is None: - low_code = _TERMINATION_KIND_TO_CODE[termination_kind] - else: - low_code = _constants.HIGH_STATUS_CODE_TO_LOW_STATUS_CODE[high_code] - return _intermediary_low.Status(low_code, low_details) - - -class _Kernel(object): - - def __init__(self, request_deserializers, response_serializers, ticket_relay): - self._lock = threading.Lock() - self._request_deserializers = request_deserializers - self._response_serializers = response_serializers - self._relay = ticket_relay - - self._completion_queue = None - self._due = set() - self._server = None - self._rpc_states = {} - self._pool = None - - def _on_service_acceptance_event(self, event, server): - server.service(None) - - service_acceptance = event.service_acceptance - call = service_acceptance.call - call.accept(self._completion_queue, call) - try: - service_method = service_acceptance.method - if six.PY3: - service_method = service_method.decode('latin1') - group, method = service_method.split('/')[1:3] - except ValueError: - logging.info('Illegal path "%s"!', service_acceptance.method) - return - request_deserializer = self._request_deserializers.get( - (group, method), _IDENTITY) - response_serializer = self._response_serializers.get( - (group, method), _IDENTITY) - - call.read(call) - context = _Context(call) - self._rpc_states[call] = _RPCState( - request_deserializer, response_serializer, 1, _Read.READING, None, 1, - _HighWrite.OPEN, _LowWrite.OPEN, False, None, None, None, - set((_READ, _FINISH,)), context) - protocol = links.Protocol(links.Protocol.Kind.SERVICER_CONTEXT, context) - ticket = links.Ticket( - call, 0, group, method, links.Ticket.Subscription.FULL, - service_acceptance.deadline - time.time(), None, event.metadata, None, - None, None, None, None, protocol) - self._relay.add_value(ticket) - - def _on_read_event(self, event): - call = event.tag - rpc_state = self._rpc_states[call] - - if event.bytes is None: - rpc_state.read = _Read.CLOSED - payload = None - termination = links.Ticket.Termination.COMPLETION - _no_longer_due(_READ, rpc_state, call, self._rpc_states) - else: - if 0 < rpc_state.allowance: - payload = rpc_state.request_deserializer(event.bytes) - termination = None - rpc_state.allowance -= 1 - call.read(call) - else: - rpc_state.early_read = event.bytes - _no_longer_due(_READ, rpc_state, call, self._rpc_states) - return - # TODO(issue 2916): Instead of returning: - # rpc_state.read = _Read.AWAITING_ALLOWANCE - ticket = links.Ticket( - call, rpc_state.sequence_number, None, None, None, None, None, None, - payload, None, None, None, termination, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - - def _on_write_event(self, event): - call = event.tag - rpc_state = self._rpc_states[call] - - if rpc_state.high_write is _HighWrite.CLOSED: - if rpc_state.terminal_metadata is not None: - _metadatafy(call, rpc_state.terminal_metadata) - status = 
_status( - links.Ticket.Termination.COMPLETION, rpc_state.code, - rpc_state.message) - call.status(status, call) - rpc_state.low_write = _LowWrite.CLOSED - rpc_state.due.add(_COMPLETE) - rpc_state.due.remove(_WRITE) - else: - ticket = links.Ticket( - call, rpc_state.sequence_number, None, None, None, None, 1, None, - None, None, None, None, None, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - rpc_state.low_write = _LowWrite.OPEN - _no_longer_due(_WRITE, rpc_state, call, self._rpc_states) - - def _on_finish_event(self, event): - call = event.tag - rpc_state = self._rpc_states[call] - _no_longer_due(_FINISH, rpc_state, call, self._rpc_states) - code = event.status.code - if code == _intermediary_low.Code.OK: - return - - if code == _intermediary_low.Code.CANCELLED: - termination = links.Ticket.Termination.CANCELLATION - elif code == _intermediary_low.Code.DEADLINE_EXCEEDED: - termination = links.Ticket.Termination.EXPIRATION - else: - termination = links.Ticket.Termination.TRANSMISSION_FAILURE - ticket = links.Ticket( - call, rpc_state.sequence_number, None, None, None, None, None, None, - None, None, None, None, termination, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - - def _spin(self, completion_queue, server): - while True: - event = completion_queue.get(None) - with self._lock: - if event.kind is _STOP: - self._due.remove(_STOP) - elif event.kind is _READ: - self._on_read_event(event) - elif event.kind is _WRITE: - self._on_write_event(event) - elif event.kind is _COMPLETE: - _no_longer_due( - _COMPLETE, self._rpc_states.get(event.tag), event.tag, - self._rpc_states) - elif event.kind is _intermediary_low.Event.Kind.FINISH: - self._on_finish_event(event) - elif event.kind is _SERVICE: - if self._server is None: - self._due.remove(_SERVICE) - else: - self._on_service_acceptance_event(event, server) - else: - logging.error('Illegal event! 
%s', (event,)) - - if not self._due and not self._rpc_states: - completion_queue.stop() - return - - def add_ticket(self, ticket): - with self._lock: - call = ticket.operation_id - rpc_state = self._rpc_states.get(call) - if rpc_state is None: - return - - if ticket.initial_metadata is not None: - _metadatafy(call, ticket.initial_metadata) - call.premetadata() - rpc_state.premetadataed = True - elif not rpc_state.premetadataed: - if (ticket.terminal_metadata is not None or - ticket.payload is not None or - ticket.termination is not None or - ticket.code is not None or - ticket.message is not None): - call.premetadata() - rpc_state.premetadataed = True - - if ticket.allowance is not None: - if rpc_state.early_read is None: - rpc_state.allowance += ticket.allowance - else: - payload = rpc_state.request_deserializer(rpc_state.early_read) - rpc_state.allowance += ticket.allowance - 1 - rpc_state.early_read = None - if rpc_state.read is _Read.READING: - call.read(call) - rpc_state.due.add(_READ) - termination = None - else: - termination = links.Ticket.Termination.COMPLETION - early_read_ticket = links.Ticket( - call, rpc_state.sequence_number, None, None, None, None, None, - None, payload, None, None, None, termination, None) - rpc_state.sequence_number += 1 - self._relay.add_value(early_read_ticket) - - if ticket.payload is not None: - disable_compression = rpc_state.context.next_compression_disabled() - if disable_compression: - flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS - else: - flags = 0 - call.write(rpc_state.response_serializer(ticket.payload), call, flags) - rpc_state.due.add(_WRITE) - rpc_state.low_write = _LowWrite.ACTIVE - - if ticket.terminal_metadata is not None: - rpc_state.terminal_metadata = ticket.terminal_metadata - if ticket.code is not None: - rpc_state.code = ticket.code - if ticket.message is not None: - rpc_state.message = ticket.message - - if ticket.termination is links.Ticket.Termination.COMPLETION: - rpc_state.high_write = _HighWrite.CLOSED - if rpc_state.low_write is _LowWrite.OPEN: - if rpc_state.terminal_metadata is not None: - _metadatafy(call, rpc_state.terminal_metadata) - status = _status( - links.Ticket.Termination.COMPLETION, rpc_state.code, - rpc_state.message) - call.status(status, call) - rpc_state.due.add(_COMPLETE) - rpc_state.low_write = _LowWrite.CLOSED - elif ticket.termination is not None: - if rpc_state.terminal_metadata is not None: - _metadatafy(call, rpc_state.terminal_metadata) - status = _status( - ticket.termination, rpc_state.code, rpc_state.message) - call.status(status, call) - rpc_state.due.add(_COMPLETE) - - def add_port(self, address, server_credentials): - with self._lock: - if self._server is None: - self._completion_queue = _intermediary_low.CompletionQueue() - self._server = _intermediary_low.Server(self._completion_queue) - if server_credentials is None: - return self._server.add_http2_addr(address) - else: - return self._server.add_secure_http2_addr(address, server_credentials) - - def start(self): - with self._lock: - if self._server is None: - self._completion_queue = _intermediary_low.CompletionQueue() - self._server = _intermediary_low.Server(self._completion_queue) - self._pool = logging_pool.pool(1) - self._pool.submit(self._spin, self._completion_queue, self._server) - self._server.start() - self._server.service(None) - self._due.add(_SERVICE) - - def begin_stop(self): - with self._lock: - self._server.stop() - self._due.add(_STOP) - self._server = None - - def end_stop(self): - with self._lock: - pool = 
self._pool - pool.shutdown(wait=True) - - -class ServiceLink(links.Link): - """A links.Link for use on the service-side of a gRPC connection. - - Implementations of this interface are only valid for use between calls to - their start method and one of their stop methods. - """ - - @abc.abstractmethod - def add_port(self, address, server_credentials): - """Adds a port on which to service RPCs after this link has been started. - - Args: - address: The address on which to service RPCs with a port number of zero - requesting that a port number be automatically selected and used. - server_credentials: An _intermediary_low.ServerCredentials object, or - None for insecure service. - - Returns: - An integer port on which RPCs will be serviced after this link has been - started. This is typically the same number as the port number contained - in the passed address, but will likely be different if the port number - contained in the passed address was zero. - """ - raise NotImplementedError() - - @abc.abstractmethod - def start(self): - """Starts this object. - - This method must be called before attempting to use this Link in ticket - exchange. - """ - raise NotImplementedError() - - @abc.abstractmethod - def begin_stop(self): - """Indicate imminent link stop and immediate rejection of new RPCs. - - New RPCs will be rejected as soon as this method is called, but ongoing RPCs - will be allowed to continue until they terminate. This method does not - block. - """ - raise NotImplementedError() - - @abc.abstractmethod - def end_stop(self): - """Finishes stopping this link. - - begin_stop must have been called exactly once before calling this method. - - All in-progress RPCs will be terminated immediately. - """ - raise NotImplementedError() - - -class _ServiceLink(ServiceLink): - - def __init__(self, request_deserializers, response_serializers): - self._relay = relay.relay(None) - self._kernel = _Kernel( - {} if request_deserializers is None else request_deserializers, - {} if response_serializers is None else response_serializers, - self._relay) - - def accept_ticket(self, ticket): - self._kernel.add_ticket(ticket) - - def join_link(self, link): - self._relay.set_behavior(link.accept_ticket) - - def add_port(self, address, server_credentials): - return self._kernel.add_port(address, server_credentials) - - def start(self): - self._relay.start() - return self._kernel.start() - - def begin_stop(self): - self._kernel.begin_stop() - - def end_stop(self): - self._kernel.end_stop() - self._relay.stop() - - -def service_link(request_deserializers, response_serializers): - """Creates a ServiceLink. - - Args: - request_deserializers: A dict from group-method pair to request object - deserialization behavior. - response_serializers: A dict from group-method pair to response ojbect - serialization behavior. - - Returns: - A ServiceLink. 
- """ - return _ServiceLink(request_deserializers, response_serializers) diff --git a/src/python/grpcio/grpc/beta/_client_adaptations.py b/src/python/grpcio/grpc/beta/_client_adaptations.py index 73415e0be7f..e4ee44d7a3e 100644 --- a/src/python/grpcio/grpc/beta/_client_adaptations.py +++ b/src/python/grpcio/grpc/beta/_client_adaptations.py @@ -67,7 +67,7 @@ def _abortion(rpc_error_call): error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0] return face.Abortion( error_kind, rpc_error_call.initial_metadata(), - rpc_error_call.trailing_metadata(), code, rpc_error_code.details()) + rpc_error_call.trailing_metadata(), code, rpc_error_call.details()) def _abortion_error(rpc_error_call): @@ -159,9 +159,11 @@ class _Rendezvous(future.Future, face.Call): return self._call.time_remaining() def add_abortion_callback(self, abortion_callback): - registered = self._call.add_callback( - lambda: abortion_callback(_abortion(self._call))) - return None if registered else _abortion(self._call) + def done_callback(): + if self.code() is not grpc.StatusCode.OK: + abortion_callback(_abortion(self._call)) + registered = self._call.add_callback(done_callback) + return None if registered else done_callback() def protocol_context(self): return _InvocationProtocolContext() diff --git a/src/python/grpcio/grpc/beta/_server.py b/src/python/grpcio/grpc/beta/_server.py deleted file mode 100644 index eb0aadb42f9..00000000000 --- a/src/python/grpcio/grpc/beta/_server.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""Beta API server implementation.""" - -import threading - -from grpc._links import service -from grpc.beta import interfaces -from grpc.framework.core import implementations as _core_implementations -from grpc.framework.crust import implementations as _crust_implementations -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.links import utilities - -_DEFAULT_POOL_SIZE = 8 -_DEFAULT_TIMEOUT = 300 -_MAXIMUM_TIMEOUT = 24 * 60 * 60 - - -def _set_event(): - event = threading.Event() - event.set() - return event - - -class _GRPCServicer(base.Servicer): - - def __init__(self, delegate): - self._delegate = delegate - - def service(self, group, method, context, output_operator): - try: - return self._delegate.service(group, method, context, output_operator) - except base.NoSuchMethodError as e: - if e.code is None and e.details is None: - raise base.NoSuchMethodError( - interfaces.StatusCode.UNIMPLEMENTED, - 'Method "%s" of service "%s" not implemented!' % (method, group)) - else: - raise - - -class _Server(interfaces.Server): - - def __init__( - self, implementations, multi_implementation, pool, pool_size, - default_timeout, maximum_timeout, grpc_link): - self._lock = threading.Lock() - self._implementations = implementations - self._multi_implementation = multi_implementation - self._customer_pool = pool - self._pool_size = pool_size - self._default_timeout = default_timeout - self._maximum_timeout = maximum_timeout - self._grpc_link = grpc_link - - self._end_link = None - self._stop_events = None - self._pool = None - - def _start(self): - with self._lock: - if self._end_link is not None: - raise ValueError('Cannot start already-started server!') - - if self._customer_pool is None: - self._pool = logging_pool.pool(self._pool_size) - assembly_pool = self._pool - else: - assembly_pool = self._customer_pool - - servicer = _GRPCServicer( - _crust_implementations.servicer( - self._implementations, self._multi_implementation, assembly_pool)) - - self._end_link = _core_implementations.service_end_link( - servicer, self._default_timeout, self._maximum_timeout) - - self._grpc_link.join_link(self._end_link) - self._end_link.join_link(self._grpc_link) - self._grpc_link.start() - self._end_link.start() - - def _dissociate_links_and_shut_down_pool(self): - self._grpc_link.end_stop() - self._grpc_link.join_link(utilities.NULL_LINK) - self._end_link.join_link(utilities.NULL_LINK) - self._end_link = None - if self._pool is not None: - self._pool.shutdown(wait=True) - self._pool = None - - def _stop_stopping(self): - self._dissociate_links_and_shut_down_pool() - for stop_event in self._stop_events: - stop_event.set() - self._stop_events = None - - def _stop_started(self): - self._grpc_link.begin_stop() - self._end_link.stop(0).wait() - self._dissociate_links_and_shut_down_pool() - - def _foreign_thread_stop(self, end_stop_event, stop_events): - end_stop_event.wait() - with self._lock: - if self._stop_events is stop_events: - self._stop_stopping() - - def _schedule_stop(self, grace): - with self._lock: - if self._end_link is None: - return _set_event() - server_stop_event = threading.Event() - if self._stop_events is None: - self._stop_events = [server_stop_event] - self._grpc_link.begin_stop() - else: - self._stop_events.append(server_stop_event) - end_stop_event = self._end_link.stop(grace) - end_stop_thread = threading.Thread( - target=self._foreign_thread_stop, - args=(end_stop_event, self._stop_events)) - 
end_stop_thread.start() - return server_stop_event - - def _stop_now(self): - with self._lock: - if self._end_link is not None: - if self._stop_events is None: - self._stop_started() - else: - self._stop_stopping() - - def add_insecure_port(self, address): - with self._lock: - if self._end_link is None: - return self._grpc_link.add_port(address, None) - else: - raise ValueError('Can\'t add port to serving server!') - - def add_secure_port(self, address, server_credentials): - with self._lock: - if self._end_link is None: - return self._grpc_link.add_port( - address, server_credentials._low_credentials) # pylint: disable=protected-access - else: - raise ValueError('Can\'t add port to serving server!') - - def start(self): - self._start() - - def stop(self, grace): - if 0 < grace: - return self._schedule_stop(grace) - else: - self._stop_now() - return _set_event() - - def __enter__(self): - self._start() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self._stop_now() - return False - - def __del__(self): - self._stop_now() - - -def server( - implementations, multi_implementation, request_deserializers, - response_serializers, thread_pool, thread_pool_size, default_timeout, - maximum_timeout): - grpc_link = service.service_link(request_deserializers, response_serializers) - return _Server( - implementations, multi_implementation, thread_pool, - _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size, - _DEFAULT_TIMEOUT if default_timeout is None else default_timeout, - _MAXIMUM_TIMEOUT if maximum_timeout is None else maximum_timeout, - grpc_link) diff --git a/src/python/grpcio/grpc/beta/_stub.py b/src/python/grpcio/grpc/beta/_stub.py deleted file mode 100644 index 2af019309af..00000000000 --- a/src/python/grpcio/grpc/beta/_stub.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
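The beta _server.py deleted above wired a links-based service link into the framework-core end links. In the 1.x tree the beta server surface is instead an adaptation layer (grpc.beta._server_adaptations, still imported by implementations.py in the hunk further down) over the GA grpc.server API. For orientation, a minimal lifecycle sketch against that GA API; it registers no handlers and exists only to show the start/stop contract that the deleted _Server mirrored, with stop() returning a threading.Event:

    from concurrent import futures

    import grpc

    # Bare server with no service handlers, just to exercise the lifecycle.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=8))
    port = server.add_insecure_port('[::]:0')  # 0 lets the OS pick a port
    server.start()
    stop_event = server.stop(0)  # returns a threading.Event, like the
    stop_event.wait()            # deleted beta _Server.stop() did
    print('bound and released port %d' % port)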
- -"""Beta API stub implementation.""" - -import threading - -from grpc._links import invocation -from grpc.framework.core import implementations as _core_implementations -from grpc.framework.crust import implementations as _crust_implementations -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.links import utilities - -_DEFAULT_POOL_SIZE = 6 - - -class _AutoIntermediary(object): - - def __init__(self, up, down, delegate): - self._lock = threading.Lock() - self._up = up - self._down = down - self._in_context = False - self._delegate = delegate - - def __getattr__(self, attr): - with self._lock: - if self._delegate is None: - raise AttributeError('No useful attributes out of context!') - else: - return getattr(self._delegate, attr) - - def __enter__(self): - with self._lock: - if self._in_context: - raise ValueError('Already in context!') - elif self._delegate is None: - self._delegate = self._up() - self._in_context = True - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - with self._lock: - if not self._in_context: - raise ValueError('Not in context!') - self._down() - self._in_context = False - self._delegate = None - return False - - def __del__(self): - with self._lock: - if self._delegate is not None: - self._down() - self._delegate = None - - -class _StubAssemblyManager(object): - - def __init__( - self, thread_pool, thread_pool_size, end_link, grpc_link, stub_creator): - self._thread_pool = thread_pool - self._pool_size = thread_pool_size - self._end_link = end_link - self._grpc_link = grpc_link - self._stub_creator = stub_creator - self._own_pool = None - - def up(self): - if self._thread_pool is None: - self._own_pool = logging_pool.pool( - _DEFAULT_POOL_SIZE if self._pool_size is None else self._pool_size) - assembly_pool = self._own_pool - else: - assembly_pool = self._thread_pool - self._end_link.join_link(self._grpc_link) - self._grpc_link.join_link(self._end_link) - self._end_link.start() - self._grpc_link.start() - return self._stub_creator(self._end_link, assembly_pool) - - def down(self): - self._end_link.stop(0).wait() - self._grpc_link.stop() - self._end_link.join_link(utilities.NULL_LINK) - self._grpc_link.join_link(utilities.NULL_LINK) - if self._own_pool is not None: - self._own_pool.shutdown(wait=True) - self._own_pool = None - - -def _assemble( - channel, host, metadata_transformer, request_serializers, - response_deserializers, thread_pool, thread_pool_size, stub_creator): - end_link = _core_implementations.invocation_end_link() - grpc_link = invocation.invocation_link( - channel, host, metadata_transformer, request_serializers, - response_deserializers) - stub_assembly_manager = _StubAssemblyManager( - thread_pool, thread_pool_size, end_link, grpc_link, stub_creator) - stub = stub_assembly_manager.up() - return _AutoIntermediary( - stub_assembly_manager.up, stub_assembly_manager.down, stub) - - -def _dynamic_stub_creator(service, cardinalities): - def create_dynamic_stub(end_link, invocation_pool): - return _crust_implementations.dynamic_stub( - end_link, service, cardinalities, invocation_pool) - return create_dynamic_stub - - -def generic_stub( - channel, host, metadata_transformer, request_serializers, - response_deserializers, thread_pool, thread_pool_size): - return _assemble( - channel, host, metadata_transformer, request_serializers, - response_deserializers, thread_pool, thread_pool_size, - _crust_implementations.generic_stub) - - -def dynamic_stub( - channel, host, service, cardinalities, 
metadata_transformer, - request_serializers, response_deserializers, thread_pool, - thread_pool_size): - return _assemble( - channel, host, metadata_transformer, request_serializers, - response_deserializers, thread_pool, thread_pool_size, - _dynamic_stub_creator(service, cardinalities)) diff --git a/src/python/grpcio/grpc/beta/implementations.py b/src/python/grpcio/grpc/beta/implementations.py index 4ae6e7d6751..ab25fd5eec7 100644 --- a/src/python/grpcio/grpc/beta/implementations.py +++ b/src/python/grpcio/grpc/beta/implementations.py @@ -37,7 +37,6 @@ import threading # pylint: disable=unused-import # cardinality and face are referenced from specification in this module. import grpc from grpc import _auth -from grpc._adapter import _types from grpc.beta import _client_adaptations from grpc.beta import _server_adaptations from grpc.beta import interfaces diff --git a/src/python/grpcio/grpc/framework/core/_constants.py b/src/python/grpcio/grpc/framework/core/_constants.py deleted file mode 100644 index 0f47cb48e03..00000000000 --- a/src/python/grpcio/grpc/framework/core/_constants.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Private constants for the package.""" - -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.links import links - -TICKET_SUBSCRIPTION_FOR_BASE_SUBSCRIPTION_KIND = { - base.Subscription.Kind.NONE: links.Ticket.Subscription.NONE, - base.Subscription.Kind.TERMINATION_ONLY: - links.Ticket.Subscription.TERMINATION, - base.Subscription.Kind.FULL: links.Ticket.Subscription.FULL, - } - -# Mapping from abortive operation outcome to ticket termination to be -# sent to the other side of the operation, or None to indicate that no -# ticket should be sent to the other side in the event of such an -# outcome. 
-ABORTION_OUTCOME_TO_TICKET_TERMINATION = { - base.Outcome.Kind.CANCELLED: links.Ticket.Termination.CANCELLATION, - base.Outcome.Kind.EXPIRED: links.Ticket.Termination.EXPIRATION, - base.Outcome.Kind.LOCAL_SHUTDOWN: links.Ticket.Termination.SHUTDOWN, - base.Outcome.Kind.REMOTE_SHUTDOWN: None, - base.Outcome.Kind.RECEPTION_FAILURE: - links.Ticket.Termination.RECEPTION_FAILURE, - base.Outcome.Kind.TRANSMISSION_FAILURE: None, - base.Outcome.Kind.LOCAL_FAILURE: links.Ticket.Termination.LOCAL_FAILURE, - base.Outcome.Kind.REMOTE_FAILURE: links.Ticket.Termination.REMOTE_FAILURE, -} - -INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Core) internal error! )-:' -TERMINATION_CALLBACK_EXCEPTION_LOG_MESSAGE = ( - 'Exception calling termination callback!') diff --git a/src/python/grpcio/grpc/framework/core/_context.py b/src/python/grpcio/grpc/framework/core/_context.py deleted file mode 100644 index a346e9d478b..00000000000 --- a/src/python/grpcio/grpc/framework/core/_context.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for operation context.""" - -import time - -# _interfaces is referenced from specification in this module. -from grpc.framework.core import _interfaces # pylint: disable=unused-import -from grpc.framework.core import _utilities -from grpc.framework.interfaces.base import base - - -class OperationContext(base.OperationContext): - """An implementation of interfaces.OperationContext.""" - - def __init__( - self, lock, termination_manager, transmission_manager, - expiration_manager): - """Constructor. - - Args: - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. 
- """ - self._lock = lock - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - - def _abort(self, outcome_kind): - with self._lock: - if self._termination_manager.outcome is None: - outcome = _utilities.Outcome(outcome_kind, None, None) - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - - def outcome(self): - """See base.OperationContext.outcome for specification.""" - with self._lock: - return self._termination_manager.outcome - - def add_termination_callback(self, callback): - """See base.OperationContext.add_termination_callback.""" - with self._lock: - if self._termination_manager.outcome is None: - self._termination_manager.add_callback(callback) - return None - else: - return self._termination_manager.outcome - - def time_remaining(self): - """See base.OperationContext.time_remaining for specification.""" - with self._lock: - deadline = self._expiration_manager.deadline() - return max(0.0, deadline - time.time()) - - def cancel(self): - """See base.OperationContext.cancel for specification.""" - self._abort(base.Outcome.Kind.CANCELLED) - - def fail(self, exception): - """See base.OperationContext.fail for specification.""" - self._abort(base.Outcome.Kind.LOCAL_FAILURE) diff --git a/src/python/grpcio/grpc/framework/core/_emission.py b/src/python/grpcio/grpc/framework/core/_emission.py deleted file mode 100644 index 8ab59dc3e58..00000000000 --- a/src/python/grpcio/grpc/framework/core/_emission.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
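The OperationContext removed above bundles deadline bookkeeping, abort/cancel handling, and termination callbacks behind an operation-wide lock. A condensed, lock-free re-statement of that contract (MiniOperationContext is illustrative only, not part of the package): time_remaining() is clamped at zero, and add_termination_callback() either registers the callback or returns the already-known outcome so callers can react synchronously.

    import time

    class MiniOperationContext(object):
        """Illustrative condensation of the deleted OperationContext."""

        def __init__(self, deadline):
            self._deadline = deadline
            self._outcome = None
            self._callbacks = []

        def time_remaining(self):
            # Same computation as the deleted implementation: never negative.
            return max(0.0, self._deadline - time.time())

        def add_termination_callback(self, callback):
            # Register while live; otherwise hand back the known outcome.
            if self._outcome is None:
                self._callbacks.append(callback)
                return None
            return self._outcome

        def terminate(self, outcome):
            self._outcome = outcome
            for callback in self._callbacks:
                callback(outcome)

    def report(outcome):
        print('terminated: %s' % outcome)

    context = MiniOperationContext(time.time() + 30.0)
    context.add_termination_callback(report)
    print('%.1f seconds left' % context.time_remaining())
    context.terminate('CANCELLED')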
- -"""State and behavior for handling emitted values.""" - -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.interfaces.base import base - - -class EmissionManager(_interfaces.EmissionManager): - """An EmissionManager implementation.""" - - def __init__( - self, lock, termination_manager, transmission_manager, - expiration_manager): - """Constructor. - - Args: - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - """ - self._lock = lock - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - self._ingestion_manager = None - - self._initial_metadata_seen = False - self._payload_seen = False - self._completion_seen = False - - def set_ingestion_manager(self, ingestion_manager): - """Sets the ingestion manager with which this manager will cooperate. - - Args: - ingestion_manager: The _interfaces.IngestionManager for the operation. - """ - self._ingestion_manager = ingestion_manager - - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - initial_metadata_present = initial_metadata is not None - payload_present = payload is not None - completion_present = completion is not None - allowance_present = allowance is not None - with self._lock: - if self._termination_manager.outcome is None: - if (initial_metadata_present and ( - self._initial_metadata_seen or self._payload_seen or - self._completion_seen) or - payload_present and self._completion_seen or - completion_present and self._completion_seen or - allowance_present and allowance <= 0): - outcome = _utilities.Outcome( - base.Outcome.Kind.LOCAL_FAILURE, None, None) - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - else: - self._initial_metadata_seen |= initial_metadata_present - self._payload_seen |= payload_present - self._completion_seen |= completion_present - if completion_present: - self._termination_manager.emission_complete() - self._ingestion_manager.local_emissions_done() - self._transmission_manager.advance( - initial_metadata, payload, completion, allowance) - if allowance_present: - self._ingestion_manager.add_local_allowance(allowance) diff --git a/src/python/grpcio/grpc/framework/core/_end.py b/src/python/grpcio/grpc/framework/core/_end.py deleted file mode 100644 index 009d27c915c..00000000000 --- a/src/python/grpcio/grpc/framework/core/_end.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Implementation of base.End.""" - -import abc -import threading -import uuid - -import six - -from grpc.framework.core import _operation -from grpc.framework.core import _utilities -from grpc.framework.foundation import callable_util -from grpc.framework.foundation import later -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.links import links -from grpc.framework.interfaces.links import utilities - -_IDLE_ACTION_EXCEPTION_LOG_MESSAGE = 'Exception calling idle action!' - - -class End(six.with_metaclass(abc.ABCMeta, base.End, links.Link)): - """A bridge between base.End and links.Link. - - Implementations of this interface translate arriving tickets into - calls on application objects implementing base interfaces and - translate calls from application objects implementing base interfaces - into tickets sent to a joined link. - """ - - -class _Cycle(object): - """State for a single start-stop End lifecycle.""" - - def __init__(self, pool): - self.pool = pool - self.grace = False - self.futures = [] - self.operations = {} - self.idle_actions = [] - - -def _abort(operations): - for operation in operations: - operation.abort(base.Outcome.Kind.LOCAL_SHUTDOWN) - - -def _cancel_futures(futures): - for future in futures: - future.cancel() - - -def _future_shutdown(lock, cycle, event): - def in_future(): - with lock: - _abort(cycle.operations.values()) - _cancel_futures(cycle.futures) - return in_future - - -class _End(End): - """An End implementation.""" - - def __init__(self, servicer_package): - """Constructor. - - Args: - servicer_package: A _ServicerPackage for servicing operations or None if - this end will not be used to service operations. - """ - self._lock = threading.Condition() - self._servicer_package = servicer_package - - self._stats = {outcome_kind: 0 for outcome_kind in base.Outcome.Kind} - - self._mate = None - - self._cycle = None - - def _termination_action(self, operation_id): - """Constructs the termination action for a single operation. - - Args: - operation_id: The operation ID for the termination action. - - Returns: - A callable that takes an operation outcome kind as its sole parameter and - that should be used as the termination action for the operation - associated with the given operation ID. 
- """ - def termination_action(outcome_kind): - with self._lock: - self._stats[outcome_kind] += 1 - self._cycle.operations.pop(operation_id, None) - if not self._cycle.operations: - for action in self._cycle.idle_actions: - self._cycle.pool.submit(action) - self._cycle.idle_actions = [] - if self._cycle.grace: - _cancel_futures(self._cycle.futures) - self._cycle.pool.shutdown(wait=False) - self._cycle = None - return termination_action - - def start(self): - """See base.End.start for specification.""" - with self._lock: - if self._cycle is not None: - raise ValueError('Tried to start a not-stopped End!') - else: - self._cycle = _Cycle(logging_pool.pool(1)) - - def stop(self, grace): - """See base.End.stop for specification.""" - with self._lock: - if self._cycle is None: - event = threading.Event() - event.set() - return event - elif not self._cycle.operations: - event = threading.Event() - self._cycle.pool.submit(event.set) - self._cycle.pool.shutdown(wait=False) - self._cycle = None - return event - else: - self._cycle.grace = True - event = threading.Event() - self._cycle.idle_actions.append(event.set) - if 0 < grace: - future = later.later( - grace, _future_shutdown(self._lock, self._cycle, event)) - self._cycle.futures.append(future) - else: - _abort(self._cycle.operations.values()) - return event - - def operate( - self, group, method, subscription, timeout, initial_metadata=None, - payload=None, completion=None, protocol_options=None): - """See base.End.operate for specification.""" - operation_id = uuid.uuid4() - with self._lock: - if self._cycle is None or self._cycle.grace: - raise ValueError('Can\'t operate on stopped or stopping End!') - termination_action = self._termination_action(operation_id) - operation = _operation.invocation_operate( - operation_id, group, method, subscription, timeout, protocol_options, - initial_metadata, payload, completion, self._mate.accept_ticket, - termination_action, self._cycle.pool) - self._cycle.operations[operation_id] = operation - return operation.context, operation.operator - - def operation_stats(self): - """See base.End.operation_stats for specification.""" - with self._lock: - return dict(self._stats) - - def add_idle_action(self, action): - """See base.End.add_idle_action for specification.""" - with self._lock: - if self._cycle is None: - raise ValueError('Can\'t add idle action to stopped End!') - action_with_exceptions_logged = callable_util.with_exceptions_logged( - action, _IDLE_ACTION_EXCEPTION_LOG_MESSAGE) - if self._cycle.operations: - self._cycle.idle_actions.append(action_with_exceptions_logged) - else: - self._cycle.pool.submit(action_with_exceptions_logged) - - def accept_ticket(self, ticket): - """See links.Link.accept_ticket for specification.""" - with self._lock: - if self._cycle is not None: - operation = self._cycle.operations.get(ticket.operation_id) - if operation is not None: - operation.handle_ticket(ticket) - elif self._servicer_package is not None and not self._cycle.grace: - termination_action = self._termination_action(ticket.operation_id) - operation = _operation.service_operate( - self._servicer_package, ticket, self._mate.accept_ticket, - termination_action, self._cycle.pool) - if operation is not None: - self._cycle.operations[ticket.operation_id] = operation - - def join_link(self, link): - """See links.Link.join_link for specification.""" - with self._lock: - self._mate = utilities.NULL_LINK if link is None else link - - -def serviceless_end_link(): - """Constructs an End usable only for invoking 
operations. - - Returns: - An End usable for translating operations into ticket exchange. - """ - return _End(None) - - -def serviceful_end_link(servicer, default_timeout, maximum_timeout): - """Constructs an End capable of servicing operations. - - Args: - servicer: An interfaces.Servicer for servicing operations. - default_timeout: A length of time in seconds to be used as the default - time alloted for a single operation. - maximum_timeout: A length of time in seconds to be used as the maximum - time alloted for a single operation. - - Returns: - An End capable of servicing the operations requested of it through ticket - exchange. - """ - return _End( - _utilities.ServicerPackage(servicer, default_timeout, maximum_timeout)) diff --git a/src/python/grpcio/grpc/framework/core/_expiration.py b/src/python/grpcio/grpc/framework/core/_expiration.py deleted file mode 100644 index ded0ab6bcef..00000000000 --- a/src/python/grpcio/grpc/framework/core/_expiration.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for operation expiration.""" - -import time - -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import later -from grpc.framework.interfaces.base import base - - -class _ExpirationManager(_interfaces.ExpirationManager): - """An implementation of _interfaces.ExpirationManager.""" - - def __init__( - self, commencement, timeout, maximum_timeout, lock, termination_manager, - transmission_manager): - """Constructor. - - Args: - commencement: The time in seconds since the epoch at which the operation - began. - timeout: A length of time in seconds to allow for the operation to run. - maximum_timeout: The maximum length of time in seconds to allow for the - operation to run despite what is requested via this object's - change_timout method. - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. 
- transmission_manager: The _interfaces.TransmissionManager for the - operation. - """ - self._lock = lock - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._commencement = commencement - self._maximum_timeout = maximum_timeout - - self._timeout = timeout - self._deadline = commencement + timeout - self._index = None - self._future = None - - def _expire(self, index): - def expire(): - with self._lock: - if self._future is not None and index == self._index: - self._future = None - self._termination_manager.expire() - self._transmission_manager.abort( - _utilities.Outcome(base.Outcome.Kind.EXPIRED, None, None)) - return expire - - def start(self): - self._index = 0 - self._future = later.later(self._timeout, self._expire(0)) - - def change_timeout(self, timeout): - if self._future is not None and timeout != self._timeout: - self._future.cancel() - new_timeout = min(timeout, self._maximum_timeout) - new_index = self._index + 1 - self._timeout = new_timeout - self._deadline = self._commencement + new_timeout - self._index = new_index - delay = self._deadline - time.time() - self._future = later.later(delay, self._expire(new_index)) - if new_timeout != timeout: - self._transmission_manager.timeout(new_timeout) - - def deadline(self): - return self._deadline - - def terminate(self): - if self._future: - self._future.cancel() - self._future = None - self._deadline_index = None - - -def invocation_expiration_manager( - timeout, lock, termination_manager, transmission_manager): - """Creates an _interfaces.ExpirationManager appropriate for front-side use. - - Args: - timeout: A length of time in seconds to allow for the operation to run. - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - - Returns: - An _interfaces.ExpirationManager appropriate for invocation-side use. - """ - expiration_manager = _ExpirationManager( - time.time(), timeout, timeout, lock, termination_manager, - transmission_manager) - expiration_manager.start() - return expiration_manager - - -def service_expiration_manager( - timeout, default_timeout, maximum_timeout, lock, termination_manager, - transmission_manager): - """Creates an _interfaces.ExpirationManager appropriate for back-side use. - - Args: - timeout: A length of time in seconds to allow for the operation to run. May - be None in which case default_timeout will be used. - default_timeout: The default length of time in seconds to allow for the - operation to run if the front-side customer has not specified such a value - (or if the value they specified is not yet known). - maximum_timeout: The maximum length of time in seconds to allow for the - operation to run. - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - - Returns: - An _interfaces.ExpirationManager appropriate for service-side use. 
- """ - expiration_manager = _ExpirationManager( - time.time(), default_timeout if timeout is None else timeout, - maximum_timeout, lock, termination_manager, transmission_manager) - expiration_manager.start() - return expiration_manager diff --git a/src/python/grpcio/grpc/framework/core/_ingestion.py b/src/python/grpcio/grpc/framework/core/_ingestion.py deleted file mode 100644 index f2767c981b2..00000000000 --- a/src/python/grpcio/grpc/framework/core/_ingestion.py +++ /dev/null @@ -1,439 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for ingestion during an operation.""" - -import abc -import collections -import enum - -import six - -from grpc.framework.core import _constants -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import abandonment -from grpc.framework.foundation import callable_util -from grpc.framework.interfaces.base import base - -_CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE = 'Exception initializing ingestion!' -_INGESTION_EXCEPTION_LOG_MESSAGE = 'Exception during ingestion!' - - -class _SubscriptionCreation( - collections.namedtuple( - '_SubscriptionCreation', - ('kind', 'subscription', 'code', 'details',))): - """A sum type for the outcome of ingestion initialization. - - Attributes: - kind: A Kind value coarsely indicating how subscription creation completed. - subscription: The created subscription. Only present if kind is - Kind.SUBSCRIPTION. - code: A code value to be sent to the other side of the operation along with - an indication that the operation is being aborted due to an error on the - remote side of the operation. Only present if kind is Kind.REMOTE_ERROR. - details: A details value to be sent to the other side of the operation - along with an indication that the operation is being aborted due to an - error on the remote side of the operation. Only present if kind is - Kind.REMOTE_ERROR. 
- """ - - @enum.unique - class Kind(enum.Enum): - SUBSCRIPTION = 'subscription' - REMOTE_ERROR = 'remote error' - ABANDONED = 'abandoned' - - -class _SubscriptionCreator(six.with_metaclass(abc.ABCMeta)): - """Common specification of subscription-creating behavior.""" - - @abc.abstractmethod - def create(self, group, method): - """Creates the base.Subscription of the local customer. - - Any exceptions raised by this method should be attributed to and treated as - defects in the customer code called by this method. - - Args: - group: The group identifier of the operation. - method: The method identifier of the operation. - - Returns: - A _SubscriptionCreation describing the result of subscription creation. - """ - raise NotImplementedError() - - -class _ServiceSubscriptionCreator(_SubscriptionCreator): - """A _SubscriptionCreator appropriate for service-side use.""" - - def __init__(self, servicer, operation_context, output_operator): - """Constructor. - - Args: - servicer: The base.Servicer that will service the operation. - operation_context: A base.OperationContext for the operation to be passed - to the customer. - output_operator: A base.Operator for the operation to be passed to the - customer and to be called by the customer to accept operation data - emitted by the customer. - """ - self._servicer = servicer - self._operation_context = operation_context - self._output_operator = output_operator - - def create(self, group, method): - try: - subscription = self._servicer.service( - group, method, self._operation_context, self._output_operator) - except base.NoSuchMethodError as e: - return _SubscriptionCreation( - _SubscriptionCreation.Kind.REMOTE_ERROR, None, e.code, e.details) - except abandonment.Abandoned: - return _SubscriptionCreation( - _SubscriptionCreation.Kind.ABANDONED, None, None, None) - else: - return _SubscriptionCreation( - _SubscriptionCreation.Kind.SUBSCRIPTION, subscription, None, None) - - -def _wrap(behavior): - def wrapped(*args, **kwargs): - try: - behavior(*args, **kwargs) - except abandonment.Abandoned: - return False - else: - return True - return wrapped - - -class _IngestionManager(_interfaces.IngestionManager): - """An implementation of _interfaces.IngestionManager.""" - - def __init__( - self, lock, pool, subscription, subscription_creator, termination_manager, - transmission_manager, expiration_manager, protocol_manager): - """Constructor. - - Args: - lock: The operation-wide lock. - pool: A thread pool in which to execute customer code. - subscription: A base.Subscription describing the customer's interest in - operation values from the other side. May be None if - subscription_creator is not None. - subscription_creator: A _SubscriptionCreator wrapping the portion of - customer code that when called returns the base.Subscription describing - the customer's interest in operation values from the other side. May be - None if subscription is not None. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - protocol_manager: The _interfaces.ProtocolManager for the operation. 
- """ - self._lock = lock - self._pool = pool - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - self._protocol_manager = protocol_manager - - if subscription is None: - self._subscription_creator = subscription_creator - self._wrapped_operator = None - elif subscription.kind is base.Subscription.Kind.FULL: - self._subscription_creator = None - self._wrapped_operator = _wrap(subscription.operator.advance) - else: - # TODO(nathaniel): Support other subscriptions. - raise ValueError('Unsupported subscription "%s"!' % subscription.kind) - self._pending_initial_metadata = None - self._pending_payloads = [] - self._pending_completion = None - self._local_allowance = 1 - # A nonnegative integer or None, with None indicating that the local - # customer is done emitting anyway so there's no need to bother it by - # informing it that the remote customer has granted it further permission to - # emit. - self._remote_allowance = 0 - self._processing = False - - def _abort_internal_only(self): - self._subscription_creator = None - self._wrapped_operator = None - self._pending_initial_metadata = None - self._pending_payloads = None - self._pending_completion = None - - def _abort_and_notify(self, outcome_kind, code, details): - self._abort_internal_only() - if self._termination_manager.outcome is None: - outcome = _utilities.Outcome(outcome_kind, code, details) - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - - def _operator_next(self): - """Computes the next step for full-subscription ingestion. - - Returns: - An initial_metadata, payload, completion, allowance, continue quintet - indicating what operation values (if any) are available to pass into - customer code and whether or not there is anything immediately - actionable to call customer code to do. 
- """ - if self._wrapped_operator is None: - return None, None, None, None, False - else: - initial_metadata, payload, completion, allowance, action = [None] * 5 - if self._pending_initial_metadata is not None: - initial_metadata = self._pending_initial_metadata - self._pending_initial_metadata = None - action = True - if self._pending_payloads and 0 < self._local_allowance: - payload = self._pending_payloads.pop(0) - self._local_allowance -= 1 - action = True - if not self._pending_payloads and self._pending_completion is not None: - completion = self._pending_completion - self._pending_completion = None - action = True - if self._remote_allowance is not None and 0 < self._remote_allowance: - allowance = self._remote_allowance - self._remote_allowance = 0 - action = True - return initial_metadata, payload, completion, allowance, bool(action) - - def _operator_process( - self, wrapped_operator, initial_metadata, payload, - completion, allowance): - while True: - advance_outcome = callable_util.call_logging_exceptions( - wrapped_operator, _INGESTION_EXCEPTION_LOG_MESSAGE, - initial_metadata=initial_metadata, payload=payload, - completion=completion, allowance=allowance) - if advance_outcome.exception is None: - if advance_outcome.return_value: - with self._lock: - if self._termination_manager.outcome is not None: - return - if completion is not None: - self._termination_manager.ingestion_complete() - initial_metadata, payload, completion, allowance, moar = ( - self._operator_next()) - if not moar: - self._processing = False - return - else: - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify( - base.Outcome.Kind.LOCAL_FAILURE, None, None) - return - else: - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None) - return - - def _operator_post_create(self, subscription): - wrapped_operator = _wrap(subscription.operator.advance) - with self._lock: - if self._termination_manager.outcome is not None: - return - self._wrapped_operator = wrapped_operator - self._subscription_creator = None - metadata, payload, completion, allowance, moar = self._operator_next() - if not moar: - self._processing = False - return - self._operator_process( - wrapped_operator, metadata, payload, completion, allowance) - - def _create(self, subscription_creator, group, name): - outcome = callable_util.call_logging_exceptions( - subscription_creator.create, - _CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE, group, name) - if outcome.return_value is None: - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None) - elif outcome.return_value.kind is _SubscriptionCreation.Kind.ABANDONED: - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None) - elif outcome.return_value.kind is _SubscriptionCreation.Kind.REMOTE_ERROR: - code = outcome.return_value.code - details = outcome.return_value.details - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify( - base.Outcome.Kind.REMOTE_FAILURE, code, details) - elif outcome.return_value.subscription.kind is base.Subscription.Kind.FULL: - self._protocol_manager.set_protocol_receiver( - outcome.return_value.subscription.protocol_receiver) - self._operator_post_create(outcome.return_value.subscription) - else: - # TODO(nathaniel): Support other subscriptions. 
- raise ValueError( - 'Unsupported "%s"!' % outcome.return_value.subscription.kind) - - def _store_advance(self, initial_metadata, payload, completion, allowance): - if initial_metadata is not None: - self._pending_initial_metadata = initial_metadata - if payload is not None: - self._pending_payloads.append(payload) - if completion is not None: - self._pending_completion = completion - if allowance is not None and self._remote_allowance is not None: - self._remote_allowance += allowance - - def _operator_advance(self, initial_metadata, payload, completion, allowance): - if self._processing: - self._store_advance(initial_metadata, payload, completion, allowance) - else: - action = False - if initial_metadata is not None: - action = True - if payload is not None: - if 0 < self._local_allowance: - self._local_allowance -= 1 - action = True - else: - self._pending_payloads.append(payload) - payload = False - if completion is not None: - if self._pending_payloads: - self._pending_completion = completion - else: - action = True - if allowance is not None and self._remote_allowance is not None: - allowance += self._remote_allowance - self._remote_allowance = 0 - action = True - if action: - self._pool.submit( - callable_util.with_exceptions_logged( - self._operator_process, _constants.INTERNAL_ERROR_LOG_MESSAGE), - self._wrapped_operator, initial_metadata, payload, completion, - allowance) - - def set_group_and_method(self, group, method): - """See _interfaces.IngestionManager.set_group_and_method for spec.""" - if self._subscription_creator is not None and not self._processing: - self._pool.submit( - callable_util.with_exceptions_logged( - self._create, _constants.INTERNAL_ERROR_LOG_MESSAGE), - self._subscription_creator, group, method) - self._processing = True - - def add_local_allowance(self, allowance): - """See _interfaces.IngestionManager.add_local_allowance for spec.""" - if any((self._subscription_creator, self._wrapped_operator,)): - self._local_allowance += allowance - if not self._processing: - initial_metadata, payload, completion, allowance, moar = ( - self._operator_next()) - if moar: - self._pool.submit( - callable_util.with_exceptions_logged( - self._operator_process, - _constants.INTERNAL_ERROR_LOG_MESSAGE), - initial_metadata, payload, completion, allowance) - - def local_emissions_done(self): - self._remote_allowance = None - - def advance(self, initial_metadata, payload, completion, allowance): - """See _interfaces.IngestionManager.advance for specification.""" - if self._subscription_creator is not None: - self._store_advance(initial_metadata, payload, completion, allowance) - elif self._wrapped_operator is not None: - self._operator_advance(initial_metadata, payload, completion, allowance) - - -def invocation_ingestion_manager( - subscription, lock, pool, termination_manager, transmission_manager, - expiration_manager, protocol_manager): - """Creates an IngestionManager appropriate for invocation-side use. - - Args: - subscription: A base.Subscription indicating the customer's interest in the - data and results from the service-side of the operation. - lock: The operation-wide lock. - pool: A thread pool in which to execute customer code. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - protocol_manager: The _interfaces.ProtocolManager for the operation. 
- - Returns: - An IngestionManager appropriate for invocation-side use. - """ - return _IngestionManager( - lock, pool, subscription, None, termination_manager, transmission_manager, - expiration_manager, protocol_manager) - - -def service_ingestion_manager( - servicer, operation_context, output_operator, lock, pool, - termination_manager, transmission_manager, expiration_manager, - protocol_manager): - """Creates an IngestionManager appropriate for service-side use. - - The returned IngestionManager will require its set_group_and_name method to be - called before its advance method may be called. - - Args: - servicer: A base.Servicer for servicing the operation. - operation_context: A base.OperationContext for the operation to be passed to - the customer. - output_operator: A base.Operator for the operation to be passed to the - customer and to be called by the customer to accept operation data output - by the customer. - lock: The operation-wide lock. - pool: A thread pool in which to execute customer code. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - protocol_manager: The _interfaces.ProtocolManager for the operation. - - Returns: - An IngestionManager appropriate for service-side use. - """ - subscription_creator = _ServiceSubscriptionCreator( - servicer, operation_context, output_operator) - return _IngestionManager( - lock, pool, None, subscription_creator, termination_manager, - transmission_manager, expiration_manager, protocol_manager) diff --git a/src/python/grpcio/grpc/framework/core/_interfaces.py b/src/python/grpcio/grpc/framework/core/_interfaces.py deleted file mode 100644 index 63ac82f80e7..00000000000 --- a/src/python/grpcio/grpc/framework/core/_interfaces.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
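The _IngestionManager removed above implements payload flow control with allowances: the local side starts with an allowance of one, queues payloads in _pending_payloads while the allowance is exhausted, and drains the queue as further allowance arrives. A toy model of just that bookkeeping (AllowanceBuffer is made up for illustration; the real manager also threads in the termination, transmission and expiration managers):

    class AllowanceBuffer(object):
        """Toy model of the deleted manager's allowance-based flow control."""

        def __init__(self, initial_allowance=1):
            self._allowance = initial_allowance
            self._pending = []

        def receive(self, payload):
            """Accept a payload from the remote side; release it if allowed."""
            self._pending.append(payload)
            return self._drain()

        def add_allowance(self, count):
            """The local customer permits `count` more payloads."""
            self._allowance += count
            return self._drain()

        def _drain(self):
            released = []
            while self._pending and self._allowance > 0:
                released.append(self._pending.pop(0))
                self._allowance -= 1
            return released

    buffer = AllowanceBuffer()
    print(buffer.receive('a'))      # ['a']  consumed the initial allowance
    print(buffer.receive('b'))      # []     held until more allowance arrives
    print(buffer.add_allowance(2))  # ['b']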
- -"""Package-internal interfaces.""" - -import abc - -import six - -from grpc.framework.interfaces.base import base - - -class TerminationManager(six.with_metaclass(abc.ABCMeta)): - """An object responsible for handling the termination of an operation. - - Attributes: - outcome: None if the operation is active or a base.Outcome value if it has - terminated. - """ - - @abc.abstractmethod - def add_callback(self, callback): - """Registers a callback to be called on operation termination. - - If the operation has already terminated the callback will not be called. - - Args: - callback: A callable that will be passed a base.Outcome value. - - Returns: - None if the operation has not yet terminated and the passed callback will - be called when it does, or a base.Outcome value describing the - operation termination if the operation has terminated and the callback - will not be called as a result of this method call. - """ - raise NotImplementedError() - - @abc.abstractmethod - def emission_complete(self): - """Indicates that emissions from customer code have completed.""" - raise NotImplementedError() - - @abc.abstractmethod - def transmission_complete(self): - """Indicates that transmissions to the remote end are complete. - - Returns: - True if the operation has terminated or False if the operation remains - ongoing. - """ - raise NotImplementedError() - - @abc.abstractmethod - def reception_complete(self, code, details): - """Indicates that reception from the other side is complete. - - Args: - code: An application-specific code value. - details: An application-specific details value. - """ - raise NotImplementedError() - - @abc.abstractmethod - def ingestion_complete(self): - """Indicates that customer code ingestion of received values is complete.""" - raise NotImplementedError() - - @abc.abstractmethod - def expire(self): - """Indicates that the operation must abort because it has taken too long.""" - raise NotImplementedError() - - @abc.abstractmethod - def abort(self, outcome): - """Indicates that the operation must abort for the indicated reason. - - Args: - outcome: A base.Outcome indicating operation abortion. - """ - raise NotImplementedError() - - -class TransmissionManager(six.with_metaclass(abc.ABCMeta)): - """A manager responsible for transmitting to the other end of an operation.""" - - @abc.abstractmethod - def kick_off( - self, group, method, timeout, protocol_options, initial_metadata, - payload, completion, allowance): - """Transmits the values associated with operation invocation.""" - raise NotImplementedError() - - @abc.abstractmethod - def advance(self, initial_metadata, payload, completion, allowance): - """Accepts values for transmission to the other end of the operation. - - Args: - initial_metadata: An initial metadata value to be transmitted to the other - side of the operation. May only ever be non-None once. - payload: A payload value. - completion: A base.Completion value. May only ever be non-None in the last - transmission to be made to the other side. - allowance: A positive integer communicating the number of additional - payloads allowed to be transmitted from the other side to this side of - the operation, or None if no additional allowance is being granted in - this call. - """ - raise NotImplementedError() - - @abc.abstractmethod - def timeout(self, timeout): - """Accepts for transmission to the other side a new timeout value. - - Args: - timeout: A positive float used as the new timeout value for the operation - to be transmitted to the other side. 
- """ - raise NotImplementedError() - - @abc.abstractmethod - def allowance(self, allowance): - """Indicates to this manager that the remote customer is allowing payloads. - - Args: - allowance: A positive integer indicating the number of additional payloads - the remote customer is allowing to be transmitted from this side of the - operation. - """ - raise NotImplementedError() - - @abc.abstractmethod - def remote_complete(self): - """Indicates to this manager that data from the remote side is complete.""" - raise NotImplementedError() - - @abc.abstractmethod - def abort(self, outcome): - """Indicates that the operation has aborted. - - Args: - outcome: A base.Outcome for the operation. If None, indicates that the - operation abortion should not be communicated to the other side of the - operation. - """ - raise NotImplementedError() - - -class ExpirationManager(six.with_metaclass(abc.ABCMeta)): - """A manager responsible for aborting the operation if it runs out of time.""" - - @abc.abstractmethod - def change_timeout(self, timeout): - """Changes the timeout allotted for the operation. - - Operation duration is always measure from the beginning of the operation; - calling this method changes the operation's allotted time to timeout total - seconds, not timeout seconds from the time of this method call. - - Args: - timeout: A length of time in seconds to allow for the operation. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deadline(self): - """Returns the time until which the operation is allowed to run. - - Returns: - The time (seconds since the epoch) at which the operation will expire. - """ - raise NotImplementedError() - - @abc.abstractmethod - def terminate(self): - """Indicates to this manager that the operation has terminated.""" - raise NotImplementedError() - - -class ProtocolManager(six.with_metaclass(abc.ABCMeta)): - """A manager of protocol-specific values passing through an operation.""" - - @abc.abstractmethod - def set_protocol_receiver(self, protocol_receiver): - """Registers the customer object that will receive protocol objects. - - Args: - protocol_receiver: A base.ProtocolReceiver to which protocol objects for - the operation should be passed. - """ - raise NotImplementedError() - - @abc.abstractmethod - def accept_protocol_context(self, protocol_context): - """Accepts the protocol context object for the operation. - - Args: - protocol_context: An object designated for use as the protocol context - of the operation, with further semantics implementation-determined. - """ - raise NotImplementedError() - - -class EmissionManager(six.with_metaclass(abc.ABCMeta, base.Operator)): - """A manager of values emitted by customer code.""" - - @abc.abstractmethod - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - """Accepts a value emitted by customer code. - - This method should only be called by customer code. - - Args: - initial_metadata: An initial metadata value emitted by the local customer - to be sent to the other side of the operation. - payload: A payload value emitted by the local customer to be sent to the - other side of the operation. - completion: A Completion value emitted by the local customer to be sent to - the other side of the operation. - allowance: A positive integer indicating an additional number of payloads - that the local customer is willing to accept from the other side of the - operation. 
- """ - raise NotImplementedError() - - -class IngestionManager(six.with_metaclass(abc.ABCMeta)): - """A manager responsible for executing customer code. - - This name of this manager comes from its responsibility to pass successive - values from the other side of the operation into the code of the local - customer. - """ - - @abc.abstractmethod - def set_group_and_method(self, group, method): - """Communicates to this IngestionManager the operation group and method. - - Args: - group: The group identifier of the operation. - method: The method identifier of the operation. - """ - raise NotImplementedError() - - @abc.abstractmethod - def add_local_allowance(self, allowance): - """Communicates to this IngestionManager that more payloads may be ingested. - - Args: - allowance: A positive integer indicating an additional number of payloads - that the local customer is willing to ingest. - """ - raise NotImplementedError() - - @abc.abstractmethod - def local_emissions_done(self): - """Indicates to this manager that local emissions are done.""" - raise NotImplementedError() - - @abc.abstractmethod - def advance(self, initial_metadata, payload, completion, allowance): - """Advances the operation by passing values to the local customer.""" - raise NotImplementedError() - - -class ReceptionManager(six.with_metaclass(abc.ABCMeta)): - """A manager responsible for receiving tickets from the other end.""" - - @abc.abstractmethod - def receive_ticket(self, ticket): - """Handle a ticket from the other side of the operation. - - Args: - ticket: A links.Ticket for the operation. - """ - raise NotImplementedError() - - -class Operation(six.with_metaclass(abc.ABCMeta)): - """An ongoing operation. - - Attributes: - context: A base.OperationContext object for the operation. - operator: A base.Operator object for the operation for use by the customer - of the operation. - """ - - @abc.abstractmethod - def handle_ticket(self, ticket): - """Handle a ticket from the other side of the operation. - - Args: - ticket: A links.Ticket from the other side of the operation. - """ - raise NotImplementedError() - - @abc.abstractmethod - def abort(self, outcome_kind): - """Aborts the operation. - - Args: - outcome_kind: A base.Outcome.Kind value indicating operation abortion. - """ - raise NotImplementedError() diff --git a/src/python/grpcio/grpc/framework/core/_operation.py b/src/python/grpcio/grpc/framework/core/_operation.py deleted file mode 100644 index 020c0c9ed9a..00000000000 --- a/src/python/grpcio/grpc/framework/core/_operation.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Implementation of operations.""" - -import threading - -from grpc.framework.core import _context -from grpc.framework.core import _emission -from grpc.framework.core import _expiration -from grpc.framework.core import _ingestion -from grpc.framework.core import _interfaces -from grpc.framework.core import _protocol -from grpc.framework.core import _reception -from grpc.framework.core import _termination -from grpc.framework.core import _transmission -from grpc.framework.core import _utilities - - -class _EasyOperation(_interfaces.Operation): - """A trivial implementation of interfaces.Operation.""" - - def __init__( - self, lock, termination_manager, transmission_manager, expiration_manager, - context, operator, reception_manager): - """Constructor. - - Args: - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - context: A base.OperationContext for use by the customer during the - operation. - operator: A base.Operator for use by the customer during the operation. - reception_manager: The _interfaces.ReceptionManager for the operation. - """ - self._lock = lock - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - self._reception_manager = reception_manager - - self.context = context - self.operator = operator - - def handle_ticket(self, ticket): - with self._lock: - self._reception_manager.receive_ticket(ticket) - - def abort(self, outcome_kind): - with self._lock: - if self._termination_manager.outcome is None: - outcome = _utilities.Outcome(outcome_kind, None, None) - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - - -def invocation_operate( - operation_id, group, method, subscription, timeout, protocol_options, - initial_metadata, payload, completion, ticket_sink, termination_action, - pool): - """Constructs objects necessary for front-side operation management. - - Args: - operation_id: An object identifying the operation. - group: The group identifier of the operation. - method: The method identifier of the operation. - subscription: A base.Subscription describing the customer's interest in the - results of the operation. - timeout: A length of time in seconds to allow for the operation. - protocol_options: A transport-specific, application-specific, and/or - protocol-specific value relating to the invocation. May be None. - initial_metadata: An initial metadata value to be sent to the other side of - the operation. 
May be None if the initial metadata will be passed later or - if there will be no initial metadata passed at all. - payload: The first payload value to be transmitted to the other side. May be - None if there is no such value or if the customer chose not to pass it at - operation invocation. - completion: A base.Completion value indicating the end of values passed to - the other side of the operation. - ticket_sink: A callable that accepts links.Tickets and delivers them to the - other side of the operation. - termination_action: A callable that accepts the outcome of the operation as - a base.Outcome value to be called on operation completion. - pool: A thread pool with which to do the work of the operation. - - Returns: - An _interfaces.Operation for the operation. - """ - lock = threading.Lock() - with lock: - termination_manager = _termination.invocation_termination_manager( - termination_action, pool) - transmission_manager = _transmission.TransmissionManager( - operation_id, ticket_sink, lock, pool, termination_manager) - expiration_manager = _expiration.invocation_expiration_manager( - timeout, lock, termination_manager, transmission_manager) - protocol_manager = _protocol.invocation_protocol_manager( - subscription, lock, pool, termination_manager, transmission_manager, - expiration_manager) - operation_context = _context.OperationContext( - lock, termination_manager, transmission_manager, expiration_manager) - emission_manager = _emission.EmissionManager( - lock, termination_manager, transmission_manager, expiration_manager) - ingestion_manager = _ingestion.invocation_ingestion_manager( - subscription, lock, pool, termination_manager, transmission_manager, - expiration_manager, protocol_manager) - reception_manager = _reception.ReceptionManager( - termination_manager, transmission_manager, expiration_manager, - protocol_manager, ingestion_manager) - - termination_manager.set_expiration_manager(expiration_manager) - transmission_manager.set_expiration_manager(expiration_manager) - emission_manager.set_ingestion_manager(ingestion_manager) - - transmission_manager.kick_off( - group, method, timeout, protocol_options, initial_metadata, payload, - completion, None) - - return _EasyOperation( - lock, termination_manager, transmission_manager, expiration_manager, - operation_context, emission_manager, reception_manager) - - -def service_operate( - servicer_package, ticket, ticket_sink, termination_action, pool): - """Constructs an Operation for service of an operation. - - Args: - servicer_package: A _utilities.ServicerPackage to be used servicing the - operation. - ticket: The first links.Ticket received for the operation. - ticket_sink: A callable that accepts links.Tickets and delivers them to the - other side of the operation. - termination_action: A callable that accepts the outcome of the operation as - a base.Outcome value to be called on operation completion. - pool: A thread pool with which to do the work of the operation. - - Returns: - An _interfaces.Operation for the operation. 
- """ - lock = threading.Lock() - with lock: - termination_manager = _termination.service_termination_manager( - termination_action, pool) - transmission_manager = _transmission.TransmissionManager( - ticket.operation_id, ticket_sink, lock, pool, termination_manager) - expiration_manager = _expiration.service_expiration_manager( - ticket.timeout, servicer_package.default_timeout, - servicer_package.maximum_timeout, lock, termination_manager, - transmission_manager) - protocol_manager = _protocol.service_protocol_manager( - lock, pool, termination_manager, transmission_manager, - expiration_manager) - operation_context = _context.OperationContext( - lock, termination_manager, transmission_manager, expiration_manager) - emission_manager = _emission.EmissionManager( - lock, termination_manager, transmission_manager, expiration_manager) - ingestion_manager = _ingestion.service_ingestion_manager( - servicer_package.servicer, operation_context, emission_manager, lock, - pool, termination_manager, transmission_manager, expiration_manager, - protocol_manager) - reception_manager = _reception.ReceptionManager( - termination_manager, transmission_manager, expiration_manager, - protocol_manager, ingestion_manager) - - termination_manager.set_expiration_manager(expiration_manager) - transmission_manager.set_expiration_manager(expiration_manager) - emission_manager.set_ingestion_manager(ingestion_manager) - - reception_manager.receive_ticket(ticket) - - return _EasyOperation( - lock, termination_manager, transmission_manager, expiration_manager, - operation_context, emission_manager, reception_manager) diff --git a/src/python/grpcio/grpc/framework/core/_protocol.py b/src/python/grpcio/grpc/framework/core/_protocol.py deleted file mode 100644 index 3177b5e302a..00000000000 --- a/src/python/grpcio/grpc/framework/core/_protocol.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
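The invocation_operate and service_operate constructors removed above share a two-phase wiring pattern: the managers hold references to one another, so they are first constructed under the operation-wide lock and their circular references are then resolved through setters (set_expiration_manager, set_ingestion_manager). The following is a minimal, self-contained sketch of that pattern; TerminationManager and ExpirationManager here are illustrative stand-ins, not the deleted framework's API.

import threading


class TerminationManager(object):
    """Stand-in: records whether and how the operation has terminated."""

    def __init__(self):
        self._expiration_manager = None  # resolved in phase two

    def set_expiration_manager(self, expiration_manager):
        self._expiration_manager = expiration_manager


class ExpirationManager(object):
    """Stand-in: aborts the operation once its deadline has passed."""

    def __init__(self, termination_manager):
        self._termination_manager = termination_manager


def build_managers():
    # Phase one: construct the managers under the operation-wide lock.
    lock = threading.Lock()
    with lock:
        termination_manager = TerminationManager()
        expiration_manager = ExpirationManager(termination_manager)
        # Phase two: resolve the circular reference through a setter, as the
        # deleted invocation_operate and service_operate functions did.
        termination_manager.set_expiration_manager(expiration_manager)
    return lock, termination_manager, expiration_manager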
- -"""State and behavior for passing protocol objects in an operation.""" - -import collections -import enum - -from grpc.framework.core import _constants -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import callable_util -from grpc.framework.interfaces.base import base - -_EXCEPTION_LOG_MESSAGE = 'Exception delivering protocol object!' - -_LOCAL_FAILURE_OUTCOME = _utilities.Outcome( - base.Outcome.Kind.LOCAL_FAILURE, None, None) - - -class _Awaited( - collections.namedtuple('_Awaited', ('kind', 'value',))): - - @enum.unique - class Kind(enum.Enum): - NOT_YET_ARRIVED = 'not yet arrived' - ARRIVED = 'arrived' - -_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None) -_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None) - - -class _Transitory( - collections.namedtuple('_Transitory', ('kind', 'value',))): - - @enum.unique - class Kind(enum.Enum): - NOT_YET_SEEN = 'not yet seen' - PRESENT = 'present' - GONE = 'gone' - -_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None) -_GONE = _Transitory(_Transitory.Kind.GONE, None) - - -class _ProtocolManager(_interfaces.ProtocolManager): - """An implementation of _interfaces.ExpirationManager.""" - - def __init__( - self, protocol_receiver, lock, pool, termination_manager, - transmission_manager, expiration_manager): - """Constructor. - - Args: - protocol_receiver: An _Awaited wrapping of the base.ProtocolReceiver to - which protocol objects should be passed during the operation. May be - of kind _Awaited.Kind.NOT_YET_ARRIVED if the customer's subscription is - not yet known and may be of kind _Awaited.Kind.ARRIVED but with a value - of None if the customer's subscription did not include a - ProtocolReceiver. - lock: The operation-wide lock. - pool: A thread pool. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. 
- """ - self._lock = lock - self._pool = pool - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - - self._protocol_receiver = protocol_receiver - self._context = _NOT_YET_SEEN - - def _abort_and_notify(self, outcome): - if self._termination_manager.outcome is None: - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - - def _deliver(self, behavior, value): - def deliver(): - delivery_outcome = callable_util.call_logging_exceptions( - behavior, _EXCEPTION_LOG_MESSAGE, value) - if delivery_outcome.kind is callable_util.Outcome.Kind.RAISED: - with self._lock: - self._abort_and_notify(_LOCAL_FAILURE_OUTCOME) - self._pool.submit( - callable_util.with_exceptions_logged( - deliver, _constants.INTERNAL_ERROR_LOG_MESSAGE)) - - def set_protocol_receiver(self, protocol_receiver): - """See _interfaces.ProtocolManager.set_protocol_receiver for spec.""" - self._protocol_receiver = _Awaited(_Awaited.Kind.ARRIVED, protocol_receiver) - if (self._context.kind is _Transitory.Kind.PRESENT and - protocol_receiver is not None): - self._deliver(protocol_receiver.context, self._context.value) - self._context = _GONE - - def accept_protocol_context(self, protocol_context): - """See _interfaces.ProtocolManager.accept_protocol_context for spec.""" - if self._protocol_receiver.kind is _Awaited.Kind.ARRIVED: - if self._protocol_receiver.value is not None: - self._deliver(self._protocol_receiver.value.context, protocol_context) - self._context = _GONE - else: - self._context = _Transitory(_Transitory.Kind.PRESENT, protocol_context) - - -def invocation_protocol_manager( - subscription, lock, pool, termination_manager, transmission_manager, - expiration_manager): - """Creates an _interfaces.ProtocolManager for invocation-side use. - - Args: - subscription: The local customer's subscription to the operation. - lock: The operation-wide lock. - pool: A thread pool. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - """ - if subscription.kind is base.Subscription.Kind.FULL: - awaited_protocol_receiver = _Awaited( - _Awaited.Kind.ARRIVED, subscription.protocol_receiver) - else: - awaited_protocol_receiver = _ARRIVED_AND_NONE - return _ProtocolManager( - awaited_protocol_receiver, lock, pool, termination_manager, - transmission_manager, expiration_manager) - - -def service_protocol_manager( - lock, pool, termination_manager, transmission_manager, expiration_manager): - """Creates an _interfaces.ProtocolManager for service-side use. - - Args: - lock: The operation-wide lock. - pool: A thread pool. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - """ - return _ProtocolManager( - _NOT_YET_ARRIVED, lock, pool, termination_manager, transmission_manager, - expiration_manager) diff --git a/src/python/grpcio/grpc/framework/core/_reception.py b/src/python/grpcio/grpc/framework/core/_reception.py deleted file mode 100644 index ff81450dee9..00000000000 --- a/src/python/grpcio/grpc/framework/core/_reception.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for ticket reception.""" - -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.base import utilities -from grpc.framework.interfaces.links import links - -_REMOTE_TICKET_TERMINATION_TO_LOCAL_OUTCOME_KIND = { - links.Ticket.Termination.CANCELLATION: base.Outcome.Kind.CANCELLED, - links.Ticket.Termination.EXPIRATION: base.Outcome.Kind.EXPIRED, - links.Ticket.Termination.SHUTDOWN: base.Outcome.Kind.REMOTE_SHUTDOWN, - links.Ticket.Termination.RECEPTION_FAILURE: - base.Outcome.Kind.RECEPTION_FAILURE, - links.Ticket.Termination.TRANSMISSION_FAILURE: - base.Outcome.Kind.TRANSMISSION_FAILURE, - links.Ticket.Termination.LOCAL_FAILURE: base.Outcome.Kind.REMOTE_FAILURE, - links.Ticket.Termination.REMOTE_FAILURE: base.Outcome.Kind.LOCAL_FAILURE, -} - -_RECEPTION_FAILURE_OUTCOME = _utilities.Outcome( - base.Outcome.Kind.RECEPTION_FAILURE, None, None) - - -def _carrying_protocol_context(ticket): - return ticket.protocol is not None and ticket.protocol.kind in ( - links.Protocol.Kind.INVOCATION_CONTEXT, - links.Protocol.Kind.SERVICER_CONTEXT,) - - -class ReceptionManager(_interfaces.ReceptionManager): - """A ReceptionManager based around a _Receiver passed to it.""" - - def __init__( - self, termination_manager, transmission_manager, expiration_manager, - protocol_manager, ingestion_manager): - """Constructor. - - Args: - termination_manager: The operation's _interfaces.TerminationManager. - transmission_manager: The operation's _interfaces.TransmissionManager. - expiration_manager: The operation's _interfaces.ExpirationManager. - protocol_manager: The operation's _interfaces.ProtocolManager. - ingestion_manager: The operation's _interfaces.IngestionManager. 
- """ - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - self._protocol_manager = protocol_manager - self._ingestion_manager = ingestion_manager - - self._lowest_unseen_sequence_number = 0 - self._out_of_sequence_tickets = {} - self._aborted = False - - def _abort(self, outcome): - self._aborted = True - if self._termination_manager.outcome is None: - self._termination_manager.abort(outcome) - self._transmission_manager.abort(None) - self._expiration_manager.terminate() - - def _sequence_failure(self, ticket): - """Determines a just-arrived ticket's sequential legitimacy. - - Args: - ticket: A just-arrived ticket. - - Returns: - True if the ticket is sequentially legitimate; False otherwise. - """ - if ticket.sequence_number < self._lowest_unseen_sequence_number: - return True - elif ticket.sequence_number in self._out_of_sequence_tickets: - return True - else: - return False - - def _process_one(self, ticket): - if ticket.sequence_number == 0: - self._ingestion_manager.set_group_and_method(ticket.group, ticket.method) - if _carrying_protocol_context(ticket): - self._protocol_manager.accept_protocol_context(ticket.protocol.value) - else: - self._protocol_manager.accept_protocol_context(None) - if ticket.timeout is not None: - self._expiration_manager.change_timeout(ticket.timeout) - if ticket.termination is None: - completion = None - else: - completion = utilities.completion( - ticket.terminal_metadata, ticket.code, ticket.message) - self._termination_manager.reception_complete(ticket.code, ticket.message) - self._ingestion_manager.advance( - ticket.initial_metadata, ticket.payload, completion, ticket.allowance) - if ticket.allowance is not None: - self._transmission_manager.allowance(ticket.allowance) - - def _process(self, ticket): - """Process those tickets ready to be processed. - - Args: - ticket: A just-arrived ticket the sequence number of which matches this - _ReceptionManager's _lowest_unseen_sequence_number field. - """ - while True: - self._process_one(ticket) - next_ticket = self._out_of_sequence_tickets.pop( - ticket.sequence_number + 1, None) - if next_ticket is None: - self._lowest_unseen_sequence_number = ticket.sequence_number + 1 - return - else: - ticket = next_ticket - - def receive_ticket(self, ticket): - """See _interfaces.ReceptionManager.receive_ticket for specification.""" - if self._aborted: - return - elif self._sequence_failure(ticket): - self._abort(_RECEPTION_FAILURE_OUTCOME) - elif ticket.termination not in (None, links.Ticket.Termination.COMPLETION): - outcome_kind = _REMOTE_TICKET_TERMINATION_TO_LOCAL_OUTCOME_KIND[ - ticket.termination] - self._abort( - _utilities.Outcome(outcome_kind, ticket.code, ticket.message)) - elif ticket.sequence_number == self._lowest_unseen_sequence_number: - self._process(ticket) - else: - self._out_of_sequence_tickets[ticket.sequence_number] = ticket diff --git a/src/python/grpcio/grpc/framework/core/_termination.py b/src/python/grpcio/grpc/framework/core/_termination.py deleted file mode 100644 index fff3a3fc146..00000000000 --- a/src/python/grpcio/grpc/framework/core/_termination.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for operation termination.""" - -import abc - -import six - -from grpc.framework.core import _constants -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import callable_util -from grpc.framework.interfaces.base import base - - -def _invocation_completion_predicate( - unused_emission_complete, unused_transmission_complete, - unused_reception_complete, ingestion_complete): - return ingestion_complete - - -def _service_completion_predicate( - unused_emission_complete, transmission_complete, unused_reception_complete, - ingestion_complete): - return transmission_complete and ingestion_complete - - -class TerminationManager(six.with_metaclass(abc.ABCMeta, _interfaces.TerminationManager)): - """A _interfaces.TransmissionManager on which another manager may be set.""" - - @abc.abstractmethod - def set_expiration_manager(self, expiration_manager): - """Sets the expiration manager with which this manager will interact. - - Args: - expiration_manager: The _interfaces.ExpirationManager associated with the - current operation. - """ - raise NotImplementedError() - - -class _TerminationManager(TerminationManager): - """An implementation of TerminationManager.""" - - def __init__(self, predicate, action, pool): - """Constructor. - - Args: - predicate: One of _invocation_completion_predicate or - _service_completion_predicate to be used to determine when the operation - has completed. - action: A behavior to pass the operation outcome's kind on operation - termination. - pool: A thread pool. - """ - self._predicate = predicate - self._action = action - self._pool = pool - self._expiration_manager = None - - self._callbacks = [] - - self._code = None - self._details = None - self._emission_complete = False - self._transmission_complete = False - self._reception_complete = False - self._ingestion_complete = False - - # The None-ness of outcome is the operation-wide record of whether and how - # the operation has terminated. 
- self.outcome = None - - def set_expiration_manager(self, expiration_manager): - self._expiration_manager = expiration_manager - - def _terminate_internal_only(self, outcome): - """Terminates the operation. - - Args: - outcome: A base.Outcome describing the outcome of the operation. - """ - self.outcome = outcome - callbacks = list(self._callbacks) - self._callbacks = None - - act = callable_util.with_exceptions_logged( - self._action, _constants.INTERNAL_ERROR_LOG_MESSAGE) - - # TODO(issue 3202): Don't call the local application's callbacks if it has - # previously shown a programming defect. - if False and outcome.kind is base.Outcome.Kind.LOCAL_FAILURE: - self._pool.submit(act, base.Outcome.Kind.LOCAL_FAILURE) - else: - def call_callbacks_and_act(callbacks, outcome): - for callback in callbacks: - callback_outcome = callable_util.call_logging_exceptions( - callback, _constants.TERMINATION_CALLBACK_EXCEPTION_LOG_MESSAGE, - outcome) - if callback_outcome.exception is not None: - act_outcome_kind = base.Outcome.Kind.LOCAL_FAILURE - break - else: - act_outcome_kind = outcome.kind - act(act_outcome_kind) - - self._pool.submit( - callable_util.with_exceptions_logged( - call_callbacks_and_act, _constants.INTERNAL_ERROR_LOG_MESSAGE), - callbacks, outcome) - - def _terminate_and_notify(self, outcome): - self._terminate_internal_only(outcome) - self._expiration_manager.terminate() - - def _perhaps_complete(self): - if self._predicate( - self._emission_complete, self._transmission_complete, - self._reception_complete, self._ingestion_complete): - self._terminate_and_notify( - _utilities.Outcome( - base.Outcome.Kind.COMPLETED, self._code, self._details)) - return True - else: - return False - - def is_active(self): - """See _interfaces.TerminationManager.is_active for specification.""" - return self.outcome is None - - def add_callback(self, callback): - """See _interfaces.TerminationManager.add_callback for specification.""" - if self.outcome is None: - self._callbacks.append(callback) - return None - else: - return self.outcome - - def emission_complete(self): - """See superclass method for specification.""" - if self.outcome is None: - self._emission_complete = True - self._perhaps_complete() - - def transmission_complete(self): - """See superclass method for specification.""" - if self.outcome is None: - self._transmission_complete = True - return self._perhaps_complete() - else: - return False - - def reception_complete(self, code, details): - """See superclass method for specification.""" - if self.outcome is None: - self._reception_complete = True - self._code = code - self._details = details - self._perhaps_complete() - - def ingestion_complete(self): - """See superclass method for specification.""" - if self.outcome is None: - self._ingestion_complete = True - self._perhaps_complete() - - def expire(self): - """See _interfaces.TerminationManager.expire for specification.""" - self._terminate_internal_only( - _utilities.Outcome(base.Outcome.Kind.EXPIRED, None, None)) - - def abort(self, outcome): - """See _interfaces.TerminationManager.abort for specification.""" - self._terminate_and_notify(outcome) - - -def invocation_termination_manager(action, pool): - """Creates a TerminationManager appropriate for invocation-side use. - - Args: - action: An action to call on operation termination. - pool: A thread pool in which to execute the passed action and any - termination callbacks that are registered during the operation. - - Returns: - A TerminationManager appropriate for invocation-side use. 
- """ - return _TerminationManager(_invocation_completion_predicate, action, pool) - - -def service_termination_manager(action, pool): - """Creates a TerminationManager appropriate for service-side use. - - Args: - action: An action to call on operation termination. - pool: A thread pool in which to execute the passed action and any - termination callbacks that are registered during the operation. - - Returns: - A TerminationManager appropriate for service-side use. - """ - return _TerminationManager(_service_completion_predicate, action, pool) diff --git a/src/python/grpcio/grpc/framework/core/_transmission.py b/src/python/grpcio/grpc/framework/core/_transmission.py deleted file mode 100644 index 65b12c4160e..00000000000 --- a/src/python/grpcio/grpc/framework/core/_transmission.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for ticket transmission during an operation.""" - -import collections -import enum - -from grpc.framework.core import _constants -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import callable_util -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.links import links - -_TRANSMISSION_EXCEPTION_LOG_MESSAGE = 'Exception during transmission!' - -_TRANSMISSION_FAILURE_OUTCOME = _utilities.Outcome( - base.Outcome.Kind.TRANSMISSION_FAILURE, None, None) - - -def _explode_completion(completion): - if completion is None: - return None, None, None, None - else: - return ( - completion.terminal_metadata, completion.code, completion.message, - links.Ticket.Termination.COMPLETION) - - -class _Abort( - collections.namedtuple( - '_Abort', ('kind', 'termination', 'code', 'details',))): - """Tracks whether the operation aborted and what is to be done about it. - - Attributes: - kind: A Kind value describing the overall kind of the _Abort. - termination: A links.Ticket.Termination value to be sent to the other side - of the operation. 
Only valid if kind is Kind.ABORTED_NOTIFY_NEEDED. - code: A code value to be sent to the other side of the operation. Only - valid if kind is Kind.ABORTED_NOTIFY_NEEDED. - details: A details value to be sent to the other side of the operation. - Only valid if kind is Kind.ABORTED_NOTIFY_NEEDED. - """ - - @enum.unique - class Kind(enum.Enum): - NOT_ABORTED = 'not aborted' - ABORTED_NOTIFY_NEEDED = 'aborted notify needed' - ABORTED_NO_NOTIFY = 'aborted no notify' - -_NOT_ABORTED = _Abort(_Abort.Kind.NOT_ABORTED, None, None, None) -_ABORTED_NO_NOTIFY = _Abort(_Abort.Kind.ABORTED_NO_NOTIFY, None, None, None) - - -class TransmissionManager(_interfaces.TransmissionManager): - """An _interfaces.TransmissionManager that sends links.Tickets.""" - - def __init__( - self, operation_id, ticket_sink, lock, pool, termination_manager): - """Constructor. - - Args: - operation_id: The operation's ID. - ticket_sink: A callable that accepts tickets and sends them to the other - side of the operation. - lock: The operation-servicing-wide lock object. - pool: A thread pool in which the work of transmitting tickets will be - performed. - termination_manager: The _interfaces.TerminationManager associated with - this operation. - """ - self._lock = lock - self._pool = pool - self._ticket_sink = ticket_sink - self._operation_id = operation_id - self._termination_manager = termination_manager - self._expiration_manager = None - - self._lowest_unused_sequence_number = 0 - self._remote_allowance = 1 - self._remote_complete = False - self._timeout = None - self._local_allowance = 0 - self._initial_metadata = None - self._payloads = [] - self._completion = None - self._abort = _NOT_ABORTED - self._transmitting = False - - def set_expiration_manager(self, expiration_manager): - """Sets the ExpirationManager with which this manager will cooperate.""" - self._expiration_manager = expiration_manager - - def _next_ticket(self): - """Creates the next ticket to be transmitted. - - Returns: - A links.Ticket to be sent to the other side of the operation or None if - there is nothing to be sent at this time. - """ - if self._abort.kind is _Abort.Kind.ABORTED_NO_NOTIFY: - return None - elif self._abort.kind is _Abort.Kind.ABORTED_NOTIFY_NEEDED: - termination = self._abort.termination - code, details = self._abort.code, self._abort.details - self._abort = _ABORTED_NO_NOTIFY - return links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - None, None, None, None, None, None, code, details, termination, None) - - action = False - # TODO(nathaniel): Support other subscriptions. 
- local_subscription = links.Ticket.Subscription.FULL - timeout = self._timeout - if timeout is not None: - self._timeout = None - action = True - if self._local_allowance <= 0: - allowance = None - else: - allowance = self._local_allowance - self._local_allowance = 0 - action = True - initial_metadata = self._initial_metadata - if initial_metadata is not None: - self._initial_metadata = None - action = True - if not self._payloads or self._remote_allowance <= 0: - payload = None - else: - payload = self._payloads.pop(0) - self._remote_allowance -= 1 - action = True - if self._completion is None or self._payloads: - terminal_metadata, code, message, termination = None, None, None, None - else: - terminal_metadata, code, message, termination = _explode_completion( - self._completion) - self._completion = None - action = True - - if action: - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - local_subscription, timeout, allowance, initial_metadata, payload, - terminal_metadata, code, message, termination, None) - self._lowest_unused_sequence_number += 1 - return ticket - else: - return None - - def _transmit(self, ticket): - """Commences the transmission loop sending tickets. - - Args: - ticket: A links.Ticket to be sent to the other side of the operation. - """ - def transmit(ticket): - while True: - transmission_outcome = callable_util.call_logging_exceptions( - self._ticket_sink, _TRANSMISSION_EXCEPTION_LOG_MESSAGE, ticket) - if transmission_outcome.exception is None: - with self._lock: - if ticket.termination is links.Ticket.Termination.COMPLETION: - self._termination_manager.transmission_complete() - ticket = self._next_ticket() - if ticket is None: - self._transmitting = False - return - else: - with self._lock: - self._abort = _ABORTED_NO_NOTIFY - if self._termination_manager.outcome is None: - self._termination_manager.abort(_TRANSMISSION_FAILURE_OUTCOME) - self._expiration_manager.terminate() - return - - self._pool.submit(callable_util.with_exceptions_logged( - transmit, _constants.INTERNAL_ERROR_LOG_MESSAGE), ticket) - self._transmitting = True - - def kick_off( - self, group, method, timeout, protocol_options, initial_metadata, - payload, completion, allowance): - """See _interfaces.TransmissionManager.kickoff for specification.""" - # TODO(nathaniel): Support other subscriptions. 
- subscription = links.Ticket.Subscription.FULL - terminal_metadata, code, message, termination = _explode_completion( - completion) - self._remote_allowance = 1 if payload is None else 0 - protocol = links.Protocol(links.Protocol.Kind.CALL_OPTION, protocol_options) - ticket = links.Ticket( - self._operation_id, 0, group, method, subscription, timeout, allowance, - initial_metadata, payload, terminal_metadata, code, message, - termination, protocol) - self._lowest_unused_sequence_number = 1 - self._transmit(ticket) - - def advance(self, initial_metadata, payload, completion, allowance): - """See _interfaces.TransmissionManager.advance for specification.""" - if self._abort.kind is not _Abort.Kind.NOT_ABORTED: - return - - effective_initial_metadata = initial_metadata - effective_payload = payload - effective_completion = completion - if allowance is not None and not self._remote_complete: - effective_allowance = allowance - else: - effective_allowance = None - if self._transmitting: - if effective_initial_metadata is not None: - self._initial_metadata = effective_initial_metadata - if effective_payload is not None: - self._payloads.append(effective_payload) - if effective_completion is not None: - self._completion = effective_completion - if effective_allowance is not None: - self._local_allowance += effective_allowance - else: - if effective_payload is not None: - if 0 < self._remote_allowance: - ticket_payload = effective_payload - self._remote_allowance -= 1 - else: - self._payloads.append(effective_payload) - ticket_payload = None - else: - ticket_payload = None - if effective_completion is not None and not self._payloads: - ticket_completion = effective_completion - else: - self._completion = effective_completion - ticket_completion = None - if any( - (effective_initial_metadata, ticket_payload, ticket_completion, - effective_allowance)): - terminal_metadata, code, message, termination = _explode_completion( - completion) - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - None, None, allowance, effective_initial_metadata, ticket_payload, - terminal_metadata, code, message, termination, None) - self._lowest_unused_sequence_number += 1 - self._transmit(ticket) - - def timeout(self, timeout): - """See _interfaces.TransmissionManager.timeout for specification.""" - if self._abort.kind is not _Abort.Kind.NOT_ABORTED: - return - elif self._transmitting: - self._timeout = timeout - else: - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - None, timeout, None, None, None, None, None, None, None, None) - self._lowest_unused_sequence_number += 1 - self._transmit(ticket) - - def allowance(self, allowance): - """See _interfaces.TransmissionManager.allowance for specification.""" - if self._abort.kind is not _Abort.Kind.NOT_ABORTED: - return - elif self._transmitting or not self._payloads: - self._remote_allowance += allowance - else: - self._remote_allowance += allowance - 1 - payload = self._payloads.pop(0) - if self._payloads: - completion = None - else: - completion = self._completion - self._completion = None - terminal_metadata, code, message, termination = _explode_completion( - completion) - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - None, None, None, None, payload, terminal_metadata, code, message, - termination, None) - self._lowest_unused_sequence_number += 1 - self._transmit(ticket) - - def remote_complete(self): - """See 
_interfaces.TransmissionManager.remote_complete for specification.""" - self._remote_complete = True - self._local_allowance = 0 - - def abort(self, outcome): - """See _interfaces.TransmissionManager.abort for specification.""" - if self._abort.kind is _Abort.Kind.NOT_ABORTED: - if outcome is None: - self._abort = _ABORTED_NO_NOTIFY - else: - termination = _constants.ABORTION_OUTCOME_TO_TICKET_TERMINATION.get( - outcome.kind) - if termination is None: - self._abort = _ABORTED_NO_NOTIFY - elif self._transmitting: - self._abort = _Abort( - _Abort.Kind.ABORTED_NOTIFY_NEEDED, termination, outcome.code, - outcome.details) - else: - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, - None, None, None, None, None, None, None, outcome.code, - outcome.details, termination, None) - self._transmit(ticket) - self._abort = _ABORTED_NO_NOTIFY diff --git a/src/python/grpcio/grpc/framework/core/_utilities.py b/src/python/grpcio/grpc/framework/core/_utilities.py deleted file mode 100644 index abedc727e4a..00000000000 --- a/src/python/grpcio/grpc/framework/core/_utilities.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Package-internal utilities.""" - -import collections - -from grpc.framework.interfaces.base import base - - -class ServicerPackage( - collections.namedtuple( - 'ServicerPackage', ('servicer', 'default_timeout', 'maximum_timeout'))): - """A trivial bundle class. - - Attributes: - servicer: A base.Servicer. - default_timeout: A float indicating the length of time in seconds to allow - for an operation invoked without a timeout. - maximum_timeout: A float indicating the maximum length of time in seconds to - allow for an operation. 
- """ - - -class Outcome( - base.Outcome, - collections.namedtuple('Outcome', ('kind', 'code', 'details',))): - """A trivial implementation of base.Outcome.""" diff --git a/src/python/grpcio/grpc/framework/core/implementations.py b/src/python/grpcio/grpc/framework/core/implementations.py deleted file mode 100644 index 364a7faed40..00000000000 --- a/src/python/grpcio/grpc/framework/core/implementations.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Entry points into the ticket-exchange-based base layer implementation.""" - -# base and links are referenced from specification in this module. -from grpc.framework.core import _end -from grpc.framework.interfaces.base import base # pylint: disable=unused-import -from grpc.framework.interfaces.links import links # pylint: disable=unused-import - - -def invocation_end_link(): - """Creates a base.End-links.Link suitable for operation invocation. - - Returns: - An object that is both a base.End and a links.Link, that supports operation - invocation, and that translates operation invocation into ticket exchange. - """ - return _end.serviceless_end_link() - - -def service_end_link(servicer, default_timeout, maximum_timeout): - """Creates a base.End-links.Link suitable for operation service. - - Args: - servicer: A base.Servicer for servicing operations. - default_timeout: A length of time in seconds to be used as the default - time alloted for a single operation. - maximum_timeout: A length of time in seconds to be used as the maximum - time alloted for a single operation. - - Returns: - An object that is both a base.End and a links.Link and that services - operations that arrive at it through ticket exchange. 
- """ - return _end.serviceful_end_link(servicer, default_timeout, maximum_timeout) diff --git a/src/python/grpcio/grpc/framework/crust/_calls.py b/src/python/grpcio/grpc/framework/crust/_calls.py deleted file mode 100644 index bff940d7471..00000000000 --- a/src/python/grpcio/grpc/framework/crust/_calls.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Utility functions for invoking RPCs.""" - -from grpc.framework.crust import _control -from grpc.framework.interfaces.base import utilities -from grpc.framework.interfaces.face import face - -_ITERATOR_EXCEPTION_LOG_MESSAGE = 'Exception iterating over requests!' 
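The helpers defined below in this deleted module all follow the same shape: invoke the operation, obtain a rendezvous that serves both as the request sink and the response iterator, and, for request-streaming calls, feed the requests from a pool thread while the caller blocks on the response. The sketch below is a generic, self-contained illustration of that shape using standard-library queues and a thread pool rather than the deleted Rendezvous machinery; the names and the toy in-process "serve" step are assumptions made for the example.

import queue
from concurrent.futures import ThreadPoolExecutor


def blocking_stream_unary(handler, request_iterator, pool):
    """Feed requests from a pool thread; block the caller for one response."""
    requests = queue.Queue()
    response = queue.Queue(maxsize=1)

    def feed():
        for request in request_iterator:
            requests.put(request)
        requests.put(None)  # sentinel: no more requests

    def serve():
        # Toy stand-in for the remote side: consume the whole request stream,
        # then produce a single response.
        consumed = []
        while True:
            request = requests.get()
            if request is None:
                break
            consumed.append(request)
        response.put(handler(consumed))

    pool.submit(feed)
    pool.submit(serve)
    return response.get()  # the caller blocks here


if __name__ == '__main__':
    with ThreadPoolExecutor(max_workers=2) as pool:
        print(blocking_stream_unary(sum, iter([1, 2, 3]), pool))  # prints 6

Keeping the request-feeding loop on a pool thread is what lets the blocking variants return a plain value to the caller while still accepting streamed requests.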
- -_EMPTY_COMPLETION = utilities.completion(None, None, None) - - -def _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - complete): - rendezvous = _control.Rendezvous(None, None) - subscription = utilities.full_subscription( - rendezvous, _control.protocol_receiver(rendezvous)) - operation_context, operator = end.operate( - group, method, subscription, timeout, protocol_options=protocol_options, - initial_metadata=initial_metadata, payload=payload, - completion=_EMPTY_COMPLETION if complete else None) - rendezvous.set_operator_and_context(operator, operation_context) - outcome = operation_context.add_termination_callback(rendezvous.set_outcome) - if outcome is not None: - rendezvous.set_outcome(outcome) - return rendezvous, operation_context, outcome - - -def _event_return_unary( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool): - if outcome is None: - def in_pool(): - abortion = rendezvous.add_abortion_callback(abortion_callback) - if abortion is None: - try: - receiver.initial_metadata(rendezvous.initial_metadata()) - receiver.response(next(rendezvous)) - receiver.complete( - rendezvous.terminal_metadata(), rendezvous.code(), - rendezvous.details()) - except face.AbortionError: - pass - else: - abortion_callback(abortion) - pool.submit(_control.pool_wrap(in_pool, operation_context)) - return rendezvous - - -def _event_return_stream( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool): - if outcome is None: - def in_pool(): - abortion = rendezvous.add_abortion_callback(abortion_callback) - if abortion is None: - try: - receiver.initial_metadata(rendezvous.initial_metadata()) - for response in rendezvous: - receiver.response(response) - receiver.complete( - rendezvous.terminal_metadata(), rendezvous.code(), - rendezvous.details()) - except face.AbortionError: - pass - else: - abortion_callback(abortion) - pool.submit(_control.pool_wrap(in_pool, operation_context)) - return rendezvous - - -def blocking_unary_unary( - end, group, method, timeout, with_call, protocol_options, initial_metadata, - payload): - """Services in a blocking fashion a unary-unary servicer method.""" - rendezvous, unused_operation_context, unused_outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - if with_call: - return next(rendezvous), rendezvous - else: - return next(rendezvous) - - -def future_unary_unary( - end, group, method, timeout, protocol_options, initial_metadata, payload): - """Services a value-in value-out servicer method by returning a Future.""" - rendezvous, unused_operation_context, unused_outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - return rendezvous - - -def inline_unary_stream( - end, group, method, timeout, protocol_options, initial_metadata, payload): - """Services a value-in stream-out servicer method.""" - rendezvous, unused_operation_context, unused_outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - return rendezvous - - -def blocking_stream_unary( - end, group, method, timeout, with_call, protocol_options, initial_metadata, - payload_iterator, pool): - """Services in a blocking fashion a stream-in value-out servicer method.""" - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - if outcome is None: - def in_pool(): - for payload in payload_iterator: - 
rendezvous.consume(payload) - rendezvous.terminate() - pool.submit(_control.pool_wrap(in_pool, operation_context)) - if with_call: - return next(rendezvous), rendezvous - else: - return next(rendezvous) - else: - if with_call: - return next(rendezvous), rendezvous - else: - return next(rendezvous) - - -def future_stream_unary( - end, group, method, timeout, protocol_options, initial_metadata, - payload_iterator, pool): - """Services a stream-in value-out servicer method by returning a Future.""" - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - if outcome is None: - def in_pool(): - for payload in payload_iterator: - rendezvous.consume(payload) - rendezvous.terminate() - pool.submit(_control.pool_wrap(in_pool, operation_context)) - return rendezvous - - -def inline_stream_stream( - end, group, method, timeout, protocol_options, initial_metadata, - payload_iterator, pool): - """Services a stream-in stream-out servicer method.""" - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - if outcome is None: - def in_pool(): - for payload in payload_iterator: - rendezvous.consume(payload) - rendezvous.terminate() - pool.submit(_control.pool_wrap(in_pool, operation_context)) - return rendezvous - - -def event_unary_unary( - end, group, method, timeout, protocol_options, initial_metadata, payload, - receiver, abortion_callback, pool): - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - return _event_return_unary( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool) - - -def event_unary_stream( - end, group, method, timeout, protocol_options, initial_metadata, payload, - receiver, abortion_callback, pool): - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - return _event_return_stream( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool) - - -def event_stream_unary( - end, group, method, timeout, protocol_options, initial_metadata, receiver, - abortion_callback, pool): - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - return _event_return_unary( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool) - - -def event_stream_stream( - end, group, method, timeout, protocol_options, initial_metadata, receiver, - abortion_callback, pool): - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - return _event_return_stream( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool) diff --git a/src/python/grpcio/grpc/framework/crust/_control.py b/src/python/grpcio/grpc/framework/crust/_control.py deleted file mode 100644 index 9b4167bda0c..00000000000 --- a/src/python/grpcio/grpc/framework/crust/_control.py +++ /dev/null @@ -1,584 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for translating between sync and async control flow.""" - -import collections -import enum -import sys -import threading -import time - -from grpc.framework.foundation import abandonment -from grpc.framework.foundation import callable_util -from grpc.framework.foundation import future -from grpc.framework.foundation import stream -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.base import utilities -from grpc.framework.interfaces.face import face - -_DONE_CALLBACK_LOG_MESSAGE = 'Exception calling Future "done" callback!' -_INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Crust) Internal Error! )-:' - -_CANNOT_SET_INITIAL_METADATA = ( - 'Could not set initial metadata - has it already been set, or has a ' + - 'payload already been sent?') -_CANNOT_SET_TERMINAL_METADATA = ( - 'Could not set terminal metadata - has it already been set, or has RPC ' + - 'completion already been indicated?') -_CANNOT_SET_CODE = ( - 'Could not set code - has it already been set, or has RPC completion ' + - 'already been indicated?') -_CANNOT_SET_DETAILS = ( - 'Could not set details - has it already been set, or has RPC completion ' + - 'already been indicated?') - - -class _DummyOperator(base.Operator): - - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - pass - -_DUMMY_OPERATOR = _DummyOperator() - - -class _Awaited( - collections.namedtuple('_Awaited', ('kind', 'value',))): - - @enum.unique - class Kind(enum.Enum): - NOT_YET_ARRIVED = 'not yet arrived' - ARRIVED = 'arrived' - -_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None) -_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None) - - -class _Transitory( - collections.namedtuple('_Transitory', ('kind', 'value',))): - - @enum.unique - class Kind(enum.Enum): - NOT_YET_SEEN = 'not yet seen' - PRESENT = 'present' - GONE = 'gone' - -_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None) -_GONE = _Transitory(_Transitory.Kind.GONE, None) - - -class _Termination( - collections.namedtuple( - '_Termination', ('terminated', 'abortion', 'abortion_error',))): - """Values indicating whether and how an RPC has terminated. - - Attributes: - terminated: A boolean indicating whether or not the RPC has terminated. 
- abortion: A face.Abortion value describing the RPC's abortion or None if the - RPC did not abort. - abortion_error: A face.AbortionError describing the RPC's abortion or None - if the RPC did not abort. - """ - -_NOT_TERMINATED = _Termination(False, None, None) - -_OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR = { - base.Outcome.Kind.COMPLETED: lambda *unused_args: _Termination( - True, None, None), - base.Outcome.Kind.CANCELLED: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.CANCELLED, *args), - face.CancellationError(*args)), - base.Outcome.Kind.EXPIRED: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.EXPIRED, *args), - face.ExpirationError(*args)), - base.Outcome.Kind.LOCAL_SHUTDOWN: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.LOCAL_SHUTDOWN, *args), - face.LocalShutdownError(*args)), - base.Outcome.Kind.REMOTE_SHUTDOWN: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.REMOTE_SHUTDOWN, *args), - face.RemoteShutdownError(*args)), - base.Outcome.Kind.RECEPTION_FAILURE: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args), - face.NetworkError(*args)), - base.Outcome.Kind.TRANSMISSION_FAILURE: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args), - face.NetworkError(*args)), - base.Outcome.Kind.LOCAL_FAILURE: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.LOCAL_FAILURE, *args), - face.LocalError(*args)), - base.Outcome.Kind.REMOTE_FAILURE: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.REMOTE_FAILURE, *args), - face.RemoteError(*args)), -} - - -def _wait_once_until(condition, until): - if until is None: - condition.wait() - else: - remaining = until - time.time() - if remaining < 0: - raise future.TimeoutError() - else: - condition.wait(timeout=remaining) - - -def _done_callback_as_operation_termination_callback( - done_callback, rendezvous): - def operation_termination_callback(operation_outcome): - rendezvous.set_outcome(operation_outcome) - done_callback(rendezvous) - return operation_termination_callback - - -def _abortion_callback_as_operation_termination_callback( - rpc_abortion_callback, rendezvous_set_outcome): - def operation_termination_callback(operation_outcome): - termination = rendezvous_set_outcome(operation_outcome) - if termination.abortion is not None: - rpc_abortion_callback(termination.abortion) - return operation_termination_callback - - -class Rendezvous(base.Operator, future.Future, stream.Consumer, face.Call): - """A rendez-vous for the threads of an operation. - - Instances of this object present iterator and stream.Consumer interfaces for - interacting with application code and present a base.Operator interface and - maintain a base.Operator internally for interacting with base interface code. 
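The Rendezvous class being deleted here couples a callback-driven base layer to blocking application code through a single threading.Condition: asynchronous arrivals notify the condition, while iterator- and Future-style reads wait on it. The fragment below is a minimal, generic sketch of that coupling pattern only; MiniRendezvous and its methods are illustrative stand-ins and not part of any gRPC API.

import threading

class MiniRendezvous:
    """Toy sync/async bridge: async callbacks notify, blocking readers wait."""

    def __init__(self):
        self._condition = threading.Condition()
        self._payloads = []
        self._terminated = False

    # Called from the asynchronous (callback) side.
    def on_payload(self, payload):
        with self._condition:
            self._payloads.append(payload)
            self._condition.notify_all()

    def on_termination(self):
        with self._condition:
            self._terminated = True
            self._condition.notify_all()

    # Called from the blocking (application) side.
    def __iter__(self):
        return self

    def __next__(self):
        with self._condition:
            while True:
                if self._payloads:
                    return self._payloads.pop(0)
                if self._terminated:
                    raise StopIteration()
                self._condition.wait()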
- """ - - def __init__(self, operator, operation_context): - self._condition = threading.Condition() - - self._operator = operator - self._operation_context = operation_context - - self._protocol_context = _NOT_YET_ARRIVED - - self._up_initial_metadata = _NOT_YET_ARRIVED - self._up_payload = None - self._up_allowance = 1 - self._up_completion = _NOT_YET_ARRIVED - self._down_initial_metadata = _NOT_YET_SEEN - self._down_payload = None - self._down_allowance = 1 - self._down_terminal_metadata = _NOT_YET_SEEN - self._down_code = _NOT_YET_SEEN - self._down_details = _NOT_YET_SEEN - - self._termination = _NOT_TERMINATED - - # The semantics of future.Future.cancel and future.Future.cancelled are - # slightly wonky, so they have to be tracked separately from the rest of the - # result of the RPC. This field tracks whether cancellation was requested - # prior to termination of the RPC - self._cancelled = False - - def set_operator_and_context(self, operator, operation_context): - with self._condition: - self._operator = operator - self._operation_context = operation_context - - def _down_completion(self): - if self._down_terminal_metadata.kind is _Transitory.Kind.NOT_YET_SEEN: - terminal_metadata = None - self._down_terminal_metadata = _GONE - elif self._down_terminal_metadata.kind is _Transitory.Kind.PRESENT: - terminal_metadata = self._down_terminal_metadata.value - self._down_terminal_metadata = _GONE - else: - terminal_metadata = None - if self._down_code.kind is _Transitory.Kind.NOT_YET_SEEN: - code = None - self._down_code = _GONE - elif self._down_code.kind is _Transitory.Kind.PRESENT: - code = self._down_code.value - self._down_code = _GONE - else: - code = None - if self._down_details.kind is _Transitory.Kind.NOT_YET_SEEN: - details = None - self._down_details = _GONE - elif self._down_details.kind is _Transitory.Kind.PRESENT: - details = self._down_details.value - self._down_details = _GONE - else: - details = None - return utilities.completion(terminal_metadata, code, details) - - def _set_outcome(self, outcome): - if not self._termination.terminated: - self._operator = _DUMMY_OPERATOR - self._operation_context = None - self._down_initial_metadata = _GONE - self._down_payload = None - self._down_terminal_metadata = _GONE - self._down_code = _GONE - self._down_details = _GONE - - if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED: - initial_metadata = None - else: - initial_metadata = self._up_initial_metadata.value - if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED: - terminal_metadata = None - else: - terminal_metadata = self._up_completion.value.terminal_metadata - if outcome.kind is base.Outcome.Kind.COMPLETED: - code = self._up_completion.value.code - details = self._up_completion.value.message - else: - code = outcome.code - details = outcome.details - self._termination = _OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR[ - outcome.kind](initial_metadata, terminal_metadata, code, details) - - self._condition.notify_all() - - return self._termination - - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - with self._condition: - if initial_metadata is not None: - self._up_initial_metadata = _Awaited( - _Awaited.Kind.ARRIVED, initial_metadata) - if payload is not None: - if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED: - self._up_initial_metadata = _ARRIVED_AND_NONE - self._up_payload = payload - self._up_allowance -= 1 - if completion is not None: - if self._up_initial_metadata.kind is 
_Awaited.Kind.NOT_YET_ARRIVED: - self._up_initial_metadata = _ARRIVED_AND_NONE - self._up_completion = _Awaited( - _Awaited.Kind.ARRIVED, completion) - if allowance is not None: - if self._down_payload is not None: - self._operator.advance(payload=self._down_payload) - self._down_payload = None - self._down_allowance += allowance - 1 - else: - self._down_allowance += allowance - self._condition.notify_all() - - def cancel(self): - with self._condition: - if self._operation_context is not None: - self._operation_context.cancel() - self._cancelled = True - return False - - def cancelled(self): - with self._condition: - return self._cancelled - - def running(self): - with self._condition: - return not self._termination.terminated - - def done(self): - with self._condition: - return self._termination.terminated - - def result(self, timeout=None): - until = None if timeout is None else time.time() + timeout - with self._condition: - while True: - if self._termination.terminated: - if self._termination.abortion is None: - return self._up_payload - elif self._termination.abortion.kind is face.Abortion.Kind.CANCELLED: - raise future.CancelledError() - else: - raise self._termination.abortion_error # pylint: disable=raising-bad-type - else: - _wait_once_until(self._condition, until) - - def exception(self, timeout=None): - until = None if timeout is None else time.time() + timeout - with self._condition: - while True: - if self._termination.terminated: - if self._termination.abortion is None: - return None - else: - return self._termination.abortion_error - else: - _wait_once_until(self._condition, until) - - def traceback(self, timeout=None): - until = None if timeout is None else time.time() + timeout - with self._condition: - while True: - if self._termination.terminated: - if self._termination.abortion_error is None: - return None - else: - abortion_error = self._termination.abortion_error - break - else: - _wait_once_until(self._condition, until) - - try: - raise abortion_error - except face.AbortionError: - return sys.exc_info()[2] - - def add_done_callback(self, fn): - with self._condition: - if self._operation_context is not None: - outcome = self._operation_context.add_termination_callback( - _done_callback_as_operation_termination_callback(fn, self)) - if outcome is None: - return - else: - self._set_outcome(outcome) - - fn(self) - - def consume(self, value): - with self._condition: - while True: - if self._termination.terminated: - return - elif 0 < self._down_allowance: - self._operator.advance(payload=value) - self._down_allowance -= 1 - return - else: - self._condition.wait() - - def terminate(self): - with self._condition: - if self._termination.terminated: - return - elif self._down_code.kind is _Transitory.Kind.GONE: - # Conform to specified idempotence of terminate by ignoring extra calls. 
- return - else: - completion = self._down_completion() - self._operator.advance(completion=completion) - - def consume_and_terminate(self, value): - with self._condition: - while True: - if self._termination.terminated: - return - elif 0 < self._down_allowance: - completion = self._down_completion() - self._operator.advance(payload=value, completion=completion) - return - else: - self._condition.wait() - - def __iter__(self): - return self - - def __next__(self): - return self.next() - - def next(self): - with self._condition: - while True: - if self._termination.abortion_error is not None: - raise self._termination.abortion_error - elif self._up_payload is not None: - payload = self._up_payload - self._up_payload = None - if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED: - self._operator.advance(allowance=1) - return payload - elif self._up_completion.kind is _Awaited.Kind.ARRIVED: - raise StopIteration() - else: - self._condition.wait() - - def is_active(self): - with self._condition: - return not self._termination.terminated - - def time_remaining(self): - if self._operation_context is None: - return 0 - else: - return self._operation_context.time_remaining() - - def add_abortion_callback(self, abortion_callback): - with self._condition: - if self._operation_context is None: - return self._termination.abortion - else: - outcome = self._operation_context.add_termination_callback( - _abortion_callback_as_operation_termination_callback( - abortion_callback, self.set_outcome)) - if outcome is not None: - return self._set_outcome(outcome).abortion - else: - return self._termination.abortion - - def protocol_context(self): - with self._condition: - while True: - if self._protocol_context.kind is _Awaited.Kind.ARRIVED: - return self._protocol_context.value - elif self._termination.abortion_error is not None: - raise self._termination.abortion_error - else: - self._condition.wait() - - def initial_metadata(self): - with self._condition: - while True: - if self._up_initial_metadata.kind is _Awaited.Kind.ARRIVED: - return self._up_initial_metadata.value - elif self._termination.terminated: - return None - else: - self._condition.wait() - - def terminal_metadata(self): - with self._condition: - while True: - if self._up_completion.kind is _Awaited.Kind.ARRIVED: - return self._up_completion.value.terminal_metadata - elif self._termination.terminated: - return None - else: - self._condition.wait() - - def code(self): - with self._condition: - while True: - if self._up_completion.kind is _Awaited.Kind.ARRIVED: - return self._up_completion.value.code - elif self._termination.terminated: - return None - else: - self._condition.wait() - - def details(self): - with self._condition: - while True: - if self._up_completion.kind is _Awaited.Kind.ARRIVED: - return self._up_completion.value.message - elif self._termination.terminated: - return None - else: - self._condition.wait() - - def set_initial_metadata(self, initial_metadata): - with self._condition: - if (self._down_initial_metadata.kind is not - _Transitory.Kind.NOT_YET_SEEN): - raise ValueError(_CANNOT_SET_INITIAL_METADATA) - else: - self._down_initial_metadata = _GONE - self._operator.advance(initial_metadata=initial_metadata) - - def set_terminal_metadata(self, terminal_metadata): - with self._condition: - if (self._down_terminal_metadata.kind is not - _Transitory.Kind.NOT_YET_SEEN): - raise ValueError(_CANNOT_SET_TERMINAL_METADATA) - else: - self._down_terminal_metadata = _Transitory( - _Transitory.Kind.PRESENT, terminal_metadata) - - 
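The set_terminal_metadata, set_code, and set_details methods above all follow the same write-once discipline: a _Transitory slot may move from NOT_YET_SEEN to PRESENT exactly once and is drained to GONE when the completion is built. A stripped-down sketch of that guard, with invented names and no gRPC dependencies, looks roughly like this:

import enum

class _Slot(enum.Enum):
    NOT_YET_SEEN = 'not yet seen'
    PRESENT = 'present'
    GONE = 'gone'

class WriteOnceValue:
    """Toy write-once slot: set() succeeds at most once, take() drains it."""

    def __init__(self):
        self._state = _Slot.NOT_YET_SEEN
        self._value = None

    def set(self, value):
        if self._state is not _Slot.NOT_YET_SEEN:
            raise ValueError('already set or already consumed')
        self._state = _Slot.PRESENT
        self._value = value

    def take(self):
        # Drain regardless of whether a value was ever set.
        value = self._value if self._state is _Slot.PRESENT else None
        self._state, self._value = _Slot.GONE, None
        return value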
def set_code(self, code): - with self._condition: - if self._down_code.kind is not _Transitory.Kind.NOT_YET_SEEN: - raise ValueError(_CANNOT_SET_CODE) - else: - self._down_code = _Transitory(_Transitory.Kind.PRESENT, code) - - def set_details(self, details): - with self._condition: - if self._down_details.kind is not _Transitory.Kind.NOT_YET_SEEN: - raise ValueError(_CANNOT_SET_DETAILS) - else: - self._down_details = _Transitory(_Transitory.Kind.PRESENT, details) - - def set_protocol_context(self, protocol_context): - with self._condition: - self._protocol_context = _Awaited( - _Awaited.Kind.ARRIVED, protocol_context) - self._condition.notify_all() - - def set_outcome(self, outcome): - with self._condition: - return self._set_outcome(outcome) - - -class _ProtocolReceiver(base.ProtocolReceiver): - - def __init__(self, rendezvous): - self._rendezvous = rendezvous - - def context(self, protocol_context): - self._rendezvous.set_protocol_context(protocol_context) - - -def protocol_receiver(rendezvous): - return _ProtocolReceiver(rendezvous) - - -def pool_wrap(behavior, operation_context): - """Wraps an operation-related behavior so that it may be called in a pool. - - Args: - behavior: A callable related to carrying out an operation. - operation_context: A base_interfaces.OperationContext for the operation. - - Returns: - A callable that when called carries out the behavior of the given callable - and handles whatever exceptions it raises appropriately. - """ - def translation(*args): - try: - behavior(*args) - except ( - abandonment.Abandoned, - face.CancellationError, - face.ExpirationError, - face.LocalShutdownError, - face.RemoteShutdownError, - face.NetworkError, - face.RemoteError, - ) as e: - if operation_context.outcome() is None: - operation_context.fail(e) - except Exception as e: - operation_context.fail(e) - return callable_util.with_exceptions_logged( - translation, _INTERNAL_ERROR_LOG_MESSAGE) diff --git a/src/python/grpcio/grpc/framework/crust/_service.py b/src/python/grpcio/grpc/framework/crust/_service.py deleted file mode 100644 index 9903415c099..00000000000 --- a/src/python/grpcio/grpc/framework/crust/_service.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Behaviors for servicing RPCs.""" - -from grpc.framework.crust import _control -from grpc.framework.foundation import abandonment -from grpc.framework.interfaces.base import utilities -from grpc.framework.interfaces.face import face - - -class _ServicerContext(face.ServicerContext): - - def __init__(self, rendezvous): - self._rendezvous = rendezvous - - def is_active(self): - return self._rendezvous.is_active() - - def time_remaining(self): - return self._rendezvous.time_remaining() - - def add_abortion_callback(self, abortion_callback): - return self._rendezvous.add_abortion_callback(abortion_callback) - - def cancel(self): - self._rendezvous.cancel() - - def protocol_context(self): - return self._rendezvous.protocol_context() - - def invocation_metadata(self): - return self._rendezvous.initial_metadata() - - def initial_metadata(self, initial_metadata): - self._rendezvous.set_initial_metadata(initial_metadata) - - def terminal_metadata(self, terminal_metadata): - self._rendezvous.set_terminal_metadata(terminal_metadata) - - def code(self, code): - self._rendezvous.set_code(code) - - def details(self, details): - self._rendezvous.set_details(details) - - -def _adaptation(pool, in_pool): - def adaptation(operator, operation_context): - rendezvous = _control.Rendezvous(operator, operation_context) - subscription = utilities.full_subscription( - rendezvous, _control.protocol_receiver(rendezvous)) - outcome = operation_context.add_termination_callback(rendezvous.set_outcome) - if outcome is None: - pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous) - return subscription - else: - raise abandonment.Abandoned() - return adaptation - - -def adapt_inline_unary_unary(method, pool): - def in_pool(rendezvous): - request = next(rendezvous) - response = method(request, _ServicerContext(rendezvous)) - rendezvous.consume_and_terminate(response) - return _adaptation(pool, in_pool) - - -def adapt_inline_unary_stream(method, pool): - def in_pool(rendezvous): - request = next(rendezvous) - response_iterator = method(request, _ServicerContext(rendezvous)) - for response in response_iterator: - rendezvous.consume(response) - rendezvous.terminate() - return _adaptation(pool, in_pool) - - -def adapt_inline_stream_unary(method, pool): - def in_pool(rendezvous): - response = method(rendezvous, _ServicerContext(rendezvous)) - rendezvous.consume_and_terminate(response) - return _adaptation(pool, in_pool) - - -def adapt_inline_stream_stream(method, pool): - def in_pool(rendezvous): - response_iterator = method(rendezvous, _ServicerContext(rendezvous)) - for response in response_iterator: - rendezvous.consume(response) - rendezvous.terminate() - return _adaptation(pool, in_pool) - - -def adapt_event_unary_unary(method, pool): - def in_pool(rendezvous): - request = next(rendezvous) - method( - request, rendezvous.consume_and_terminate, _ServicerContext(rendezvous)) - return _adaptation(pool, in_pool) - - -def adapt_event_unary_stream(method, pool): - def 
in_pool(rendezvous): - request = next(rendezvous) - method(request, rendezvous, _ServicerContext(rendezvous)) - return _adaptation(pool, in_pool) - - -def adapt_event_stream_unary(method, pool): - def in_pool(rendezvous): - request_consumer = method( - rendezvous.consume_and_terminate, _ServicerContext(rendezvous)) - for request in rendezvous: - request_consumer.consume(request) - request_consumer.terminate() - return _adaptation(pool, in_pool) - - -def adapt_event_stream_stream(method, pool): - def in_pool(rendezvous): - request_consumer = method(rendezvous, _ServicerContext(rendezvous)) - for request in rendezvous: - request_consumer.consume(request) - request_consumer.terminate() - return _adaptation(pool, in_pool) - - -def adapt_multi_method(multi_method, pool): - def adaptation(group, method, operator, operation_context): - rendezvous = _control.Rendezvous(operator, operation_context) - subscription = utilities.full_subscription( - rendezvous, _control.protocol_receiver(rendezvous)) - outcome = operation_context.add_termination_callback(rendezvous.set_outcome) - if outcome is None: - def in_pool(): - request_consumer = multi_method.service( - group, method, rendezvous, _ServicerContext(rendezvous)) - for request in rendezvous: - request_consumer.consume(request) - request_consumer.terminate() - pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous) - return subscription - else: - raise abandonment.Abandoned() - return adaptation diff --git a/src/python/grpcio/grpc/framework/crust/implementations.py b/src/python/grpcio/grpc/framework/crust/implementations.py deleted file mode 100644 index 2d3ab733b6a..00000000000 --- a/src/python/grpcio/grpc/framework/crust/implementations.py +++ /dev/null @@ -1,366 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
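The adapt_* helpers in the removed _service.py above share one shape: submit a closure to a thread pool that drains requests from the rendezvous, feeds them to the application method, and pushes responses back. Roughly, and with every name below being an illustrative stand-in rather than gRPC API:

from concurrent import futures

def run_stream_stream_in_pool(pool, method, request_iterator, consume, terminate):
    """Toy adapter: run a stream-stream method on a pool, relaying its output."""
    def in_pool():
        for response in method(request_iterator):
            consume(response)      # hand each response back to the transport side
        terminate()                # signal completion once the method returns
    return pool.submit(in_pool)

# Usage with plain Python objects only:
pool = futures.ThreadPoolExecutor(max_workers=1)
responses = []
done = run_stream_stream_in_pool(
    pool, lambda requests: (r.upper() for r in requests),
    iter(['a', 'b']), responses.append, lambda: responses.append('<done>'))
done.result()
pool.shutdown()
assert responses == ['A', 'B', '<done>']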
- -"""Entry points into the Crust layer of RPC Framework.""" - -import six - -from grpc.framework.common import cardinality -from grpc.framework.common import style -from grpc.framework.crust import _calls -from grpc.framework.crust import _service -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.face import face - - -class _BaseServicer(base.Servicer): - - def __init__(self, adapted_methods, adapted_multi_method): - self._adapted_methods = adapted_methods - self._adapted_multi_method = adapted_multi_method - - def service(self, group, method, context, output_operator): - adapted_method = self._adapted_methods.get((group, method), None) - if adapted_method is not None: - return adapted_method(output_operator, context) - elif self._adapted_multi_method is not None: - try: - return self._adapted_multi_method( - group, method, output_operator, context) - except face.NoSuchMethodError: - raise base.NoSuchMethodError(None, None) - else: - raise base.NoSuchMethodError(None, None) - - -class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable): - - def __init__(self, end, group, method, pool): - self._end = end - self._group = group - self._method = method - self._pool = pool - - def __call__( - self, request, timeout, metadata=None, with_call=False, - protocol_options=None): - return _calls.blocking_unary_unary( - self._end, self._group, self._method, timeout, with_call, - protocol_options, metadata, request) - - def future(self, request, timeout, metadata=None, protocol_options=None): - return _calls.future_unary_unary( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request) - - def event( - self, request, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_unary_unary( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request, receiver, abortion_callback, self._pool) - - -class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable): - - def __init__(self, end, group, method, pool): - self._end = end - self._group = group - self._method = method - self._pool = pool - - def __call__(self, request, timeout, metadata=None, protocol_options=None): - return _calls.inline_unary_stream( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request) - - def event( - self, request, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_unary_stream( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request, receiver, abortion_callback, self._pool) - - -class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable): - - def __init__(self, end, group, method, pool): - self._end = end - self._group = group - self._method = method - self._pool = pool - - def __call__( - self, request_iterator, timeout, metadata=None, - with_call=False, protocol_options=None): - return _calls.blocking_stream_unary( - self._end, self._group, self._method, timeout, with_call, - protocol_options, metadata, request_iterator, self._pool) - - def future( - self, request_iterator, timeout, metadata=None, protocol_options=None): - return _calls.future_stream_unary( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request_iterator, self._pool) - - def event( - self, receiver, abortion_callback, timeout, metadata=None, - protocol_options=None): - return _calls.event_stream_unary( - self._end, self._group, self._method, timeout, protocol_options, - metadata, 
receiver, abortion_callback, self._pool) - - -class _StreamStreamMultiCallable(face.StreamStreamMultiCallable): - - def __init__(self, end, group, method, pool): - self._end = end - self._group = group - self._method = method - self._pool = pool - - def __call__( - self, request_iterator, timeout, metadata=None, protocol_options=None): - return _calls.inline_stream_stream( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request_iterator, self._pool) - - def event( - self, receiver, abortion_callback, timeout, metadata=None, - protocol_options=None): - return _calls.event_stream_stream( - self._end, self._group, self._method, timeout, protocol_options, - metadata, receiver, abortion_callback, self._pool) - - -class _GenericStub(face.GenericStub): - """An face.GenericStub implementation.""" - - def __init__(self, end, pool): - self._end = end - self._pool = pool - - def blocking_unary_unary( - self, group, method, request, timeout, metadata=None, - with_call=None, protocol_options=None): - return _calls.blocking_unary_unary( - self._end, group, method, timeout, with_call, protocol_options, - metadata, request) - - def future_unary_unary( - self, group, method, request, timeout, metadata=None, - protocol_options=None): - return _calls.future_unary_unary( - self._end, group, method, timeout, protocol_options, metadata, request) - - def inline_unary_stream( - self, group, method, request, timeout, metadata=None, - protocol_options=None): - return _calls.inline_unary_stream( - self._end, group, method, timeout, protocol_options, metadata, request) - - def blocking_stream_unary( - self, group, method, request_iterator, timeout, metadata=None, - with_call=None, protocol_options=None): - return _calls.blocking_stream_unary( - self._end, group, method, timeout, with_call, protocol_options, - metadata, request_iterator, self._pool) - - def future_stream_unary( - self, group, method, request_iterator, timeout, metadata=None, - protocol_options=None): - return _calls.future_stream_unary( - self._end, group, method, timeout, protocol_options, metadata, - request_iterator, self._pool) - - def inline_stream_stream( - self, group, method, request_iterator, timeout, metadata=None, - protocol_options=None): - return _calls.inline_stream_stream( - self._end, group, method, timeout, protocol_options, metadata, - request_iterator, self._pool) - - def event_unary_unary( - self, group, method, request, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_unary_unary( - self._end, group, method, timeout, protocol_options, metadata, request, - receiver, abortion_callback, self._pool) - - def event_unary_stream( - self, group, method, request, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_unary_stream( - self._end, group, method, timeout, protocol_options, metadata, request, - receiver, abortion_callback, self._pool) - - def event_stream_unary( - self, group, method, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_stream_unary( - self._end, group, method, timeout, protocol_options, metadata, receiver, - abortion_callback, self._pool) - - def event_stream_stream( - self, group, method, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_stream_stream( - self._end, group, method, timeout, protocol_options, metadata, receiver, - abortion_callback, self._pool) - - def unary_unary(self, 
group, method): - return _UnaryUnaryMultiCallable(self._end, group, method, self._pool) - - def unary_stream(self, group, method): - return _UnaryStreamMultiCallable(self._end, group, method, self._pool) - - def stream_unary(self, group, method): - return _StreamUnaryMultiCallable(self._end, group, method, self._pool) - - def stream_stream(self, group, method): - return _StreamStreamMultiCallable(self._end, group, method, self._pool) - - -class _DynamicStub(face.DynamicStub): - """An face.DynamicStub implementation.""" - - def __init__(self, end, group, cardinalities, pool): - self._end = end - self._group = group - self._cardinalities = cardinalities - self._pool = pool - - def __getattr__(self, attr): - method_cardinality = self._cardinalities.get(attr) - if method_cardinality is cardinality.Cardinality.UNARY_UNARY: - return _UnaryUnaryMultiCallable(self._end, self._group, attr, self._pool) - elif method_cardinality is cardinality.Cardinality.UNARY_STREAM: - return _UnaryStreamMultiCallable(self._end, self._group, attr, self._pool) - elif method_cardinality is cardinality.Cardinality.STREAM_UNARY: - return _StreamUnaryMultiCallable(self._end, self._group, attr, self._pool) - elif method_cardinality is cardinality.Cardinality.STREAM_STREAM: - return _StreamStreamMultiCallable( - self._end, self._group, attr, self._pool) - else: - raise AttributeError('_DynamicStub object has no attribute "%s"!' % attr) - - -def _adapt_method_implementations(method_implementations, pool): - adapted_implementations = {} - for name, method_implementation in six.iteritems(method_implementations): - if method_implementation.style is style.Service.INLINE: - if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: - adapted_implementations[name] = _service.adapt_inline_unary_unary( - method_implementation.unary_unary_inline, pool) - elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: - adapted_implementations[name] = _service.adapt_inline_unary_stream( - method_implementation.unary_stream_inline, pool) - elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: - adapted_implementations[name] = _service.adapt_inline_stream_unary( - method_implementation.stream_unary_inline, pool) - elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM: - adapted_implementations[name] = _service.adapt_inline_stream_stream( - method_implementation.stream_stream_inline, pool) - elif method_implementation.style is style.Service.EVENT: - if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: - adapted_implementations[name] = _service.adapt_event_unary_unary( - method_implementation.unary_unary_event, pool) - elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: - adapted_implementations[name] = _service.adapt_event_unary_stream( - method_implementation.unary_stream_event, pool) - elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: - adapted_implementations[name] = _service.adapt_event_stream_unary( - method_implementation.stream_unary_event, pool) - elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM: - adapted_implementations[name] = _service.adapt_event_stream_stream( - method_implementation.stream_stream_event, pool) - return adapted_implementations - - -def servicer(method_implementations, multi_method_implementation, pool): - """Creates a base.Servicer. 
- - It is guaranteed that any passed face.MultiMethodImplementation will - only be called to service an RPC if there is no - face.MethodImplementation for the RPC method in the passed - method_implementations dictionary. - - Args: - method_implementations: A dictionary from RPC method name to - face.MethodImplementation object to be used to service the named - RPC method. - multi_method_implementation: An face.MultiMethodImplementation to be - used to service any RPCs not serviced by the - face.MethodImplementations given in the method_implementations - dictionary, or None. - pool: A thread pool. - - Returns: - A base.Servicer that services RPCs via the given implementations. - """ - adapted_implementations = _adapt_method_implementations( - method_implementations, pool) - if multi_method_implementation is None: - adapted_multi_method_implementation = None - else: - adapted_multi_method_implementation = _service.adapt_multi_method( - multi_method_implementation, pool) - return _BaseServicer( - adapted_implementations, adapted_multi_method_implementation) - - -def generic_stub(end, pool): - """Creates an face.GenericStub. - - Args: - end: A base.End. - pool: A futures.ThreadPoolExecutor. - - Returns: - A face.GenericStub that performs RPCs via the given base.End. - """ - return _GenericStub(end, pool) - - -def dynamic_stub(end, group, cardinalities, pool): - """Creates an face.DynamicStub. - - Args: - end: A base.End. - group: The group identifier for all RPCs to be made with the created - face.DynamicStub. - cardinalities: A dict from method identifier to cardinality.Cardinality - value identifying the cardinality of every RPC method to be supported by - the created face.DynamicStub. - pool: A futures.ThreadPoolExecutor. - - Returns: - A face.DynamicStub that performs RPCs via the given base.End. - """ - return _DynamicStub(end, group, cardinalities, pool) diff --git a/src/python/grpcio/grpc/framework/foundation/_timer_future.py b/src/python/grpcio/grpc/framework/foundation/_timer_future.py deleted file mode 100644 index 2c9996aa9db..00000000000 --- a/src/python/grpcio/grpc/framework/foundation/_timer_future.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Affords a Future implementation based on Python's threading.Timer.""" - -import sys -import threading -import time - -from grpc.framework.foundation import future - - -class TimerFuture(future.Future): - """A Future implementation based around Timer objects.""" - - def __init__(self, compute_time, computation): - """Constructor. - - Args: - compute_time: The time after which to begin this future's computation. - computation: The computation to be performed within this Future. - """ - self._lock = threading.Lock() - self._compute_time = compute_time - self._computation = computation - self._timer = None - self._computing = False - self._computed = False - self._cancelled = False - self._result = None - self._exception = None - self._traceback = None - self._waiting = [] - - def _compute(self): - """Performs the computation embedded in this Future. - - Or doesn't, if the time to perform it has not yet arrived. - """ - with self._lock: - time_remaining = self._compute_time - time.time() - if 0 < time_remaining: - self._timer = threading.Timer(time_remaining, self._compute) - self._timer.start() - return - else: - self._computing = True - - try: - return_value = self._computation() - exception = None - traceback = None - except Exception as e: # pylint: disable=broad-except - return_value = None - exception = e - traceback = sys.exc_info()[2] - - with self._lock: - self._computing = False - self._computed = True - self._return_value = return_value - self._exception = exception - self._traceback = traceback - waiting = self._waiting - - for callback in waiting: - callback(self) - - def start(self): - """Starts this Future. - - This must be called exactly once, immediately after construction. 
- """ - with self._lock: - self._timer = threading.Timer( - self._compute_time - time.time(), self._compute) - self._timer.start() - - def cancel(self): - """See future.Future.cancel for specification.""" - with self._lock: - if self._computing or self._computed: - return False - elif self._cancelled: - return True - else: - self._timer.cancel() - self._cancelled = True - waiting = self._waiting - - for callback in waiting: - try: - callback(self) - except Exception: # pylint: disable=broad-except - pass - - return True - - def cancelled(self): - """See future.Future.cancelled for specification.""" - with self._lock: - return self._cancelled - - def running(self): - """See future.Future.running for specification.""" - with self._lock: - return not self._computed and not self._cancelled - - def done(self): - """See future.Future.done for specification.""" - with self._lock: - return self._computed or self._cancelled - - def result(self, timeout=None): - """See future.Future.result for specification.""" - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - if self._exception is None: - return self._return_value - else: - raise self._exception # pylint: disable=raising-bad-type - - condition = threading.Condition() - def notify_condition(unused_future): - with condition: - condition.notify() - self._waiting.append(notify_condition) - - with condition: - condition.wait(timeout=timeout) - - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - if self._exception is None: - return self._return_value - else: - raise self._exception # pylint: disable=raising-bad-type - else: - raise future.TimeoutError() - - def exception(self, timeout=None): - """See future.Future.exception for specification.""" - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - return self._exception - - condition = threading.Condition() - def notify_condition(unused_future): - with condition: - condition.notify() - self._waiting.append(notify_condition) - - with condition: - condition.wait(timeout=timeout) - - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - return self._exception - else: - raise future.TimeoutError() - - def traceback(self, timeout=None): - """See future.Future.traceback for specification.""" - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - return self._traceback - - condition = threading.Condition() - def notify_condition(unused_future): - with condition: - condition.notify() - self._waiting.append(notify_condition) - - with condition: - condition.wait(timeout=timeout) - - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - return self._traceback - else: - raise future.TimeoutError() - - def add_done_callback(self, fn): - """See future.Future.add_done_callback for specification.""" - with self._lock: - if not self._computed and not self._cancelled: - self._waiting.append(fn) - return - - fn(self) diff --git a/src/python/grpcio/grpc/framework/foundation/activated.py b/src/python/grpcio/grpc/framework/foundation/activated.py deleted file mode 100644 index 8b8e4f45b5c..00000000000 --- a/src/python/grpcio/grpc/framework/foundation/activated.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Interfaces related to streams of values or objects.""" - -import abc - -import six - -class Activated(six.with_metaclass(abc.ABCMeta)): - """Interface for objects that may be started and stopped. - - Values implementing this type must also implement the context manager - protocol. - """ - - @abc.abstractmethod - def __enter__(self): - """See the context manager protocol for specification.""" - raise NotImplementedError() - - @abc.abstractmethod - def __exit__(self, exc_type, exc_val, exc_tb): - """See the context manager protocol for specification.""" - raise NotImplementedError() - - @abc.abstractmethod - def start(self): - """Activates this object. - - Returns: - A value equal to the value returned by this object's __enter__ method. - """ - raise NotImplementedError() - - @abc.abstractmethod - def stop(self): - """Deactivates this object.""" - raise NotImplementedError() diff --git a/src/python/grpcio/grpc/framework/foundation/relay.py b/src/python/grpcio/grpc/framework/foundation/relay.py deleted file mode 100644 index 20f41b27388..00000000000 --- a/src/python/grpcio/grpc/framework/foundation/relay.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Implementations of in-order work deference.""" - -import abc -import enum -import threading - -from grpc.framework.foundation import activated -from grpc.framework.foundation import logging_pool - -_NULL_BEHAVIOR = lambda unused_value: None - - -class Relay(object): - """Performs work submitted to it in another thread. - - Performs work in the order in which work was submitted to it; otherwise there - would be no reason to use an implementation of this interface instead of a - thread pool. - """ - - @abc.abstractmethod - def add_value(self, value): - """Adds a value to be passed to the behavior registered with this Relay. - - Args: - value: A value that will be passed to a call made in another thread to the - behavior registered with this Relay. - """ - raise NotImplementedError() - - @abc.abstractmethod - def set_behavior(self, behavior): - """Sets the behavior that this Relay should call when passed values. - - Args: - behavior: The behavior that this Relay should call in another thread when - passed a value, or None to have passed values ignored. 
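The Relay interface described below promises strictly in-order hand-off of values to another thread, which is exactly the property a plain thread pool does not guarantee on its own. A toy single-worker equivalent, using only the standard library and invented names, could look like:

import queue
import threading

class MiniRelay:
    """Toy in-order relay: one worker thread handles values strictly in order."""

    _STOP = object()  # sentinel telling the worker to exit

    def __init__(self, behavior):
        self._behavior = behavior
        self._queue = queue.Queue()
        self._worker = threading.Thread(target=self._spin, daemon=True)
        self._worker.start()

    def _spin(self):
        while True:
            value = self._queue.get()
            if value is MiniRelay._STOP:
                break
            self._behavior(value)

    def add_value(self, value):
        self._queue.put(value)

    def stop(self):
        self._queue.put(MiniRelay._STOP)
        self._worker.join()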
- """ - raise NotImplementedError() - - -class _PoolRelay(activated.Activated, Relay): - - @enum.unique - class _State(enum.Enum): - INACTIVE = 'inactive' - IDLE = 'idle' - SPINNING = 'spinning' - - def __init__(self, pool, behavior): - self._condition = threading.Condition() - self._pool = pool - self._own_pool = pool is None - self._state = _PoolRelay._State.INACTIVE - self._activated = False - self._spinning = False - self._values = [] - self._behavior = _NULL_BEHAVIOR if behavior is None else behavior - - def _spin(self, behavior, value): - while True: - behavior(value) - with self._condition: - if self._values: - value = self._values.pop(0) - behavior = self._behavior - else: - self._state = _PoolRelay._State.IDLE - self._condition.notify_all() - break - - def add_value(self, value): - with self._condition: - if self._state is _PoolRelay._State.INACTIVE: - raise ValueError('add_value not valid on inactive Relay!') - elif self._state is _PoolRelay._State.IDLE: - self._pool.submit(self._spin, self._behavior, value) - self._state = _PoolRelay._State.SPINNING - else: - self._values.append(value) - - def set_behavior(self, behavior): - with self._condition: - self._behavior = _NULL_BEHAVIOR if behavior is None else behavior - - def _start(self): - with self._condition: - self._state = _PoolRelay._State.IDLE - if self._own_pool: - self._pool = logging_pool.pool(1) - return self - - def _stop(self): - with self._condition: - while self._state is _PoolRelay._State.SPINNING: - self._condition.wait() - if self._own_pool: - self._pool.shutdown(wait=True) - self._state = _PoolRelay._State.INACTIVE - - def __enter__(self): - return self._start() - - def __exit__(self, exc_type, exc_val, exc_tb): - self._stop() - return False - - def start(self): - return self._start() - - def stop(self): - self._stop() - - -def relay(behavior): - """Creates a Relay. - - Args: - behavior: The behavior to be called by the created Relay, or None to have - passed values dropped until a different behavior is given to the returned - Relay later. - - Returns: - An object that is both an activated.Activated and a Relay. The object is - only valid for use as a Relay when activated. - """ - return _PoolRelay(None, behavior) - - -def pool_relay(pool, behavior): - """Creates a Relay that uses a given thread pool. - - This object will make use of at most one thread in the given pool. - - Args: - pool: A futures.ThreadPoolExecutor for use by the created Relay. - behavior: The behavior to be called by the created Relay, or None to have - passed values dropped until a different behavior is given to the returned - Relay later. - - Returns: - An object that is both an activated.Activated and a Relay. The object is - only valid for use as a Relay when activated. - """ - return _PoolRelay(pool, behavior) diff --git a/src/python/grpcio/grpc/framework/interfaces/links/links.py b/src/python/grpcio/grpc/framework/interfaces/links/links.py deleted file mode 100644 index 9631b19078e..00000000000 --- a/src/python/grpcio/grpc/framework/interfaces/links/links.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""The low-level ticket-exchanging-links interface of RPC Framework.""" - -import abc -import collections -import enum - -import six - - -class Protocol(collections.namedtuple('Protocol', ('kind', 'value',))): - """A sum type for handles to a system that transmits tickets. - - Attributes: - kind: A Kind value identifying the kind of value being passed. - value: The value being passed between the high-level application and the - system affording ticket transport. - """ - - @enum.unique - class Kind(enum.Enum): - CALL_OPTION = 'call option' - SERVICER_CONTEXT = 'servicer context' - INVOCATION_CONTEXT = 'invocation context' - - -class Ticket( - collections.namedtuple( - 'Ticket', - ('operation_id', 'sequence_number', 'group', 'method', 'subscription', - 'timeout', 'allowance', 'initial_metadata', 'payload', - 'terminal_metadata', 'code', 'message', 'termination', 'protocol',))): - """A sum type for all values sent from a front to a back. - - Attributes: - operation_id: A unique-with-respect-to-equality hashable object identifying - a particular operation. - sequence_number: A zero-indexed integer sequence number identifying the - ticket's place in the stream of tickets sent in one direction for the - particular operation. - group: The group to which the method of the operation belongs. Must be - present in the first ticket from invocation side to service side. Ignored - for all other tickets exchanged during the operation. - method: The name of an operation. Must be present in the first ticket from - invocation side to service side. Ignored for all other tickets exchanged - during the operation. - subscription: A Subscription value describing the interest one side has in - receiving information from the other side. Must be present in the first - ticket from either side. Ignored for all other tickets exchanged during - the operation. - timeout: A nonzero length of time (measured from the beginning of the - operation) to allow for the entire operation. Must be present in the first - ticket from invocation side to service side. Optional for all other - tickets exchanged during the operation. Receipt of a value from the other - side of the operation indicates the value in use by that side. Setting a - value on a later ticket allows either side to request time extensions (or - even time reductions!) 
on in-progress operations. - allowance: A positive integer granting permission for a number of payloads - to be transmitted to the communicating side of the operation, or None if - no additional allowance is being granted with this ticket. - initial_metadata: An optional metadata value communicated from one side to - the other at the beginning of the operation. May be non-None in at most - one ticket from each side. Any non-None value must appear no later than - the first payload value. - payload: A customer payload object. May be None. - terminal_metadata: A metadata value comminicated from one side to the other - at the end of the operation. May be non-None in the same ticket as - the code and message, but must be None for all earlier tickets. - code: A value communicated at operation completion. May be None. - message: A value communicated at operation completion. May be None. - termination: A Termination value describing the end of the operation, or - None if the operation has not yet terminated. If set, no further tickets - may be sent in the same direction. - protocol: A Protocol value or None, with further semantics being a matter - between high-level application and underlying ticket transport. - """ - - @enum.unique - class Subscription(enum.Enum): - """Identifies the level of subscription of a side of an operation.""" - - NONE = 'none' - TERMINATION = 'termination' - FULL = 'full' - - @enum.unique - class Termination(enum.Enum): - """Identifies the termination of an operation.""" - - COMPLETION = 'completion' - CANCELLATION = 'cancellation' - EXPIRATION = 'expiration' - SHUTDOWN = 'shutdown' - RECEPTION_FAILURE = 'reception failure' - TRANSMISSION_FAILURE = 'transmission failure' - LOCAL_FAILURE = 'local failure' - REMOTE_FAILURE = 'remote failure' - - -class Link(six.with_metaclass(abc.ABCMeta)): - """Accepts and emits tickets.""" - - @abc.abstractmethod - def accept_ticket(self, ticket): - """Accept a Ticket. - - Args: - ticket: Any Ticket. - """ - raise NotImplementedError() - - @abc.abstractmethod - def join_link(self, link): - """Mates this object with a peer with which it will exchange tickets.""" - raise NotImplementedError() diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index b37e27c27ea..fc0908b2f1b 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -81,6 +81,7 @@ CORE_SOURCE_FILES = [ 'src/core/lib/channel/channel_stack_builder.c', 'src/core/lib/channel/compress_filter.c', 'src/core/lib/channel/connected_channel.c', + 'src/core/lib/channel/handshaker.c', 'src/core/lib/channel/http_client_filter.c', 'src/core/lib/channel/http_server_filter.c', 'src/core/lib/compression/compression.c', diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py index 0f4db9d972e..ea38526a281 100644 --- a/src/python/grpcio/grpc_version.py +++ b/src/python/grpcio/grpc_version.py @@ -29,4 +29,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!! 
-VERSION='0.16.0.dev0' +VERSION='1.1.0.dev0' diff --git a/src/python/grpcio_health_checking/MANIFEST.in b/src/python/grpcio_health_checking/MANIFEST.in index 7d26647697e..7407f646d16 100644 --- a/src/python/grpcio_health_checking/MANIFEST.in +++ b/src/python/grpcio_health_checking/MANIFEST.in @@ -1,3 +1,4 @@ +include grpc_version.py include health_commands.py -graft grpc_health +graft grpc global-exclude *.pyc diff --git a/src/python/grpcio_health_checking/grpc_health/__init__.py b/src/python/grpcio_health_checking/grpc/__init__.py similarity index 96% rename from src/python/grpcio_health_checking/grpc_health/__init__.py rename to src/python/grpcio_health_checking/grpc/__init__.py index 70865191060..fcc7048815f 100644 --- a/src/python/grpcio_health_checking/grpc_health/__init__.py +++ b/src/python/grpcio_health_checking/grpc/__init__.py @@ -27,4 +27,4 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - +__import__('pkg_resources').declare_namespace(__name__) diff --git a/src/python/grpcio/grpc/_adapter/__init__.py b/src/python/grpcio_health_checking/grpc/health/__init__.py similarity index 100% rename from src/python/grpcio/grpc/_adapter/__init__.py rename to src/python/grpcio_health_checking/grpc/health/__init__.py diff --git a/src/python/grpcio/grpc/_links/__init__.py b/src/python/grpcio_health_checking/grpc/health/v1/__init__.py similarity index 100% rename from src/python/grpcio/grpc/_links/__init__.py rename to src/python/grpcio_health_checking/grpc/health/v1/__init__.py diff --git a/src/python/grpcio_health_checking/grpc_health/health/v1/health.py b/src/python/grpcio_health_checking/grpc/health/v1/health.py similarity index 81% rename from src/python/grpcio_health_checking/grpc_health/health/v1/health.py rename to src/python/grpcio_health_checking/grpc/health/v1/health.py index 8da60c70cb1..8108ac10962 100644 --- a/src/python/grpcio_health_checking/grpc_health/health/v1/health.py +++ b/src/python/grpcio_health_checking/grpc/health/v1/health.py @@ -31,10 +31,12 @@ import threading -from grpc_health.health.v1 import health_pb2 +import grpc +from grpc.health.v1 import health_pb2 -class HealthServicer(health_pb2.BetaHealthServicer): + +class HealthServicer(health_pb2.HealthServicer): """Servicer handling RPCs for service statuses.""" def __init__(self): @@ -43,14 +45,12 @@ class HealthServicer(health_pb2.BetaHealthServicer): def Check(self, request, context): with self._server_status_lock: - if request.service not in self._server_status: - # TODO(atash): once the Python API has a way of setting the server - # status, bring us into conformance with the health check spec by - # returning the NOT_FOUND status here. - raise NotImplementedError() + status = self._server_status.get(request.service) + if status is None: + context.set_code(grpc.StatusCode.NOT_FOUND) + return health_pb2.HealthCheckResponse() else: - return health_pb2.HealthCheckResponse( - status=self._server_status[request.service]) + return health_pb2.HealthCheckResponse(status=status) def set(self, service, status): """Sets the status of a service. 
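With the `HealthServicer.Check` hunk above, an unregistered service no longer raises `NotImplementedError`; the servicer sets `grpc.StatusCode.NOT_FOUND` on the context and returns an empty `HealthCheckResponse`, which a client observes as a `grpc.RpcError`. A rough end-to-end sketch using the names from this diff and from the reworked `_health_servicer_test.py` further down (`my.package.MyService` and `no.such.Service` are made-up service names):

    from concurrent import futures

    import grpc
    from grpc.health.v1 import health
    from grpc.health.v1 import health_pb2

    servicer = health.HealthServicer()
    servicer.set('my.package.MyService', health_pb2.HealthCheckResponse.SERVING)

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
    health_pb2.add_HealthServicer_to_server(servicer, server)
    port = server.add_insecure_port('[::]:0')
    server.start()

    stub = health_pb2.HealthStub(grpc.insecure_channel('localhost:%d' % port))
    # Registered service: the echoed status is SERVING.
    response = stub.Check(health_pb2.HealthCheckRequest(service='my.package.MyService'))
    assert response.status == health_pb2.HealthCheckResponse.SERVING
    # Unregistered service: the servicer sets NOT_FOUND, surfaced as grpc.RpcError.
    try:
        stub.Check(health_pb2.HealthCheckRequest(service='no.such.Service'))
    except grpc.RpcError as error:
        assert error.code() == grpc.StatusCode.NOT_FOUND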
@@ -63,4 +63,3 @@ class HealthServicer(health_pb2.BetaHealthServicer): """ with self._server_status_lock: self._server_status[service] = status - diff --git a/src/python/grpcio_health_checking/grpc_health/health/__init__.py b/src/python/grpcio_health_checking/grpc_health/health/__init__.py deleted file mode 100644 index 70865191060..00000000000 --- a/src/python/grpcio_health_checking/grpc_health/health/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - diff --git a/src/python/grpcio_health_checking/grpc_health/health/v1/__init__.py b/src/python/grpcio_health_checking/grpc_health/health/v1/__init__.py deleted file mode 100644 index 70865191060..00000000000 --- a/src/python/grpcio_health_checking/grpc_health/health/v1/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - diff --git a/src/python/grpcio/grpc/framework/interfaces/links/__init__.py b/src/python/grpcio_health_checking/grpc_version.py similarity index 90% rename from src/python/grpcio/grpc/framework/interfaces/links/__init__.py rename to src/python/grpcio_health_checking/grpc_version.py index 70865191060..be0d0ced3cb 100644 --- a/src/python/grpcio/grpc/framework/interfaces/links/__init__.py +++ b/src/python/grpcio_health_checking/grpc_version.py @@ -1,4 +1,4 @@ -# Copyright 2015, Google Inc. +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,4 +27,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_health_checking/grpc_version.py.template`!!! +VERSION='1.1.0.dev0' diff --git a/src/python/grpcio_health_checking/health_commands.py b/src/python/grpcio_health_checking/health_commands.py index a7a59f6974b..66df25da63f 100644 --- a/src/python/grpcio_health_checking/health_commands.py +++ b/src/python/grpcio_health_checking/health_commands.py @@ -29,16 +29,10 @@ """Provides distutils command classes for the GRPC Python setup process.""" -import distutils -import glob import os -import os.path import shutil -import subprocess -import sys import setuptools -from setuptools.command import build_py ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__))) HEALTH_PROTO = os.path.join(ROOT_DIR, '../../proto/grpc/health/v1/health.proto') @@ -60,12 +54,25 @@ class CopyProtoModules(setuptools.Command): if os.path.isfile(HEALTH_PROTO): shutil.copyfile( HEALTH_PROTO, - os.path.join(ROOT_DIR, 'grpc_health/health/v1/health.proto')) + os.path.join(ROOT_DIR, 'grpc/health/v1/health.proto')) -class BuildPy(build_py.build_py): - """Custom project build command.""" +class BuildPackageProtos(setuptools.Command): + """Command to generate project *_pb2.py modules from proto files.""" + + description = 'build grpc protobuf modules' + user_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass def run(self): - self.run_command('build_proto_modules') - build_py.build_py.run(self) + # due to limitations of the proto generator, we require that only *one* + # directory is provided as an 'include' directory. We assume it's the '' key + # to `self.distribution.package_dir` (and get a key error if it's not + # there). 
+ from grpc.tools import command + command.build_package_protos(self.distribution.package_dir['']) diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py index 70b4575bf5d..727d6288853 100644 --- a/src/python/grpcio_health_checking/setup.py +++ b/src/python/grpcio_health_checking/setup.py @@ -30,49 +30,42 @@ """Setup module for the GRPC Python package's optional health checking.""" import os -import os.path import sys -from distutils import core as _core import setuptools -import grpc.tools.command - # Ensure we're in the proper directory whether or not we're being used by pip. os.chdir(os.path.dirname(os.path.abspath(__file__))) # Break import-style to ensure we can actually find our commands module. import health_commands - -PACKAGES = ( - setuptools.find_packages('.') -) +import grpc_version PACKAGE_DIRECTORIES = { '': '.', } SETUP_REQUIRES = ( - 'grpcio-tools>=0.14.0', + 'grpcio-tools>=0.15.0', ) INSTALL_REQUIRES = ( - 'grpcio>=0.13.1', + 'grpcio>=0.15.0', ) COMMAND_CLASS = { # Run preprocess from the repository *before* doing any packaging! 'preprocess': health_commands.CopyProtoModules, - - 'build_proto_modules': grpc.tools.command.BuildProtoModules, - 'build_py': health_commands.BuildPy, + 'build_package_protos': health_commands.BuildPackageProtos, } setuptools.setup( name='grpcio-health-checking', - version='0.14.0', - packages=list(PACKAGES), + version=grpc_version.VERSION, + license='3-clause BSD', package_dir=PACKAGE_DIRECTORIES, + packages=setuptools.find_packages('.'), + namespace_packages=['grpc'], install_requires=INSTALL_REQUIRES, setup_requires=SETUP_REQUIRES, cmdclass=COMMAND_CLASS diff --git a/src/python/grpcio_tests/commands.py b/src/python/grpcio_tests/commands.py index 171829b62fc..5ee551cfe1e 100644 --- a/src/python/grpcio_tests/commands.py +++ b/src/python/grpcio_tests/commands.py @@ -138,7 +138,7 @@ class BuildPy(build_py.build_py): def run(self): try: - self.run_command('build_proto_modules') + self.run_command('build_package_protos') except CommandError as error: sys.stderr.write('warning: %s\n' % error.message) build_py.build_py.run(self) diff --git a/src/python/grpcio_tests/grpc_version.py b/src/python/grpcio_tests/grpc_version.py index 7aa600728a8..90f68a5741a 100644 --- a/src/python/grpcio_tests/grpc_version.py +++ b/src/python/grpcio_tests/grpc_version.py @@ -29,4 +29,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!! -VERSION='0.16.0.dev0' +VERSION='1.1.0.dev0' diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py index 7eef420bdb0..0afaf7dfa21 100644 --- a/src/python/grpcio_tests/setup.py +++ b/src/python/grpcio_tests/setup.py @@ -75,7 +75,7 @@ COMMAND_CLASS = { # Run `preprocess` *before* doing any packaging! 
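The packaging hunks above rename the code-generation command from `build_proto_modules` to `build_package_protos`, turn `grpc` into a `pkg_resources` namespace package, and take the package version from the generated `grpc_version.py` rather than a hard-coded string. Generation itself is delegated to `grpc.tools.command.build_package_protos`, which accepts a single package root (the `''` entry of `package_dir`) as its only include directory. A small sketch of calling it directly, assuming `grpcio-tools>=0.15.0` is installed:

    from grpc.tools import command

    # Scans the package root for *.proto files and writes the matching *_pb2.py
    # modules next to them; only one include root is supported.
    command.build_package_protos('.')

In a source checkout the same step runs through setup.py, e.g. `python setup.py preprocess build_package_protos`, where `preprocess` first copies `health.proto` into the package tree.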
'preprocess': commands.GatherProto, - 'build_proto_modules': grpc.tools.command.BuildProtoModules, + 'build_package_protos': grpc.tools.command.BuildPackageProtos, 'build_py': commands.BuildPy, 'run_interop': commands.RunInterop, 'test_lite': commands.TestLite diff --git a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py index 1b633886635..80300d13df7 100644 --- a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py +++ b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py @@ -27,48 +27,68 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Tests of grpc_health.health.v1.health.""" +"""Tests of grpc.health.v1.health.""" import unittest -from grpc_health.health.v1 import health -from grpc_health.health.v1 import health_pb2 +import grpc +from grpc.framework.foundation import logging_pool +from grpc.health.v1 import health +from grpc.health.v1 import health_pb2 + +from tests.unit.framework.common import test_constants class HealthServicerTest(unittest.TestCase): def setUp(self): - self.servicer = health.HealthServicer() - self.servicer.set('', health_pb2.HealthCheckResponse.SERVING) - self.servicer.set('grpc.test.TestServiceServing', - health_pb2.HealthCheckResponse.SERVING) - self.servicer.set('grpc.test.TestServiceUnknown', - health_pb2.HealthCheckResponse.UNKNOWN) - self.servicer.set('grpc.test.TestServiceNotServing', - health_pb2.HealthCheckResponse.NOT_SERVING) + servicer = health.HealthServicer() + servicer.set('', health_pb2.HealthCheckResponse.SERVING) + servicer.set('grpc.test.TestServiceServing', + health_pb2.HealthCheckResponse.SERVING) + servicer.set('grpc.test.TestServiceUnknown', + health_pb2.HealthCheckResponse.UNKNOWN) + servicer.set('grpc.test.TestServiceNotServing', + health_pb2.HealthCheckResponse.NOT_SERVING) + server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) + self._server = grpc.server(server_pool) + port = self._server.add_insecure_port('[::]:0') + health_pb2.add_HealthServicer_to_server(servicer, self._server) + self._server.start() + + channel = grpc.insecure_channel('localhost:%d' % port) + self._stub = health_pb2.HealthStub(channel) def test_empty_service(self): request = health_pb2.HealthCheckRequest() - resp = self.servicer.Check(request, None) - self.assertEqual(resp.status, health_pb2.HealthCheckResponse.SERVING) + resp = self._stub.Check(request) + self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status) def test_serving_service(self): request = health_pb2.HealthCheckRequest( service='grpc.test.TestServiceServing') - resp = self.servicer.Check(request, None) - self.assertEqual(resp.status, health_pb2.HealthCheckResponse.SERVING) + resp = self._stub.Check(request) + self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status) def test_unknown_serivce(self): request = health_pb2.HealthCheckRequest( service='grpc.test.TestServiceUnknown') - resp = self.servicer.Check(request, None) - self.assertEqual(resp.status, health_pb2.HealthCheckResponse.UNKNOWN) + resp = self._stub.Check(request) + self.assertEqual(health_pb2.HealthCheckResponse.UNKNOWN, resp.status) def test_not_serving_service(self): request = health_pb2.HealthCheckRequest( service='grpc.test.TestServiceNotServing') - resp = self.servicer.Check(request, None) - self.assertEqual(resp.status, health_pb2.HealthCheckResponse.NOT_SERVING) + resp = 
self._stub.Check(request) + self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING, resp.status) + + def test_not_found_service(self): + request = health_pb2.HealthCheckRequest( + service='not-found') + with self.assertRaises(grpc.RpcError) as context: + resp = self._stub.Check(request) + + self.assertEqual(grpc.StatusCode.NOT_FOUND, context.exception.code()) if __name__ == '__main__': diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py index 86aa0495a2c..97e6c9e27ef 100644 --- a/src/python/grpcio_tests/tests/interop/methods.py +++ b/src/python/grpcio_tests/tests/interop/methods.py @@ -39,6 +39,7 @@ import time from oauth2client import client as oauth2client_client +import grpc from grpc.beta import implementations from grpc.beta import interfaces from grpc.framework.common import cardinality @@ -57,12 +58,18 @@ class TestService(test_pb2.BetaTestServiceServicer): return empty_pb2.Empty() def UnaryCall(self, request, context): + if request.HasField('response_status'): + context.code(request.response_status.code) + context.details(request.response_status.message) return messages_pb2.SimpleResponse( payload=messages_pb2.Payload( type=messages_pb2.COMPRESSABLE, body=b'\x00' * request.response_size)) def StreamingOutputCall(self, request, context): + if request.HasField('response_status'): + context.code(request.response_status.code) + context.details(request.response_status.message) for response_parameters in request.response_parameters: yield messages_pb2.StreamingOutputCallResponse( payload=messages_pb2.Payload( @@ -79,6 +86,9 @@ class TestService(test_pb2.BetaTestServiceServicer): def FullDuplexCall(self, request_iterator, context): for request in request_iterator: + if request.HasField('response_status'): + context.code(request.response_status.code) + context.details(request.response_status.message) for response_parameters in request.response_parameters: yield messages_pb2.StreamingOutputCallResponse( payload=messages_pb2.Payload( @@ -289,6 +299,39 @@ def _empty_stream(stub): pass +def _status_code_and_message(stub): + with stub: + message = 'test status message' + code = 2 + status = grpc.StatusCode.UNKNOWN # code = 2 + request = messages_pb2.SimpleRequest( + response_type=messages_pb2.COMPRESSABLE, + response_size=1, + payload=messages_pb2.Payload(body=b'\x00'), + response_status=messages_pb2.EchoStatus(code=code, message=message) + ) + response_future = stub.UnaryCall.future(request, _TIMEOUT) + if response_future.code() != status: + raise ValueError( + 'expected code %s, got %s' % (status, response_future.code())) + if response_future.details() != message: + raise ValueError( + 'expected message %s, got %s' % (message, response_future.details())) + + request = messages_pb2.StreamingOutputCallRequest( + response_type=messages_pb2.COMPRESSABLE, + response_parameters=( + messages_pb2.ResponseParameters(size=1),), + response_status=messages_pb2.EchoStatus(code=code, message=message)) + response_iterator = stub.StreamingOutputCall(request, _TIMEOUT) + if response_future.code() != status: + raise ValueError( + 'expected code %s, got %s' % (status, response_iterator.code())) + if response_future.details() != message: + raise ValueError( + 'expected message %s, got %s' % (message, response_iterator.details())) + + def _compute_engine_creds(stub, args): response = _large_unary_common_behavior(stub, True, True) if args.default_service_account != response.username: @@ -347,6 +390,7 @@ class TestCase(enum.Enum): CANCEL_AFTER_BEGIN 
= 'cancel_after_begin' CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response' EMPTY_STREAM = 'empty_stream' + STATUS_CODE_AND_MESSAGE = 'status_code_and_message' COMPUTE_ENGINE_CREDS = 'compute_engine_creds' OAUTH2_AUTH_TOKEN = 'oauth2_auth_token' JWT_TOKEN_CREDS = 'jwt_token_creds' @@ -372,6 +416,8 @@ class TestCase(enum.Enum): _timeout_on_sleeping_server(stub) elif self is TestCase.EMPTY_STREAM: _empty_stream(stub) + elif self is TestCase.STATUS_CODE_AND_MESSAGE: + _status_code_and_message(stub) elif self is TestCase.COMPUTE_ENGINE_CREDS: _compute_engine_creds(stub, args) elif self is TestCase.OAUTH2_AUTH_TOKEN: diff --git a/src/python/grpcio_tests/tests/qps/qps_worker.py b/src/python/grpcio_tests/tests/qps/qps_worker.py index 3abf0d08dd9..2371ff09562 100644 --- a/src/python/grpcio_tests/tests/qps/qps_worker.py +++ b/src/python/grpcio_tests/tests/qps/qps_worker.py @@ -40,7 +40,7 @@ from tests.qps import worker_server def run_worker_server(port): - server = grpc.server((), futures.ThreadPoolExecutor(max_workers=5)) + server = grpc.server(futures.ThreadPoolExecutor(max_workers=5)) servicer = worker_server.WorkerServer() services_pb2.add_WorkerServiceServicer_to_server(servicer, server) server.add_insecure_port('[::]:{}'.format(port)) diff --git a/src/python/grpcio_tests/tests/qps/worker_server.py b/src/python/grpcio_tests/tests/qps/worker_server.py index 932a1ffe2b4..46d542940f1 100644 --- a/src/python/grpcio_tests/tests/qps/worker_server.py +++ b/src/python/grpcio_tests/tests/qps/worker_server.py @@ -82,7 +82,7 @@ class WorkerServer(services_pb2.WorkerServiceServicer): server_threads = multiprocessing.cpu_count() * 5 else: server_threads = config.async_server_threads - server = grpc.server((), futures.ThreadPoolExecutor( + server = grpc.server(futures.ThreadPoolExecutor( max_workers=server_threads)) if config.server_type == control_pb2.ASYNC_SERVER: servicer = benchmark_server.BenchmarkServer() diff --git a/src/python/grpcio_tests/tests/unit/_adapter/.gitignore b/src/python/grpcio_tests/tests/unit/_adapter/.gitignore deleted file mode 100644 index a6f96cd6dbb..00000000000 --- a/src/python/grpcio_tests/tests/unit/_adapter/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.a -*.so -*.dll -*.pyc -*.pyd diff --git a/src/python/grpcio_tests/tests/unit/_adapter/__init__.py b/src/python/grpcio_tests/tests/unit/_adapter/__init__.py deleted file mode 100644 index 70865191060..00000000000 --- a/src/python/grpcio_tests/tests/unit/_adapter/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - diff --git a/src/python/grpcio_tests/tests/unit/_adapter/_proto_scenarios.py b/src/python/grpcio_tests/tests/unit/_adapter/_proto_scenarios.py deleted file mode 100644 index 7a90eacf77f..00000000000 --- a/src/python/grpcio_tests/tests/unit/_adapter/_proto_scenarios.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Test scenarios using protocol buffers.""" - -import abc -import threading - -import six - -from tests.unit._junkdrawer import math_pb2 - - -class ProtoScenario(six.with_metaclass(abc.ABCMeta)): - """An RPC test scenario using protocol buffers.""" - - @abc.abstractmethod - def method(self): - """Access the test method name. - - Returns: - The test method name. - """ - raise NotImplementedError() - - @abc.abstractmethod - def serialize_request(self, request): - """Serialize a request protocol buffer. - - Args: - request: A request protocol buffer. - - Returns: - The bytestring serialization of the given request protocol buffer. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_request(self, request_bytestring): - """Deserialize a request protocol buffer. - - Args: - request_bytestring: The bytestring serialization of a request protocol - buffer. - - Returns: - The request protocol buffer deserialized from the given byte string. - """ - raise NotImplementedError() - - @abc.abstractmethod - def serialize_response(self, response): - """Serialize a response protocol buffer. - - Args: - response: A response protocol buffer. 
- - Returns: - The bytestring serialization of the given response protocol buffer. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_response(self, response_bytestring): - """Deserialize a response protocol buffer. - - Args: - response_bytestring: The bytestring serialization of a response protocol - buffer. - - Returns: - The response protocol buffer deserialized from the given byte string. - """ - raise NotImplementedError() - - @abc.abstractmethod - def requests(self): - """Access the sequence of requests for this scenario. - - Returns: - A sequence of request protocol buffers. - """ - raise NotImplementedError() - - @abc.abstractmethod - def response_for_request(self, request): - """Access the response for a particular request. - - Args: - request: A request protocol buffer. - - Returns: - The response protocol buffer appropriate for the given request. - """ - raise NotImplementedError() - - @abc.abstractmethod - def verify_requests(self, experimental_requests): - """Verify the requests transmitted through the system under test. - - Args: - experimental_requests: The request protocol buffers transmitted through - the system under test. - - Returns: - True if the requests satisfy this test scenario; False otherwise. - """ - raise NotImplementedError() - - @abc.abstractmethod - def verify_responses(self, experimental_responses): - """Verify the responses transmitted through the system under test. - - Args: - experimental_responses: The response protocol buffers transmitted through - the system under test. - - Returns: - True if the responses satisfy this test scenario; False otherwise. - """ - raise NotImplementedError() - - -class EmptyScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - def method(self): - return 'DivMany' - - def serialize_request(self, request): - raise ValueError('This should not be necessary to call!') - - def deserialize_request(self, request_bytestring): - raise ValueError('This should not be necessary to call!') - - def serialize_response(self, response): - raise ValueError('This should not be necessary to call!') - - def deserialize_response(self, response_bytestring): - raise ValueError('This should not be necessary to call!') - - def requests(self): - return () - - def response_for_request(self, request): - raise ValueError('This should not be necessary to call!') - - def verify_requests(self, experimental_requests): - return not experimental_requests - - def verify_responses(self, experimental_responses): - return not experimental_responses - - -class BidirectionallyUnaryScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - _DIVIDEND = 59 - _DIVISOR = 7 - _QUOTIENT = 8 - _REMAINDER = 3 - - _REQUEST = math_pb2.DivArgs(dividend=_DIVIDEND, divisor=_DIVISOR) - _RESPONSE = math_pb2.DivReply(quotient=_QUOTIENT, remainder=_REMAINDER) - - def method(self): - return 'Div' - - def serialize_request(self, request): - return request.SerializeToString() - - def deserialize_request(self, request_bytestring): - return math_pb2.DivArgs.FromString(request_bytestring) - - def serialize_response(self, response): - return response.SerializeToString() - - def deserialize_response(self, response_bytestring): - return math_pb2.DivReply.FromString(response_bytestring) - - def requests(self): - return [self._REQUEST] - - def response_for_request(self, request): - return self._RESPONSE - - def verify_requests(self, experimental_requests): - return 
tuple(experimental_requests) == (self._REQUEST,) - - def verify_responses(self, experimental_responses): - return tuple(experimental_responses) == (self._RESPONSE,) - - -class BidirectionallyStreamingScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - _STREAM_LENGTH = 200 - _REQUESTS = tuple( - math_pb2.DivArgs(dividend=59 + index, divisor=7 + index) - for index in range(_STREAM_LENGTH)) - - def __init__(self): - self._lock = threading.Lock() - self._responses = [] - - def method(self): - return 'DivMany' - - def serialize_request(self, request): - return request.SerializeToString() - - def deserialize_request(self, request_bytestring): - return math_pb2.DivArgs.FromString(request_bytestring) - - def serialize_response(self, response): - return response.SerializeToString() - - def deserialize_response(self, response_bytestring): - return math_pb2.DivReply.FromString(response_bytestring) - - def requests(self): - return self._REQUESTS - - def response_for_request(self, request): - quotient, remainder = divmod(request.dividend, request.divisor) - response = math_pb2.DivReply(quotient=quotient, remainder=remainder) - with self._lock: - self._responses.append(response) - return response - - def verify_requests(self, experimental_requests): - return tuple(experimental_requests) == self._REQUESTS - - def verify_responses(self, experimental_responses): - with self._lock: - return tuple(experimental_responses) == tuple(self._responses) diff --git a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py index 9d1dbc189b8..f9a8e2401bb 100644 --- a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py +++ b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py @@ -281,8 +281,8 @@ class ServerClientMixin(object): ], server_call_tag) self.assertEqual(cygrpc.CallError.ok, server_start_batch_result) - client_event = client_event_future.result() server_event = self.server_completion_queue.poll(cygrpc_deadline) + client_event = client_event_future.result() self.assertEqual(6, len(client_event.batch_operations)) found_client_op_types = set() diff --git a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py index 24a2faef852..b33802bf572 100644 --- a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py +++ b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py @@ -184,11 +184,11 @@ if __name__ == '__main__': args = parser.parse_args() if args.scenario == UNSTARTED_SERVER: - server = grpc.server((), DaemonPool()) + server = grpc.server(DaemonPool()) if args.wait_for_interrupt: time.sleep(WAIT_TIME) elif args.scenario == RUNNING_SERVER: - server = grpc.server((), DaemonPool()) + server = grpc.server(DaemonPool()) port = server.add_insecure_port('[::]:0') server.start() if args.wait_for_interrupt: @@ -203,7 +203,7 @@ if __name__ == '__main__': if args.wait_for_interrupt: time.sleep(WAIT_TIME) elif args.scenario == POLL_CONNECTIVITY: - server = grpc.server((), DaemonPool()) + server = grpc.server(DaemonPool()) port = server.add_insecure_port('[::]:0') server.start() channel = grpc.insecure_channel('localhost:%d' % port) @@ -217,7 +217,7 @@ if __name__ == '__main__': else: handler = GenericHandler() - server = grpc.server((), DaemonPool()) + server = grpc.server(DaemonPool()) port = server.add_insecure_port('[::]:0') server.add_generic_rpc_handlers((handler,)) server.start() diff --git 
a/src/python/grpcio_tests/tests/unit/_exit_test.py b/src/python/grpcio_tests/tests/unit/_exit_test.py index b0d6af73e58..5a4a32887c3 100644 --- a/src/python/grpcio_tests/tests/unit/_exit_test.py +++ b/src/python/grpcio_tests/tests/unit/_exit_test.py @@ -84,6 +84,7 @@ def wait(process): process.wait() +@unittest.skip('https://github.com/grpc/grpc/issues/7311') class ExitTest(unittest.TestCase): def test_unstarted_server(self): diff --git a/src/python/grpcio_tests/tests/unit/_junkdrawer/math_pb2.py b/src/python/grpcio_tests/tests/unit/_junkdrawer/math_pb2.py deleted file mode 100644 index 20165955b4d..00000000000 --- a/src/python/grpcio_tests/tests/unit/_junkdrawer/math_pb2.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# TODO(nathaniel): Remove this from source control after having made -# generation from the math.proto source part of GRPC's build-and-test -# process. - -# Generated by the protocol buffer compiler. DO NOT EDIT! 
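Alongside the skip of `ExitTest` (tracked at https://github.com/grpc/grpc/issues/7311), the `_exit_scenarios.py`, `qps_worker.py`, and `worker_server.py` hunks above drop the leading empty tuple of generic handlers from `grpc.server(...)`, leaving the thread pool as the only positional argument; handlers are attached afterwards via `add_generic_rpc_handlers` or the generated `add_*Servicer_to_server` helpers. A minimal sketch of the updated call:

    from concurrent import futures

    import grpc

    # The thread pool is now the first (and only) positional argument.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    port = server.add_insecure_port('[::]:0')  # bind to any free port
    server.start()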
-# source: math.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='math.proto', - package='math', - serialized_pb=_b('\n\nmath.proto\x12\x04math\",\n\x07\x44ivArgs\x12\x10\n\x08\x64ividend\x18\x01 \x02(\x03\x12\x0f\n\x07\x64ivisor\x18\x02 \x02(\x03\"/\n\x08\x44ivReply\x12\x10\n\x08quotient\x18\x01 \x02(\x03\x12\x11\n\tremainder\x18\x02 \x02(\x03\"\x18\n\x07\x46ibArgs\x12\r\n\x05limit\x18\x01 \x01(\x03\"\x12\n\x03Num\x12\x0b\n\x03num\x18\x01 \x02(\x03\"\x19\n\x08\x46ibReply\x12\r\n\x05\x63ount\x18\x01 \x02(\x03\x32\xa4\x01\n\x04Math\x12&\n\x03\x44iv\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00\x12.\n\x07\x44ivMany\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00(\x01\x30\x01\x12#\n\x03\x46ib\x12\r.math.FibArgs\x1a\t.math.Num\"\x00\x30\x01\x12\x1f\n\x03Sum\x12\t.math.Num\x1a\t.math.Num\"\x00(\x01') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_DIVARGS = _descriptor.Descriptor( - name='DivArgs', - full_name='math.DivArgs', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='dividend', full_name='math.DivArgs.dividend', index=0, - number=1, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='divisor', full_name='math.DivArgs.divisor', index=1, - number=2, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=20, - serialized_end=64, -) - - -_DIVREPLY = _descriptor.Descriptor( - name='DivReply', - full_name='math.DivReply', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='quotient', full_name='math.DivReply.quotient', index=0, - number=1, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='remainder', full_name='math.DivReply.remainder', index=1, - number=2, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=66, - serialized_end=113, -) - - -_FIBARGS = _descriptor.Descriptor( - name='FibArgs', - full_name='math.FibArgs', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='limit', full_name='math.FibArgs.limit', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=115, - serialized_end=139, -) - - -_NUM = _descriptor.Descriptor( - name='Num', - full_name='math.Num', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num', full_name='math.Num.num', index=0, - number=1, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=141, - serialized_end=159, -) - - -_FIBREPLY = _descriptor.Descriptor( - name='FibReply', - full_name='math.FibReply', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='count', full_name='math.FibReply.count', index=0, - number=1, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=161, - serialized_end=186, -) - -DESCRIPTOR.message_types_by_name['DivArgs'] = _DIVARGS -DESCRIPTOR.message_types_by_name['DivReply'] = _DIVREPLY -DESCRIPTOR.message_types_by_name['FibArgs'] = _FIBARGS -DESCRIPTOR.message_types_by_name['Num'] = _NUM -DESCRIPTOR.message_types_by_name['FibReply'] = _FIBREPLY - -DivArgs = _reflection.GeneratedProtocolMessageType('DivArgs', (_message.Message,), dict( - DESCRIPTOR = _DIVARGS, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.DivArgs) - )) -_sym_db.RegisterMessage(DivArgs) - -DivReply = _reflection.GeneratedProtocolMessageType('DivReply', (_message.Message,), dict( - DESCRIPTOR = _DIVREPLY, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.DivReply) - )) -_sym_db.RegisterMessage(DivReply) - -FibArgs = _reflection.GeneratedProtocolMessageType('FibArgs', (_message.Message,), dict( - DESCRIPTOR = _FIBARGS, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.FibArgs) - )) -_sym_db.RegisterMessage(FibArgs) - -Num = _reflection.GeneratedProtocolMessageType('Num', (_message.Message,), dict( - DESCRIPTOR = _NUM, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.Num) - )) -_sym_db.RegisterMessage(Num) - -FibReply = _reflection.GeneratedProtocolMessageType('FibReply', (_message.Message,), dict( - DESCRIPTOR = _FIBREPLY, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.FibReply) - )) -_sym_db.RegisterMessage(FibReply) - - -# @@protoc_insertion_point(module_scope) diff --git a/src/python/grpcio_tests/tests/unit/_links/__init__.py b/src/python/grpcio_tests/tests/unit/_links/__init__.py deleted file mode 100644 index 70865191060..00000000000 --- a/src/python/grpcio_tests/tests/unit/_links/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - diff --git a/src/python/grpcio_tests/tests/unit/_links/_proto_scenarios.py b/src/python/grpcio_tests/tests/unit/_links/_proto_scenarios.py deleted file mode 100644 index 50661085f9d..00000000000 --- a/src/python/grpcio_tests/tests/unit/_links/_proto_scenarios.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""Test scenarios using protocol buffers.""" - -import abc -import threading - -import six - -from tests.unit._junkdrawer import math_pb2 -from tests.unit.framework.common import test_constants - - -class ProtoScenario(six.with_metaclass(abc.ABCMeta)): - """An RPC test scenario using protocol buffers.""" - - @abc.abstractmethod - def group_and_method(self): - """Access the test group and method. - - Returns: - The test group and method as a pair. - """ - raise NotImplementedError() - - @abc.abstractmethod - def serialize_request(self, request): - """Serialize a request protocol buffer. - - Args: - request: A request protocol buffer. - - Returns: - The bytestring serialization of the given request protocol buffer. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_request(self, request_bytestring): - """Deserialize a request protocol buffer. - - Args: - request_bytestring: The bytestring serialization of a request protocol - buffer. - - Returns: - The request protocol buffer deserialized from the given byte string. - """ - raise NotImplementedError() - - @abc.abstractmethod - def serialize_response(self, response): - """Serialize a response protocol buffer. - - Args: - response: A response protocol buffer. - - Returns: - The bytestring serialization of the given response protocol buffer. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_response(self, response_bytestring): - """Deserialize a response protocol buffer. - - Args: - response_bytestring: The bytestring serialization of a response protocol - buffer. - - Returns: - The response protocol buffer deserialized from the given byte string. - """ - raise NotImplementedError() - - @abc.abstractmethod - def requests(self): - """Access the sequence of requests for this scenario. - - Returns: - A sequence of request protocol buffers. - """ - raise NotImplementedError() - - @abc.abstractmethod - def response_for_request(self, request): - """Access the response for a particular request. - - Args: - request: A request protocol buffer. - - Returns: - The response protocol buffer appropriate for the given request. - """ - raise NotImplementedError() - - @abc.abstractmethod - def verify_requests(self, experimental_requests): - """Verify the requests transmitted through the system under test. - - Args: - experimental_requests: The request protocol buffers transmitted through - the system under test. - - Returns: - True if the requests satisfy this test scenario; False otherwise. - """ - raise NotImplementedError() - - @abc.abstractmethod - def verify_responses(self, experimental_responses): - """Verify the responses transmitted through the system under test. - - Args: - experimental_responses: The response protocol buffers transmitted through - the system under test. - - Returns: - True if the responses satisfy this test scenario; False otherwise. 
- """ - raise NotImplementedError() - - -class EmptyScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - def group_and_method(self): - return 'math.Math', 'DivMany' - - def serialize_request(self, request): - raise ValueError('This should not be necessary to call!') - - def deserialize_request(self, request_bytestring): - raise ValueError('This should not be necessary to call!') - - def serialize_response(self, response): - raise ValueError('This should not be necessary to call!') - - def deserialize_response(self, response_bytestring): - raise ValueError('This should not be necessary to call!') - - def requests(self): - return () - - def response_for_request(self, request): - raise ValueError('This should not be necessary to call!') - - def verify_requests(self, experimental_requests): - return not experimental_requests - - def verify_responses(self, experimental_responses): - return not experimental_responses - - -class BidirectionallyUnaryScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - _DIVIDEND = 59 - _DIVISOR = 7 - _QUOTIENT = 8 - _REMAINDER = 3 - - _REQUEST = math_pb2.DivArgs(dividend=_DIVIDEND, divisor=_DIVISOR) - _RESPONSE = math_pb2.DivReply(quotient=_QUOTIENT, remainder=_REMAINDER) - - def group_and_method(self): - return 'math.Math', 'Div' - - def serialize_request(self, request): - return request.SerializeToString() - - def deserialize_request(self, request_bytestring): - return math_pb2.DivArgs.FromString(request_bytestring) - - def serialize_response(self, response): - return response.SerializeToString() - - def deserialize_response(self, response_bytestring): - return math_pb2.DivReply.FromString(response_bytestring) - - def requests(self): - return [self._REQUEST] - - def response_for_request(self, request): - return self._RESPONSE - - def verify_requests(self, experimental_requests): - return tuple(experimental_requests) == (self._REQUEST,) - - def verify_responses(self, experimental_responses): - return tuple(experimental_responses) == (self._RESPONSE,) - - -class BidirectionallyStreamingScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - _REQUESTS = tuple( - math_pb2.DivArgs(dividend=59 + index, divisor=7 + index) - for index in range(test_constants.STREAM_LENGTH)) - - def __init__(self): - self._lock = threading.Lock() - self._responses = [] - - def group_and_method(self): - return 'math.Math', 'DivMany' - - def serialize_request(self, request): - return request.SerializeToString() - - def deserialize_request(self, request_bytestring): - return math_pb2.DivArgs.FromString(request_bytestring) - - def serialize_response(self, response): - return response.SerializeToString() - - def deserialize_response(self, response_bytestring): - return math_pb2.DivReply.FromString(response_bytestring) - - def requests(self): - return self._REQUESTS - - def response_for_request(self, request): - quotient, remainder = divmod(request.dividend, request.divisor) - response = math_pb2.DivReply(quotient=quotient, remainder=remainder) - with self._lock: - self._responses.append(response) - return response - - def verify_requests(self, experimental_requests): - return tuple(experimental_requests) == self._REQUESTS - - def verify_responses(self, experimental_responses): - with self._lock: - return tuple(experimental_responses) == tuple(self._responses) diff --git a/src/python/grpcio_tests/tests/unit/_rpc_test.py 
b/src/python/grpcio_tests/tests/unit/_rpc_test.py index 59bf240d286..ab6546bf87f 100644 --- a/src/python/grpcio_tests/tests/unit/_rpc_test.py +++ b/src/python/grpcio_tests/tests/unit/_rpc_test.py @@ -233,7 +233,11 @@ class RPCTest(unittest.TestCase): ('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),)) response = response_future.result() + self.assertIsInstance(response_future, grpc.Future) + self.assertIsInstance(response_future, grpc.Call) self.assertEqual(expected_response, response) + self.assertIsNone(response_future.exception()) + self.assertIsNone(response_future.traceback()) def testSuccessfulUnaryRequestStreamResponse(self): request = b'\x37\x58' @@ -287,6 +291,8 @@ class RPCTest(unittest.TestCase): response = response_future.result() self.assertEqual(expected_response, response) + self.assertIsNone(response_future.exception()) + self.assertIsNone(response_future.traceback()) def testSuccessfulStreamRequestStreamResponse(self): requests = tuple(b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH)) @@ -459,6 +465,10 @@ class RPCTest(unittest.TestCase): self.assertTrue(response_future.cancelled()) with self.assertRaises(grpc.FutureCancelledError): response_future.result() + with self.assertRaises(grpc.FutureCancelledError): + response_future.exception() + with self.assertRaises(grpc.FutureCancelledError): + response_future.traceback() self.assertIs(grpc.StatusCode.CANCELLED, response_future.code()) def testCancelledUnaryRequestStreamResponse(self): @@ -495,6 +505,10 @@ class RPCTest(unittest.TestCase): self.assertTrue(response_future.cancelled()) with self.assertRaises(grpc.FutureCancelledError): response_future.result() + with self.assertRaises(grpc.FutureCancelledError): + response_future.exception() + with self.assertRaises(grpc.FutureCancelledError): + response_future.traceback() self.assertIsNotNone(response_future.initial_metadata()) self.assertIs(grpc.StatusCode.CANCELLED, response_future.code()) self.assertIsNotNone(response_future.details()) @@ -528,6 +542,7 @@ class RPCTest(unittest.TestCase): request, timeout=test_constants.SHORT_TIMEOUT, metadata=(('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),)) + self.assertIsInstance(exception_context.exception, grpc.Call) self.assertIsNotNone(exception_context.exception.initial_metadata()) self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) @@ -556,6 +571,7 @@ class RPCTest(unittest.TestCase): self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) + self.assertIsNotNone(response_future.traceback()) self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, response_future.exception().code()) @@ -585,6 +601,8 @@ class RPCTest(unittest.TestCase): request_iterator, timeout=test_constants.SHORT_TIMEOUT, metadata=(('test', 'ExpiredStreamRequestBlockingUnaryResponse'),)) + self.assertIsInstance(exception_context.exception, grpc.RpcError) + self.assertIsInstance(exception_context.exception, grpc.Call) self.assertIsNotNone(exception_context.exception.initial_metadata()) self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) @@ -601,6 +619,8 @@ class RPCTest(unittest.TestCase): response_future = multi_callable.future( request_iterator, timeout=test_constants.SHORT_TIMEOUT, metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),)) + with self.assertRaises(grpc.FutureTimeoutError): + response_future.result(timeout=test_constants.SHORT_TIMEOUT / 2.0) 
response_future.add_done_callback(callback) value_passed_to_callback = callback.value() @@ -610,6 +630,7 @@ class RPCTest(unittest.TestCase): self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) + self.assertIsNotNone(response_future.traceback()) self.assertIs(response_future, value_passed_to_callback) self.assertIsNotNone(response_future.initial_metadata()) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code()) @@ -656,11 +677,14 @@ class RPCTest(unittest.TestCase): response_future.add_done_callback(callback) value_passed_to_callback = callback.value() + self.assertIsInstance(response_future, grpc.Future) + self.assertIsInstance(response_future, grpc.Call) with self.assertRaises(grpc.RpcError) as exception_context: response_future.result() self.assertIs( grpc.StatusCode.UNKNOWN, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) + self.assertIsNotNone(response_future.traceback()) self.assertIs(grpc.StatusCode.UNKNOWN, response_future.exception().code()) self.assertIs(response_future, value_passed_to_callback) @@ -709,6 +733,7 @@ class RPCTest(unittest.TestCase): self.assertIs( grpc.StatusCode.UNKNOWN, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) + self.assertIsNotNone(response_future.traceback()) self.assertIs(response_future, value_passed_to_callback) def testFailedStreamRequestStreamResponse(self): diff --git a/src/python/grpcio_tests/tests/unit/framework/core/__init__.py b/src/python/grpcio_tests/tests/unit/framework/core/__init__.py deleted file mode 100644 index 70865191060..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/core/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
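The _rpc_test.py hunks above pin down two properties of the object returned by a multi-callable's future(): it is simultaneously a grpc.Future and a grpc.Call, and on a successful RPC its exception() and traceback() accessors return None rather than raising. A minimal usage sketch of those semantics follows; the multi_callable, request, and timeout names are assumptions for illustration, not taken from the diff.

import grpc

def call_and_inspect(multi_callable, request, timeout):
    response_future = multi_callable.future(request, timeout=timeout)
    # The returned future doubles as a grpc.Call, so metadata and status are reachable on it too.
    assert isinstance(response_future, grpc.Future)
    assert isinstance(response_future, grpc.Call)
    try:
        response = response_future.result(timeout=timeout)
    except grpc.FutureTimeoutError:
        # result() with a timeout raises FutureTimeoutError if the RPC has not finished yet,
        # as the ExpiredStreamRequestFutureUnaryResponse hunk above exercises.
        response_future.cancel()
        return None
    # On success, exception() and traceback() return None instead of raising.
    assert response_future.exception() is None
    assert response_future.traceback() is None
    return response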
- - diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/__init__.py deleted file mode 100644 index 70865191060..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_control.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_control.py deleted file mode 100644 index 0eb38abf222..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_control.py +++ /dev/null @@ -1,570 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Part of the tests of the base interface of RPC Framework.""" - -from __future__ import division - -import abc -import collections -import enum -import random # pylint: disable=unused-import -import threading -import time - -import six - -from grpc.framework.interfaces.base import base -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.base import _sequence -from tests.unit.framework.interfaces.base import _state -from tests.unit.framework.interfaces.base import test_interfaces # pylint: disable=unused-import - -_GROUP = 'base test cases test group' -_METHOD = 'base test cases test method' - -_PAYLOAD_RANDOM_SECTION_MAXIMUM_SIZE = test_constants.PAYLOAD_SIZE // 20 -_MINIMUM_PAYLOAD_SIZE = test_constants.PAYLOAD_SIZE // 600 - - -def _create_payload(randomness): - length = randomness.randint( - _MINIMUM_PAYLOAD_SIZE, test_constants.PAYLOAD_SIZE) - random_section_length = randomness.randint( - 0, min(_PAYLOAD_RANDOM_SECTION_MAXIMUM_SIZE, length)) - random_section = bytes( - bytearray( - randomness.getrandbits(8) for _ in range(random_section_length))) - sevens_section = b'\x07' * (length - random_section_length) - return b''.join(randomness.sample((random_section, sevens_section), 2)) - - -def _anything_in_flight(state): - return ( - state.invocation_initial_metadata_in_flight is not None or - state.invocation_payloads_in_flight or - state.invocation_completion_in_flight is not None or - state.service_initial_metadata_in_flight is not None or - state.service_payloads_in_flight or - state.service_completion_in_flight is not None or - 0 < state.invocation_allowance_in_flight or - 0 < state.service_allowance_in_flight - ) - - -def _verify_service_advance_and_update_state( - initial_metadata, payload, completion, allowance, state, implementation): - if initial_metadata is not None: - if state.invocation_initial_metadata_received: - return 'Later invocation initial metadata received: %s' % ( - initial_metadata,) - if state.invocation_payloads_received: - return 'Invocation initial metadata received after payloads: %s' % ( - state.invocation_payloads_received) - if state.invocation_completion_received: - return 'Invocation initial metadata received after invocation completion!' - if not implementation.metadata_transmitted( - state.invocation_initial_metadata_in_flight, initial_metadata): - return 'Invocation initial metadata maltransmitted: %s, %s' % ( - state.invocation_initial_metadata_in_flight, initial_metadata) - else: - state.invocation_initial_metadata_in_flight = None - state.invocation_initial_metadata_received = True - - if payload is not None: - if state.invocation_completion_received: - return 'Invocation payload received after invocation completion!' - elif not state.invocation_payloads_in_flight: - return 'Invocation payload "%s" received but not in flight!' 
% (payload,) - elif state.invocation_payloads_in_flight[0] != payload: - return 'Invocation payload mismatch: %s, %s' % ( - state.invocation_payloads_in_flight[0], payload) - elif state.service_side_invocation_allowance < 1: - return 'Disallowed invocation payload!' - else: - state.invocation_payloads_in_flight.pop(0) - state.invocation_payloads_received += 1 - state.service_side_invocation_allowance -= 1 - - if completion is not None: - if state.invocation_completion_received: - return 'Later invocation completion received: %s' % (completion,) - elif not implementation.completion_transmitted( - state.invocation_completion_in_flight, completion): - return 'Invocation completion maltransmitted: %s, %s' % ( - state.invocation_completion_in_flight, completion) - else: - state.invocation_completion_in_flight = None - state.invocation_completion_received = True - - if allowance is not None: - if allowance <= 0: - return 'Illegal allowance value: %s' % (allowance,) - else: - state.service_allowance_in_flight -= allowance - state.service_side_service_allowance += allowance - - -def _verify_invocation_advance_and_update_state( - initial_metadata, payload, completion, allowance, state, implementation): - if initial_metadata is not None: - if state.service_initial_metadata_received: - return 'Later service initial metadata received: %s' % (initial_metadata,) - if state.service_payloads_received: - return 'Service initial metadata received after service payloads: %s' % ( - state.service_payloads_received) - if state.service_completion_received: - return 'Service initial metadata received after service completion!' - if not implementation.metadata_transmitted( - state.service_initial_metadata_in_flight, initial_metadata): - return 'Service initial metadata maltransmitted: %s, %s' % ( - state.service_initial_metadata_in_flight, initial_metadata) - else: - state.service_initial_metadata_in_flight = None - state.service_initial_metadata_received = True - - if payload is not None: - if state.service_completion_received: - return 'Service payload received after service completion!' - elif not state.service_payloads_in_flight: - return 'Service payload "%s" received but not in flight!' % (payload,) - elif state.service_payloads_in_flight[0] != payload: - return 'Service payload mismatch: %s, %s' % ( - state.invocation_payloads_in_flight[0], payload) - elif state.invocation_side_service_allowance < 1: - return 'Disallowed service payload!' - else: - state.service_payloads_in_flight.pop(0) - state.service_payloads_received += 1 - state.invocation_side_service_allowance -= 1 - - if completion is not None: - if state.service_completion_received: - return 'Later service completion received: %s' % (completion,) - elif not implementation.completion_transmitted( - state.service_completion_in_flight, completion): - return 'Service completion maltransmitted: %s, %s' % ( - state.service_completion_in_flight, completion) - else: - state.service_completion_in_flight = None - state.service_completion_received = True - - if allowance is not None: - if allowance <= 0: - return 'Illegal allowance value: %s' % (allowance,) - else: - state.invocation_allowance_in_flight -= allowance - state.invocation_side_service_allowance += allowance - - -class Invocation( - collections.namedtuple( - 'Invocation', - ('group', 'method', 'subscription_kind', 'timeout', 'initial_metadata', - 'payload', 'completion',))): - """A description of operation invocation. - - Attributes: - group: The group identifier for the operation. 
- method: The method identifier for the operation. - subscription_kind: A base.Subscription.Kind value describing the kind of - subscription to use for the operation. - timeout: A duration in seconds to pass as the timeout value for the - operation. - initial_metadata: An object to pass as the initial metadata for the - operation or None. - payload: An object to pass as a payload value for the operation or None. - completion: An object to pass as a completion value for the operation or - None. - """ - - -class OnAdvance( - collections.namedtuple( - 'OnAdvance', - ('kind', 'initial_metadata', 'payload', 'completion', 'allowance'))): - """Describes action to be taken in a test in response to an advance call. - - Attributes: - kind: A Kind value describing the overall kind of response. - initial_metadata: An initial metadata value to pass to a call of the advance - method of the operator under test. Only valid if kind is Kind.ADVANCE and - may be None. - payload: A payload value to pass to a call of the advance method of the - operator under test. Only valid if kind is Kind.ADVANCE and may be None. - completion: A base.Completion value to pass to a call of the advance method - of the operator under test. Only valid if kind is Kind.ADVANCE and may be - None. - allowance: An allowance value to pass to a call of the advance method of the - operator under test. Only valid if kind is Kind.ADVANCE and may be None. - """ - - @enum.unique - class Kind(enum.Enum): - ADVANCE = 'advance' - DEFECT = 'defect' - IDLE = 'idle' - - -_DEFECT_ON_ADVANCE = OnAdvance(OnAdvance.Kind.DEFECT, None, None, None, None) -_IDLE_ON_ADVANCE = OnAdvance(OnAdvance.Kind.IDLE, None, None, None, None) - - -class Instruction( - collections.namedtuple( - 'Instruction', - ('kind', 'advance_args', 'advance_kwargs', 'conclude_success', - 'conclude_message', 'conclude_invocation_outcome_kind', - 'conclude_service_outcome_kind',))): - """""" - - @enum.unique - class Kind(enum.Enum): - ADVANCE = 'ADVANCE' - CANCEL = 'CANCEL' - CONCLUDE = 'CONCLUDE' - - -class Controller(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def failed(self, message): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def serialize_request(self, request): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_request(self, serialized_request): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def serialize_response(self, response): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_response(self, serialized_response): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def invocation(self): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def poll(self): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def on_service_advance( - self, initial_metadata, payload, completion, allowance): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def on_invocation_advance( - self, initial_metadata, payload, completion, allowance): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def service_on_termination(self, outcome): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def invocation_on_termination(self, outcome): - """""" - raise NotImplementedError() - - -class ControllerCreator(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def name(self): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def controller(self, implementation, randomness): - """""" - raise 
NotImplementedError() - - -class _Remainder( - collections.namedtuple( - '_Remainder', - ('invocation_payloads', 'service_payloads', 'invocation_completion', - 'service_completion',))): - """Describes work remaining to be done in a portion of a test. - - Attributes: - invocation_payloads: The number of payloads to be sent from the invocation - side of the operation to the service side of the operation. - service_payloads: The number of payloads to be sent from the service side of - the operation to the invocation side of the operation. - invocation_completion: Whether or not completion from the invocation side of - the operation should be indicated and has yet to be indicated. - service_completion: Whether or not completion from the service side of the - operation should be indicated and has yet to be indicated. - """ - - -class _SequenceController(Controller): - - def __init__(self, sequence, implementation, randomness): - """Constructor. - - Args: - sequence: A _sequence.Sequence describing the steps to be taken in the - test at a relatively high level. - implementation: A test_interfaces.Implementation encapsulating the - base interface implementation that is the system under test. - randomness: A random.Random instance for use in the test. - """ - self._condition = threading.Condition() - self._sequence = sequence - self._implementation = implementation - self._randomness = randomness - - self._until = None - self._remaining_elements = None - self._poll_next = None - self._message = None - - self._state = _state.OperationState() - self._todo = None - - # called with self._condition - def _failed(self, message): - self._message = message - self._condition.notify_all() - - def _passed(self, invocation_outcome, service_outcome): - self._poll_next = Instruction( - Instruction.Kind.CONCLUDE, None, None, True, None, invocation_outcome, - service_outcome) - self._condition.notify_all() - - def failed(self, message): - with self._condition: - self._failed(message) - - def serialize_request(self, request): - return request + request - - def deserialize_request(self, serialized_request): - return serialized_request[:len(serialized_request) // 2] - - def serialize_response(self, response): - return response * 3 - - def deserialize_response(self, serialized_response): - return serialized_response[2 * len(serialized_response) // 3:] - - def invocation(self): - with self._condition: - self._until = time.time() + self._sequence.maximum_duration - self._remaining_elements = list(self._sequence.elements) - if self._sequence.invocation.initial_metadata: - initial_metadata = self._implementation.invocation_initial_metadata() - self._state.invocation_initial_metadata_in_flight = initial_metadata - else: - initial_metadata = None - if self._sequence.invocation.payload: - payload = _create_payload(self._randomness) - self._state.invocation_payloads_in_flight.append(payload) - else: - payload = None - if self._sequence.invocation.complete: - completion = self._implementation.invocation_completion() - self._state.invocation_completion_in_flight = completion - else: - completion = None - return Invocation( - _GROUP, _METHOD, base.Subscription.Kind.FULL, - self._sequence.invocation.timeout, initial_metadata, payload, - completion) - - def poll(self): - with self._condition: - while True: - if self._message is not None: - return Instruction( - Instruction.Kind.CONCLUDE, None, None, False, self._message, None, - None) - elif self._poll_next: - poll_next = self._poll_next - self._poll_next = None - return 
poll_next - elif self._until < time.time(): - return Instruction( - Instruction.Kind.CONCLUDE, None, None, False, - 'overran allotted time!', None, None) - else: - self._condition.wait(timeout=self._until-time.time()) - - def on_service_advance( - self, initial_metadata, payload, completion, allowance): - with self._condition: - message = _verify_service_advance_and_update_state( - initial_metadata, payload, completion, allowance, self._state, - self._implementation) - if message is not None: - self._failed(message) - if self._todo is not None: - raise ValueError('TODO!!!') - elif _anything_in_flight(self._state): - return _IDLE_ON_ADVANCE - elif self._remaining_elements: - element = self._remaining_elements.pop(0) - if element.kind is _sequence.Element.Kind.SERVICE_TRANSMISSION: - if element.transmission.initial_metadata: - initial_metadata = self._implementation.service_initial_metadata() - self._state.service_initial_metadata_in_flight = initial_metadata - else: - initial_metadata = None - if element.transmission.payload: - payload = _create_payload(self._randomness) - self._state.service_payloads_in_flight.append(payload) - self._state.service_side_service_allowance -= 1 - else: - payload = None - if element.transmission.complete: - completion = self._implementation.service_completion() - self._state.service_completion_in_flight = completion - else: - completion = None - if (not self._state.invocation_completion_received and - 0 <= self._state.service_side_invocation_allowance): - allowance = 1 - self._state.service_side_invocation_allowance += 1 - self._state.invocation_allowance_in_flight += 1 - else: - allowance = None - return OnAdvance( - OnAdvance.Kind.ADVANCE, initial_metadata, payload, completion, - allowance) - else: - raise ValueError('TODO!!!') - else: - return _IDLE_ON_ADVANCE - - def on_invocation_advance( - self, initial_metadata, payload, completion, allowance): - with self._condition: - message = _verify_invocation_advance_and_update_state( - initial_metadata, payload, completion, allowance, self._state, - self._implementation) - if message is not None: - self._failed(message) - if self._todo is not None: - raise ValueError('TODO!!!') - elif _anything_in_flight(self._state): - return _IDLE_ON_ADVANCE - elif self._remaining_elements: - element = self._remaining_elements.pop(0) - if element.kind is _sequence.Element.Kind.INVOCATION_TRANSMISSION: - if element.transmission.initial_metadata: - initial_metadata = self._implementation.invocation_initial_metadata() - self._state.invocation_initial_metadata_in_fight = initial_metadata - else: - initial_metadata = None - if element.transmission.payload: - payload = _create_payload(self._randomness) - self._state.invocation_payloads_in_flight.append(payload) - self._state.invocation_side_invocation_allowance -= 1 - else: - payload = None - if element.transmission.complete: - completion = self._implementation.invocation_completion() - self._state.invocation_completion_in_flight = completion - else: - completion = None - if (not self._state.service_completion_received and - 0 <= self._state.invocation_side_service_allowance): - allowance = 1 - self._state.invocation_side_service_allowance += 1 - self._state.service_allowance_in_flight += 1 - else: - allowance = None - return OnAdvance( - OnAdvance.Kind.ADVANCE, initial_metadata, payload, completion, - allowance) - else: - raise ValueError('TODO!!!') - else: - return _IDLE_ON_ADVANCE - - def service_on_termination(self, outcome): - with self._condition: - 
self._state.service_side_outcome = outcome - if self._todo is not None or self._remaining_elements: - self._failed('Premature service-side outcome %s!' % (outcome,)) - elif outcome.kind is not self._sequence.outcome_kinds.service: - self._failed( - 'Incorrect service-side outcome kind: %s should have been %s' % ( - outcome.kind, self._sequence.outcome_kinds.service)) - elif self._state.invocation_side_outcome is not None: - self._passed(self._state.invocation_side_outcome.kind, outcome.kind) - - def invocation_on_termination(self, outcome): - with self._condition: - self._state.invocation_side_outcome = outcome - if self._todo is not None or self._remaining_elements: - self._failed('Premature invocation-side outcome %s!' % (outcome,)) - elif outcome.kind is not self._sequence.outcome_kinds.invocation: - self._failed( - 'Incorrect invocation-side outcome kind: %s should have been %s' % ( - outcome.kind, self._sequence.outcome_kinds.invocation)) - elif self._state.service_side_outcome is not None: - self._passed(outcome.kind, self._state.service_side_outcome.kind) - - -class _SequenceControllerCreator(ControllerCreator): - - def __init__(self, sequence): - self._sequence = sequence - - def name(self): - return self._sequence.name - - def controller(self, implementation, randomness): - return _SequenceController(self._sequence, implementation, randomness) - - -CONTROLLER_CREATORS = tuple( - _SequenceControllerCreator(sequence) for sequence in _sequence.SEQUENCES) diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_sequence.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_sequence.py deleted file mode 100644 index 571d0e1e632..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_sequence.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
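The _SequenceController above deliberately uses a non-identity toy codec, presumably so that the serialization plumbing of the implementation under test is actually exercised: requests are doubled and responses tripled on the wire, and deserialization takes the first half and the last third respectively. A worked round trip under those exact definitions (the byte values are illustrative):

request = b'abc'
serialized_request = request + request                                  # serialize_request -> b'abcabc'
assert serialized_request[:len(serialized_request) // 2] == request    # deserialize_request

response = b'xyz'
serialized_response = response * 3                                      # serialize_response -> b'xyzxyzxyz'
assert serialized_response[2 * len(serialized_response) // 3:] == response  # deserialize_response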
- -"""Part of the tests of the base interface of RPC Framework.""" - -import collections -import enum - -from grpc.framework.interfaces.base import base -from tests.unit.framework.common import test_constants - - -class Invocation( - collections.namedtuple( - 'Invocation', ('timeout', 'initial_metadata', 'payload', 'complete',))): - """A recipe for operation invocation. - - Attributes: - timeout: A duration in seconds to pass to the system under test as the - operation's timeout value. - initial_metadata: A boolean indicating whether or not to pass initial - metadata when invoking the operation. - payload: A boolean indicating whether or not to pass a payload when - invoking the operation. - complete: A boolean indicating whether or not to indicate completion of - transmissions from the invoking side of the operation when invoking the - operation. - """ - - -class Transmission( - collections.namedtuple( - 'Transmission', ('initial_metadata', 'payload', 'complete',))): - """A recipe for a single transmission in an operation. - - Attributes: - initial_metadata: A boolean indicating whether or not to pass initial - metadata as part of the transmission. - payload: A boolean indicating whether or not to pass a payload as part of - the transmission. - complete: A boolean indicating whether or not to indicate completion of - transmission from the transmitting side of the operation as part of the - transmission. - """ - - -class Intertransmission( - collections.namedtuple('Intertransmission', ('invocation', 'service',))): - """A recipe for multiple transmissions in an operation. - - Attributes: - invocation: An integer describing the number of payloads to send from the - invocation side of the operation to the service side. - service: An integer describing the number of payloads to send from the - service side of the operation to the invocation side. - """ - - -class Element(collections.namedtuple('Element', ('kind', 'transmission',))): - """A sum type for steps to perform when testing an operation. - - Attributes: - kind: A Kind value describing the kind of step to perform in the test. - transmission: Only valid for kinds Kind.INVOCATION_TRANSMISSION and - Kind.SERVICE_TRANSMISSION, a Transmission value describing the details of - the transmission to be made. - """ - - @enum.unique - class Kind(enum.Enum): - INVOCATION_TRANSMISSION = 'invocation transmission' - SERVICE_TRANSMISSION = 'service transmission' - INTERTRANSMISSION = 'intertransmission' - INVOCATION_CANCEL = 'invocation cancel' - SERVICE_CANCEL = 'service cancel' - INVOCATION_FAILURE = 'invocation failure' - SERVICE_FAILURE = 'service failure' - - -class OutcomeKinds( - collections.namedtuple('Outcome', ('invocation', 'service',))): - """A description of the expected outcome of an operation test. - - Attributes: - invocation: The base.Outcome.Kind value expected on the invocation side of - the operation. - service: The base.Outcome.Kind value expected on the service side of the - operation. - """ - - -class Sequence( - collections.namedtuple( - 'Sequence', - ('name', 'maximum_duration', 'invocation', 'elements', - 'outcome_kinds',))): - """Describes at a high level steps to perform in a test. - - Attributes: - name: The string name of the sequence. - maximum_duration: A length of time in seconds to allow for the test before - declaring it to have failed. - invocation: An Invocation value describing how to invoke the operation - under test. 
- elements: A sequence of Element values describing at coarse granularity - actions to take during the operation under test. - outcome_kinds: An OutcomeKinds value describing the expected outcome kinds - of the test. - """ - -_EASY = Sequence( - 'Easy', - test_constants.TIME_ALLOWANCE, - Invocation(test_constants.LONG_TIMEOUT, True, True, True), - ( - Element( - Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, True)), - ), - OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED)) - -_PEASY = Sequence( - 'Peasy', - test_constants.TIME_ALLOWANCE, - Invocation(test_constants.LONG_TIMEOUT, True, True, False), - ( - Element( - Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, False)), - Element( - Element.Kind.INVOCATION_TRANSMISSION, - Transmission(False, True, True)), - Element( - Element.Kind.SERVICE_TRANSMISSION, Transmission(False, True, True)), - ), - OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED)) - - -# TODO(issue 2959): Finish this test suite. This tuple of sequences should -# contain at least the values in the Cartesian product of (half-duplex, -# full-duplex) * (zero payloads, one payload, test_constants.STREAM_LENGTH -# payloads) * (completion, cancellation, expiration, programming defect in -# servicer code). -SEQUENCES = ( - _EASY, - _PEASY, -) diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_state.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_state.py deleted file mode 100644 index 21cf33aeb6a..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/_state.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
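The TODO above asks for sequences covering the full Cartesian product of duplexity, payload count, and termination kind, of which _EASY and _PEASY cover only the simplest corner. Purely as an illustration of the same namedtuple vocabulary, here is a sketch of what one streaming entry might have looked like; the name _STREAMY and its exact interleaving are assumptions and were never validated against the controller's allowance bookkeeping.

_STREAMY = Sequence(
    'Streamy',
    test_constants.TIME_ALLOWANCE,
    Invocation(test_constants.LONG_TIMEOUT, True, False, False),
    tuple(
        Element(Element.Kind.INVOCATION_TRANSMISSION, Transmission(False, True, False))
        for _ in range(test_constants.STREAM_LENGTH)
    ) + (
        Element(Element.Kind.INVOCATION_TRANSMISSION, Transmission(False, False, True)),
        Element(Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, True)),
    ),
    OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED))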
- -"""Part of the tests of the base interface of RPC Framework.""" - - -class OperationState(object): - - def __init__(self): - self.invocation_initial_metadata_in_flight = None - self.invocation_initial_metadata_received = False - self.invocation_payloads_in_flight = [] - self.invocation_payloads_received = 0 - self.invocation_completion_in_flight = None - self.invocation_completion_received = False - self.service_initial_metadata_in_flight = None - self.service_initial_metadata_received = False - self.service_payloads_in_flight = [] - self.service_payloads_received = 0 - self.service_completion_in_flight = None - self.service_completion_received = False - self.invocation_side_invocation_allowance = 1 - self.invocation_side_service_allowance = 1 - self.service_side_invocation_allowance = 1 - self.service_side_service_allowance = 1 - self.invocation_allowance_in_flight = 0 - self.service_allowance_in_flight = 0 - self.invocation_side_outcome = None - self.service_side_outcome = None diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_cases.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_cases.py deleted file mode 100644 index 5d16bf98be3..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_cases.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""Tests of the base interface of RPC Framework.""" - -from __future__ import division - -import logging -import random -import threading -import time -import unittest - -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.base import utilities -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.base import _control -from tests.unit.framework.interfaces.base import test_interfaces - -_SYNCHRONICITY_VARIATION = (('Sync', False), ('Async', True)) - -_EMPTY_OUTCOME_KIND_DICT = { - outcome_kind: 0 for outcome_kind in base.Outcome.Kind} - - -class _Serialization(test_interfaces.Serialization): - - def serialize_request(self, request): - return request + request - - def deserialize_request(self, serialized_request): - return serialized_request[:len(serialized_request) // 2] - - def serialize_response(self, response): - return response * 3 - - def deserialize_response(self, serialized_response): - return serialized_response[2 * len(serialized_response) // 3:] - - -def _advance(quadruples, operator, controller): - try: - for quadruple in quadruples: - operator.advance( - initial_metadata=quadruple[0], payload=quadruple[1], - completion=quadruple[2], allowance=quadruple[3]) - except Exception as e: # pylint: disable=broad-except - controller.failed('Exception on advance: %e' % e) - - -class _Operator(base.Operator): - - def __init__(self, controller, on_advance, pool, operator_under_test): - self._condition = threading.Condition() - self._controller = controller - self._on_advance = on_advance - self._pool = pool - self._operator_under_test = operator_under_test - self._pending_advances = [] - - def set_operator_under_test(self, operator_under_test): - with self._condition: - self._operator_under_test = operator_under_test - pent_advances = self._pending_advances - self._pending_advances = [] - pool = self._pool - controller = self._controller - - if pool is None: - _advance(pent_advances, operator_under_test, controller) - else: - pool.submit(_advance, pent_advances, operator_under_test, controller) - - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - on_advance = self._on_advance( - initial_metadata, payload, completion, allowance) - if on_advance.kind is _control.OnAdvance.Kind.ADVANCE: - with self._condition: - pool = self._pool - operator_under_test = self._operator_under_test - controller = self._controller - - quadruple = ( - on_advance.initial_metadata, on_advance.payload, - on_advance.completion, on_advance.allowance) - if pool is None: - _advance((quadruple,), operator_under_test, controller) - else: - pool.submit(_advance, (quadruple,), operator_under_test, controller) - elif on_advance.kind is _control.OnAdvance.Kind.DEFECT: - raise ValueError( - 'Deliberately raised exception from Operator.advance (in a test)!') - - -class _ProtocolReceiver(base.ProtocolReceiver): - - def __init__(self): - self._condition = threading.Condition() - self._contexts = [] - - def context(self, protocol_context): - with self._condition: - self._contexts.append(protocol_context) - - -class _Servicer(base.Servicer): - """A base.Servicer with instrumented for testing.""" - - def __init__(self, group, method, controllers, pool): - self._condition = threading.Condition() - self._group = group - self._method = method - self._pool = pool - self._controllers = list(controllers) - - def service(self, group, method, context, output_operator): - with 
self._condition: - controller = self._controllers.pop(0) - if group != self._group or method != self._method: - controller.fail( - '%s != %s or %s != %s' % (group, self._group, method, self._method)) - raise base.NoSuchMethodError(None, None) - else: - operator = _Operator( - controller, controller.on_service_advance, self._pool, - output_operator) - outcome = context.add_termination_callback( - controller.service_on_termination) - if outcome is not None: - controller.service_on_termination(outcome) - return utilities.full_subscription(operator, _ProtocolReceiver()) - - -class _OperationTest(unittest.TestCase): - - def setUp(self): - if self._synchronicity_variation: - self._pool = logging_pool.pool(test_constants.POOL_SIZE) - else: - self._pool = None - self._controller = self._controller_creator.controller( - self._implementation, self._randomness) - - def tearDown(self): - if self._synchronicity_variation: - self._pool.shutdown(wait=True) - else: - self._pool = None - - def test_operation(self): - invocation = self._controller.invocation() - if invocation.subscription_kind is base.Subscription.Kind.FULL: - test_operator = _Operator( - self._controller, self._controller.on_invocation_advance, - self._pool, None) - subscription = utilities.full_subscription( - test_operator, _ProtocolReceiver()) - else: - # TODO(nathaniel): support and test other subscription kinds. - self.fail('Non-full subscriptions not yet supported!') - - servicer = _Servicer( - invocation.group, invocation.method, (self._controller,), self._pool) - - invocation_end, service_end, memo = self._implementation.instantiate( - {(invocation.group, invocation.method): _Serialization()}, servicer) - - try: - invocation_end.start() - service_end.start() - operation_context, operator_under_test = invocation_end.operate( - invocation.group, invocation.method, subscription, invocation.timeout, - initial_metadata=invocation.initial_metadata, payload=invocation.payload, - completion=invocation.completion) - test_operator.set_operator_under_test(operator_under_test) - outcome = operation_context.add_termination_callback( - self._controller.invocation_on_termination) - if outcome is not None: - self._controller.invocation_on_termination(outcome) - except Exception as e: # pylint: disable=broad-except - self._controller.failed('Exception on invocation: %s' % e) - self.fail(e) - - while True: - instruction = self._controller.poll() - if instruction.kind is _control.Instruction.Kind.ADVANCE: - try: - test_operator.advance( - *instruction.advance_args, **instruction.advance_kwargs) - except Exception as e: # pylint: disable=broad-except - self._controller.failed('Exception on instructed advance: %s' % e) - elif instruction.kind is _control.Instruction.Kind.CANCEL: - try: - operation_context.cancel() - except Exception as e: # pylint: disable=broad-except - self._controller.failed('Exception on cancel: %s' % e) - elif instruction.kind is _control.Instruction.Kind.CONCLUDE: - break - - invocation_stop_event = invocation_end.stop(0) - service_stop_event = service_end.stop(0) - invocation_stop_event.wait() - service_stop_event.wait() - invocation_stats = invocation_end.operation_stats() - service_stats = service_end.operation_stats() - - self._implementation.destantiate(memo) - - self.assertTrue( - instruction.conclude_success, msg=instruction.conclude_message) - - expected_invocation_stats = dict(_EMPTY_OUTCOME_KIND_DICT) - expected_invocation_stats[ - instruction.conclude_invocation_outcome_kind] += 1 - 
self.assertDictEqual(expected_invocation_stats, invocation_stats) - expected_service_stats = dict(_EMPTY_OUTCOME_KIND_DICT) - expected_service_stats[instruction.conclude_service_outcome_kind] += 1 - self.assertDictEqual(expected_service_stats, service_stats) - - -def test_cases(implementation): - """Creates unittest.TestCase classes for a given Base implementation. - - Args: - implementation: A test_interfaces.Implementation specifying creation and - destruction of the Base implementation under test. - - Returns: - A sequence of subclasses of unittest.TestCase defining tests of the - specified Base layer implementation. - """ - random_seed = hash(time.time()) - logging.warning('Random seed for this execution: %s', random_seed) - randomness = random.Random(x=random_seed) - - test_case_classes = [] - for synchronicity_variation in _SYNCHRONICITY_VARIATION: - for controller_creator in _control.CONTROLLER_CREATORS: - name = ''.join( - (synchronicity_variation[0], controller_creator.name(), 'Test',)) - test_case_classes.append( - type(name, (_OperationTest,), - {'_implementation': implementation, - '_randomness': randomness, - '_synchronicity_variation': synchronicity_variation[1], - '_controller_creator': controller_creator, - '__module__': implementation.__module__, - })) - - return test_case_classes diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_interfaces.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_interfaces.py deleted file mode 100644 index 5eba475ba89..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/base/test_interfaces.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
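The test_cases() factory above names each generated class by joining the synchronicity label, the controller creator's name, and the literal 'Test'. With the two SEQUENCES defined in _sequence.py this yields four classes per implementation, in the order the following snippet prints:

for synchronicity_label in ('Sync', 'Async'):
    for sequence_name in ('Easy', 'Peasy'):
        print(''.join((synchronicity_label, sequence_name, 'Test')))
# Prints: SyncEasyTest, SyncPeasyTest, AsyncEasyTest, AsyncPeasyTest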
- -"""Interfaces used in tests of implementations of the Base layer.""" - -import abc - -import six - -from grpc.framework.interfaces.base import base # pylint: disable=unused-import - - -class Serialization(six.with_metaclass(abc.ABCMeta)): - """Specifies serialization and deserialization of test payloads.""" - - def serialize_request(self, request): - """Serializes a request value used in a test. - - Args: - request: A request value created by a test. - - Returns: - A bytestring that is the serialization of the given request. - """ - raise NotImplementedError() - - def deserialize_request(self, serialized_request): - """Deserializes a request value used in a test. - - Args: - serialized_request: A bytestring that is the serialization of some request - used in a test. - - Returns: - The request value encoded by the given bytestring. - """ - raise NotImplementedError() - - def serialize_response(self, response): - """Serializes a response value used in a test. - - Args: - response: A response value created by a test. - - Returns: - A bytestring that is the serialization of the given response. - """ - raise NotImplementedError() - - def deserialize_response(self, serialized_response): - """Deserializes a response value used in a test. - - Args: - serialized_response: A bytestring that is the serialization of some - response used in a test. - - Returns: - The response value encoded by the given bytestring. - """ - raise NotImplementedError() - - -class Implementation(six.with_metaclass(abc.ABCMeta)): - """Specifies an implementation of the Base layer.""" - - @abc.abstractmethod - def instantiate(self, serializations, servicer): - """Instantiates the Base layer implementation to be used in a test. - - Args: - serializations: A dict from group-method pair to Serialization object - specifying how to serialize and deserialize payload values used in the - test. - servicer: A base.Servicer object to be called to service RPCs made during - the test. - - Returns: - A sequence of length three the first element of which is a - base.End to be used to invoke RPCs, the second element of which is a - base.End to be used to service invoked RPCs, and the third element of - which is an arbitrary memo object to be kept and passed to destantiate - at the conclusion of the test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def destantiate(self, memo): - """Destroys the Base layer implementation under test. - - Args: - memo: The object from the third position of the return value of a call to - instantiate. - """ - raise NotImplementedError() - - @abc.abstractmethod - def invocation_initial_metadata(self): - """Provides an operation's invocation-side initial metadata. - - Returns: - A value to use for an operation's invocation-side initial metadata, or - None. - """ - raise NotImplementedError() - - @abc.abstractmethod - def service_initial_metadata(self): - """Provides an operation's service-side initial metadata. - - Returns: - A value to use for an operation's service-side initial metadata, or - None. - """ - raise NotImplementedError() - - @abc.abstractmethod - def invocation_completion(self): - """Provides an operation's invocation-side completion. - - Returns: - A base.Completion to use for an operation's invocation-side completion. - """ - raise NotImplementedError() - - @abc.abstractmethod - def service_completion(self): - """Provides an operation's service-side completion. - - Returns: - A base.Completion to use for an operation's service-side completion. 
- """ - raise NotImplementedError() - - @abc.abstractmethod - def metadata_transmitted(self, original_metadata, transmitted_metadata): - """Identifies whether or not metadata was properly transmitted. - - Args: - original_metadata: A metadata value passed to the system under test. - transmitted_metadata: The same metadata value after having been - transmitted through the system under test. - - Returns: - Whether or not the metadata was properly transmitted. - """ - raise NotImplementedError() - - @abc.abstractmethod - def completion_transmitted(self, original_completion, transmitted_completion): - """Identifies whether or not a base.Completion was properly transmitted. - - Args: - original_completion: A base.Completion passed to the system under test. - transmitted_completion: The same completion value after having been - transmitted through the system under test. - - Returns: - Whether or not the completion was properly transmitted. - """ - raise NotImplementedError() diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py index d32208f9eb0..df620b19ba5 100644 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py @@ -434,11 +434,13 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. for test_messages in test_messages_sequence: request = test_messages.request() callback = _Callback() + abortion_callback = _Callback() with self._control.fail(): response_future = self._invoker.future(group, method)( request, _3069_test_constant.REALLY_SHORT_TIMEOUT) response_future.add_done_callback(callback) + response_future.add_abortion_callback(abortion_callback) self.assertIs(callback.future(), response_future) # Because the servicer fails outside of the thread from which the @@ -450,6 +452,7 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. with self.assertRaises(face.ExpirationError): response_future.result() self.assertIsNotNone(response_future.traceback()) + self.assertIsNotNone(abortion_callback.future()) def testFailedUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -472,11 +475,13 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. for test_messages in test_messages_sequence: requests = test_messages.requests() callback = _Callback() + abortion_callback = _Callback() with self._control.fail(): response_future = self._invoker.future(group, method)( iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT) response_future.add_done_callback(callback) + response_future.add_abortion_callback(abortion_callback) self.assertIs(callback.future(), response_future) # Because the servicer fails outside of the thread from which the @@ -488,6 +493,7 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. 
with self.assertRaises(face.ExpirationError): response_future.result() self.assertIsNotNone(response_future.traceback()) + self.assertIsNotNone(abortion_callback.future()) def testFailedStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_receiver.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_receiver.py deleted file mode 100644 index 48f31fc6770..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_receiver.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
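The face-layer hunks above register a second _Callback for abortions alongside the done callback. _Callback itself is defined elsewhere in that test module and does not appear in this diff; judging only from its usage here, its shape is roughly the following sketch (an assumption, not the verbatim helper):

import threading


class _Callback(object):
    """Stores the single value it is invoked with and lets a test block until it arrives."""

    def __init__(self):
        self._condition = threading.Condition()
        self._value = None

    def __call__(self, value):
        with self._condition:
            self._value = value
            self._condition.notify_all()

    def future(self):
        # Done callbacks receive the response future itself (hence the method name);
        # the abortion callback presumably receives the abortion value instead.
        with self._condition:
            while self._value is None:
                self._condition.wait()
            return self._value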
- -"""A utility useful in tests of asynchronous, event-driven interfaces.""" - -import threading - -from grpc.framework.interfaces.face import face - - -class Receiver(face.ResponseReceiver): - """A utility object useful in tests of asynchronous code.""" - - def __init__(self): - self._condition = threading.Condition() - self._initial_metadata = None - self._responses = [] - self._terminal_metadata = None - self._code = None - self._details = None - self._completed = False - self._abortion = None - - def abort(self, abortion): - with self._condition: - self._abortion = abortion - self._condition.notify_all() - - def initial_metadata(self, initial_metadata): - with self._condition: - self._initial_metadata = initial_metadata - - def response(self, response): - with self._condition: - self._responses.append(response) - - def complete(self, terminal_metadata, code, details): - with self._condition: - self._terminal_metadata = terminal_metadata - self._code = code - self._details = details - self._completed = True - self._condition.notify_all() - - def block_until_terminated(self): - with self._condition: - while self._abortion is None and not self._completed: - self._condition.wait() - - def unary_response(self): - with self._condition: - if self._abortion is not None: - raise AssertionError('Aborted: "{}"!'.format(self._abortion)) - elif len(self._responses) != 1: - raise AssertionError( - '%d responses received, not exactly one!', len(self._responses)) - else: - return self._responses[0] - - def stream_responses(self): - with self._condition: - if self._abortion is None: - return list(self._responses) - else: - raise AssertionError('Aborted: "{}"!'.format(self._abortion)) - - def abortion(self): - with self._condition: - return self._abortion diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/links/__init__.py deleted file mode 100644 index 70865191060..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_cases.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_cases.py deleted file mode 100644 index 608e64119ec..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_cases.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Tests of the links interface of RPC Framework.""" - -# unittest is referenced from specification in this module. -import abc -import unittest # pylint: disable=unused-import - -import six - -from grpc.framework.interfaces.links import links -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.links import test_utilities - - -def at_least_n_payloads_received_predicate(n): - def predicate(ticket_sequence): - payload_count = 0 - for ticket in ticket_sequence: - if ticket.payload is not None: - payload_count += 1 - if n <= payload_count: - return True - else: - return False - return predicate - - -def terminated(ticket_sequence): - return ticket_sequence and ticket_sequence[-1].termination is not None - -_TRANSMISSION_GROUP = 'test.Group' -_TRANSMISSION_METHOD = 'TestMethod' - - -class TransmissionTest(six.with_metaclass(abc.ABCMeta)): - """Tests ticket transmission between two connected links. - - This class must be mixed into a unittest.TestCase that implements the abstract - methods it provides. - """ - - # This is a unittest.TestCase mix-in. 
- # pylint: disable=invalid-name - - @abc.abstractmethod - def create_transmitting_links(self): - """Creates two connected links for use in this test. - - Returns: - Two links.Links, the first of which will be used on the invocation side - of RPCs and the second of which will be used on the service side of - RPCs. - """ - raise NotImplementedError() - - @abc.abstractmethod - def destroy_transmitting_links(self, invocation_side_link, service_side_link): - """Destroys the two connected links created for this test. - - - Args: - invocation_side_link: The link used on the invocation side of RPCs in - this test. - service_side_link: The link used on the service side of RPCs in this - test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_invocation_initial_metadata(self): - """Creates a value for use as invocation-side initial metadata. - - Returns: - A metadata value appropriate for use as invocation-side initial metadata - or None if invocation-side initial metadata transmission is not - supported by the links under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_invocation_terminal_metadata(self): - """Creates a value for use as invocation-side terminal metadata. - - Returns: - A metadata value appropriate for use as invocation-side terminal - metadata or None if invocation-side terminal metadata transmission is - not supported by the links under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_service_initial_metadata(self): - """Creates a value for use as service-side initial metadata. - - Returns: - A metadata value appropriate for use as service-side initial metadata or - None if service-side initial metadata transmission is not supported by - the links under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_service_terminal_metadata(self): - """Creates a value for use as service-side terminal metadata. - - Returns: - A metadata value appropriate for use as service-side terminal metadata or - None if service-side terminal metadata transmission is not supported by - the links under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_invocation_completion(self): - """Creates values for use as invocation-side code and message. - - Returns: - An invocation-side code value and an invocation-side message value. - Either or both may be None if invocation-side code and/or - invocation-side message transmission is not supported by the links - under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_service_completion(self): - """Creates values for use as service-side code and message. - - Returns: - A service-side code value and a service-side message value. Either or - both may be None if service-side code and/or service-side message - transmission is not supported by the links under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def assertMetadataTransmitted(self, original_metadata, transmitted_metadata): - """Asserts that transmitted_metadata contains original_metadata. - - Args: - original_metadata: A metadata object used in this test. - transmitted_metadata: A metadata object obtained after transmission - through the system under test. - - Raises: - AssertionError: if the transmitted_metadata object does not contain - original_metadata. - """ - raise NotImplementedError() - - def group_and_method(self): - """Returns the group and method used in this test case. 
- - Returns: - A pair of the group and method used in this test case. - """ - return _TRANSMISSION_GROUP, _TRANSMISSION_METHOD - - def serialize_request(self, request): - """Serializes a request value used in this test case. - - Args: - request: A request value created by this test case. - - Returns: - A bytestring that is the serialization of the given request. - """ - return request - - def deserialize_request(self, serialized_request): - """Deserializes a request value used in this test case. - - Args: - serialized_request: A bytestring that is the serialization of some request - used in this test case. - - Returns: - The request value encoded by the given bytestring. - """ - return serialized_request - - def serialize_response(self, response): - """Serializes a response value used in this test case. - - Args: - response: A response value created by this test case. - - Returns: - A bytestring that is the serialization of the given response. - """ - return response - - def deserialize_response(self, serialized_response): - """Deserializes a response value used in this test case. - - Args: - serialized_response: A bytestring that is the serialization of some - response used in this test case. - - Returns: - The response value encoded by the given bytestring. - """ - return serialized_response - - def _assert_is_valid_metadata_payload_sequence( - self, ticket_sequence, payloads, initial_metadata, terminal_metadata): - initial_metadata_seen = False - seen_payloads = [] - terminal_metadata_seen = False - - for ticket in ticket_sequence: - if ticket.initial_metadata is not None: - self.assertFalse(initial_metadata_seen) - self.assertFalse(seen_payloads) - self.assertFalse(terminal_metadata_seen) - self.assertMetadataTransmitted(initial_metadata, ticket.initial_metadata) - initial_metadata_seen = True - - if ticket.payload is not None: - self.assertFalse(terminal_metadata_seen) - seen_payloads.append(ticket.payload) - - if ticket.terminal_metadata is not None: - self.assertFalse(terminal_metadata_seen) - self.assertMetadataTransmitted(terminal_metadata, ticket.terminal_metadata) - terminal_metadata_seen = True - self.assertSequenceEqual(payloads, seen_payloads) - - def _assert_is_valid_invocation_sequence( - self, ticket_sequence, group, method, payloads, initial_metadata, - terminal_metadata, termination): - self.assertLess(0, len(ticket_sequence)) - self.assertEqual(group, ticket_sequence[0].group) - self.assertEqual(method, ticket_sequence[0].method) - self._assert_is_valid_metadata_payload_sequence( - ticket_sequence, payloads, initial_metadata, terminal_metadata) - self.assertIs(termination, ticket_sequence[-1].termination) - - def _assert_is_valid_service_sequence( - self, ticket_sequence, payloads, initial_metadata, terminal_metadata, - code, message, termination): - self.assertLess(0, len(ticket_sequence)) - self._assert_is_valid_metadata_payload_sequence( - ticket_sequence, payloads, initial_metadata, terminal_metadata) - self.assertEqual(code, ticket_sequence[-1].code) - self.assertEqual(message, ticket_sequence[-1].message) - self.assertIs(termination, ticket_sequence[-1].termination) - - def setUp(self): - self._invocation_link, self._service_link = self.create_transmitting_links() - self._invocation_mate = test_utilities.RecordingLink() - self._service_mate = test_utilities.RecordingLink() - self._invocation_link.join_link(self._invocation_mate) - self._service_link.join_link(self._service_mate) - - def tearDown(self): - self.destroy_transmitting_links(self._invocation_link, 
self._service_link) - - def testSimplestRoundTrip(self): - """Tests transmission of one ticket in each direction.""" - invocation_operation_id = object() - invocation_payload = b'\x07' * 1023 - timeout = test_constants.LONG_TIMEOUT - invocation_initial_metadata = self.create_invocation_initial_metadata() - invocation_terminal_metadata = self.create_invocation_terminal_metadata() - invocation_code, invocation_message = self.create_invocation_completion() - service_payload = b'\x08' * 1025 - service_initial_metadata = self.create_service_initial_metadata() - service_terminal_metadata = self.create_service_terminal_metadata() - service_code, service_message = self.create_service_completion() - - original_invocation_ticket = links.Ticket( - invocation_operation_id, 0, _TRANSMISSION_GROUP, _TRANSMISSION_METHOD, - links.Ticket.Subscription.FULL, timeout, 0, invocation_initial_metadata, - invocation_payload, invocation_terminal_metadata, invocation_code, - invocation_message, links.Ticket.Termination.COMPLETION, None) - self._invocation_link.accept_ticket(original_invocation_ticket) - - self._service_mate.block_until_tickets_satisfy( - at_least_n_payloads_received_predicate(1)) - service_operation_id = self._service_mate.tickets()[0].operation_id - - self._service_mate.block_until_tickets_satisfy(terminated) - self._assert_is_valid_invocation_sequence( - self._service_mate.tickets(), _TRANSMISSION_GROUP, _TRANSMISSION_METHOD, - (invocation_payload,), invocation_initial_metadata, - invocation_terminal_metadata, links.Ticket.Termination.COMPLETION) - - original_service_ticket = links.Ticket( - service_operation_id, 0, None, None, links.Ticket.Subscription.FULL, - timeout, 0, service_initial_metadata, service_payload, - service_terminal_metadata, service_code, service_message, - links.Ticket.Termination.COMPLETION, None) - self._service_link.accept_ticket(original_service_ticket) - self._invocation_mate.block_until_tickets_satisfy(terminated) - self._assert_is_valid_service_sequence( - self._invocation_mate.tickets(), (service_payload,), - service_initial_metadata, service_terminal_metadata, service_code, - service_message, links.Ticket.Termination.COMPLETION) diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_utilities.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_utilities.py deleted file mode 100644 index 39c7f2fc635..00000000000 --- a/src/python/grpcio_tests/tests/unit/framework/interfaces/links/test_utilities.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior appropriate for use in tests.""" - -import logging -import threading -import time - -from grpc.framework.interfaces.links import links -from grpc.framework.interfaces.links import utilities - -# A more-or-less arbitrary limit on the length of raw data values to be logged. -_UNCOMFORTABLY_LONG = 48 - - -def _safe_for_log_ticket(ticket): - """Creates a safe-for-printing-to-the-log ticket for a given ticket. - - Args: - ticket: Any links.Ticket. - - Returns: - A links.Ticket that is as much as can be equal to the given ticket but - possibly features values like the string "" in - place of the actual values of the given ticket. - """ - if isinstance(ticket.payload, (basestring,)): - payload_length = len(ticket.payload) - else: - payload_length = -1 - if payload_length < _UNCOMFORTABLY_LONG: - return ticket - else: - return links.Ticket( - ticket.operation_id, ticket.sequence_number, - ticket.group, ticket.method, ticket.subscription, ticket.timeout, - ticket.allowance, ticket.initial_metadata, - ''.format(payload_length), - ticket.terminal_metadata, ticket.code, ticket.message, - ticket.termination, None) - - -class RecordingLink(links.Link): - """A Link that records every ticket passed to it.""" - - def __init__(self): - self._condition = threading.Condition() - self._tickets = [] - - def accept_ticket(self, ticket): - with self._condition: - self._tickets.append(ticket) - self._condition.notify_all() - - def join_link(self, link): - pass - - def block_until_tickets_satisfy(self, predicate): - """Blocks until the received tickets satisfy the given predicate. - - Args: - predicate: A callable that takes a sequence of tickets and returns a - boolean value. 
- """ - with self._condition: - while not predicate(self._tickets): - self._condition.wait() - - def tickets(self): - """Returns a copy of the list of all tickets received by this Link.""" - with self._condition: - return tuple(self._tickets) - - -class _Pipe(object): - """A conduit that logs all tickets passed through it.""" - - def __init__(self, name): - self._lock = threading.Lock() - self._name = name - self._left_mate = utilities.NULL_LINK - self._right_mate = utilities.NULL_LINK - - def accept_left_to_right_ticket(self, ticket): - with self._lock: - logging.warning( - '%s: moving left to right through %s: %s', time.time(), self._name, - _safe_for_log_ticket(ticket)) - try: - self._right_mate.accept_ticket(ticket) - except Exception as e: # pylint: disable=broad-except - logging.exception(e) - - def accept_right_to_left_ticket(self, ticket): - with self._lock: - logging.warning( - '%s: moving right to left through %s: %s', time.time(), self._name, - _safe_for_log_ticket(ticket)) - try: - self._left_mate.accept_ticket(ticket) - except Exception as e: # pylint: disable=broad-except - logging.exception(e) - - def join_left_mate(self, left_mate): - with self._lock: - self._left_mate = utilities.NULL_LINK if left_mate is None else left_mate - - def join_right_mate(self, right_mate): - with self._lock: - self._right_mate = ( - utilities.NULL_LINK if right_mate is None else right_mate) - - -class _Facade(links.Link): - - def __init__(self, accept, join): - self._accept = accept - self._join = join - - def accept_ticket(self, ticket): - self._accept(ticket) - - def join_link(self, link): - self._join(link) - - -def logging_links(name): - """Creates a conduit that logs all tickets passed through it. - - Args: - name: A name to use for the conduit to identify itself in logging output. - - Returns: - Two links.Links, the first of which is the "left" side of the conduit - and the second of which is the "right" side of the conduit. 
- """ - pipe = _Pipe(name) - left_facade = _Facade(pipe.accept_left_to_right_ticket, pipe.join_left_mate) - right_facade = _Facade(pipe.accept_right_to_left_ticket, pipe.join_right_mate) - return left_facade, right_facade diff --git a/src/ruby/.rubocop.yml b/src/ruby/.rubocop.yml index 34bb4775435..0f61ccfa812 100644 --- a/src/ruby/.rubocop.yml +++ b/src/ruby/.rubocop.yml @@ -5,8 +5,8 @@ inherit_from: .rubocop_todo.yml AllCops: Exclude: - 'bin/apis/**/*' - - 'bin/math.rb' - - 'bin/math_services.rb' + - 'bin/math_pb.rb' + - 'bin/math_services_pb.rb' - 'pb/grpc/health/v1/*' - 'pb/test/**/*' diff --git a/src/ruby/bin/math_client.rb b/src/ruby/bin/math_client.rb index d7e00e42938..1f238a798ba 100755 --- a/src/ruby/bin/math_client.rb +++ b/src/ruby/bin/math_client.rb @@ -40,7 +40,7 @@ $LOAD_PATH.unshift(lib_dir) unless $LOAD_PATH.include?(lib_dir) $LOAD_PATH.unshift(this_dir) unless $LOAD_PATH.include?(this_dir) require 'grpc' -require 'math_services' +require 'math_services_pb' require 'optparse' include GRPC::Core::TimeConsts diff --git a/src/ruby/bin/math.rb b/src/ruby/bin/math_pb.rb old mode 100755 new mode 100644 similarity index 100% rename from src/ruby/bin/math.rb rename to src/ruby/bin/math_pb.rb diff --git a/src/ruby/bin/math_server.rb b/src/ruby/bin/math_server.rb index 1ee4c5632d9..751a6ebcab7 100755 --- a/src/ruby/bin/math_server.rb +++ b/src/ruby/bin/math_server.rb @@ -42,7 +42,7 @@ $LOAD_PATH.unshift(this_dir) unless $LOAD_PATH.include?(this_dir) require 'forwardable' require 'grpc' require 'logger' -require 'math_services' +require 'math_services_pb' require 'optparse' # RubyLogger defines a logger for gRPC based on the standard ruby logger. diff --git a/src/ruby/bin/math_services.rb b/src/ruby/bin/math_services_pb.rb old mode 100755 new mode 100644 similarity index 92% rename from src/ruby/bin/math_services.rb rename to src/ruby/bin/math_services_pb.rb index 34c36abddae..2ba1825d4f4 --- a/src/ruby/bin/math_services.rb +++ b/src/ruby/bin/math_services_pb.rb @@ -32,7 +32,7 @@ # require 'grpc' -require 'math' +require 'math_pb' module Math module Math @@ -44,15 +44,15 @@ module Math self.unmarshal_class_method = :decode self.service_name = 'math.Math' - # Div divides args.dividend by args.divisor and returns the quotient and - # remainder. + # Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + # and remainder. rpc :Div, DivArgs, DivReply # DivMany accepts an arbitrary number of division args from the client stream # and sends back the results in the reply stream. The stream continues until # the client closes its end; the server does the same after sending all the # replies. The stream ends immediately if either end aborts. rpc :DivMany, stream(DivArgs), stream(DivReply) - # Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib + # Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib # generates up to limit numbers; otherwise it continues until the call is # canceled. Unlike Fib above, Fib has no final FibReply. 
rpc :Fib, FibArgs, stream(Num) diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c index 21261244436..67a42af619b 100644 --- a/src/ruby/ext/grpc/rb_call.c +++ b/src/ruby/ext/grpc/rb_call.c @@ -38,6 +38,7 @@ #include #include +#include #include "rb_byte_buffer.h" #include "rb_call_credentials.h" @@ -910,6 +911,12 @@ static void Init_grpc_op_codes() { UINT2NUM(GRPC_OP_RECV_CLOSE_ON_SERVER)); } +static void Init_grpc_metadata_keys() { + VALUE grpc_rb_mMetadataKeys = rb_define_module_under(grpc_rb_mGrpcCore, "MetadataKeys"); + rb_define_const(grpc_rb_mMetadataKeys, "COMPRESSION_REQUEST_ALGORITHM", + rb_str_new2(GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY)); +} + void Init_grpc_call() { /* CallError inherits from Exception to signal that it is non-recoverable */ grpc_rb_eCallError = @@ -972,6 +979,7 @@ void Init_grpc_call() { Init_grpc_error_codes(); Init_grpc_op_codes(); Init_grpc_write_flags(); + Init_grpc_metadata_keys(); } /* Gets the call from the ruby object */ diff --git a/src/ruby/ext/grpc/rb_channel.c b/src/ruby/ext/grpc/rb_channel.c index 18a15d01252..e6d30a174b8 100644 --- a/src/ruby/ext/grpc/rb_channel.c +++ b/src/ruby/ext/grpc/rb_channel.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "rb_grpc.h" #include "rb_call.h" #include "rb_channel_args.h" @@ -71,6 +72,7 @@ typedef struct grpc_rb_channel { /* The actual channel */ grpc_channel *wrapped; + grpc_completion_queue *queue; } grpc_rb_channel; /* Destroys Channel instances. */ @@ -83,6 +85,7 @@ static void grpc_rb_channel_free(void *p) { if (ch->wrapped != NULL) { grpc_channel_destroy(ch->wrapped); + grpc_rb_completion_queue_destroy(ch->queue); } xfree(p); @@ -165,6 +168,7 @@ static VALUE grpc_rb_channel_init(int argc, VALUE *argv, VALUE self) { } rb_ivar_set(self, id_target, target); wrapper->wrapped = ch; + wrapper->queue = grpc_completion_queue_create(NULL); return self; } @@ -203,16 +207,18 @@ static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE *argv, the completion queue with success=0 */ static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self, VALUE last_state, - VALUE cqueue, - VALUE deadline, - VALUE tag) { + VALUE deadline) { grpc_rb_channel *wrapper = NULL; grpc_channel *ch = NULL; grpc_completion_queue *cq = NULL; - cq = grpc_rb_get_wrapped_completion_queue(cqueue); + void *tag = wrapper; + + grpc_event event; + TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper); ch = wrapper->wrapped; + cq = wrapper->queue; if (ch == NULL) { rb_raise(rb_eRuntimeError, "closed!"); return Qnil; @@ -222,9 +228,16 @@ static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self, (grpc_connectivity_state)NUM2LONG(last_state), grpc_rb_time_timeval(deadline, /* absolute time */ 0), cq, - ROBJECT(tag)); + tag); - return Qnil; + event = rb_completion_queue_pluck(cq, tag, + gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + + if (event.success) { + return Qtrue; + } else { + return Qfalse; + } } /* Create a call given a grpc_channel, in order to call method. 
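The rb_channel.c hunk above reworks watch_connectivity_state from a tag-on-a-user-queue API into a blocking call on the channel's own completion queue that returns true (the state changed) or false (the deadline expired). A rough Ruby sketch of driving the revised binding, assuming the method is exposed on GRPC::Core::Channel with the two-argument (last_state, deadline) signature shown in the C code, and that connectivity_state still accepts an optional try-to-connect flag:

    require 'grpc'

    ch = GRPC::Core::Channel.new('localhost:50051', {}, :this_channel_is_insecure)
    state = ch.connectivity_state(true)   # true asks the channel to try to connect

    # Blocks until the connectivity state leaves `state` or the deadline passes.
    if ch.watch_connectivity_state(state, Time.now + 5)
      puts "connectivity state changed from #{state}"
    else
      puts 'deadline expired before a state change'
    end
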
The request diff --git a/src/ruby/ext/grpc/rb_completion_queue.h b/src/ruby/ext/grpc/rb_completion_queue.h index 9f8f6aa5fff..aa9dc6416af 100644 --- a/src/ruby/ext/grpc/rb_completion_queue.h +++ b/src/ruby/ext/grpc/rb_completion_queue.h @@ -38,9 +38,6 @@ #include -/* Gets the wrapped completion queue from the ruby wrapper */ -grpc_completion_queue *grpc_rb_get_wrapped_completion_queue(VALUE v); - void grpc_rb_completion_queue_destroy(grpc_completion_queue *cq); /** diff --git a/src/ruby/ext/grpc/rb_compression_options.c b/src/ruby/ext/grpc/rb_compression_options.c new file mode 100644 index 00000000000..0a3a215b1ca --- /dev/null +++ b/src/ruby/ext/grpc/rb_compression_options.c @@ -0,0 +1,464 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include + +#include "rb_compression_options.h" +#include "rb_grpc_imports.generated.h" + +#include +#include +#include +#include +#include +#include + +#include "rb_grpc.h" + +static VALUE grpc_rb_cCompressionOptions = Qnil; + +/* Ruby Ids for the names of valid compression levels. */ +static VALUE id_compress_level_none = Qnil; +static VALUE id_compress_level_low = Qnil; +static VALUE id_compress_level_medium = Qnil; +static VALUE id_compress_level_high = Qnil; + +/* grpc_rb_compression_options wraps a grpc_compression_options. + * It can be used to get the channel argument key-values for specific + * compression settings. */ + +/* Note that ruby objects of this type don't carry any state in other + * Ruby objects and don't have a mark for GC. */ +typedef struct grpc_rb_compression_options { + /* The actual compression options that's being wrapped */ + grpc_compression_options *wrapped; +} grpc_rb_compression_options; + +/* Destroys the compression options instances and free the + * wrapped grpc compression options. 
*/ +static void grpc_rb_compression_options_free(void *p) { + grpc_rb_compression_options *wrapper = NULL; + if (p == NULL) { + return; + }; + wrapper = (grpc_rb_compression_options *)p; + + if (wrapper->wrapped != NULL) { + gpr_free(wrapper->wrapped); + wrapper->wrapped = NULL; + } + + xfree(p); +} + +/* Ruby recognized data type for the CompressionOptions class. */ +static rb_data_type_t grpc_rb_compression_options_data_type = { + "grpc_compression_options", + {NULL, + grpc_rb_compression_options_free, + GRPC_RB_MEMSIZE_UNAVAILABLE, + {NULL, NULL}}, + NULL, + NULL, +#ifdef RUBY_TYPED_FREE_IMMEDIATELY + RUBY_TYPED_FREE_IMMEDIATELY +#endif +}; + +/* Allocates CompressionOptions instances. + Allocate the wrapped grpc compression options and + initialize it here too. */ +static VALUE grpc_rb_compression_options_alloc(VALUE cls) { + grpc_rb_compression_options *wrapper = + gpr_malloc(sizeof(grpc_rb_compression_options)); + wrapper->wrapped = NULL; + wrapper->wrapped = gpr_malloc(sizeof(grpc_compression_options)); + grpc_compression_options_init(wrapper->wrapped); + + return TypedData_Wrap_Struct(cls, &grpc_rb_compression_options_data_type, + wrapper); +} + +/* Disables a compression algorithm, given the GRPC core internal number of a + * compression algorithm. */ +VALUE grpc_rb_compression_options_disable_compression_algorithm_internal( + VALUE self, VALUE algorithm_to_disable) { + grpc_compression_algorithm compression_algorithm = 0; + grpc_rb_compression_options *wrapper = NULL; + + TypedData_Get_Struct(self, grpc_rb_compression_options, + &grpc_rb_compression_options_data_type, wrapper); + compression_algorithm = + (grpc_compression_algorithm)NUM2INT(algorithm_to_disable); + + grpc_compression_options_disable_algorithm(wrapper->wrapped, + compression_algorithm); + + return Qnil; +} + +/* Gets the compression internal enum value of a compression level given its + * name. */ +grpc_compression_level grpc_rb_compression_options_level_name_to_value_internal( + VALUE level_name) { + Check_Type(level_name, T_SYMBOL); + + /* Check the compression level of the name passed in, and see which macro + * from the GRPC core header files match. */ + if (id_compress_level_none == SYM2ID(level_name)) { + return GRPC_COMPRESS_LEVEL_NONE; + } else if (id_compress_level_low == SYM2ID(level_name)) { + return GRPC_COMPRESS_LEVEL_LOW; + } else if (id_compress_level_medium == SYM2ID(level_name)) { + return GRPC_COMPRESS_LEVEL_MED; + } else if (id_compress_level_high == SYM2ID(level_name)) { + return GRPC_COMPRESS_LEVEL_HIGH; + } + + rb_raise(rb_eArgError, + "Unrecognized compression level name." + "Valid compression level names are none, low, medium, and high."); + + /* Dummy return statement. */ + return GRPC_COMPRESS_LEVEL_NONE; +} + +/* Sets the default compression level, given the name of a compression level. + * Throws an error if no algorithm matched. */ +void grpc_rb_compression_options_set_default_level( + grpc_compression_options *options, VALUE new_level_name) { + options->default_level.level = + grpc_rb_compression_options_level_name_to_value_internal(new_level_name); + options->default_level.is_set = 1; +} + +/* Gets the internal value of a compression algorithm suitable as the value + * in a GRPC core channel arguments hash. + * algorithm_value is an out parameter. + * Raises an error if the name of the algorithm passed in is invalid. 
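As a quick illustration of the validation helpers above (assumed behaviour inferred from this new file, not code from the patch): compression level names are limited to :none, :low, :medium and :high, and an algorithm name that grpc_compression_algorithm_parse does not recognise raises a NameError from the Ruby constructor.

    require 'grpc'

    GRPC::Core::CompressionOptions.new(default_level: :medium)     # accepted
    GRPC::Core::CompressionOptions.new(default_algorithm: :gzip)   # accepted
    begin
      GRPC::Core::CompressionOptions.new(default_algorithm: :zstd) # unknown algorithm name
    rescue NameError => e
      puts e.message   # "Invalid compression algorithm name: zstd"
    end
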
*/ +void grpc_rb_compression_options_algorithm_name_to_value_internal( + grpc_compression_algorithm *algorithm_value, VALUE algorithm_name) { + char *name_str = NULL; + long name_len = 0; + VALUE algorithm_name_as_string = Qnil; + + Check_Type(algorithm_name, T_SYMBOL); + + /* Convert the algorithm symbol to a ruby string, so that we can get the + * correct C string out of it. */ + algorithm_name_as_string = rb_funcall(algorithm_name, rb_intern("to_s"), 0); + + name_str = RSTRING_PTR(algorithm_name_as_string); + name_len = RSTRING_LEN(algorithm_name_as_string); + + /* Raise an error if the name isn't recognized as a compression algorithm by + * the algorithm parse function + * in GRPC core. */ + if (!grpc_compression_algorithm_parse(name_str, name_len, algorithm_value)) { + rb_raise(rb_eNameError, "Invalid compression algorithm name: %s", + StringValueCStr(algorithm_name_as_string)); + } +} + +/* Indicates whether a given algorithm is enabled on this instance, given the + * readable algorithm name. */ +VALUE grpc_rb_compression_options_is_algorithm_enabled(VALUE self, + VALUE algorithm_name) { + grpc_rb_compression_options *wrapper = NULL; + grpc_compression_algorithm internal_algorithm_value; + + TypedData_Get_Struct(self, grpc_rb_compression_options, + &grpc_rb_compression_options_data_type, wrapper); + grpc_rb_compression_options_algorithm_name_to_value_internal( + &internal_algorithm_value, algorithm_name); + + if (grpc_compression_options_is_algorithm_enabled(wrapper->wrapped, + internal_algorithm_value)) { + return Qtrue; + } + return Qfalse; +} + +/* Sets the default algorithm to the name of the algorithm passed in. + * Raises an error if the name is not a valid compression algorithm name. */ +void grpc_rb_compression_options_set_default_algorithm( + grpc_compression_options *options, VALUE algorithm_name) { + grpc_rb_compression_options_algorithm_name_to_value_internal( + &options->default_algorithm.algorithm, algorithm_name); + options->default_algorithm.is_set = 1; +} + +/* Disables an algorithm on the current instance, given the name of an + * algorithm. + * Fails if the algorithm name is invalid. */ +void grpc_rb_compression_options_disable_algorithm( + grpc_compression_options *compression_options, VALUE algorithm_name) { + grpc_compression_algorithm internal_algorithm_value; + + grpc_rb_compression_options_algorithm_name_to_value_internal( + &internal_algorithm_value, algorithm_name); + grpc_compression_options_disable_algorithm(compression_options, + internal_algorithm_value); +} + +/* Provides a ruby hash of GRPC core channel argument key-values that + * correspond to the compression settings on this instance. */ +VALUE grpc_rb_compression_options_to_hash(VALUE self) { + grpc_rb_compression_options *wrapper = NULL; + grpc_compression_options *compression_options = NULL; + VALUE channel_arg_hash = rb_hash_new(); + VALUE key = Qnil; + VALUE value = Qnil; + + TypedData_Get_Struct(self, grpc_rb_compression_options, + &grpc_rb_compression_options_data_type, wrapper); + compression_options = wrapper->wrapped; + + /* Add key-value pairs to the new Ruby hash. It can be used + * as GRPC core channel arguments. 
*/ + if (compression_options->default_level.is_set) { + key = rb_str_new2(GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL); + value = INT2NUM((int)compression_options->default_level.level); + rb_hash_aset(channel_arg_hash, key, value); + } + + if (compression_options->default_algorithm.is_set) { + key = rb_str_new2(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM); + value = INT2NUM((int)compression_options->default_algorithm.algorithm); + rb_hash_aset(channel_arg_hash, key, value); + } + + key = rb_str_new2(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET); + value = INT2NUM((int)compression_options->enabled_algorithms_bitset); + rb_hash_aset(channel_arg_hash, key, value); + + return channel_arg_hash; +} + +/* Converts an internal enum level value to a readable level name. + * Fails if the level value is invalid. */ +VALUE grpc_rb_compression_options_level_value_to_name_internal( + grpc_compression_level compression_value) { + switch (compression_value) { + case GRPC_COMPRESS_LEVEL_NONE: + return ID2SYM(id_compress_level_none); + case GRPC_COMPRESS_LEVEL_LOW: + return ID2SYM(id_compress_level_low); + case GRPC_COMPRESS_LEVEL_MED: + return ID2SYM(id_compress_level_medium); + case GRPC_COMPRESS_LEVEL_HIGH: + return ID2SYM(id_compress_level_high); + default: + rb_raise( + rb_eArgError, + "Failed to convert compression level value to name for value: %d", + (int)compression_value); + } +} + +/* Converts an algorithm internal enum value to a readable name. + * Fails if the enum value is invalid. */ +VALUE grpc_rb_compression_options_algorithm_value_to_name_internal( + grpc_compression_algorithm internal_value) { + char *algorithm_name = NULL; + + if (!grpc_compression_algorithm_name(internal_value, &algorithm_name)) { + rb_raise(rb_eArgError, "Failed to convert algorithm value to name"); + } + + return ID2SYM(rb_intern(algorithm_name)); +} + +/* Gets the readable name of the default algorithm if one has been set. + * Returns nil if no algorithm has been set. */ +VALUE grpc_rb_compression_options_get_default_algorithm(VALUE self) { + grpc_compression_algorithm internal_value; + grpc_rb_compression_options *wrapper = NULL; + + TypedData_Get_Struct(self, grpc_rb_compression_options, + &grpc_rb_compression_options_data_type, wrapper); + + if (wrapper->wrapped->default_algorithm.is_set) { + internal_value = wrapper->wrapped->default_algorithm.algorithm; + return grpc_rb_compression_options_algorithm_value_to_name_internal( + internal_value); + } + + return Qnil; +} + +/* Gets the internal value of the default compression level that is to be passed + * to the GRPC core as a channel argument value. + * A nil return value means that it hasn't been set. */ +VALUE grpc_rb_compression_options_get_default_level(VALUE self) { + grpc_compression_level internal_value; + grpc_rb_compression_options *wrapper = NULL; + + TypedData_Get_Struct(self, grpc_rb_compression_options, + &grpc_rb_compression_options_data_type, wrapper); + + if (wrapper->wrapped->default_level.is_set) { + internal_value = wrapper->wrapped->default_level.level; + return grpc_rb_compression_options_level_value_to_name_internal( + internal_value); + } + + return Qnil; +} + +/* Gets a list of the disabled algorithms as readable names. + * Returns an empty list if no algorithms have been disabled. 
*/ +VALUE grpc_rb_compression_options_get_disabled_algorithms(VALUE self) { + VALUE disabled_algorithms = rb_ary_new(); + grpc_compression_algorithm internal_value; + grpc_rb_compression_options *wrapper = NULL; + + TypedData_Get_Struct(self, grpc_rb_compression_options, + &grpc_rb_compression_options_data_type, wrapper); + + for (internal_value = GRPC_COMPRESS_NONE; + internal_value < GRPC_COMPRESS_ALGORITHMS_COUNT; internal_value++) { + if (!grpc_compression_options_is_algorithm_enabled(wrapper->wrapped, + internal_value)) { + rb_ary_push(disabled_algorithms, + grpc_rb_compression_options_algorithm_value_to_name_internal( + internal_value)); + } + } + return disabled_algorithms; +} + +/* Initializes the compression options wrapper. + * Takes an optional hash parameter. + * + * Example call-seq: + * options = CompressionOptions.new( + * default_level: :none, + * disabled_algorithms: [:gzip] + * ) + * channel_arg hash = Hash.new[...] + * channel_arg_hash_with_compression_options = channel_arg_hash.merge(options) + */ +VALUE grpc_rb_compression_options_init(int argc, VALUE *argv, VALUE self) { + grpc_rb_compression_options *wrapper = NULL; + VALUE default_algorithm = Qnil; + VALUE default_level = Qnil; + VALUE disabled_algorithms = Qnil; + VALUE algorithm_name = Qnil; + VALUE hash_arg = Qnil; + + rb_scan_args(argc, argv, "01", &hash_arg); + + /* Check if the hash parameter was passed, or if invalid arguments were + * passed. */ + if (hash_arg == Qnil) { + return self; + } else if (TYPE(hash_arg) != T_HASH || argc > 1) { + rb_raise(rb_eArgError, + "Invalid arguments. Expecting optional hash parameter"); + } + + TypedData_Get_Struct(self, grpc_rb_compression_options, + &grpc_rb_compression_options_data_type, wrapper); + + /* Set the default algorithm if one was chosen. */ + default_algorithm = + rb_hash_aref(hash_arg, ID2SYM(rb_intern("default_algorithm"))); + if (default_algorithm != Qnil) { + grpc_rb_compression_options_set_default_algorithm(wrapper->wrapped, + default_algorithm); + } + + /* Set the default level if one was chosen. */ + default_level = rb_hash_aref(hash_arg, ID2SYM(rb_intern("default_level"))); + if (default_level != Qnil) { + grpc_rb_compression_options_set_default_level(wrapper->wrapped, + default_level); + } + + /* Set the disabled algorithms if any were chosen. */ + disabled_algorithms = + rb_hash_aref(hash_arg, ID2SYM(rb_intern("disabled_algorithms"))); + if (disabled_algorithms != Qnil) { + Check_Type(disabled_algorithms, T_ARRAY); + + for (int i = 0; i < RARRAY_LEN(disabled_algorithms); i++) { + algorithm_name = rb_ary_entry(disabled_algorithms, i); + grpc_rb_compression_options_disable_algorithm(wrapper->wrapped, + algorithm_name); + } + } + + return self; +} + +void Init_grpc_compression_options() { + grpc_rb_cCompressionOptions = rb_define_class_under( + grpc_rb_mGrpcCore, "CompressionOptions", rb_cObject); + + /* Allocates an object managed by the ruby runtime. */ + rb_define_alloc_func(grpc_rb_cCompressionOptions, + grpc_rb_compression_options_alloc); + + /* Initializes the ruby wrapper. #new method takes an optional hash argument. + */ + rb_define_method(grpc_rb_cCompressionOptions, "initialize", + grpc_rb_compression_options_init, -1); + + /* Methods for getting the default algorithm, default level, and disabled + * algorithms as readable names. 
*/ + rb_define_method(grpc_rb_cCompressionOptions, "default_algorithm", + grpc_rb_compression_options_get_default_algorithm, 0); + rb_define_method(grpc_rb_cCompressionOptions, "default_level", + grpc_rb_compression_options_get_default_level, 0); + rb_define_method(grpc_rb_cCompressionOptions, "disabled_algorithms", + grpc_rb_compression_options_get_disabled_algorithms, 0); + + /* Determines whether or not an algorithm is enabled, given a readable + * algorithm name.*/ + rb_define_method(grpc_rb_cCompressionOptions, "algorithm_enabled?", + grpc_rb_compression_options_is_algorithm_enabled, 1); + + /* Provides a hash of the compression settings suitable + * for passing to server or channel args. */ + rb_define_method(grpc_rb_cCompressionOptions, "to_hash", + grpc_rb_compression_options_to_hash, 0); + rb_define_alias(grpc_rb_cCompressionOptions, "to_channel_arg_hash", + "to_hash"); + + /* Ruby ids for the names of the different compression levels. */ + id_compress_level_none = rb_intern("none"); + id_compress_level_low = rb_intern("low"); + id_compress_level_medium = rb_intern("medium"); + id_compress_level_high = rb_intern("high"); +} diff --git a/src/python/grpcio/grpc/_cython/imports.generated.c b/src/ruby/ext/grpc/rb_compression_options.h similarity index 83% rename from src/python/grpcio/grpc/_cython/imports.generated.c rename to src/ruby/ext/grpc/rb_compression_options.h index c0080b5a47a..4d5a9247864 100644 --- a/src/python/grpcio/grpc/_cython/imports.generated.c +++ b/src/ruby/ext/grpc/rb_compression_options.h @@ -1,6 +1,6 @@ /* * - * Copyright 2016, Google Inc. + * Copyright 2015, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -31,7 +31,14 @@ * */ -/* TODO(atash) remove cruft */ -#include +#ifndef GRPC_RB_COMPRESSION_OPTIONS_H_ +#define GRPC_RB_COMPRESSION_OPTIONS_H_ -#include "imports.generated.h" +#include + +#include + +/* Initializes the compression options ruby wrapper. */ +void Init_grpc_compression_options(); + +#endif /* GRPC_RB_COMPRESSION_OPTIONS_H_ */ diff --git a/src/ruby/ext/grpc/rb_grpc.c b/src/ruby/ext/grpc/rb_grpc.c index 188a62475d2..17cd165a91d 100644 --- a/src/ruby/ext/grpc/rb_grpc.c +++ b/src/ruby/ext/grpc/rb_grpc.c @@ -49,6 +49,7 @@ #include "rb_loader.h" #include "rb_server.h" #include "rb_server_credentials.h" +#include "rb_compression_options.h" static VALUE grpc_rb_cTimeVal = Qnil; @@ -220,7 +221,7 @@ static VALUE grpc_rb_time_val_to_time(VALUE self) { time_const); real_time = gpr_convert_clock_type(*time_const, GPR_CLOCK_REALTIME); return rb_funcall(rb_cTime, id_at, 2, INT2NUM(real_time.tv_sec), - INT2NUM(real_time.tv_nsec)); + INT2NUM(real_time.tv_nsec / 1000)); } /* Invokes inspect on the ctime version of the time val. 
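Taken together, the new CompressionOptions class and the MetadataKeys constant added earlier in rb_call.c give Ruby callers channel-level compression defaults plus a per-call override. A short usage sketch — the Greeter stub is a hypothetical generated *_services_pb class, everything else is the API registered above:

    require 'grpc'

    options = GRPC::Core::CompressionOptions.new(default_algorithm: :gzip,
                                                 default_level: :high,
                                                 disabled_algorithms: [:deflate])
    options.default_algorithm             # => :gzip
    options.default_level                 # => :high
    options.disabled_algorithms           # => [:deflate]
    options.algorithm_enabled?(:deflate)  # => false

    # to_hash / to_channel_arg_hash yields GRPC core channel arguments that can
    # be merged into the channel_args of a stub:
    stub = Helloworld::Greeter::Stub.new('localhost:50051',
                                         :this_channel_is_insecure,
                                         channel_args: options.to_channel_arg_hash)

    # Compression can still be suppressed for an individual call:
    no_compress = { GRPC::Core::MetadataKeys::COMPRESSION_REQUEST_ALGORITHM => 'identity' }
    # stub.say_hello(request, metadata: no_compress)
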
*/ @@ -332,4 +333,5 @@ void Init_grpc_c() { Init_grpc_server_credentials(); Init_grpc_status_codes(); Init_grpc_time_consts(); + Init_grpc_compression_options(); } diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c index 9748cb576ba..d7f862cd9ce 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c @@ -186,6 +186,7 @@ gpr_set_log_function_type gpr_set_log_function_import; gpr_slice_ref_type gpr_slice_ref_import; gpr_slice_unref_type gpr_slice_unref_import; gpr_slice_new_type gpr_slice_new_import; +gpr_slice_new_with_user_data_type gpr_slice_new_with_user_data_import; gpr_slice_new_with_len_type gpr_slice_new_with_len_import; gpr_slice_malloc_type gpr_slice_malloc_import; gpr_slice_from_copied_string_type gpr_slice_from_copied_string_import; @@ -458,6 +459,7 @@ void grpc_rb_load_imports(HMODULE library) { gpr_slice_ref_import = (gpr_slice_ref_type) GetProcAddress(library, "gpr_slice_ref"); gpr_slice_unref_import = (gpr_slice_unref_type) GetProcAddress(library, "gpr_slice_unref"); gpr_slice_new_import = (gpr_slice_new_type) GetProcAddress(library, "gpr_slice_new"); + gpr_slice_new_with_user_data_import = (gpr_slice_new_with_user_data_type) GetProcAddress(library, "gpr_slice_new_with_user_data"); gpr_slice_new_with_len_import = (gpr_slice_new_with_len_type) GetProcAddress(library, "gpr_slice_new_with_len"); gpr_slice_malloc_import = (gpr_slice_malloc_type) GetProcAddress(library, "gpr_slice_malloc"); gpr_slice_from_copied_string_import = (gpr_slice_from_copied_string_type) GetProcAddress(library, "gpr_slice_from_copied_string"); diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h index 6f0974e31b4..14da63780cb 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h @@ -509,6 +509,9 @@ extern gpr_slice_unref_type gpr_slice_unref_import; typedef gpr_slice(*gpr_slice_new_type)(void *p, size_t len, void (*destroy)(void *)); extern gpr_slice_new_type gpr_slice_new_import; #define gpr_slice_new gpr_slice_new_import +typedef gpr_slice(*gpr_slice_new_with_user_data_type)(void *p, size_t len, void (*destroy)(void *), void *user_data); +extern gpr_slice_new_with_user_data_type gpr_slice_new_with_user_data_import; +#define gpr_slice_new_with_user_data gpr_slice_new_with_user_data_import typedef gpr_slice(*gpr_slice_new_with_len_type)(void *p, size_t len, void (*destroy)(void *, size_t)); extern gpr_slice_new_with_len_type gpr_slice_new_with_len_import; #define gpr_slice_new_with_len gpr_slice_new_with_len_import diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c index bf26841fd22..2a6a246e677 100644 --- a/src/ruby/ext/grpc/rb_server.c +++ b/src/ruby/ext/grpc/rb_server.c @@ -218,7 +218,7 @@ static VALUE grpc_rb_server_request_call(VALUE self) { grpc_rb_sNewServerRpc, rb_str_new2(st.details.method), rb_str_new2(st.details.host), rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec), - INT2NUM(deadline.tv_nsec)), + INT2NUM(deadline.tv_nsec / 1000)), grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call, call_queue), NULL); grpc_request_call_stack_cleanup(&st); diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb index 425dc3e5198..c2ac3c4dafe 100644 --- a/src/ruby/lib/grpc/generic/bidi_call.rb +++ b/src/ruby/lib/grpc/generic/bidi_call.rb @@ -76,7 +76,7 @@ module GRPC # block that can be invoked with each response. 
# # @param requests the Enumerable of requests to send - # @op_notifier a Notifier used to signal completion + # @param op_notifier a Notifier used to signal completion # @return an Enumerator of requests to yield def run_on_client(requests, op_notifier, &blk) @op_notifier = op_notifier diff --git a/src/ruby/lib/grpc/generic/client_stub.rb b/src/ruby/lib/grpc/generic/client_stub.rb index 9d6bd3bf590..0d7c1f7805e 100644 --- a/src/ruby/lib/grpc/generic/client_stub.rb +++ b/src/ruby/lib/grpc/generic/client_stub.rb @@ -34,7 +34,8 @@ require_relative '../version' module GRPC # rubocop:disable Metrics/ParameterLists - # ClientStub represents an endpoint used to send requests to GRPC servers. + # ClientStub represents a client connection to a gRPC server, and can be used + # to send requests. class ClientStub include Core::StatusCodes include Core::TimeConsts @@ -75,8 +76,9 @@ module GRPC # my_stub = ClientStub.new(example.host.com:50505, # :this_channel_is_insecure) # - # Any arbitrary keyword arguments are treated as channel arguments used to - # configure the RPC connection to the host. + # If a channel_override argument is passed, it will be used as the + # underlying channel. Otherwise, the channel_args argument will be used + # to construct a new underlying channel. # # There are some specific keyword args that are not used to configure the # channel: @@ -91,10 +93,17 @@ module GRPC # # @param host [String] the host the stub connects to # @param creds [Core::ChannelCredentials|Symbol] the channel credentials, or - # :this_channel_is_insecure + # :this_channel_is_insecure, which explicitly indicates that the client + # should be created with an insecure connection. Note: this argument is + # ignored if the channel_override argument is provided. # @param channel_override [Core::Channel] a pre-created channel # @param timeout [Number] the default timeout to use in requests - # @param channel_args [Hash] the channel arguments + # @param propagate_mask [Number] A bitwise combination of flags in + # GRPC::Core::PropagateMasks. Indicates how data should be propagated + # from parent server calls to child client calls if this client is being + # used within a gRPC server. + # @param channel_args [Hash] the channel arguments. Note: this argument is + # ignored if the channel_override argument is provided. def initialize(host, creds, channel_override: nil, timeout: nil, @@ -389,11 +398,11 @@ module GRPC # @param marshal [Function] f(obj)->string that marshals requests # @param unmarshal [Function] f(string)->obj that unmarshals responses # @param deadline [Time] (optional) the time the request should complete + # @param return_op [true|false] return an Operation if true # @param parent [Core::Call] a prior call whose reserved metadata # will be propagated by this one. 
# @param credentials [Core::CallCredentials] credentials to use when making # the call - # @param return_op [true|false] return an Operation if true # @param metadata [Hash] metadata to be sent to the server # @param blk [Block] when provided, is executed for each response # @return [Enumerator|nil|Operation] as discussed above @@ -430,7 +439,8 @@ module GRPC # @param unmarshal [Function] f(string)->obj that unmarshals responses # @param parent [Grpc::Call] a parent call, available when calls are # made from server - # @param timeout [TimeConst] + # @param credentials [Core::CallCredentials] credentials to use when making + # the call def new_active_call(method, marshal, unmarshal, deadline: nil, parent: nil, diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb index c92a532a500..7ea2371365f 100644 --- a/src/ruby/lib/grpc/generic/rpc_server.rb +++ b/src/ruby/lib/grpc/generic/rpc_server.rb @@ -172,22 +172,18 @@ module GRPC # The RPC server is configured using keyword arguments. # # There are some specific keyword args used to configure the RpcServer - # instance, however other arbitrary are allowed and when present are used - # to configure the listeninng connection set up by the RpcServer. - # - # * poll_period: when present, the server polls for new events with this - # period + # instance. # # * pool_size: the size of the thread pool the server uses to run its # threads # - # * creds: [GRPC::Core::ServerCredentials] - # the credentials used to secure the server - # # * max_waiting_requests: the maximum number of requests that are not # being handled to allow. When this limit is exceeded, the server responds # with not available to new requests # + # * poll_period: when present, the server polls for new events with this + # period + # # * connect_md_proc: # when non-nil is a proc for determining metadata to to send back the client # on receiving an invocation req. The proc signature is: diff --git a/src/ruby/lib/grpc/version.rb b/src/ruby/lib/grpc/version.rb index 5e6aaef2eb5..6e62af94d43 100644 --- a/src/ruby/lib/grpc/version.rb +++ b/src/ruby/lib/grpc/version.rb @@ -29,5 +29,5 @@ # GRPC contains the General RPC module. module GRPC - VERSION = '0.16.0.dev' + VERSION = '1.1.0.dev' end diff --git a/src/ruby/pb/grpc/health/checker.rb b/src/ruby/pb/grpc/health/checker.rb index f7310d92894..4bce1744c48 100644 --- a/src/ruby/pb/grpc/health/checker.rb +++ b/src/ruby/pb/grpc/health/checker.rb @@ -28,7 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
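The ClientStub documentation changes above distinguish two construction paths: let the stub build its own channel from channel_args, or hand it a pre-created channel via channel_override, in which case creds and channel_args are ignored. A minimal sketch, with the host and channel argument values invented for illustration:

    require 'grpc'

    # 1) Stub-managed channel, configured through channel_args:
    stub = GRPC::ClientStub.new('localhost:50051',
                                :this_channel_is_insecure,
                                timeout: 10,
                                channel_args: { 'grpc.primary_user_agent' => 'demo' })

    # 2) Pre-created channel; creds and channel_args are ignored in this form:
    channel = GRPC::Core::Channel.new('localhost:50051', {},
                                      :this_channel_is_insecure)
    stub = GRPC::ClientStub.new('localhost:50051',
                                :this_channel_is_insecure,
                                channel_override: channel)
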
require 'grpc' -require 'grpc/health/v1/health_services' +require 'grpc/health/v1/health_services_pb' require 'thread' module Grpc diff --git a/src/ruby/pb/grpc/health/v1/health.rb b/src/ruby/pb/grpc/health/v1/health_pb.rb similarity index 100% rename from src/ruby/pb/grpc/health/v1/health.rb rename to src/ruby/pb/grpc/health/v1/health_pb.rb diff --git a/src/ruby/pb/grpc/health/v1/health_services.rb b/src/ruby/pb/grpc/health/v1/health_services_pb.rb similarity index 98% rename from src/ruby/pb/grpc/health/v1/health_services.rb rename to src/ruby/pb/grpc/health/v1/health_services_pb.rb index 68a3956f54a..8cc01e91dc5 100644 --- a/src/ruby/pb/grpc/health/v1/health_services.rb +++ b/src/ruby/pb/grpc/health/v1/health_services_pb.rb @@ -32,7 +32,7 @@ # require 'grpc' -require 'grpc/health/v1/health' +require 'grpc/health/v1/health_pb' module Grpc module Health diff --git a/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services.rb b/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb similarity index 93% rename from src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services.rb rename to src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb index eb523ffa6f0..e51c2f087a0 100644 --- a/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services.rb +++ b/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb @@ -1,5 +1,5 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! -# Source: grpc/testing/duplicate/echo_duplicate.proto for package 'grpc.testing.duplicate' +# Source: src/proto/grpc/testing/duplicate/echo_duplicate.proto for package 'grpc.testing.duplicate' # Original file comments: # Copyright 2015, Google Inc. # All rights reserved. @@ -34,7 +34,7 @@ # require 'grpc' -require 'grpc/testing/duplicate/echo_duplicate' +require 'src/proto/grpc/testing/duplicate/echo_duplicate_pb' module Grpc module Testing diff --git a/src/ruby/pb/grpc/testing/metrics.rb b/src/ruby/pb/grpc/testing/metrics_pb.rb similarity index 94% rename from src/ruby/pb/grpc/testing/metrics.rb rename to src/ruby/pb/grpc/testing/metrics_pb.rb index 3b3c8cd61bb..77b6c90970f 100644 --- a/src/ruby/pb/grpc/testing/metrics.rb +++ b/src/ruby/pb/grpc/testing/metrics_pb.rb @@ -1,5 +1,5 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! -# source: grpc/testing/metrics.proto +# source: src/proto/grpc/testing/metrics.proto require 'google/protobuf' diff --git a/src/ruby/pb/grpc/testing/metrics_services.rb b/src/ruby/pb/grpc/testing/metrics_services_pb.rb similarity index 95% rename from src/ruby/pb/grpc/testing/metrics_services.rb rename to src/ruby/pb/grpc/testing/metrics_services_pb.rb index 467b7b3ee50..e46366b1fbe 100644 --- a/src/ruby/pb/grpc/testing/metrics_services.rb +++ b/src/ruby/pb/grpc/testing/metrics_services_pb.rb @@ -1,5 +1,5 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! -# Source: grpc/testing/metrics.proto for package 'grpc.testing' +# Source: src/proto/grpc/testing/metrics.proto for package 'grpc.testing' # Original file comments: # Copyright 2015-2016, Google Inc. # All rights reserved. @@ -38,7 +38,7 @@ # service. 
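With the generated Ruby files renamed to *_pb.rb / *_services_pb.rb, the health checker above now requires health_services_pb. Combined with the RpcServer keyword arguments documented a few hunks back, server wiring might look like the following rough sketch, assuming the Checker API of this release and that grpc/health/checker is on the gem's load path:

    require 'grpc'
    require 'grpc/health/checker'   # internally requires grpc/health/v1/health_services_pb

    checker = Grpc::Health::Checker.new
    checker.add_status('', Grpc::Health::V1::HealthCheckResponse::ServingStatus::SERVING)

    server = GRPC::RpcServer.new(pool_size: 30,
                                 max_waiting_requests: 20,
                                 poll_period: 1)
    server.add_http2_port('0.0.0.0:50051', :this_port_is_insecure)
    server.handle(checker)
    # server.run_till_terminated
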
require 'grpc' -require 'grpc/testing/metrics' +require 'src/proto/grpc/testing/metrics_pb' module Grpc module Testing diff --git a/src/ruby/pb/src/proto/grpc/testing/empty.rb b/src/ruby/pb/src/proto/grpc/testing/empty_pb.rb similarity index 100% rename from src/ruby/pb/src/proto/grpc/testing/empty.rb rename to src/ruby/pb/src/proto/grpc/testing/empty_pb.rb diff --git a/src/ruby/qps/src/proto/grpc/testing/messages.rb b/src/ruby/pb/src/proto/grpc/testing/messages_pb.rb similarity index 88% rename from src/ruby/qps/src/proto/grpc/testing/messages.rb rename to src/ruby/pb/src/proto/grpc/testing/messages_pb.rb index 2bdfe0eade3..e27ccd0dc04 100644 --- a/src/ruby/qps/src/proto/grpc/testing/messages.rb +++ b/src/ruby/pb/src/proto/grpc/testing/messages_pb.rb @@ -4,6 +4,9 @@ require 'google/protobuf' Google::Protobuf::DescriptorPool.generated_pool.build do + add_message "grpc.testing.BoolValue" do + optional :value, :bool, 1 + end add_message "grpc.testing.Payload" do optional :type, :enum, 1, "grpc.testing.PayloadType" optional :body, :bytes, 2 @@ -18,8 +21,9 @@ Google::Protobuf::DescriptorPool.generated_pool.build do optional :payload, :message, 3, "grpc.testing.Payload" optional :fill_username, :bool, 4 optional :fill_oauth_scope, :bool, 5 - optional :response_compression, :enum, 6, "grpc.testing.CompressionType" + optional :response_compressed, :message, 6, "grpc.testing.BoolValue" optional :response_status, :message, 7, "grpc.testing.EchoStatus" + optional :expect_compressed, :message, 8, "grpc.testing.BoolValue" end add_message "grpc.testing.SimpleResponse" do optional :payload, :message, 1, "grpc.testing.Payload" @@ -28,6 +32,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do end add_message "grpc.testing.StreamingInputCallRequest" do optional :payload, :message, 1, "grpc.testing.Payload" + optional :expect_compressed, :message, 2, "grpc.testing.BoolValue" end add_message "grpc.testing.StreamingInputCallResponse" do optional :aggregated_payload_size, :int32, 1 @@ -35,12 +40,12 @@ Google::Protobuf::DescriptorPool.generated_pool.build do add_message "grpc.testing.ResponseParameters" do optional :size, :int32, 1 optional :interval_us, :int32, 2 + optional :compressed, :message, 3, "grpc.testing.BoolValue" end add_message "grpc.testing.StreamingOutputCallRequest" do optional :response_type, :enum, 1, "grpc.testing.PayloadType" repeated :response_parameters, :message, 2, "grpc.testing.ResponseParameters" optional :payload, :message, 3, "grpc.testing.Payload" - optional :response_compression, :enum, 6, "grpc.testing.CompressionType" optional :response_status, :message, 7, "grpc.testing.EchoStatus" end add_message "grpc.testing.StreamingOutputCallResponse" do @@ -55,18 +60,12 @@ Google::Protobuf::DescriptorPool.generated_pool.build do end add_enum "grpc.testing.PayloadType" do value :COMPRESSABLE, 0 - value :UNCOMPRESSABLE, 1 - value :RANDOM, 2 - end - add_enum "grpc.testing.CompressionType" do - value :NONE, 0 - value :GZIP, 1 - value :DEFLATE, 2 end end module Grpc module Testing + BoolValue = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.BoolValue").msgclass Payload = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Payload").msgclass EchoStatus = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.EchoStatus").msgclass SimpleRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleRequest").msgclass @@ -79,6 +78,5 @@ module Grpc ReconnectParams = 
Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectParams").msgclass ReconnectInfo = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectInfo").msgclass PayloadType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.PayloadType").enummodule - CompressionType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.CompressionType").enummodule end end diff --git a/src/ruby/pb/src/proto/grpc/testing/test.rb b/src/ruby/pb/src/proto/grpc/testing/test_pb.rb similarity index 72% rename from src/ruby/pb/src/proto/grpc/testing/test.rb rename to src/ruby/pb/src/proto/grpc/testing/test_pb.rb index 245b5ce00cd..2cc98630314 100644 --- a/src/ruby/pb/src/proto/grpc/testing/test.rb +++ b/src/ruby/pb/src/proto/grpc/testing/test_pb.rb @@ -3,8 +3,8 @@ require 'google/protobuf' -require 'src/proto/grpc/testing/empty' -require 'src/proto/grpc/testing/messages' +require 'src/proto/grpc/testing/empty_pb' +require 'src/proto/grpc/testing/messages_pb' Google::Protobuf::DescriptorPool.generated_pool.build do end diff --git a/src/ruby/pb/src/proto/grpc/testing/test_services.rb b/src/ruby/pb/src/proto/grpc/testing/test_services_pb.rb similarity index 99% rename from src/ruby/pb/src/proto/grpc/testing/test_services.rb rename to src/ruby/pb/src/proto/grpc/testing/test_services_pb.rb index 2652de5e6d2..fde328e4c5d 100644 --- a/src/ruby/pb/src/proto/grpc/testing/test_services.rb +++ b/src/ruby/pb/src/proto/grpc/testing/test_services_pb.rb @@ -35,7 +35,7 @@ # require 'grpc' -require 'src/proto/grpc/testing/test' +require 'src/proto/grpc/testing/test_pb' module Grpc module Testing diff --git a/src/ruby/pb/test/client.rb b/src/ruby/pb/test/client.rb index 066a7bb90f1..1e3ae656302 100755 --- a/src/ruby/pb/test/client.rb +++ b/src/ruby/pb/test/client.rb @@ -52,9 +52,9 @@ require_relative '../../lib/grpc' require 'googleauth' require 'google/protobuf' -require_relative 'proto/empty' -require_relative 'proto/messages' -require_relative 'proto/test_services' +require_relative '../src/proto/grpc/testing/empty_pb' +require_relative '../src/proto/grpc/testing/messages_pb' +require_relative '../src/proto/grpc/testing/test_services_pb' AUTH_ENV = Google::Auth::CredentialsLoader::ENV_VAR @@ -111,6 +111,18 @@ end # creates a test stub that accesses host:port securely. def create_stub(opts) address = "#{opts.host}:#{opts.port}" + + # Provide channel args that request compression by default + # for compression interop tests + if ['client_compressed_unary', + 'client_compressed_streaming'].include?(opts.test_case) + compression_options = + GRPC::Core::CompressionOptions.new(default_algorithm: :gzip) + compression_channel_args = compression_options.to_channel_arg_hash + else + compression_channel_args = {} + end + if opts.secure creds = ssl_creds(opts.use_test_ca) stub_opts = { @@ -145,10 +157,15 @@ def create_stub(opts) end GRPC.logger.info("... connecting securely to #{address}") + stub_opts[:channel_args].merge!(compression_channel_args) Grpc::Testing::TestService::Stub.new(address, creds, **stub_opts) else GRPC.logger.info("... connecting insecurely to #{address}") - Grpc::Testing::TestService::Stub.new(address, :this_channel_is_insecure) + Grpc::Testing::TestService::Stub.new( + address, + :this_channel_is_insecure, + channel_args: compression_channel_args + ) end end @@ -216,10 +233,28 @@ class BlockingEnumerator end end +# Intended to be used to wrap a call_op, and to adjust +# the write flag of the call_op in between messages yielded to it. 
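+# (Usage, as in the client_compressed_streaming test below: the caller assigns
+# the stub's call operation to call_op before the enumerable is consumed, so each
+# yielded request is written with the write flag recorded alongside it,
+# e.g. GRPC::Core::WriteFlags::NO_COMPRESS.)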
+class WriteFlagSettingStreamingInputEnumerable + attr_accessor :call_op + + def initialize(requests_and_write_flags) + @requests_and_write_flags = requests_and_write_flags + end + + def each + @requests_and_write_flags.each do |request_and_flag| + @call_op.write_flag = request_and_flag[:write_flag] + yield request_and_flag[:request] + end + end +end + # defines methods corresponding to each interop test case. class NamedTests include Grpc::Testing include Grpc::Testing::PayloadType + include GRPC::Core::MetadataKeys def initialize(stub, args) @stub = stub @@ -235,6 +270,48 @@ class NamedTests perform_large_unary end + def client_compressed_unary + # the first request is also used for the probe + req_size, wanted_response_size = 271_828, 314_159 + expect_compressed = BoolValue.new(value: true) + payload = Payload.new(type: :COMPRESSABLE, body: nulls(req_size)) + req = SimpleRequest.new(response_type: :COMPRESSABLE, + response_size: wanted_response_size, + payload: payload, + expect_compressed: expect_compressed) + + # send a probe to see whether compressed requests are supported on the server + send_probe_for_compressed_request_support do + request_uncompressed_args = { + COMPRESSION_REQUEST_ALGORITHM => 'identity' + } + @stub.unary_call(req, metadata: request_uncompressed_args) + end + + # make a call with a compressed message + resp = @stub.unary_call(req) + assert('Expected second unary call with compression to work') do + resp.payload.body.length == wanted_response_size + end + + # make a call with an uncompressed message + stub_options = { + COMPRESSION_REQUEST_ALGORITHM => 'identity' + } + + req = SimpleRequest.new( + response_type: :COMPRESSABLE, + response_size: wanted_response_size, + payload: payload, + expect_compressed: BoolValue.new(value: false) + ) + + resp = @stub.unary_call(req, metadata: stub_options) + assert('Expected third unary call without compression to work') do + resp.payload.body.length == wanted_response_size + end + end + def service_account_creds # ignore this test if the oauth options are not set if @args.oauth_scope.nil? 
@@ -309,6 +386,50 @@ class NamedTests end end + def client_compressed_streaming + # the first request is also used by the probe + first_request = StreamingInputCallRequest.new( + payload: Payload.new(type: :COMPRESSABLE, body: nulls(27_182)), + expect_compressed: BoolValue.new(value: true) + ) + + # send a probe to see whether compressed requests are supported on the server + send_probe_for_compressed_request_support do + request_uncompressed_args = { + COMPRESSION_REQUEST_ALGORITHM => 'identity' + } + @stub.streaming_input_call([first_request], + metadata: request_uncompressed_args) + end + + second_request = StreamingInputCallRequest.new( + payload: Payload.new(type: :COMPRESSABLE, body: nulls(45_904)), + expect_compressed: BoolValue.new(value: false) + ) + + # Create the request messages and the corresponding write flags + # for each message + requests = WriteFlagSettingStreamingInputEnumerable.new([ + { request: first_request, + write_flag: 0 }, + { request: second_request, + write_flag: GRPC::Core::WriteFlags::NO_COMPRESS } + ]) + + # Create the call_op, pass it to the requests enumerable, and + # run the call + call_op = @stub.streaming_input_call(requests, + return_op: true) + requests.call_op = call_op + resp = call_op.execute + + wanted_aggregate_size = 73_086 + + assert("#{__callee__}: aggregate payload size is incorrect") do + wanted_aggregate_size == resp.aggregated_payload_size + end + end + def server_streaming msg_sizes = [31_415, 9, 2653, 58_979] response_spec = msg_sizes.map { |s| ResponseParameters.new(size: s) } @@ -415,6 +536,29 @@ class NamedTests end resp end + + # Send a probing message to check whether the server implements + # compressed requests. + def send_probe_for_compressed_request_support(&send_probe) + bad_status_occurred = false + + begin + send_probe.call + rescue GRPC::BadStatus => e + if e.code == GRPC::Core::StatusCodes::INVALID_ARGUMENT + bad_status_occurred = true + else + fail AssertionError, "Bad status received but code is #{e.code}" + end + rescue Exception => e + fail AssertionError, "Expected BadStatus. Received: #{e.inspect}" + end + + assert('CompressedRequest probe failed') do + bad_status_occurred + end + end + end # Args is used to hold the command line info. diff --git a/src/ruby/pb/test/proto/empty.rb b/src/ruby/pb/test/proto/empty.rb deleted file mode 100644 index 559adcc85e7..00000000000 --- a/src/ruby/pb/test/proto/empty.rb +++ /dev/null @@ -1,15 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: test/proto/empty.proto - -require 'google/protobuf' - -Google::Protobuf::DescriptorPool.generated_pool.build do - add_message "grpc.testing.Empty" do - end -end - -module Grpc - module Testing - Empty = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Empty").msgclass - end -end diff --git a/src/ruby/pb/test/proto/messages.rb b/src/ruby/pb/test/proto/messages.rb deleted file mode 100644 index 5222c9824a7..00000000000 --- a/src/ruby/pb/test/proto/messages.rb +++ /dev/null @@ -1,80 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: test/proto/messages.proto - -require 'google/protobuf' - -Google::Protobuf::DescriptorPool.generated_pool.build do - add_message "grpc.testing.Payload" do - optional :type, :enum, 1, "grpc.testing.PayloadType" - optional :body, :bytes, 2 - end - add_message "grpc.testing.EchoStatus" do - optional :code, :int32, 1 - optional :message, :string, 2 - end - add_message "grpc.testing.SimpleRequest" do - optional :response_type, :enum, 1, "grpc.testing.PayloadType" - optional :response_size, :int32, 2 - optional :payload, :message, 3, "grpc.testing.Payload" - optional :fill_username, :bool, 4 - optional :fill_oauth_scope, :bool, 5 - optional :response_compression, :enum, 6, "grpc.testing.CompressionType" - optional :response_status, :message, 7, "grpc.testing.EchoStatus" - end - add_message "grpc.testing.SimpleResponse" do - optional :payload, :message, 1, "grpc.testing.Payload" - optional :username, :string, 2 - optional :oauth_scope, :string, 3 - end - add_message "grpc.testing.StreamingInputCallRequest" do - optional :payload, :message, 1, "grpc.testing.Payload" - end - add_message "grpc.testing.StreamingInputCallResponse" do - optional :aggregated_payload_size, :int32, 1 - end - add_message "grpc.testing.ResponseParameters" do - optional :size, :int32, 1 - optional :interval_us, :int32, 2 - end - add_message "grpc.testing.StreamingOutputCallRequest" do - optional :response_type, :enum, 1, "grpc.testing.PayloadType" - repeated :response_parameters, :message, 2, "grpc.testing.ResponseParameters" - optional :payload, :message, 3, "grpc.testing.Payload" - optional :response_compression, :enum, 6, "grpc.testing.CompressionType" - optional :response_status, :message, 7, "grpc.testing.EchoStatus" - end - add_message "grpc.testing.StreamingOutputCallResponse" do - optional :payload, :message, 1, "grpc.testing.Payload" - end - add_message "grpc.testing.ReconnectInfo" do - optional :passed, :bool, 1 - repeated :backoff_ms, :int32, 2 - end - add_enum "grpc.testing.PayloadType" do - value :COMPRESSABLE, 0 - value :UNCOMPRESSABLE, 1 - value :RANDOM, 2 - end - add_enum "grpc.testing.CompressionType" do - value :NONE, 0 - value :GZIP, 1 - value :DEFLATE, 2 - end -end - -module Grpc - module Testing - Payload = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Payload").msgclass - EchoStatus = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.EchoStatus").msgclass - SimpleRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleRequest").msgclass - SimpleResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleResponse").msgclass - StreamingInputCallRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingInputCallRequest").msgclass - StreamingInputCallResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingInputCallResponse").msgclass - ResponseParameters = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ResponseParameters").msgclass - StreamingOutputCallRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingOutputCallRequest").msgclass - StreamingOutputCallResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingOutputCallResponse").msgclass - ReconnectInfo = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectInfo").msgclass - PayloadType = 
Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.PayloadType").enummodule - CompressionType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.CompressionType").enummodule - end -end diff --git a/src/ruby/pb/test/proto/test.rb b/src/ruby/pb/test/proto/test.rb deleted file mode 100644 index 100eb6505c9..00000000000 --- a/src/ruby/pb/test/proto/test.rb +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: test/proto/test.proto - -require 'google/protobuf' - -require 'test/proto/empty' -require 'test/proto/messages' -Google::Protobuf::DescriptorPool.generated_pool.build do -end - -module Grpc - module Testing - end -end diff --git a/src/ruby/pb/test/proto/test_services.rb b/src/ruby/pb/test/proto/test_services.rb deleted file mode 100644 index 9df9cc5860b..00000000000 --- a/src/ruby/pb/test/proto/test_services.rb +++ /dev/null @@ -1,64 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# Source: test/proto/test.proto for package 'grpc.testing' - -require 'grpc' -require 'test/proto/test' - -module Grpc - module Testing - module TestService - - # TODO: add proto service documentation here - class Service - - include GRPC::GenericService - - self.marshal_class_method = :encode - self.unmarshal_class_method = :decode - self.service_name = 'grpc.testing.TestService' - - rpc :EmptyCall, Empty, Empty - rpc :UnaryCall, SimpleRequest, SimpleResponse - rpc :StreamingOutputCall, StreamingOutputCallRequest, stream(StreamingOutputCallResponse) - rpc :StreamingInputCall, stream(StreamingInputCallRequest), StreamingInputCallResponse - rpc :FullDuplexCall, stream(StreamingOutputCallRequest), stream(StreamingOutputCallResponse) - rpc :HalfDuplexCall, stream(StreamingOutputCallRequest), stream(StreamingOutputCallResponse) - end - - Stub = Service.rpc_stub_class - end - module UnimplementedService - - # TODO: add proto service documentation here - class Service - - include GRPC::GenericService - - self.marshal_class_method = :encode - self.unmarshal_class_method = :decode - self.service_name = 'grpc.testing.UnimplementedService' - - rpc :UnimplementedCall, Empty, Empty - end - - Stub = Service.rpc_stub_class - end - module ReconnectService - - # TODO: add proto service documentation here - class Service - - include GRPC::GenericService - - self.marshal_class_method = :encode - self.unmarshal_class_method = :decode - self.service_name = 'grpc.testing.ReconnectService' - - rpc :Start, Empty, Empty - rpc :Stop, Empty, ReconnectInfo - end - - Stub = Service.rpc_stub_class - end - end -end diff --git a/src/ruby/pb/test/server.rb b/src/ruby/pb/test/server.rb index 088f281dc47..0808121661c 100755 --- a/src/ruby/pb/test/server.rb +++ b/src/ruby/pb/test/server.rb @@ -50,9 +50,9 @@ require 'optparse' require 'grpc' -require 'test/proto/empty' -require 'test/proto/messages' -require 'test/proto/test_services' +require_relative '../src/proto/grpc/testing/empty_pb' +require_relative '../src/proto/grpc/testing/messages_pb' +require_relative '../src/proto/grpc/testing/test_services_pb' # DebugIsTruncated extends the default Logger to truncate debug messages class DebugIsTruncated < Logger diff --git a/src/ruby/qps/client.rb b/src/ruby/qps/client.rb index 917b01271e8..7ed648acefc 100644 --- a/src/ruby/qps/client.rb +++ b/src/ruby/qps/client.rb @@ -38,7 +38,7 @@ $LOAD_PATH.unshift(this_dir) unless $LOAD_PATH.include?(this_dir) require 'grpc' require 'histogram' -require 'src/proto/grpc/testing/services_services' +require 
'src/proto/grpc/testing/services_services_pb' class Poisson def interarrival diff --git a/src/ruby/qps/server.rb b/src/ruby/qps/server.rb index 52a89ce847c..cd98ee1fd94 100644 --- a/src/ruby/qps/server.rb +++ b/src/ruby/qps/server.rb @@ -38,9 +38,9 @@ $LOAD_PATH.unshift(this_dir) unless $LOAD_PATH.include?(this_dir) require 'grpc' require 'qps-common' -require 'src/proto/grpc/testing/messages' -require 'src/proto/grpc/testing/services_services' -require 'src/proto/grpc/testing/stats' +require 'src/proto/grpc/testing/messages_pb' +require 'src/proto/grpc/testing/services_services_pb' +require 'src/proto/grpc/testing/stats_pb' class BenchmarkServiceImpl < Grpc::Testing::BenchmarkService::Service def unary_call(req, _call) diff --git a/src/ruby/qps/src/proto/grpc/testing/control.rb b/src/ruby/qps/src/proto/grpc/testing/control_pb.rb similarity index 97% rename from src/ruby/qps/src/proto/grpc/testing/control.rb rename to src/ruby/qps/src/proto/grpc/testing/control_pb.rb index 958fca320bc..02207a2b5d8 100644 --- a/src/ruby/qps/src/proto/grpc/testing/control.rb +++ b/src/ruby/qps/src/proto/grpc/testing/control_pb.rb @@ -3,8 +3,8 @@ require 'google/protobuf' -require 'src/proto/grpc/testing/payloads' -require 'src/proto/grpc/testing/stats' +require 'src/proto/grpc/testing/payloads_pb' +require 'src/proto/grpc/testing/stats_pb' Google::Protobuf::DescriptorPool.generated_pool.build do add_message "grpc.testing.PoissonParams" do optional :offered_load, :double, 1 @@ -109,6 +109,8 @@ Google::Protobuf::DescriptorPool.generated_pool.build do repeated :server_stats, :message, 4, "grpc.testing.ServerStats" repeated :server_cores, :int32, 5 optional :summary, :message, 6, "grpc.testing.ScenarioResultSummary" + repeated :client_success, :bool, 7 + repeated :server_success, :bool, 8 end add_enum "grpc.testing.ClientType" do value :SYNC_CLIENT, 0 diff --git a/src/ruby/pb/src/proto/grpc/testing/messages.rb b/src/ruby/qps/src/proto/grpc/testing/messages_pb.rb similarity index 88% rename from src/ruby/pb/src/proto/grpc/testing/messages.rb rename to src/ruby/qps/src/proto/grpc/testing/messages_pb.rb index 2bdfe0eade3..e27ccd0dc04 100644 --- a/src/ruby/pb/src/proto/grpc/testing/messages.rb +++ b/src/ruby/qps/src/proto/grpc/testing/messages_pb.rb @@ -4,6 +4,9 @@ require 'google/protobuf' Google::Protobuf::DescriptorPool.generated_pool.build do + add_message "grpc.testing.BoolValue" do + optional :value, :bool, 1 + end add_message "grpc.testing.Payload" do optional :type, :enum, 1, "grpc.testing.PayloadType" optional :body, :bytes, 2 @@ -18,8 +21,9 @@ Google::Protobuf::DescriptorPool.generated_pool.build do optional :payload, :message, 3, "grpc.testing.Payload" optional :fill_username, :bool, 4 optional :fill_oauth_scope, :bool, 5 - optional :response_compression, :enum, 6, "grpc.testing.CompressionType" + optional :response_compressed, :message, 6, "grpc.testing.BoolValue" optional :response_status, :message, 7, "grpc.testing.EchoStatus" + optional :expect_compressed, :message, 8, "grpc.testing.BoolValue" end add_message "grpc.testing.SimpleResponse" do optional :payload, :message, 1, "grpc.testing.Payload" @@ -28,6 +32,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do end add_message "grpc.testing.StreamingInputCallRequest" do optional :payload, :message, 1, "grpc.testing.Payload" + optional :expect_compressed, :message, 2, "grpc.testing.BoolValue" end add_message "grpc.testing.StreamingInputCallResponse" do optional :aggregated_payload_size, :int32, 1 @@ -35,12 +40,12 @@ 
Google::Protobuf::DescriptorPool.generated_pool.build do add_message "grpc.testing.ResponseParameters" do optional :size, :int32, 1 optional :interval_us, :int32, 2 + optional :compressed, :message, 3, "grpc.testing.BoolValue" end add_message "grpc.testing.StreamingOutputCallRequest" do optional :response_type, :enum, 1, "grpc.testing.PayloadType" repeated :response_parameters, :message, 2, "grpc.testing.ResponseParameters" optional :payload, :message, 3, "grpc.testing.Payload" - optional :response_compression, :enum, 6, "grpc.testing.CompressionType" optional :response_status, :message, 7, "grpc.testing.EchoStatus" end add_message "grpc.testing.StreamingOutputCallResponse" do @@ -55,18 +60,12 @@ Google::Protobuf::DescriptorPool.generated_pool.build do end add_enum "grpc.testing.PayloadType" do value :COMPRESSABLE, 0 - value :UNCOMPRESSABLE, 1 - value :RANDOM, 2 - end - add_enum "grpc.testing.CompressionType" do - value :NONE, 0 - value :GZIP, 1 - value :DEFLATE, 2 end end module Grpc module Testing + BoolValue = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.BoolValue").msgclass Payload = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Payload").msgclass EchoStatus = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.EchoStatus").msgclass SimpleRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleRequest").msgclass @@ -79,6 +78,5 @@ module Grpc ReconnectParams = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectParams").msgclass ReconnectInfo = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectInfo").msgclass PayloadType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.PayloadType").enummodule - CompressionType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.CompressionType").enummodule end end diff --git a/src/ruby/qps/src/proto/grpc/testing/payloads.rb b/src/ruby/qps/src/proto/grpc/testing/payloads_pb.rb similarity index 100% rename from src/ruby/qps/src/proto/grpc/testing/payloads.rb rename to src/ruby/qps/src/proto/grpc/testing/payloads_pb.rb diff --git a/src/ruby/qps/src/proto/grpc/testing/services.rb b/src/ruby/qps/src/proto/grpc/testing/services_pb.rb similarity index 72% rename from src/ruby/qps/src/proto/grpc/testing/services.rb rename to src/ruby/qps/src/proto/grpc/testing/services_pb.rb index b2675c2afe5..5ce13bf8b04 100644 --- a/src/ruby/qps/src/proto/grpc/testing/services.rb +++ b/src/ruby/qps/src/proto/grpc/testing/services_pb.rb @@ -3,8 +3,8 @@ require 'google/protobuf' -require 'src/proto/grpc/testing/messages' -require 'src/proto/grpc/testing/control' +require 'src/proto/grpc/testing/messages_pb' +require 'src/proto/grpc/testing/control_pb' Google::Protobuf::DescriptorPool.generated_pool.build do end diff --git a/src/ruby/qps/src/proto/grpc/testing/services_services.rb b/src/ruby/qps/src/proto/grpc/testing/services_services_pb.rb similarity index 98% rename from src/ruby/qps/src/proto/grpc/testing/services_services.rb rename to src/ruby/qps/src/proto/grpc/testing/services_services_pb.rb index 94b9a1e164e..bdbb9c86d00 100644 --- a/src/ruby/qps/src/proto/grpc/testing/services_services.rb +++ b/src/ruby/qps/src/proto/grpc/testing/services_services_pb.rb @@ -34,7 +34,7 @@ # of unary/streaming requests/responses. 
require 'grpc' -require 'src/proto/grpc/testing/services' +require 'src/proto/grpc/testing/services_pb' module Grpc module Testing diff --git a/src/ruby/qps/src/proto/grpc/testing/stats.rb b/src/ruby/qps/src/proto/grpc/testing/stats_pb.rb similarity index 100% rename from src/ruby/qps/src/proto/grpc/testing/stats.rb rename to src/ruby/qps/src/proto/grpc/testing/stats_pb.rb diff --git a/src/ruby/qps/worker.rb b/src/ruby/qps/worker.rb index 665fb863526..12b8087ca05 100755 --- a/src/ruby/qps/worker.rb +++ b/src/ruby/qps/worker.rb @@ -44,7 +44,7 @@ require 'facter' require 'client' require 'qps-common' require 'server' -require 'src/proto/grpc/testing/services_services' +require 'src/proto/grpc/testing/services_services_pb' class WorkerServiceImpl < Grpc::Testing::WorkerService::Service def cpu_cores diff --git a/src/ruby/spec/compression_options_spec.rb b/src/ruby/spec/compression_options_spec.rb new file mode 100644 index 00000000000..dbd7e592947 --- /dev/null +++ b/src/ruby/spec/compression_options_spec.rb @@ -0,0 +1,164 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +require 'grpc' + +describe GRPC::Core::CompressionOptions do + # Note these constants should be updated + # according to what the core lib provides. 
+ + # Names of supported compression algorithms + ALGORITHMS = [:identity, :deflate, :gzip] + + # Names of valid supported compression levels + COMPRESS_LEVELS = [:none, :low, :medium, :high] + + it 'implements to_s' do + expect { GRPC::Core::CompressionOptions.new.to_s }.to_not raise_error + end + + it '#to_channel_arg_hash gives the same result as #to_hash' do + options = GRPC::Core::CompressionOptions.new + expect(options.to_channel_arg_hash).to eq(options.to_hash) + end + + # Test the normal call sequence of creating an instance + # and then obtaining the resulting channel-arg hash that + # corresponds to the compression settings of the instance + describe 'creating, reading, and converting to channel args hash' do + it 'works when no optional args were provided' do + options = GRPC::Core::CompressionOptions.new + + ALGORITHMS.each do |algorithm| + expect(options.algorithm_enabled?(algorithm)).to be true + end + + expect(options.disabled_algorithms).to be_empty + expect(options.default_algorithm).to be nil + expect(options.default_level).to be nil + expect(options.to_hash).to be_instance_of(Hash) + end + + it 'works when disabling multiple algorithms' do + options = GRPC::Core::CompressionOptions.new( + default_algorithm: :identity, + default_level: :none, + disabled_algorithms: [:gzip, :deflate] + ) + + [:gzip, :deflate].each do |algorithm| + expect(options.algorithm_enabled?(algorithm)).to be false + expect(options.disabled_algorithms.include?(algorithm)).to be true + end + + expect(options.default_algorithm).to be(:identity) + expect(options.default_level).to be(:none) + expect(options.to_hash).to be_instance_of(Hash) + end + + it 'works when all optional args have been set' do + options = GRPC::Core::CompressionOptions.new( + default_algorithm: :gzip, + default_level: :low, + disabled_algorithms: [:deflate] + ) + + expect(options.algorithm_enabled?(:deflate)).to be false + expect(options.algorithm_enabled?(:gzip)).to be true + expect(options.disabled_algorithms).to eq([:deflate]) + + expect(options.default_algorithm).to be(:gzip) + expect(options.default_level).to be(:low) + expect(options.to_hash).to be_instance_of(Hash) + end + + it 'doesnt fail when no algorithms are disabled' do + options = GRPC::Core::CompressionOptions.new( + default_algorithm: :identity, + default_level: :high + ) + + ALGORITHMS.each do |algorithm| + expect(options.algorithm_enabled?(algorithm)).to be(true) + end + + expect(options.disabled_algorithms).to be_empty + expect(options.default_algorithm).to be(:identity) + expect(options.default_level).to be(:high) + expect(options.to_hash).to be_instance_of(Hash) + end + end + + describe '#new with bad parameters' do + it 'should fail with more than one parameter' do + blk = proc { GRPC::Core::CompressionOptions.new(:gzip, :none) } + expect { blk.call }.to raise_error + end + + it 'should fail with a non-hash parameter' do + blk = proc { GRPC::Core::CompressionOptions.new(:gzip) } + expect { blk.call }.to raise_error + end + end + + describe '#default_algorithm' do + it 'returns nil if unset' do + options = GRPC::Core::CompressionOptions.new + expect(options.default_algorithm).to be(nil) + end + end + + describe '#default_level' do + it 'returns nil if unset' do + options = GRPC::Core::CompressionOptions.new + expect(options.default_level).to be(nil) + end + end + + describe '#disabled_algorithms' do + it 'returns an empty list if no algorithms were disabled' do + options = GRPC::Core::CompressionOptions.new + expect(options.disabled_algorithms).to be_empty + 
end + end + + describe '#algorithm_enabled?' do + [:none, :any, 'gzip', Object.new, 1].each do |name| + it "should fail for parameter #{name} of class #{name.class}" do + options = GRPC::Core::CompressionOptions.new( + disabled_algorithms: [:gzip]) + + blk = proc do + options.algorithm_enabled?(name) + end + expect { blk.call }.to raise_error + end + end + end +end diff --git a/src/ruby/spec/pb/duplicate/codegen_spec.rb b/src/ruby/spec/pb/duplicate/codegen_spec.rb index 54c136c5102..ea0240965ca 100644 --- a/src/ruby/spec/pb/duplicate/codegen_spec.rb +++ b/src/ruby/spec/pb/duplicate/codegen_spec.rb @@ -44,7 +44,7 @@ describe 'Ping protobuf code generation' do # Get the current content service_path = File.join(root_dir, 'src', 'ruby', 'pb', 'grpc', 'testing', 'duplicate', - 'echo_duplicate_services.rb') + 'echo_duplicate_services_pb.rb') want = nil File.open(service_path) { |f| want = f.read } @@ -54,7 +54,7 @@ describe 'Ping protobuf code generation' do got = nil Dir.mktmpdir do |tmp_dir| gen_out = File.join(tmp_dir, 'src', 'proto', 'grpc', 'testing', - 'duplicate', 'echo_duplicate_services.rb') + 'duplicate', 'echo_duplicate_services_pb.rb') pid = spawn( 'protoc', '-I.', diff --git a/src/ruby/spec/pb/health/checker_spec.rb b/src/ruby/spec/pb/health/checker_spec.rb index de11c9fedf7..1b2fa968271 100644 --- a/src/ruby/spec/pb/health/checker_spec.rb +++ b/src/ruby/spec/pb/health/checker_spec.rb @@ -28,7 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. require 'grpc' -require 'grpc/health/v1/health' +require 'grpc/health/v1/health_pb' require 'grpc/health/checker' require 'open3' require 'tmpdir' @@ -43,7 +43,7 @@ describe 'Health protobuf code generation' do skip 'protoc || grpc_ruby_plugin missing, cannot verify health code-gen' else it 'should already be loaded indirectly i.e, used by the other specs' do - expect(require('grpc/health/v1/health_services')).to be(false) + expect(require('grpc/health/v1/health_services_pb')).to be(false) end it 'should have the same content as created by code generation' do @@ -52,7 +52,7 @@ describe 'Health protobuf code generation' do # Get the current content service_path = File.join(root_dir, 'ruby', 'pb', 'grpc', - 'health', 'v1', 'health_services.rb') + 'health', 'v1', 'health_services_pb.rb') want = nil File.open(service_path) { |f| want = f.read } @@ -62,7 +62,7 @@ describe 'Health protobuf code generation' do got = nil Dir.mktmpdir do |tmp_dir| gen_out = File.join(tmp_dir, 'grpc', 'health', 'v1', - 'health_services.rb') + 'health_services_pb.rb') pid = spawn( 'protoc', '-I.', diff --git a/src/ruby/stress/metrics_server.rb b/src/ruby/stress/metrics_server.rb index 13638c4d211..2b7f78577d9 100644 --- a/src/ruby/stress/metrics_server.rb +++ b/src/ruby/stress/metrics_server.rb @@ -27,8 +27,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-require_relative '../pb/grpc/testing/metrics.rb' -require_relative '../pb/grpc/testing/metrics_services.rb' +require_relative '../pb/grpc/testing/metrics_pb.rb' +require_relative '../pb/grpc/testing/metrics_services_pb.rb' class Gauge def get_name diff --git a/src/ruby/tools/bin/grpc_tools_ruby_protoc.rb b/src/ruby/tools/bin/grpc_tools_ruby_protoc similarity index 79% rename from src/ruby/tools/bin/grpc_tools_ruby_protoc.rb rename to src/ruby/tools/bin/grpc_tools_ruby_protoc index 3a2a5b8dc96..dab06e7958d 100755 --- a/src/ruby/tools/bin/grpc_tools_ruby_protoc.rb +++ b/src/ruby/tools/bin/grpc_tools_ruby_protoc @@ -32,10 +32,17 @@ require 'rbconfig' require_relative '../os_check' -protoc_name = 'protoc' + RbConfig::CONFIG['EXEEXT'] +ext = RbConfig::CONFIG['EXEEXT'] -protoc_path = File.join(File.dirname(__FILE__), - RbConfig::CONFIG['host_cpu'] + '-' + OS.os_name, - protoc_name) +protoc_name = 'protoc' + ext -exec([ protoc_path, protoc_path ], *ARGV) +plugin_name = 'grpc_ruby_plugin' + ext + +protoc_dir = File.join(File.dirname(__FILE__), + RbConfig::CONFIG['host_cpu'] + '-' + OS.os_name) + +protoc_path = File.join(protoc_dir, protoc_name) + +plugin_path = File.join(protoc_dir, plugin_name) + +exec([ protoc_path, protoc_path ], "--plugin=protoc-gen-grpc=#{plugin_path}", *ARGV) diff --git a/src/ruby/tools/bin/grpc_tools_ruby_protoc_plugin.rb b/src/ruby/tools/bin/grpc_tools_ruby_protoc_plugin similarity index 100% rename from src/ruby/tools/bin/grpc_tools_ruby_protoc_plugin.rb rename to src/ruby/tools/bin/grpc_tools_ruby_protoc_plugin diff --git a/src/ruby/tools/grpc-tools.gemspec b/src/ruby/tools/grpc-tools.gemspec index 9fa4b66392d..68e2a7a1133 100644 --- a/src/ruby/tools/grpc-tools.gemspec +++ b/src/ruby/tools/grpc-tools.gemspec @@ -18,5 +18,5 @@ Gem::Specification.new do |s| s.platform = Gem::Platform::RUBY - s.executables = %w( grpc_tools_ruby_protoc.rb grpc_tools_ruby_protoc_plugin.rb ) + s.executables = %w( grpc_tools_ruby_protoc grpc_tools_ruby_protoc_plugin ) end diff --git a/src/ruby/tools/version.rb b/src/ruby/tools/version.rb index 68c1bf369d2..e457ec09ddb 100644 --- a/src/ruby/tools/version.rb +++ b/src/ruby/tools/version.rb @@ -29,6 +29,6 @@ module GRPC module Tools - VERSION = '0.16.0.dev' + VERSION = '1.1.0.dev' end end diff --git a/templates/CMakeLists.txt.template b/templates/CMakeLists.txt.template index 52e8b866be1..4e4223493b9 100644 --- a/templates/CMakeLists.txt.template +++ b/templates/CMakeLists.txt.template @@ -42,14 +42,16 @@ <%! 
def get_deps(target_dict): deps = [] + if target_dict.get('baselib', False): + deps.append("${_gRPC_BASELIB_LIBRARIES}") if target_dict.get('build', None) in ['protoc']: - deps.append("libprotoc") + deps.append("${_gRPC_PROTOBUF_PROTOC_LIBRARIES}") if target_dict.get('secure', False): - deps = ["ssl"] + deps.append("${_gRPC_SSL_LIBRARIES}") if target_dict['name'] in ['grpc++', 'grpc++_unsecure', 'grpc++_codegen_lib']: - deps.append("libprotobuf") + deps.append("${_gRPC_PROTOBUF_LIBRARIES}") elif target_dict['name'] in ['grpc']: - deps.append("zlibstatic") + deps.append("${_gRPC_ZLIB_LIBRARIES}") for d in target_dict.get('deps', []): deps.append(d) return deps @@ -64,39 +66,127 @@ set(PACKAGE_BUGREPORT "https://github.com/grpc/grpc/issues/") project(<%text>${PACKAGE_NAME} C CXX) - if(NOT BORINGSSL_ROOT_DIR) - set(BORINGSSL_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}/third_party/boringssl) + set(gRPC_ZLIB_PROVIDER "module" CACHE STRING "Provider of zlib library") + set_property(CACHE gRPC_ZLIB_PROVIDER PROPERTY STRINGS "module" "package") + + set(gRPC_SSL_PROVIDER "module" CACHE STRING "Provider of ssl library") + set_property(CACHE gRPC_SSL_PROVIDER PROPERTY STRINGS "module" "package") + + set(gRPC_PROTOBUF_PROVIDER "module" CACHE STRING "Provider of protobuf library") + set_property(CACHE gRPC_PROTOBUF_PROVIDER PROPERTY STRINGS "module" "package") + + set(gRPC_USE_PROTO_LITE OFF CACHE BOOL "Use the protobuf-lite library") + + if (gRPC_USE_PROTO_LITE) + set(_gRPC_PROTOBUF_LIBRARY_NAME "libprotobuf-lite") + add_definitions("-DGRPC_USE_PROTO_LITE") + else() + set(_gRPC_PROTOBUF_LIBRARY_NAME "libprotobuf") + endif() + + if("<%text>${gRPC_ZLIB_PROVIDER}" STREQUAL "module") + if(NOT ZLIB_ROOT_DIR) + set(ZLIB_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib) + endif() + set(ZLIB_INCLUDE_DIR "<%text>${ZLIB_ROOT_DIR}") + if(EXISTS "<%text>${ZLIB_ROOT_DIR}/CMakeLists.txt") + add_subdirectory(<%text>${ZLIB_ROOT_DIR} third_party/zlib) + if(TARGET zlibstatic) + set(_gRPC_ZLIB_LIBRARIES zlibstatic) + endif() + else() + message(WARNING "gRPC_ZLIB_PROVIDER is \"module\" but ZLIB_ROOT_DIR is wrong") + endif() + elseif("<%text>${gRPC_ZLIB_PROVIDER}" STREQUAL "package") + find_package(ZLIB) + if(TARGET ZLIB::ZLIB) + set(_gRPC_ZLIB_LIBRARIES ZLIB::ZLIB) + endif() + set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n find_package(ZLIB)\nendif()") endif() - if(NOT PROTOBUF_ROOT_DIR) - set(PROTOBUF_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf) + + if("<%text>${gRPC_PROTOBUF_PROVIDER}" STREQUAL "module") + # Building the protobuf tests require gmock what is not part of a standard protobuf checkout. + # Disable them unless they are explicitly requested from the cmake command line (when we assume + # gmock is downloaded to the right location inside protobuf). 
+ if(NOT protobuf_BUILD_TESTS) + set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") + endif() + if(NOT PROTOBUF_ROOT_DIR) + set(PROTOBUF_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf) + endif() + if(EXISTS "<%text>${PROTOBUF_ROOT_DIR}/cmake/CMakeLists.txt") + add_subdirectory(<%text>${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf) + if(TARGET <%text>${_gRPC_PROTOBUF_LIBRARY_NAME}) + set(_gRPC_PROTOBUF_LIBRARIES <%text>${_gRPC_PROTOBUF_LIBRARY_NAME}) + endif() + if(TARGET libprotoc) + set(_gRPC_PROTOBUF_PROTOC_LIBRARIES libprotoc) + endif() + else() + message(WARNING "gRPC_PROTOBUF_PROVIDER is \"module\" but PROTOBUF_ROOT_DIR is wrong") + endif() + elseif("<%text>${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package") + find_package(protobuf CONFIG) + if(protobuf_FOUND) + if(TARGET protobuf::<%text>${_gRPC_PROTOBUF_LIBRARY_NAME}) + set(_gRPC_PROTOBUF_LIBRARIES protobuf::<%text>${_gRPC_PROTOBUF_LIBRARY_NAME}) + endif() + if(TARGET protobuf::libprotoc) + set(_gRPC_PROTOBUF_PROTOC_LIBRARIES protobuf::libprotoc) + endif() + set(_gRPC_FIND_PROTOBUF "if(NOT protobuf_FOUND)\n find_package(protobuf CONFIG)\nendif()") + else() + find_package(Protobuf MODULE) + set(_gRPC_FIND_PROTOBUF "if(NOT Protobuf_FOUND)\n find_package(Protobuf)\nendif()") + endif() endif() - if(NOT ZLIB_ROOT_DIR) - set(ZLIB_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib) + + if("<%text>${gRPC_SSL_PROVIDER}" STREQUAL "module") + if(NOT BORINGSSL_ROOT_DIR) + set(BORINGSSL_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}/third_party/boringssl) + endif() + if(EXISTS "<%text>${BORINGSSL_ROOT_DIR}/CMakeLists.txt") + add_subdirectory(<%text>${BORINGSSL_ROOT_DIR} third_party/boringssl) + if(TARGET ssl) + set(_gRPC_SSL_LIBRARIES ssl) + endif() + else() + message(WARNING "gRPC_SSL_PROVIDER is \"module\" but BORINGSSL_ROOT_DIR is wrong") + endif() + elseif("<%text>${gRPC_SSL_PROVIDER}" STREQUAL "package") + find_package(OpenSSL) + if(TARGET OpenSSL::SSL) + set(_gRPC_SSL_LIBRARIES OpenSSL::SSL) + endif() + set(_gRPC_FIND_SSL "if(NOT OpenSSL_FOUND)\n find_package(OpenSSL)\nendif()") endif() - # Building the protobuf tests require gmock what is not part of a standard protobuf checkout. - # Disable them unless they are explicitly requested from the cmake command line (when we assume - # gmock is downloaded to the right location inside protobuf). 
- if(NOT protobuf_BUILD_TESTS) - set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") + if(NOT MSVC) + set(CMAKE_C_FLAGS "<%text>${CMAKE_C_FLAGS} -std=c11") + set(CMAKE_CXX_FLAGS "<%text>${CMAKE_CXX_FLAGS} -std=c++11") endif() - add_subdirectory(<%text>${BORINGSSL_ROOT_DIR} third_party/boringssl) - add_subdirectory(<%text>${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf) - add_subdirectory(<%text>${ZLIB_ROOT_DIR} third_party/zlib) + if(WIN32 AND MSVC) + set(_gRPC_BASELIB_LIBRARIES wsock32 ws2_32) + endif() - set(CMAKE_C_FLAGS "<%text>${CMAKE_C_FLAGS} -std=c11") - set(CMAKE_CXX_FLAGS "<%text>${CMAKE_CXX_FLAGS} -std=c++11") + include(GNUInstallDirs) + if(NOT DEFINED CMAKE_INSTALL_CMAKEDIR) + set(CMAKE_INSTALL_CMAKEDIR "<%text>${CMAKE_INSTALL_LIBDIR}/cmake/gRPC") + endif() % for lib in libs: % if lib.build in ["all", "protoc", "tool"]: ${cc_library(lib)} + ${cc_install(lib)} % endif % endfor % for tgt in targets: % if tgt.build in ["all", "protoc", "tool"]: ${cc_binary(tgt)} + ${cc_install(tgt)} % endif % endfor @@ -112,7 +202,7 @@ PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE <%text>${BORINGSSL_ROOT_DIR}/include PRIVATE <%text>${PROTOBUF_ROOT_DIR}/src - PRIVATE <%text>${ZLIB_ROOT_DIR} + PRIVATE <%text>${ZLIB_INCLUDE_DIR} PRIVATE <%text>${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib ) @@ -123,6 +213,20 @@ % endfor ) % endif + + % if len(lib.get('public_headers', [])) > 0: + foreach(_hdr + % for hdr in lib.get('public_headers', []): + ${hdr} + % endfor + ) + string(REPLACE "include/" "" _path <%text>${_hdr}) + get_filename_component(_path <%text>${_path} PATH) + install(FILES <%text>${_hdr} + DESTINATION "<%text>${CMAKE_INSTALL_INCLUDEDIR}/${_path}" + ) + endforeach() + % endif <%def name="cc_binary(tgt)"> @@ -150,3 +254,23 @@ % endif + <%def name="cc_install(tgt)"> + install(TARGETS ${tgt.name} EXPORT gRPCTargets + RUNTIME DESTINATION <%text>${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION <%text>${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION <%text>${CMAKE_INSTALL_LIBDIR} + ) + + + install(EXPORT gRPCTargets + DESTINATION <%text>${CMAKE_INSTALL_CMAKEDIR} + NAMESPACE gRPC:: + ) + + foreach(_config gRPCConfig gRPCConfigVersion) + configure_file(tools/cmake/<%text>${_config}.cmake.in + <%text>${_config}.cmake @ONLY) + install(FILES <%text>${CMAKE_CURRENT_BINARY_DIR}/${_config}.cmake + DESTINATION <%text>${CMAKE_INSTALL_CMAKEDIR} + ) + endforeach() diff --git a/templates/Makefile.template b/templates/Makefile.template index 0cbd8bfdd55..9afc6566e2f 100644 --- a/templates/Makefile.template +++ b/templates/Makefile.template @@ -282,6 +282,29 @@ LDFLAGS += -pthread endif + # + # The steps for cross-compiling are as follows: + # First, clone and make install of grpc using the native compilers for the host. 
+ # Also, install protoc (e.g., from a package like apt-get) + # Then clone a fresh grpc for the actual cross-compiled build + # Set the environment variable GRPC_CROSS_COMPILE to true + # Set CC, CXX, LD, LDXX, AR, and STRIP to the cross-compiling binaries + # Also set PROTOBUF_CONFIG_OPTS to indicate cross-compilation to protobuf (e.g., + # PROTOBUF_CONFIG_OPTS="--host=arm-linux --with-protoc=/usr/local/bin/protoc" ) + # Set HAS_PKG_CONFIG=false + # To build tests, go to third_party/gflags and follow its ccmake instructions + # Make sure that you enable building shared libraries and set your prefix to + # something useful like /usr/local/cross + # You will also need to set GRPC_CROSS_LDOPTS and GRPC_CROSS_AROPTS to hold + # additional required arguments for LD and AR (examples below) + # Then you can do a make from the cross-compiling fresh clone! + # + ifeq ($(GRPC_CROSS_COMPILE),true) + LDFLAGS += $(GRPC_CROSS_LDOPTS) # e.g. -L/usr/local/lib -L/usr/local/cross/lib + AROPTS = $(GRPC_CROSS_AROPTS) # e.g., rc --target=elf32-little + USE_BUILT_PROTOC = false + endif + GTEST_LIB = -Ithird_party/googletest/include -Ithird_party/googletest third_party/googletest/src/gtest-all.cc GTEST_LIB += -lgflags ifeq ($(V),1) @@ -597,6 +620,15 @@ CPPFLAGS := -Ithird_party/googletest/include $(CPPFLAGS) + PROTOC_PLUGINS_ALL =\ + % for tgt in targets: + % if tgt.build == 'protoc': + $(BINDIR)/$(CONFIG)/${tgt.name}\ + % endif + % endfor + + PROTOC_PLUGINS_DIR = $(BINDIR)/$(CONFIG) + ifeq ($(HAS_SYSTEM_PROTOBUF),true) ifeq ($(HAS_PKG_CONFIG),true) PROTOBUF_PKG_CONFIG = true @@ -611,12 +643,19 @@ else PC_LIBS_GRPCXX = -lprotobuf endif + PROTOC_PLUGINS = $(PROTOC_PLUGINS_ALL) else ifeq ($(HAS_EMBEDDED_PROTOBUF),true) PROTOBUF_DEP = $(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a CPPFLAGS := -Ithird_party/protobuf/src $(CPPFLAGS) LDFLAGS := -L$(LIBDIR)/$(CONFIG)/protobuf $(LDFLAGS) + ifneq ($(USE_BUILT_PROTOC),false) PROTOC = $(BINDIR)/$(CONFIG)/protobuf/protoc + PROTOC_PLUGINS = $(PROTOC_PLUGINS_ALL) + else + PROTOC_PLUGINS = + PROTOC_PLUGINS_DIR = $(prefix)/bin + endif else NO_PROTOBUF = true endif @@ -664,13 +703,6 @@ .SECONDARY = %.pb.h %.pb.cc - PROTOC_PLUGINS =\ - % for tgt in targets: - % if tgt.build == 'protoc': - $(BINDIR)/$(CONFIG)/${tgt.name}\ - % endif - % endfor - ifeq ($(DEP_MISSING),) all: static shared plugins\ % for tgt in targets: @@ -792,7 +824,7 @@ $(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a: third_party/protobuf/configure $(E) "[MAKE] Building protobuf" - $(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static) + $(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static $(PROTOBUF_CONFIG_OPTS)) $(Q)$(MAKE) -C third_party/protobuf clean $(Q)$(MAKE) -C third_party/protobuf $(Q)mkdir -p $(LIBDIR)/$(CONFIG)/protobuf @@ -1124,7 +1156,7 @@ $(GENDIR)/${p}.grpc.pb.cc: ${p}.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) ${' '.join('$(GENDIR)/%s.pb.cc $(GENDIR)/%s.grpc.pb.cc' % (q,q) for q in proto_deps.get(p, []))} $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` - $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. 
--grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(BINDIR)/$(CONFIG)/grpc_cpp_plugin $< + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin $< endif % endfor @@ -1416,7 +1448,7 @@ $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` $(Q) rm -f $(LIBDIR)/$(CONFIG)/lib${lib.name}.a - $(Q) $(AR) $(LIBDIR)/$(CONFIG)/lib${lib.name}.a $(LIB${lib.name.upper()}_OBJS) \ + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/lib${lib.name}.a $(LIB${lib.name.upper()}_OBJS) \ % if lib.get('baselib', False): $(LIBGPR_OBJS) \ $(ZLIB_MERGE_OBJS) \ diff --git a/templates/composer.json.template b/templates/composer.json.template index c9ffbbcbd01..48d3b8892e2 100644 --- a/templates/composer.json.template +++ b/templates/composer.json.template @@ -7,15 +7,9 @@ "keywords": ["rpc"], "homepage": "http://grpc.io", "license": "BSD-3-Clause", - "repositories": [ - { - "type": "vcs", - "url": "https://github.com/stanley-cheung/Protobuf-PHP" - } - ], "require": { "php": ">=5.5.0", - "datto/protobuf-php": "dev-master" + "stanley-cheung/protobuf-php": "v0.6" }, "require-dev": { "google/auth": "v0.9" diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template index aefe6e965cc..12d06a7c8d9 100644 --- a/templates/gRPC-Core.podspec.template +++ b/templates/gRPC-Core.podspec.template @@ -62,7 +62,7 @@ %> Pod::Spec.new do |s| s.name = 'gRPC-Core' - version = '0.14.0' + version = '1.0.0-pre1' s.version = version s.summary = 'Core cross-platform gRPC library, written in C' s.homepage = 'http://www.grpc.io' @@ -71,7 +71,7 @@ s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "release-#{version.gsub(/\./, '_')}-objectivec-#{version}", + :tag => "objective-c-v#{version}", # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules. :submodules => true, } @@ -128,6 +128,8 @@ 'ALWAYS_SEARCH_USER_PATHS' => 'NO', } + s.default_subspecs = 'Interface', 'Implementation' + # Like many other C libraries, gRPC-Core has its public headers under `include//` and its # sources and private headers in other directories outside `include/`. Cocoapods' linter doesn't # allow any header to be listed outside the `header_mappings_dir` (even though doing so works in @@ -147,11 +149,33 @@ ss.header_mappings_dir = '.' ss.libraries = 'z' ss.dependency "#{s.name}/Interface", version - ss.dependency 'BoringSSL', '~> 4.0' + ss.dependency 'BoringSSL', '~> 5.0' # To save you from scrolling, this is the last part of the podspec. ss.source_files = ${ruby_multiline_list(grpc_private_files(libs), 22)} ss.private_header_files = ${ruby_multiline_list(grpc_private_headers(libs), 30)} end + + s.subspec 'Cronet-Interface' do |ss| + ss.header_mappings_dir = 'include/grpc' + ss.source_files = 'include/grpc/grpc_cronet.h' + end + + s.subspec 'Cronet-Tests' do |ss| + ss.header_mappings_dir = '.' 
+ + ss.source_files = 'src/core/ext/transport/cronet/client/secure/cronet_channel_create.c', + 'src/core/ext/transport/cronet/transport/cronet_transport.c', + 'test/core/end2end/cq_verifier.{c,h}', + 'test/core/end2end/end2end_tests.{c,h}', + 'test/core/end2end/tests/*.{c,h}', + 'test/core/end2end/data/*.{c,h}', + 'test/core/util/test_config.{c,h}', + 'test/core/util/port.h', + 'test/core/util/port_posix.c', + 'test/core/util/port_server_client.{c,h}' + + ss.dependency 'CronetFramework' + end end diff --git a/templates/package.xml.template b/templates/package.xml.template index d8155cdd823..87b10389598 100644 --- a/templates/package.xml.template +++ b/templates/package.xml.template @@ -12,19 +12,19 @@ grpc-packages@google.com yes - 2016-06-30 + 2016-07-28 ${settings.php_version.php()} ${settings.php_version.php()} - beta - beta + stable + stable BSD - - Fix shutdown hang problem #4017 + - PHP7 Support continued, reduce code duplication #7543 @@ -205,18 +205,49 @@ - 0.15.1 - 0.15.1 + 1.0.0RC1 + 1.0.0RC1 - beta - beta + stable + stable - 2016-06-30 + 2016-07-13 BSD + - GA release - Fix shutdown hang problem #4017 + + + 1.0.0RC2 + 1.0.0RC2 + + + stable + stable + + 2016-07-21 + BSD + + - PHP7 Support #7464 + + + + + 1.0.0RC3 + 1.0.0RC3 + + + stable + stable + + 2016-07-28 + BSD + + - PHP7 Support continued, reduce code duplication #7543 + + diff --git a/templates/src/csharp/Grpc.Core.Tests/project.json.template b/templates/src/csharp/Grpc.Core.Tests/project.json.template index bc9fa3e63a9..d1ab9316f61 100644 --- a/templates/src/csharp/Grpc.Core.Tests/project.json.template +++ b/templates/src/csharp/Grpc.Core.Tests/project.json.template @@ -8,7 +8,10 @@ }, "Newtonsoft.Json": "8.0.3", "NUnit": "3.2.0", - "NUnitLite": "3.2.0-*" + "NUnitLite": "3.2.0-*", + "NUnit.ConsoleRunner": "3.2.0", + "OpenCover": "4.6.519", + "ReportGenerator": "2.4.4.0" }, "frameworks": { "net45": { }, diff --git a/templates/src/csharp/Grpc.Core/project.json.template b/templates/src/csharp/Grpc.Core/project.json.template index cdcebc5303d..6355db53892 100644 --- a/templates/src/csharp/Grpc.Core/project.json.template +++ b/templates/src/csharp/Grpc.Core/project.json.template @@ -16,12 +16,12 @@ "files": { "mappings": { "build/net45/": "Grpc.Core.targets", - "build/native/bin/windows_x86/": "../nativelibs/windows_x86/grpc_csharp_ext.dll", - "build/native/bin/windows_x64/": "../nativelibs/windows_x64/grpc_csharp_ext.dll", - "build/native/bin/linux_x86/": "../nativelibs/linux_x86/libgrpc_csharp_ext.so", - "build/native/bin/linux_x64/": "../nativelibs/linux_x64/libgrpc_csharp_ext.so", - "build/native/bin/macosx_x86/": "../nativelibs/macosx_x86/libgrpc_csharp_ext.dylib", - "build/native/bin/macosx_x64/": "../nativelibs/macosx_x64/libgrpc_csharp_ext.dylib" + "runtimes/win/native/grpc_csharp_ext.x86.dll": "../nativelibs/windows_x86/grpc_csharp_ext.dll", + "runtimes/win/native/grpc_csharp_ext.x64.dll": "../nativelibs/windows_x64/grpc_csharp_ext.dll", + "runtimes/linux/native/libgrpc_csharp_ext.x86.so": "../nativelibs/linux_x86/libgrpc_csharp_ext.so", + "runtimes/linux/native/libgrpc_csharp_ext.x64.so": "../nativelibs/linux_x64/libgrpc_csharp_ext.so", + "runtimes/osx/native/libgrpc_csharp_ext.x86.dylib": "../nativelibs/macosx_x86/libgrpc_csharp_ext.dylib", + "runtimes/osx/native/libgrpc_csharp_ext.x64.dylib": "../nativelibs/macosx_x64/libgrpc_csharp_ext.dylib" } } }, diff --git a/templates/src/csharp/build_options.include b/templates/src/csharp/build_options.include index ae96b94f728..169a45a808a 100644 --- 
a/templates/src/csharp/build_options.include +++ b/templates/src/csharp/build_options.include @@ -20,10 +20,10 @@ "include": "data/*", % endif "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/dbg/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Debug/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Debug/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/dbg/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/dbg/libgrpc_csharp_ext.dylib" } } } @@ -42,10 +42,10 @@ "include": "data/*", % endif "mappings": { - "nativelibs/windows_x64/grpc_csharp_ext.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", - "nativelibs/windows_x86/grpc_csharp_ext.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", - "nativelibs/linux_x64/libgrpc_csharp_ext.so": "../../../libs/opt/libgrpc_csharp_ext.so", - "nativelibs/macosx_x64/libgrpc_csharp_ext.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" + "grpc_csharp_ext.x64.dll": "../../../vsprojects/x64/Release/grpc_csharp_ext.dll", + "grpc_csharp_ext.x86.dll": "../../../vsprojects/Release/grpc_csharp_ext.dll", + "libgrpc_csharp_ext.x64.so": "../../../libs/opt/libgrpc_csharp_ext.so", + "libgrpc_csharp_ext.x64.dylib": "../../../libs/opt/libgrpc_csharp_ext.dylib" } } } diff --git a/templates/src/node/health_check/package.json.template b/templates/src/node/health_check/package.json.template index 1248ced1e16..96b9748aa74 100644 --- a/templates/src/node/health_check/package.json.template +++ b/templates/src/node/health_check/package.json.template @@ -21,11 +21,11 @@ "lodash": "^3.9.3", "google-protobuf": "^3.0.0-alpha.5" }, - "files": { + "files": [ "LICENSE", "health.js", "v1" - }, + ], "main": "src/node/index.js", "license": "BSD-3-Clause" } diff --git a/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template b/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template deleted file mode 100644 index d83bccad1db..00000000000 --- a/templates/src/python/grpcio/grpc/_cython/imports.generated.c.template +++ /dev/null @@ -1,41 +0,0 @@ -%YAML 1.2 ---- | - /* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - - /* TODO(atash) remove cruft */ - #include - - #include "imports.generated.h" - - diff --git a/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template b/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template deleted file mode 100644 index b85bc3dbd8b..00000000000 --- a/templates/src/python/grpcio/grpc/_cython/imports.generated.h.template +++ /dev/null @@ -1,52 +0,0 @@ -%YAML 1.2 ---- | - /* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - - /* TODO(atash) remove cruft */ - #ifndef PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ - #define PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ - - #include - - #include - #include - #include - #include - #include - #include - #include - #include - #include - - #endif diff --git a/templates/src/python/grpcio_health_checking/grpc_version.py.template b/templates/src/python/grpcio_health_checking/grpc_version.py.template new file mode 100644 index 00000000000..98946e95d3d --- /dev/null +++ b/templates/src/python/grpcio_health_checking/grpc_version.py.template @@ -0,0 +1,34 @@ +%YAML 1.2 +--- | + # Copyright 2016, Google Inc. + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions are + # met: + # + # * Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. 
+ # * Redistributions in binary form must reproduce the above + # copyright notice, this list of conditions and the following disclaimer + # in the documentation and/or other materials provided with the + # distribution. + # * Neither the name of Google Inc. nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_health_checking/grpc_version.py.template`!!! + + VERSION='${settings.python_version.pep440()}' diff --git a/templates/tools/dockerfile/apt_get_pyenv.include b/templates/tools/dockerfile/apt_get_pyenv.include new file mode 100644 index 00000000000..70e90289b70 --- /dev/null +++ b/templates/tools/dockerfile/apt_get_pyenv.include @@ -0,0 +1,18 @@ +# Install dependencies for pyenv +RUN apt-get update && apt-get install -y ${'\\'} + libbz2-dev ${'\\'} + libncurses5-dev ${'\\'} + libncursesw5-dev ${'\\'} + libreadline-dev ${'\\'} + libsqlite3-dev ${'\\'} + libssl-dev ${'\\'} + llvm ${'\\'} + mercurial ${'\\'} + zlib1g-dev && apt-get clean + +# Install Pyenv and dev Python versions 3.5 and 3.6 +RUN curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash +RUN pyenv update +RUN pyenv install 3.5-dev +RUN pyenv install 3.6-dev +RUN pyenv local 3.5-dev 3.6-dev diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template index 4cb8d3b088f..da0c70aee0c 100644 --- a/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template +++ b/templates/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../csharp_deps.include"/> <%include file="../../run_tests_addons.include"/> # Define the default command. diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template index e39175a1ea8..aba0a0497e9 100644 --- a/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template +++ b/templates/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../run_tests_addons.include"/> # Define the default command. 
diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template index 542c81d614c..38a5ca725da 100644 --- a/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template +++ b/templates/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile.template @@ -32,6 +32,7 @@ FROM golang:1.5 <%include file="../../go_path.include"/> + <%include file="../../python_deps.include"/> # Define the default command. CMD ["bash"] diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template index 542c81d614c..38a5ca725da 100644 --- a/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template +++ b/templates/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile.template @@ -32,6 +32,7 @@ FROM golang:1.5 <%include file="../../go_path.include"/> + <%include file="../../python_deps.include"/> # Define the default command. CMD ["bash"] diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template index c286e80826c..3d5e7ee1204 100644 --- a/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template +++ b/templates/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile.template @@ -32,7 +32,8 @@ FROM debian:jessie <%include file="../../java_deps.include"/> - + <%include file="../../python_deps.include"/> + # Trigger download of as many Gradle artifacts as possible. RUN git clone --recursive --depth 1 https://github.com/grpc/grpc-java.git && ${'\\'} cd grpc-java && ${'\\'} diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template index 89bb9acc1af..01a03073a6f 100644 --- a/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template +++ b/templates/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../node_deps.include"/> <%include file="../../run_tests_addons.include"/> # Define the default command. diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template index 476f9d3d3eb..f37eadad745 100644 --- a/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template +++ b/templates/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile.template @@ -1,6 +1,6 @@ %YAML 1.2 --- | - # Copyright 2015, Google Inc. + # Copyright 2016, Google Inc. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -35,30 +35,5 @@ <%include file="../../ruby_deps.include"/> <%include file="../../php_deps.include"/> <%include file="../../run_tests_addons.include"/> - # ronn: a ruby tool used to convert markdown to man pages, used during the - # install of Protobuf extensions - # - # rake: a ruby version of make used to build the PHP Protobuf extension - RUN /bin/bash -l -c "rvm all do gem install ronn rake" - - # Install composer - RUN curl -sS https://getcomposer.org/installer | php - RUN mv composer.phar /usr/local/bin/composer - - # As an attempt to work around #4212, try to prefetch Protobuf-PHP dependency - # into composer cache to prevent "composer install" from cloning on each build. - RUN git clone --mirror https://github.com/stanley-cheung/Protobuf-PHP.git ${'\\'} - /root/.composer/cache/vcs/git-github.com-stanley-cheung-Protobuf-PHP.git/ - - # Download the patched PHP protobuf so that PHP gRPC clients can be generated - # from proto3 schemas. - RUN git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php - - RUN /bin/bash -l -c "rvm use ruby-2.1 ${'\\'} - && cd /var/local/git/protobuf-php ${'\\'} - && rvm all do rake pear:package version=1.0 ${'\\'} - && pear install Protobuf-1.0.tgz" - - # Define the default command. - CMD ["bash"] + <%include file="../../php_common_deps.include"/> diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_php7/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_php7/Dockerfile.template new file mode 100644 index 00000000000..42157ee062c --- /dev/null +++ b/templates/tools/dockerfile/interoptest/grpc_interop_php7/Dockerfile.template @@ -0,0 +1,37 @@ +%YAML 1.2 +--- | + # Copyright 2016, Google Inc. + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions are + # met: + # + # * Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. + # * Redistributions in binary form must reproduce the above + # copyright notice, this list of conditions and the following disclaimer + # in the documentation and/or other materials provided with the + # distribution. + # * Neither the name of Google Inc. nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + FROM debian:jessie + + <%include file="../../php7_deps.include"/> + <%include file="../../ruby_deps.include"/> + <%include file="../../run_tests_addons.include"/> + <%include file="../../php_common_deps.include"/> diff --git a/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template b/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template index c3625b91fcc..fbd82423917 100644 --- a/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template +++ b/templates/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../ruby_deps.include"/> <%include file="../../run_tests_addons.include"/> # Define the default command. diff --git a/templates/tools/dockerfile/php7_deps.include b/templates/tools/dockerfile/php7_deps.include new file mode 100644 index 00000000000..a24e5064428 --- /dev/null +++ b/templates/tools/dockerfile/php7_deps.include @@ -0,0 +1,45 @@ +#================= +# PHP7 dependencies + +# Install Git and basic packages. +RUN apt-get update && apt-get install -y ${'\\'} + autoconf ${'\\'} + automake ${'\\'} + build-essential ${'\\'} + ccache ${'\\'} + curl ${'\\'} + git ${'\\'} + libcurl4-openssl-dev ${'\\'} + libgmp-dev ${'\\'} + libgmp3-dev ${'\\'} + libssl-dev ${'\\'} + libtool ${'\\'} + libxml2-dev ${'\\'} + pkg-config ${'\\'} + re2c ${'\\'} + time ${'\\'} + unzip ${'\\'} + wget ${'\\'} + zip && apt-get clean + +# Install other dependencies +RUN ln -sf /usr/include/x86_64-linux-gnu/gmp.h /usr/include/gmp.h +RUN wget http://ftp.gnu.org/gnu/bison/bison-2.6.4.tar.gz -O /var/local/bison-2.6.4.tar.gz +RUN cd /var/local ${'\\'} + && tar -zxvf bison-2.6.4.tar.gz ${'\\'} + && cd /var/local/bison-2.6.4 ${'\\'} + && ./configure ${'\\'} + && make ${'\\'} + && make install + +# Compile PHP7 from source +RUN git clone https://github.com/php/php-src /var/local/git/php-src +RUN cd /var/local/git/php-src ${'\\'} + && git checkout PHP-7.0.9 ${'\\'} + && ./buildconf --force ${'\\'} + && ./configure ${'\\'} + --with-gmp ${'\\'} + --with-openssl ${'\\'} + --with-zlib ${'\\'} + && make ${'\\'} + && make install diff --git a/templates/tools/dockerfile/php_common_deps.include b/templates/tools/dockerfile/php_common_deps.include new file mode 100644 index 00000000000..8839bb51554 --- /dev/null +++ b/templates/tools/dockerfile/php_common_deps.include @@ -0,0 +1,21 @@ +# ronn: a ruby tool used to convert markdown to man pages, used during the +# install of Protobuf extensions +# +# rake: a ruby version of make used to build the PHP Protobuf extension +RUN /bin/bash -l -c "rvm all do gem install ronn rake" + +# Install composer +RUN curl -sS https://getcomposer.org/installer | php +RUN mv composer.phar /usr/local/bin/composer + +# Download the patched PHP protobuf so that PHP gRPC clients can be generated +# from proto3 schemas. +RUN git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php + +RUN /bin/bash -l -c "rvm use ruby-2.1 ${'\\'} + && cd /var/local/git/protobuf-php ${'\\'} + && rvm all do rake pear:package version=1.0 ${'\\'} + && pear install Protobuf-1.0.tgz" + +# Define the default command. 
+CMD ["bash"] diff --git a/templates/tools/dockerfile/php_deps.include b/templates/tools/dockerfile/php_deps.include index 739049b5ea0..da071e23ba4 100644 --- a/templates/tools/dockerfile/php_deps.include +++ b/templates/tools/dockerfile/php_deps.include @@ -3,11 +3,5 @@ # Install dependencies -RUN /bin/bash -l -c "echo 'deb http://packages.dotdeb.org wheezy-php55 all' ${'\\'} - >> /etc/apt/sources.list.d/dotdeb.list" -RUN /bin/bash -l -c "echo 'deb-src http://packages.dotdeb.org wheezy-php55 all' ${'\\'} - >> /etc/apt/sources.list.d/dotdeb.list" -RUN wget http://www.dotdeb.org/dotdeb.gpg -O- | apt-key add - - RUN apt-get update && apt-get install -y ${'\\'} git php5 php5-dev phpunit unzip diff --git a/templates/tools/dockerfile/python_deps.include b/templates/tools/dockerfile/python_deps.include index 31623640482..26c91f495d5 100644 --- a/templates/tools/dockerfile/python_deps.include +++ b/templates/tools/dockerfile/python_deps.include @@ -11,4 +11,4 @@ RUN apt-get update && apt-get install -y ${'\\'} # Install Python packages from PyPI RUN pip install pip --upgrade RUN pip install virtualenv -RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template index 074178252d0..5d805bb4b22 100644 --- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template +++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../ccache_setup.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../gcp_api_libraries.include"/> diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template index ac087c5da74..18f06b770c3 100644 --- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template +++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../ccache_setup.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../gcp_api_libraries.include"/> diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template index 3ed3d6556f9..e02254cd535 100644 --- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template +++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile.template @@ -32,6 +32,7 @@ FROM golang:1.5 <%include file="../../gcp_api_libraries.include"/> + <%include file="../../python_deps.include"/> <%include file="../../go_path.include"/> # Define the default command. 
CMD ["bash"] diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template index 17ed99fd2e4..2bb2f9ba1e6 100644 --- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template +++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../ccache_setup.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../gcp_api_libraries.include"/> diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template index c50d38d1eca..d70b751b141 100644 --- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template +++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../node_deps.include"/> <%include file="../../gcp_api_libraries.include"/> <%include file="../../run_tests_addons.include"/> diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template index 4cd069da34e..13a250eb31c 100644 --- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template +++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../ruby_deps.include"/> <%include file="../../gcp_api_libraries.include"/> <%include file="../../php_deps.include"/> @@ -46,11 +47,6 @@ RUN curl -sS https://getcomposer.org/installer | php RUN mv composer.phar /usr/local/bin/composer - # As an attempt to work around #4212, try to prefetch Protobuf-PHP dependency - # into composer cache to prevent "composer install" from cloning on each build. - RUN git clone --mirror https://github.com/stanley-cheung/Protobuf-PHP.git ${'\\'} - /root/.composer/cache/vcs/git-github.com-stanley-cheung-Protobuf-PHP.git/ - # Download the patched PHP protobuf so that PHP gRPC clients can be generated # from proto3 schemas. 
RUN git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template index 8b933aaa324..18199771d7e 100644 --- a/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template +++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../ccache_setup.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../gcp_api_libraries.include"/> diff --git a/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template b/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template index e9cab570194..24dad488077 100644 --- a/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../csharp_deps.include"/> # Install dotnet SDK based on https://www.microsoft.com/net/core#debian diff --git a/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template index 4cb8d3b088f..da0c70aee0c 100644 --- a/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/csharp_jessie_x64/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../csharp_deps.include"/> <%include file="../../run_tests_addons.include"/> # Define the default command. diff --git a/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template index 04abf9f741e..04767248b8b 100644 --- a/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../clang_update.include"/> <%include file="../../run_tests_addons.include"/> diff --git a/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template b/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template index 7f43e759fc0..49fbea0f454 100644 --- a/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template +++ b/templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template @@ -32,8 +32,8 @@ FROM 32bit/debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../run_tests_addons.include"/> # Define the default command. 
CMD ["bash"] - \ No newline at end of file diff --git a/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template index 4950a82d2d7..8a95cad6492 100644 --- a/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template @@ -32,6 +32,7 @@ FROM ubuntu:14.04 <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../run_tests_addons_nocache.include"/> # Define the default command. diff --git a/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template index e39537975bf..42ad6c130da 100644 --- a/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile.template @@ -32,6 +32,7 @@ FROM ubuntu:16.04 <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../run_tests_addons.include"/> @@ -42,4 +43,3 @@ # Define the default command. CMD ["bash"] - \ No newline at end of file diff --git a/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template b/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template index e77b3d9e414..b6a3b0d5d21 100644 --- a/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:wheezy <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../cxx_deps.include"/> RUN apt-get update && apt-get install -y ${'\\'} diff --git a/templates/tools/dockerfile/test/fuzzer/Dockerfile.template b/templates/tools/dockerfile/test/fuzzer/Dockerfile.template index 33df2759084..6d7cb72f274 100644 --- a/templates/tools/dockerfile/test/fuzzer/Dockerfile.template +++ b/templates/tools/dockerfile/test/fuzzer/Dockerfile.template @@ -32,6 +32,7 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../clang_update.include"/> <%include file="../../run_tests_addons.include"/> diff --git a/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template index 5a6233343e9..72b098f0c20 100644 --- a/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/node_jessie_x64/Dockerfile.template @@ -32,8 +32,8 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../node_deps.include"/> <%include file="../../run_tests_addons.include"/> # Define the default command. CMD ["bash"] - \ No newline at end of file diff --git a/templates/tools/dockerfile/test/php7_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/php7_jessie_x64/Dockerfile.template new file mode 100644 index 00000000000..e6a213d90d3 --- /dev/null +++ b/templates/tools/dockerfile/test/php7_jessie_x64/Dockerfile.template @@ -0,0 +1,38 @@ +%YAML 1.2 +--- | + # Copyright 2016, Google Inc. + # All rights reserved. 
+ # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions are + # met: + # + # * Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. + # * Redistributions in binary form must reproduce the above + # copyright notice, this list of conditions and the following disclaimer + # in the documentation and/or other materials provided with the + # distribution. + # * Neither the name of Google Inc. nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + FROM debian:jessie + + <%include file="../../php7_deps.include"/> + <%include file="../../python_deps.include"/> + <%include file="../../run_tests_addons.include"/> + # Define the default command. + CMD ["bash"] diff --git a/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template index fffac89efcd..0cfa373c905 100644 --- a/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/php_jessie_x64/Dockerfile.template @@ -32,8 +32,8 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../php_deps.include"/> <%include file="../../run_tests_addons.include"/> # Define the default command. CMD ["bash"] - \ No newline at end of file diff --git a/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template b/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template new file mode 100644 index 00000000000..f9a4dcb7b6c --- /dev/null +++ b/templates/tools/dockerfile/test/python_pyenv_x64/Dockerfile.template @@ -0,0 +1,39 @@ +%YAML 1.2 +--- | + # Copyright 2016, Google Inc. + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions are + # met: + # + # * Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. + # * Redistributions in binary form must reproduce the above + # copyright notice, this list of conditions and the following disclaimer + # in the documentation and/or other materials provided with the + # distribution. + # * Neither the name of Google Inc. nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. 
+ # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + FROM debian:jessie + + <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> + <%include file="../../apt_get_pyenv.include"/> + <%include file="../../run_tests_addons.include"/> + # Define the default command. + CMD ["bash"] diff --git a/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template index 70baddffbf9..35838bc11e4 100644 --- a/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/ruby_jessie_x64/Dockerfile.template @@ -32,8 +32,8 @@ FROM debian:jessie <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> <%include file="../../ruby_deps.include"/> <%include file="../../run_tests_addons.include"/> # Define the default command. CMD ["bash"] - \ No newline at end of file diff --git a/templates/tools/dockerfile/test/sanity/Dockerfile.template b/templates/tools/dockerfile/test/sanity/Dockerfile.template index 9987e352608..12309b64d16 100644 --- a/templates/tools/dockerfile/test/sanity/Dockerfile.template +++ b/templates/tools/dockerfile/test/sanity/Dockerfile.template @@ -32,6 +32,7 @@ FROM ubuntu:15.10 <%include file="../../apt_get_basic.include"/> + <%include file="../../python_deps.include"/> #======================== # Sanity test dependencies RUN apt-get update && apt-get install -y ${"\\"} diff --git a/test/core/end2end/bad_server_response_test.c b/test/core/end2end/bad_server_response_test.c index cca75f54a5f..ab80adf0e06 100644 --- a/test/core/end2end/bad_server_response_test.c +++ b/test/core/end2end/bad_server_response_test.c @@ -71,6 +71,8 @@ #define UNPARSEABLE_DETAIL_MSG "Failed parsing HTTP/2" +#define HTTP1_DETAIL_MSG "Trying to connect an http1.x server" + /* TODO(zyc) Check the content of incomming data instead of using this length */ #define EXPECTED_INCOMING_DATA_LENGTH (size_t)310 @@ -334,7 +336,7 @@ int main(int argc, char **argv) { /* http1 response */ run_test(HTTP1_RESP, sizeof(HTTP1_RESP) - 1, GRPC_STATUS_UNAVAILABLE, - UNPARSEABLE_DETAIL_MSG); + HTTP1_DETAIL_MSG); return 0; } diff --git a/test/core/end2end/dualstack_socket_test.c b/test/core/end2end/dualstack_socket_test.c index 65a8deb663d..348b9ed5f02 100644 --- a/test/core/end2end/dualstack_socket_test.c +++ b/test/core/end2end/dualstack_socket_test.c @@ -273,7 +273,7 @@ void test_connect(const char *server_host, const char *client_host, int port, } int external_dns_works(const char *host) { - grpc_resolved_addresses *res; + grpc_resolved_addresses *res = NULL; grpc_error *error = grpc_blocking_resolve_address(host, "80", &res); GRPC_ERROR_UNREF(error); if (res != NULL) { diff --git 
a/test/core/end2end/tests/high_initial_seqno.c b/test/core/end2end/tests/high_initial_seqno.c index 50e3c9cb898..db45f5eb5ad 100644 --- a/test/core/end2end/tests/high_initial_seqno.c +++ b/test/core/end2end/tests/high_initial_seqno.c @@ -203,6 +203,12 @@ static void simple_request_body(grpc_end2end_test_fixture f) { grpc_call_destroy(c); grpc_call_destroy(s); + /* TODO(ctiller): this rate limits the test, and it should be removed when + retry has been implemented; until then cross-thread chatter + may result in some requests needing to be cancelled due to + seqno exhaustion. */ + cq_verify_empty(cqv); + cq_verifier_destroy(cqv); } diff --git a/test/core/end2end/tests/network_status_change.c b/test/core/end2end/tests/network_status_change.c index 10207844ab0..39ddc137543 100644 --- a/test/core/end2end/tests/network_status_change.c +++ b/test/core/end2end/tests/network_status_change.c @@ -186,9 +186,10 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) { GPR_ASSERT(GRPC_CALL_OK == error); cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); + // Simulate the network loss event grpc_network_status_shutdown_all_endpoints(); - cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; @@ -205,7 +206,7 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) { op++; error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL); GPR_ASSERT(GRPC_CALL_OK == error); - void shutdown_all_endpoints(); + cq_expect_completion(cqv, tag(103), 1); cq_expect_completion(cqv, tag(1), 1); cq_verify(cqv); diff --git a/test/core/internal_api_canaries/iomgr.c b/test/core/internal_api_canaries/iomgr.c index 5e86c423095..27d630623ed 100644 --- a/test/core/internal_api_canaries/iomgr.c +++ b/test/core/internal_api_canaries/iomgr.c @@ -77,11 +77,14 @@ static void test_code(void) { /* endpoint.h */ grpc_endpoint endpoint; - grpc_endpoint_vtable vtable = { - grpc_endpoint_read, grpc_endpoint_write, - grpc_endpoint_add_to_pollset, grpc_endpoint_add_to_pollset_set, - grpc_endpoint_shutdown, grpc_endpoint_destroy, - grpc_endpoint_get_peer}; + grpc_endpoint_vtable vtable = {grpc_endpoint_read, + grpc_endpoint_write, + grpc_endpoint_get_workqueue, + grpc_endpoint_add_to_pollset, + grpc_endpoint_add_to_pollset_set, + grpc_endpoint_shutdown, + grpc_endpoint_destroy, + grpc_endpoint_get_peer}; endpoint.vtable = &vtable; grpc_endpoint_read(&exec_ctx, &endpoint, NULL, NULL); diff --git a/test/core/iomgr/udp_server_test.c b/test/core/iomgr/udp_server_test.c index 3152fb7a464..a959a7e07fa 100644 --- a/test/core/iomgr/udp_server_test.c +++ b/test/core/iomgr/udp_server_test.c @@ -70,7 +70,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, grpc_fd *emfd, g_number_of_reads++; g_number_of_bytes_read += (int)byte_count; - grpc_pollset_kick(g_pollset, NULL); + GPR_ASSERT( + GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, NULL))); gpr_mu_unlock(g_mu); } @@ -179,8 +180,10 @@ static void test_receive(int number_of_clients) { while (g_number_of_reads == number_of_reads_before && gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) { grpc_pollset_worker *worker = NULL; - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(GPR_CLOCK_MONOTONIC), deadline); + GPR_ASSERT(GRPC_LOG_IF_ERROR( + "pollset_work", + grpc_pollset_work(&exec_ctx, g_pollset, &worker, + gpr_now(GPR_CLOCK_MONOTONIC), deadline))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); @@ -199,7 +202,8 @@ static void test_receive(int number_of_clients) { 
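Editorial note on the test hunks around this point: the iomgr entry points exercised here (grpc_pollset_kick, grpc_pollset_work) now report failures through a grpc_error * instead of returning nothing, and callbacks such as destroy_pollset receive a grpc_error * rather than a bool success flag; the tests therefore wrap each call in GRPC_LOG_IF_ERROR inside GPR_ASSERT. The sketch below reproduces only the shape of that call-site discipline with stand-in types and names of my own; none of them are real gRPC symbols, and the actual macros live in the gRPC error headers.

// Sketch only: a minimal stand-in for the "return an error, log it at the
// call site, then assert" pattern used by the updated tests. No real gRPC
// symbols are used here.
#include <cassert>
#include <cstdio>
#include <string>

// An empty message means "ok", mirroring the idea of GRPC_ERROR_NONE.
struct Error {
  std::string message;
  bool ok() const { return message.empty(); }
};

const Error kNoError{};

// Analogue of GRPC_LOG_IF_ERROR: log a failure together with the name of the
// operation, and collapse the error into a bool so callers can assert on it.
bool LogIfError(const char* what, const Error& error) {
  if (error.ok()) return true;
  std::fprintf(stderr, "%s failed: %s\n", what, error.message.c_str());
  return false;
}

// A fake "kick" operation that reports success or failure through an Error
// value instead of returning void, the way the pollset APIs do after this
// change.
Error PollsetKick(bool simulate_failure) {
  return simulate_failure ? Error{"no pollers to kick"} : kNoError;
}

int main() {
  // The call site stays one line long: perform the operation, log on
  // failure, and abort the test (assert plays the role of GPR_ASSERT here)
  // if it did not succeed.
  assert(LogIfError("pollset_kick", PollsetKick(/*simulate_failure=*/false)));
  return 0;
}

The payoff of this pattern is that a failing operation is both logged under the operation's name and turned into a test abort in a single line, instead of being silently discarded.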
GPR_ASSERT(g_number_of_orphan_calls == 1); } -static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool success) { +static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, + grpc_error *error) { grpc_pollset_destroy(p); } diff --git a/test/core/iomgr/workqueue_test.c b/test/core/iomgr/workqueue_test.c deleted file mode 100644 index 76ecfae74b8..00000000000 --- a/test/core/iomgr/workqueue_test.c +++ /dev/null @@ -1,150 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -#include "src/core/lib/iomgr/workqueue.h" - -#include -#include -#include - -#include "test/core/util/test_config.h" - -static gpr_mu *g_mu; -static grpc_pollset *g_pollset; - -static void must_succeed(grpc_exec_ctx *exec_ctx, void *p, grpc_error *error) { - GPR_ASSERT(error == GRPC_ERROR_NONE); - gpr_mu_lock(g_mu); - *(int *)p = 1; - GPR_ASSERT( - GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, NULL))); - gpr_mu_unlock(g_mu); -} - -static void test_ref_unref(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_workqueue *wq; - GPR_ASSERT(GRPC_LOG_IF_ERROR("grpc_workqueue_create", - grpc_workqueue_create(&exec_ctx, &wq))); - GRPC_WORKQUEUE_REF(wq, "test"); - GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "test"); - GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "destroy"); - grpc_exec_ctx_finish(&exec_ctx); -} - -static void test_add_closure(void) { - grpc_closure c; - int done = 0; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_workqueue *wq; - GPR_ASSERT(GRPC_LOG_IF_ERROR("grpc_workqueue_create", - grpc_workqueue_create(&exec_ctx, &wq))); - gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5); - grpc_pollset_worker *worker = NULL; - grpc_closure_init(&c, must_succeed, &done); - - grpc_workqueue_enqueue(&exec_ctx, wq, &c, GRPC_ERROR_NONE); - grpc_workqueue_add_to_pollset(&exec_ctx, wq, g_pollset); - - gpr_mu_lock(g_mu); - GPR_ASSERT(!done); - while (!done) { - GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(deadline.clock_type), deadline))); - } - gpr_mu_unlock(g_mu); - grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); - - GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "destroy"); - grpc_exec_ctx_finish(&exec_ctx); -} - -static void test_flush(void) { - grpc_closure c; - int done = 0; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_workqueue *wq; - GPR_ASSERT(GRPC_LOG_IF_ERROR("grpc_workqueue_create", - grpc_workqueue_create(&exec_ctx, &wq))); - gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5); - grpc_pollset_worker *worker = NULL; - grpc_closure_init(&c, must_succeed, &done); - - grpc_exec_ctx_sched(&exec_ctx, &c, GRPC_ERROR_NONE, NULL); - grpc_workqueue_flush(&exec_ctx, wq); - grpc_workqueue_add_to_pollset(&exec_ctx, wq, g_pollset); - - gpr_mu_lock(g_mu); - GPR_ASSERT(!done); - while (!done) { - GPR_ASSERT(GRPC_LOG_IF_ERROR( - "pollset_work", - grpc_pollset_work(&exec_ctx, g_pollset, &worker, - gpr_now(deadline.clock_type), deadline))); - } - gpr_mu_unlock(g_mu); - grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(done); - - GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "destroy"); - grpc_exec_ctx_finish(&exec_ctx); -} - -static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, - grpc_error *error) { - grpc_pollset_destroy(p); -} - -int main(int argc, char **argv) { - grpc_closure destroyed; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_test_init(argc, argv); - grpc_init(); - g_pollset = gpr_malloc(grpc_pollset_size()); - grpc_pollset_init(g_pollset, &g_mu); - - test_ref_unref(); - test_add_closure(); - test_flush(); - - grpc_closure_init(&destroyed, destroy_pollset, g_pollset); - grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); - grpc_exec_ctx_finish(&exec_ctx); - grpc_shutdown(); - - gpr_free(g_pollset); - return 0; -} diff --git a/test/core/support/slice_test.c b/test/core/support/slice_test.c index 0da483a3216..06c364b3680 100644 --- a/test/core/support/slice_test.c +++ b/test/core/support/slice_test.c @@ -85,6 +85,27 @@ static void test_slice_new_returns_something_sensible(void) 
{ gpr_slice_unref(slice); } +/* destroy function that sets a mark to indicate it was called. */ +static void set_mark(void *p) { *((int *)p) = 1; } + +static void test_slice_new_with_user_data(void) { + int marker = 0; + uint8_t buf[2]; + gpr_slice slice; + + buf[0] = 0; + buf[1] = 1; + slice = gpr_slice_new_with_user_data(buf, 2, set_mark, &marker); + GPR_ASSERT(marker == 0); + GPR_ASSERT(GPR_SLICE_LENGTH(slice) == 2); + GPR_ASSERT(GPR_SLICE_START_PTR(slice)[0] == 0); + GPR_ASSERT(GPR_SLICE_START_PTR(slice)[1] == 1); + + /* unref should cause destroy function to run. */ + gpr_slice_unref(slice); + GPR_ASSERT(marker == 1); +} + static int do_nothing_with_len_1_calls = 0; static void do_nothing_with_len_1(void *ignored, size_t len) { @@ -232,6 +253,7 @@ int main(int argc, char **argv) { grpc_test_init(argc, argv); test_slice_malloc_returns_something_sensible(); test_slice_new_returns_something_sensible(); + test_slice_new_with_user_data(); test_slice_new_with_len_returns_something_sensible(); for (length = 0; length < 128; length++) { test_slice_sub_works(length); diff --git a/test/core/surface/sequential_connectivity_test.c b/test/core/surface/sequential_connectivity_test.c index 2fba3927ba7..fe87f119f2f 100644 --- a/test/core/surface/sequential_connectivity_test.c +++ b/test/core/surface/sequential_connectivity_test.c @@ -154,7 +154,7 @@ static void secure_test_add_port(grpc_server *server, const char *addr) { static grpc_channel *secure_test_create_channel(const char *addr) { grpc_channel_credentials *ssl_creds = - grpc_ssl_credentials_create(NULL, NULL, NULL); + grpc_ssl_credentials_create(test_root_cert, NULL, NULL); grpc_arg ssl_name_override = {GRPC_ARG_STRING, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG, {"foo.test.google.fr"}}; diff --git a/test/core/util/mock_endpoint.c b/test/core/util/mock_endpoint.c index ed9545e9df2..13e0e918fbe 100644 --- a/test/core/util/mock_endpoint.c +++ b/test/core/util/mock_endpoint.c @@ -95,9 +95,17 @@ static char *me_get_peer(grpc_endpoint *ep) { return gpr_strdup("fake:mock_endpoint"); } +static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; } + static const grpc_endpoint_vtable vtable = { - me_read, me_write, me_add_to_pollset, me_add_to_pollset_set, - me_shutdown, me_destroy, me_get_peer, + me_read, + me_write, + me_get_workqueue, + me_add_to_pollset, + me_add_to_pollset_set, + me_shutdown, + me_destroy, + me_get_peer, }; grpc_endpoint *grpc_mock_endpoint_create(void (*on_write)(gpr_slice slice)) { diff --git a/test/core/util/passthru_endpoint.c b/test/core/util/passthru_endpoint.c index a39f3dd66e2..7ed9e97bd6a 100644 --- a/test/core/util/passthru_endpoint.c +++ b/test/core/util/passthru_endpoint.c @@ -140,9 +140,17 @@ static char *me_get_peer(grpc_endpoint *ep) { return gpr_strdup("fake:mock_endpoint"); } +static grpc_workqueue *me_get_workqueue(grpc_endpoint *ep) { return NULL; } + static const grpc_endpoint_vtable vtable = { - me_read, me_write, me_add_to_pollset, me_add_to_pollset_set, - me_shutdown, me_destroy, me_get_peer, + me_read, + me_write, + me_get_workqueue, + me_add_to_pollset, + me_add_to_pollset_set, + me_shutdown, + me_destroy, + me_get_peer, }; static void half_init(half *m, passthru_endpoint *parent) { diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc index 6c7eae53a40..ac79fe82740 100644 --- a/test/cpp/end2end/async_end2end_test.cc +++ b/test/cpp/end2end/async_end2end_test.cc @@ -200,6 +200,10 @@ class Verifier { bool spin_; }; +bool plugin_has_sync_methods(std::unique_ptr& 
plugin) { + return plugin->has_sync_methods(); +} + // This class disables the server builder plugins that may add sync services to // the server. If there are sync services, UnimplementedRpc test will triger // the sync unkown rpc routine on the server side, rather than the async one @@ -210,14 +214,9 @@ class ServerBuilderSyncPluginDisabler : public ::grpc::ServerBuilderOption { void UpdatePlugins(std::vector>* plugins) GRPC_OVERRIDE { - auto plugin = plugins->begin(); - while (plugin != plugins->end()) { - if ((*plugin)->has_sync_methods()) { - plugins->erase(plugin++); - } else { - plugin++; - } - } + plugins->erase(std::remove_if(plugins->begin(), plugins->end(), + plugin_has_sync_methods), + plugins->end()); } }; @@ -345,6 +344,31 @@ TEST_P(AsyncEnd2endTest, SequentialRpcs) { SendRpc(10); } +// We do not need to protect notify because the use is synchronized. +void ServerWait(Server* server, int* notify) { + server->Wait(); + *notify = 1; +} +TEST_P(AsyncEnd2endTest, WaitAndShutdownTest) { + int notify = 0; + std::thread* wait_thread = + new std::thread(&ServerWait, server_.get(), ¬ify); + ResetStub(); + SendRpc(1); + EXPECT_EQ(0, notify); + server_->Shutdown(); + wait_thread->join(); + EXPECT_EQ(1, notify); + delete wait_thread; +} + +TEST_P(AsyncEnd2endTest, ShutdownThenWait) { + ResetStub(); + SendRpc(1); + server_->Shutdown(); + server_->Wait(); +} + // Test a simple RPC using the async version of Next TEST_P(AsyncEnd2endTest, AsyncNextRpc) { ResetStub(); diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index 354a59cedd5..46a58d3ac39 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -1166,6 +1166,9 @@ TEST_P(ProxyEnd2endTest, HugeResponse) { request.mutable_param()->set_response_message_length(kResponseSize); ClientContext context; + std::chrono::system_clock::time_point deadline = + std::chrono::system_clock::now() + std::chrono::seconds(20); + context.set_deadline(deadline); Status s = stub_->Echo(&context, request, &response); EXPECT_EQ(kResponseSize, response.message().size()); EXPECT_TRUE(s.ok()); @@ -1411,7 +1414,7 @@ TEST_P(SecureEnd2endTest, ClientAuthContext) { std::shared_ptr auth_ctx = context.auth_context(); std::vector tst = auth_ctx->FindPropertyValues("transport_security_type"); - EXPECT_EQ(1u, tst.size()); + ASSERT_EQ(1u, tst.size()); EXPECT_EQ(GetParam().credentials_type, ToString(tst[0])); if (GetParam().credentials_type == kTlsCredentialsType) { EXPECT_EQ("x509_subject_alternative_name", diff --git a/test/cpp/end2end/proto_server_reflection_test.cc b/test/cpp/end2end/proto_server_reflection_test.cc index f8fc39b5535..efbb0e1f8e5 100644 --- a/test/cpp/end2end/proto_server_reflection_test.cc +++ b/test/cpp/end2end/proto_server_reflection_test.cc @@ -31,7 +31,6 @@ * */ -#include #include #include #include @@ -59,7 +58,7 @@ class ProtoServerReflectionTest : public ::testing::Test { void SetUp() GRPC_OVERRIDE { port_ = grpc_pick_unused_port_or_die(); - ref_desc_pool_ = google::protobuf::DescriptorPool::generated_pool(); + ref_desc_pool_ = protobuf::DescriptorPool::generated_pool(); ServerBuilder builder; grpc::string server_address = "localhost:" + to_string(port_); @@ -73,7 +72,7 @@ class ProtoServerReflectionTest : public ::testing::Test { CreateChannel(target, InsecureChannelCredentials()); stub_ = grpc::testing::EchoTestService::NewStub(channel); desc_db_.reset(new ProtoReflectionDescriptorDatabase(channel)); - desc_pool_.reset(new google::protobuf::DescriptorPool(desc_db_.get())); + 
desc_pool_.reset(new protobuf::DescriptorPool(desc_db_.get())); } string to_string(const int number) { @@ -83,15 +82,15 @@ class ProtoServerReflectionTest : public ::testing::Test { } void CompareService(const grpc::string& service) { - const google::protobuf::ServiceDescriptor* service_desc = + const protobuf::ServiceDescriptor* service_desc = desc_pool_->FindServiceByName(service); - const google::protobuf::ServiceDescriptor* ref_service_desc = + const protobuf::ServiceDescriptor* ref_service_desc = ref_desc_pool_->FindServiceByName(service); EXPECT_TRUE(service_desc != nullptr); EXPECT_TRUE(ref_service_desc != nullptr); EXPECT_EQ(service_desc->DebugString(), ref_service_desc->DebugString()); - const google::protobuf::FileDescriptor* file_desc = service_desc->file(); + const protobuf::FileDescriptor* file_desc = service_desc->file(); if (known_files_.find(file_desc->package() + "/" + file_desc->name()) != known_files_.end()) { EXPECT_EQ(file_desc->DebugString(), @@ -105,9 +104,9 @@ class ProtoServerReflectionTest : public ::testing::Test { } void CompareMethod(const grpc::string& method) { - const google::protobuf::MethodDescriptor* method_desc = + const protobuf::MethodDescriptor* method_desc = desc_pool_->FindMethodByName(method); - const google::protobuf::MethodDescriptor* ref_method_desc = + const protobuf::MethodDescriptor* ref_method_desc = ref_desc_pool_->FindMethodByName(method); EXPECT_TRUE(method_desc != nullptr); EXPECT_TRUE(ref_method_desc != nullptr); @@ -122,9 +121,8 @@ class ProtoServerReflectionTest : public ::testing::Test { return; } - const google::protobuf::Descriptor* desc = - desc_pool_->FindMessageTypeByName(type); - const google::protobuf::Descriptor* ref_desc = + const protobuf::Descriptor* desc = desc_pool_->FindMessageTypeByName(type); + const protobuf::Descriptor* ref_desc = ref_desc_pool_->FindMessageTypeByName(type); EXPECT_TRUE(desc != nullptr); EXPECT_TRUE(ref_desc != nullptr); @@ -135,10 +133,10 @@ class ProtoServerReflectionTest : public ::testing::Test { std::unique_ptr server_; std::unique_ptr stub_; std::unique_ptr desc_db_; - std::unique_ptr desc_pool_; + std::unique_ptr desc_pool_; std::unordered_set known_files_; std::unordered_set known_types_; - const google::protobuf::DescriptorPool* ref_desc_pool_; + const protobuf::DescriptorPool* ref_desc_pool_; int port_; reflection::ProtoServerReflectionPlugin plugin_; }; diff --git a/test/cpp/end2end/server_builder_plugin_test.cc b/test/cpp/end2end/server_builder_plugin_test.cc index 778a2be573e..b967a5d1e99 100644 --- a/test/cpp/end2end/server_builder_plugin_test.cc +++ b/test/cpp/end2end/server_builder_plugin_test.cc @@ -191,7 +191,7 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam { // we run some tests without a service, and for those we need to supply a // frequently polled completion queue cq_ = builder_->AddCompletionQueue(); - cq_thread_ = grpc::thread(std::bind(&ServerBuilderPluginTest::RunCQ, this)); + cq_thread_ = new grpc::thread(&ServerBuilderPluginTest::RunCQ, this); server_ = builder_->BuildAndStart(); EXPECT_TRUE(CheckPresent()); } @@ -209,7 +209,8 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam { EXPECT_TRUE(plugin->finish_is_called()); server_->Shutdown(); cq_->Shutdown(); - cq_thread_.join(); + cq_thread_->join(); + delete cq_thread_; } string to_string(const int number) { @@ -224,7 +225,7 @@ class ServerBuilderPluginTest : public ::testing::TestWithParam { std::unique_ptr stub_; std::unique_ptr cq_; std::unique_ptr server_; - grpc::thread cq_thread_; 
+ grpc::thread* cq_thread_; TestServiceImpl service_; int port_; diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h index 047bd164082..4045e13460f 100644 --- a/test/cpp/qps/client.h +++ b/test/cpp/qps/client.h @@ -112,6 +112,21 @@ class ClientRequestCreator { } }; +class HistogramEntry GRPC_FINAL { + public: + HistogramEntry() : used_(false) {} + bool used() const { return used_; } + double value() const { return value_; } + void set_value(double v) { + used_ = true; + value_ = v; + } + + private: + bool used_; + double value_; +}; + class Client { public: Client() : timer_(new UsageTimer), interarrival_timer_() {} @@ -151,10 +166,21 @@ class Client { return stats; } + // Must call AwaitThreadsCompletion before destructor to avoid a race + // between destructor and invocation of virtual ThreadFunc + void AwaitThreadsCompletion() { + DestroyMultithreading(); + std::unique_lock g(thread_completion_mu_); + while (threads_remaining_ != 0) { + threads_complete_.wait(g); + } + } + protected: bool closed_loop_; void StartThreads(size_t num_threads) { + threads_remaining_ = num_threads; for (size_t i = 0; i < num_threads; i++) { threads_.emplace_back(new Thread(this, i)); } @@ -162,7 +188,8 @@ class Client { void EndThreads() { threads_.clear(); } - virtual bool ThreadFunc(Histogram* histogram, size_t thread_idx) = 0; + virtual void DestroyMultithreading() = 0; + virtual bool ThreadFunc(HistogramEntry* histogram, size_t thread_idx) = 0; void SetupLoadTest(const ClientConfig& config, size_t num_threads) { // Set up the load distribution based on the number of threads @@ -215,7 +242,6 @@ class Client { public: Thread(Client* client, size_t idx) : done_(false), - new_stats_(nullptr), client_(client), idx_(idx), impl_(&Thread::ThreadFunc, this) {} @@ -230,15 +256,10 @@ class Client { void BeginSwap(Histogram* n) { std::lock_guard g(mu_); - new_stats_ = n; + n->Swap(&histogram_); } - void EndSwap() { - std::unique_lock g(mu_); - while (new_stats_ != nullptr) { - cv_.wait(g); - }; - } + void EndSwap() {} void MergeStatsInto(Histogram* hist) { std::unique_lock g(mu_); @@ -252,29 +273,26 @@ class Client { void ThreadFunc() { for (;;) { // run the loop body - const bool thread_still_ok = client_->ThreadFunc(&histogram_, idx_); - // lock, see if we're done + HistogramEntry entry; + const bool thread_still_ok = client_->ThreadFunc(&entry, idx_); + // lock, update histogram if needed and see if we're done std::lock_guard g(mu_); + if (entry.used()) { + histogram_.Add(entry.value()); + } if (!thread_still_ok) { gpr_log(GPR_ERROR, "Finishing client thread due to RPC error"); done_ = true; } if (done_) { + client_->CompleteThread(); return; } - // check if we're resetting stats, swap out the histogram if so - if (new_stats_) { - new_stats_->Swap(&histogram_); - new_stats_ = nullptr; - cv_.notify_one(); - } } } std::mutex mu_; - std::condition_variable cv_; bool done_; - Histogram* new_stats_; Histogram histogram_; Client* client_; const size_t idx_; @@ -286,6 +304,18 @@ class Client { InterarrivalTimer interarrival_timer_; std::vector next_time_; + + std::mutex thread_completion_mu_; + size_t threads_remaining_; + std::condition_variable threads_complete_; + + void CompleteThread() { + std::lock_guard g(thread_completion_mu_); + threads_remaining_--; + if (threads_remaining_ == 0) { + threads_complete_.notify_all(); + } + } }; template diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc index 1507d1e3d66..5d9cb4bd0cf 100644 --- a/test/cpp/qps/client_async.cc +++ 
b/test/cpp/qps/client_async.cc @@ -31,7 +31,6 @@ * */ -#include #include #include #include @@ -48,7 +47,6 @@ #include #include #include -#include #include #include "src/proto/grpc/testing/services.grpc.pb.h" @@ -64,7 +62,7 @@ class ClientRpcContext { ClientRpcContext() {} virtual ~ClientRpcContext() {} // next state, return false if done. Collect stats when appropriate - virtual bool RunNextState(bool, Histogram* hist) = 0; + virtual bool RunNextState(bool, HistogramEntry* entry) = 0; virtual ClientRpcContext* StartNewClone() = 0; static void* tag(ClientRpcContext* c) { return reinterpret_cast(c); } static ClientRpcContext* detag(void* t) { @@ -104,7 +102,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext { alarm_.reset(new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this))); } } - bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE { + bool RunNextState(bool ok, HistogramEntry* entry) GRPC_OVERRIDE { switch (next_state_) { case State::READY: start_ = UsageTimer::Now(); @@ -114,7 +112,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext { next_state_ = State::RESP_DONE; return true; case State::RESP_DONE: - hist->Add((UsageTimer::Now() - start_) * 1e9); + entry->set_value((UsageTimer::Now() - start_) * 1e9); callback_(status_, &response_); next_state_ = State::INVALID; return false; @@ -176,6 +174,7 @@ class AsyncClient : public ClientImpl { for (int i = 0; i < num_async_threads_; i++) { cli_cqs_.emplace_back(new CompletionQueue); next_issuers_.emplace_back(NextIssuer(i)); + shutdown_state_.emplace_back(new PerThreadShutdownState()); } using namespace std::placeholders; @@ -192,7 +191,6 @@ class AsyncClient : public ClientImpl { } virtual ~AsyncClient() { for (auto cq = cli_cqs_.begin(); cq != cli_cqs_.end(); cq++) { - (*cq)->Shutdown(); void* got_tag; bool ok; while ((*cq)->Next(&got_tag, &ok)) { @@ -201,32 +199,16 @@ class AsyncClient : public ClientImpl { } } - bool ThreadFunc(Histogram* histogram, - size_t thread_idx) GRPC_OVERRIDE GRPC_FINAL { - void* got_tag; - bool ok; - - if (cli_cqs_[thread_idx]->Next(&got_tag, &ok)) { - // Got a regular event, so process it - ClientRpcContext* ctx = ClientRpcContext::detag(got_tag); - if (!ctx->RunNextState(ok, histogram)) { - // The RPC and callback are done, so clone the ctx - // and kickstart the new one - auto clone = ctx->StartNewClone(); - clone->Start(cli_cqs_[thread_idx].get()); - // delete the old version - delete ctx; - } - return true; - } else { // queue is shutting down - return false; - } - } - protected: const int num_async_threads_; private: + struct PerThreadShutdownState { + mutable std::mutex mutex; + bool shutdown; + PerThreadShutdownState() : shutdown(false) {} + }; + int NumThreads(const ClientConfig& config) { int num_threads = config.async_client_threads(); if (num_threads <= 0) { // Use dynamic sizing @@ -235,9 +217,60 @@ class AsyncClient : public ClientImpl { } return num_threads; } + void DestroyMultithreading() GRPC_OVERRIDE GRPC_FINAL { + for (auto ss = shutdown_state_.begin(); ss != shutdown_state_.end(); ++ss) { + std::lock_guard lock((*ss)->mutex); + (*ss)->shutdown = true; + } + for (auto cq = cli_cqs_.begin(); cq != cli_cqs_.end(); cq++) { + (*cq)->Shutdown(); + } + this->EndThreads(); // this needed for resolution + } + + bool ThreadFunc(HistogramEntry* entry, + size_t thread_idx) GRPC_OVERRIDE GRPC_FINAL { + void* got_tag; + bool ok; + + switch (cli_cqs_[thread_idx]->AsyncNext( + &got_tag, &ok, + std::chrono::system_clock::now() + std::chrono::milliseconds(10))) { + case 
CompletionQueue::GOT_EVENT: { + // Got a regular event, so process it + ClientRpcContext* ctx = ClientRpcContext::detag(got_tag); + // Proceed while holding a lock to make sure that + // this thread isn't supposed to shut down + std::lock_guard l(shutdown_state_[thread_idx]->mutex); + if (shutdown_state_[thread_idx]->shutdown) { + return true; + } else if (!ctx->RunNextState(ok, entry)) { + // The RPC and callback are done, so clone the ctx + // and kickstart the new one + auto clone = ctx->StartNewClone(); + clone->Start(cli_cqs_[thread_idx].get()); + // delete the old version + delete ctx; + } + return true; + } + case CompletionQueue::TIMEOUT: { + std::lock_guard l(shutdown_state_[thread_idx]->mutex); + if (shutdown_state_[thread_idx]->shutdown) { + return true; + } + return true; + } + case CompletionQueue::SHUTDOWN: // queue is shutting down, so we must be + // done + return true; + } + GPR_UNREACHABLE_CODE(return true); + } std::vector> cli_cqs_; std::vector> next_issuers_; + std::vector> shutdown_state_; }; static std::unique_ptr BenchmarkStubCreator( @@ -253,7 +286,7 @@ class AsyncUnaryClient GRPC_FINAL config, SetupCtx, BenchmarkStubCreator) { StartThreads(num_async_threads_); } - ~AsyncUnaryClient() GRPC_OVERRIDE { EndThreads(); } + ~AsyncUnaryClient() GRPC_OVERRIDE {} private: static void CheckDone(grpc::Status s, SimpleResponse* response) {} @@ -298,7 +331,7 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext { stream_ = start_req_(stub_, &context_, cq, ClientRpcContext::tag(this)); next_state_ = State::STREAM_IDLE; } - bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE { + bool RunNextState(bool ok, HistogramEntry* entry) GRPC_OVERRIDE { while (true) { switch (next_state_) { case State::STREAM_IDLE: @@ -330,7 +363,7 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext { return true; break; case State::READ_DONE: - hist->Add((UsageTimer::Now() - start_) * 1e9); + entry->set_value((UsageTimer::Now() - start_) * 1e9); callback_(status_, &response_); next_state_ = State::STREAM_IDLE; break; // loop around @@ -382,7 +415,7 @@ class AsyncStreamingClient GRPC_FINAL StartThreads(num_async_threads_); } - ~AsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); } + ~AsyncStreamingClient() GRPC_OVERRIDE {} private: static void CheckDone(grpc::Status s, SimpleResponse* response) {} @@ -430,7 +463,7 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext { ClientRpcContext::tag(this)); next_state_ = State::STREAM_IDLE; } - bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE { + bool RunNextState(bool ok, HistogramEntry* entry) GRPC_OVERRIDE { while (true) { switch (next_state_) { case State::STREAM_IDLE: @@ -462,7 +495,7 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext { return true; break; case State::READ_DONE: - hist->Add((UsageTimer::Now() - start_) * 1e9); + entry->set_value((UsageTimer::Now() - start_) * 1e9); callback_(status_, &response_); next_state_ = State::STREAM_IDLE; break; // loop around @@ -518,7 +551,7 @@ class GenericAsyncStreamingClient GRPC_FINAL StartThreads(num_async_threads_); } - ~GenericAsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); } + ~GenericAsyncStreamingClient() GRPC_OVERRIDE {} private: static void CheckDone(grpc::Status s, ByteBuffer* response) {} diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc index c88e95b80e5..25c78235532 100644 --- a/test/cpp/qps/client_sync.cc +++ b/test/cpp/qps/client_sync.cc @@ -31,7 +31,6 @@ * */ -#include #include #include #include 
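Note on the client_async.cc changes above: the worker threads no longer block indefinitely in Next(). Each thread polls its completion queue with a ~10ms deadline via AsyncNext() and re-checks a per-thread shutdown flag under a mutex before acting on an event, which is what lets DestroyMultithreading() stop the pollers deterministically. A minimal standalone sketch of that pattern, using illustrative names (PerThreadShutdownState, PollOnce) rather than the exact classes in this diff, and the grpc++ header path of this tree:

#include <chrono>
#include <mutex>

#include <grpc++/completion_queue.h>

struct PerThreadShutdownState {
  std::mutex mutex;
  bool shutdown = false;
};

// One loop iteration of a worker thread: wait up to 10ms for a completion
// queue event, then re-check the shutdown flag under the lock before
// touching any per-RPC state. The owner stops the thread by flipping
// `shutdown` and calling cq->Shutdown(), not through the return value.
bool PollOnce(grpc::CompletionQueue* cq, PerThreadShutdownState* state) {
  void* tag;
  bool ok;
  auto deadline =
      std::chrono::system_clock::now() + std::chrono::milliseconds(10);
  switch (cq->AsyncNext(&tag, &ok, deadline)) {
    case grpc::CompletionQueue::GOT_EVENT: {
      std::lock_guard<std::mutex> lock(state->mutex);
      if (state->shutdown) return true;  // draining: drop the event
      // ... run the RPC state machine for `tag` here ...
      return true;
    }
    case grpc::CompletionQueue::TIMEOUT:
      return true;  // woke up only to re-check the shutdown flag
    case grpc::CompletionQueue::SHUTDOWN:
      return true;  // queue drained; the flag already told us to stop
  }
  return true;
}

In the diff itself the same idea is paired with HistogramEntry: each ThreadFunc iteration hands back at most one latency sample, which the owning Thread merges into its Histogram under its own lock, replacing the old swap-based stats handshake.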
@@ -46,7 +45,6 @@ #include #include #include -#include #include #include #include @@ -55,7 +53,6 @@ #include "src/core/lib/profiling/timers.h" #include "src/proto/grpc/testing/services.grpc.pb.h" #include "test/cpp/qps/client.h" -#include "test/cpp/qps/histogram.h" #include "test/cpp/qps/interarrival.h" #include "test/cpp/qps/usage_timer.h" @@ -90,6 +87,9 @@ class SynchronousClient size_t num_threads_; std::vector responses_; + + private: + void DestroyMultithreading() GRPC_OVERRIDE GRPC_FINAL { EndThreads(); } }; class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient { @@ -98,9 +98,9 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient { : SynchronousClient(config) { StartThreads(num_threads_); } - ~SynchronousUnaryClient() { EndThreads(); } + ~SynchronousUnaryClient() {} - bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE { + bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) GRPC_OVERRIDE { WaitToIssue(thread_idx); auto* stub = channels_[thread_idx % channels_.size()].get_stub(); double start = UsageTimer::Now(); @@ -108,7 +108,7 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient { grpc::ClientContext context; grpc::Status s = stub->UnaryCall(&context, request_, &responses_[thread_idx]); - histogram->Add((UsageTimer::Now() - start) * 1e9); + entry->set_value((UsageTimer::Now() - start) * 1e9); return s.ok(); } }; @@ -127,25 +127,29 @@ class SynchronousStreamingClient GRPC_FINAL : public SynchronousClient { StartThreads(num_threads_); } ~SynchronousStreamingClient() { - EndThreads(); - for (auto stream = &stream_[0]; stream != &stream_[num_threads_]; - stream++) { + for (size_t i = 0; i < num_threads_; i++) { + auto stream = &stream_[i]; if (*stream) { (*stream)->WritesDone(); - EXPECT_TRUE((*stream)->Finish().ok()); + Status s = (*stream)->Finish(); + EXPECT_TRUE(s.ok()); + if (!s.ok()) { + gpr_log(GPR_ERROR, "Stream %zu received an error %s", i, + s.error_message().c_str()); + } } } delete[] stream_; delete[] context_; } - bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE { + bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) GRPC_OVERRIDE { WaitToIssue(thread_idx); GPR_TIMER_SCOPE("SynchronousStreamingClient::ThreadFunc", 0); double start = UsageTimer::Now(); if (stream_[thread_idx]->Write(request_) && stream_[thread_idx]->Read(&responses_[thread_idx])) { - histogram->Add((UsageTimer::Now() - start) * 1e9); + entry->set_value((UsageTimer::Now() - start) * 1e9); return true; } return false; diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index 08bf0458832..2aeaea51f25 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -87,7 +87,7 @@ static std::unordered_map> get_hosts_and_cores( CoreRequest dummy; CoreResponse cores; grpc::Status s = stub->CoreCount(&ctx, dummy, &cores); - assert(s.ok()); + GPR_ASSERT(s.ok()); std::deque dq; for (int i = 0; i < cores.cores(); i++) { dq.push_back(i); @@ -289,9 +289,13 @@ std::unique_ptr RunScenario( *args.mutable_setup() = server_config; servers[i].stream = servers[i].stub->RunServer(runsc::AllocContext(&contexts)); - GPR_ASSERT(servers[i].stream->Write(args)); + if (!servers[i].stream->Write(args)) { + gpr_log(GPR_ERROR, "Could not write args to server %zu", i); + } ServerStatus init_status; - GPR_ASSERT(servers[i].stream->Read(&init_status)); + if (!servers[i].stream->Read(&init_status)) { + gpr_log(GPR_ERROR, "Server %zu did not yield initial status", i); + } gpr_join_host_port(&cli_target, host, init_status.port()); 
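// Note: from this point on, RunScenario no longer GPR_ASSERT()s on stream
// Write()/Read() failures. Each failure is logged with gpr_log(GPR_ERROR, ...)
// and the run continues; per-worker outcomes are instead recorded via
// result->add_client_success() / result->add_server_success() and folded
// into the process exit code by QpsDriver() later in this diff.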
client_config.add_server_targets(cli_target); gpr_free(host); @@ -345,9 +349,13 @@ std::unique_ptr RunScenario( *args.mutable_setup() = per_client_config; clients[i].stream = clients[i].stub->RunClient(runsc::AllocContext(&contexts)); - GPR_ASSERT(clients[i].stream->Write(args)); + if (!clients[i].stream->Write(args)) { + gpr_log(GPR_ERROR, "Could not write args to client %zu", i); + } ClientStatus init_status; - GPR_ASSERT(clients[i].stream->Read(&init_status)); + if (!clients[i].stream->Read(&init_status)) { + gpr_log(GPR_ERROR, "Client %zu did not yield initial status", i); + } } // Let everything warmup @@ -362,19 +370,31 @@ std::unique_ptr RunScenario( server_mark.mutable_mark()->set_reset(true); ClientArgs client_mark; client_mark.mutable_mark()->set_reset(true); - for (auto server = &servers[0]; server != &servers[num_servers]; server++) { - GPR_ASSERT(server->stream->Write(server_mark)); + for (size_t i = 0; i < num_servers; i++) { + auto server = &servers[i]; + if (!server->stream->Write(server_mark)) { + gpr_log(GPR_ERROR, "Couldn't write mark to server %zu", i); + } } - for (auto client = &clients[0]; client != &clients[num_clients]; client++) { - GPR_ASSERT(client->stream->Write(client_mark)); + for (size_t i = 0; i < num_clients; i++) { + auto client = &clients[i]; + if (!client->stream->Write(client_mark)) { + gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i); + } } ServerStatus server_status; ClientStatus client_status; - for (auto server = &servers[0]; server != &servers[num_servers]; server++) { - GPR_ASSERT(server->stream->Read(&server_status)); + for (size_t i = 0; i < num_servers; i++) { + auto server = &servers[i]; + if (!server->stream->Read(&server_status)) { + gpr_log(GPR_ERROR, "Couldn't get status from server %zu", i); + } } - for (auto client = &clients[0]; client != &clients[num_clients]; client++) { - GPR_ASSERT(client->stream->Read(&client_status)); + for (size_t i = 0; i < num_clients; i++) { + auto client = &clients[i]; + if (!client->stream->Read(&client_status)) { + gpr_log(GPR_ERROR, "Couldn't get status from client %zu", i); + } } // Wait some time @@ -390,37 +410,73 @@ std::unique_ptr RunScenario( Histogram merged_latencies; gpr_log(GPR_INFO, "Finishing clients"); - for (auto client = &clients[0]; client != &clients[num_clients]; client++) { - GPR_ASSERT(client->stream->Write(client_mark)); - GPR_ASSERT(client->stream->WritesDone()); + for (size_t i = 0; i < num_clients; i++) { + auto client = &clients[i]; + if (!client->stream->Write(client_mark)) { + gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i); + } + if (!client->stream->WritesDone()) { + gpr_log(GPR_ERROR, "Failed WritesDone for client %zu", i); + } } - for (auto client = &clients[0]; client != &clients[num_clients]; client++) { - GPR_ASSERT(client->stream->Read(&client_status)); - const auto& stats = client_status.stats(); - merged_latencies.MergeProto(stats.latencies()); - result->add_client_stats()->CopyFrom(stats); - GPR_ASSERT(!client->stream->Read(&client_status)); + for (size_t i = 0; i < num_clients; i++) { + auto client = &clients[i]; + // Read the client final status + if (client->stream->Read(&client_status)) { + gpr_log(GPR_INFO, "Received final status from client %zu", i); + const auto& stats = client_status.stats(); + merged_latencies.MergeProto(stats.latencies()); + result->add_client_stats()->CopyFrom(stats); + // That final status should be the last message on the client stream + GPR_ASSERT(!client->stream->Read(&client_status)); + } else { + 
gpr_log(GPR_ERROR, "Couldn't get final status from client %zu", i); + } } - for (auto client = &clients[0]; client != &clients[num_clients]; client++) { - GPR_ASSERT(client->stream->Finish().ok()); + for (size_t i = 0; i < num_clients; i++) { + auto client = &clients[i]; + Status s = client->stream->Finish(); + result->add_client_success(s.ok()); + if (!s.ok()) { + gpr_log(GPR_ERROR, "Client %zu had an error %s", i, + s.error_message().c_str()); + } } delete[] clients; merged_latencies.FillProto(result->mutable_latencies()); gpr_log(GPR_INFO, "Finishing servers"); - for (auto server = &servers[0]; server != &servers[num_servers]; server++) { - GPR_ASSERT(server->stream->Write(server_mark)); - GPR_ASSERT(server->stream->WritesDone()); + for (size_t i = 0; i < num_servers; i++) { + auto server = &servers[i]; + if (!server->stream->Write(server_mark)) { + gpr_log(GPR_ERROR, "Couldn't write mark to server %zu", i); + } + if (!server->stream->WritesDone()) { + gpr_log(GPR_ERROR, "Failed WritesDone for server %zu", i); + } } - for (auto server = &servers[0]; server != &servers[num_servers]; server++) { - GPR_ASSERT(server->stream->Read(&server_status)); - result->add_server_stats()->CopyFrom(server_status.stats()); - result->add_server_cores(server_status.cores()); - GPR_ASSERT(!server->stream->Read(&server_status)); + for (size_t i = 0; i < num_servers; i++) { + auto server = &servers[i]; + // Read the server final status + if (server->stream->Read(&server_status)) { + gpr_log(GPR_INFO, "Received final status from server %zu", i); + result->add_server_stats()->CopyFrom(server_status.stats()); + result->add_server_cores(server_status.cores()); + // That final status should be the last message on the server stream + GPR_ASSERT(!server->stream->Read(&server_status)); + } else { + gpr_log(GPR_ERROR, "Couldn't get final status from server %zu", i); + } } - for (auto server = &servers[0]; server != &servers[num_servers]; server++) { - GPR_ASSERT(server->stream->Finish().ok()); + for (size_t i = 0; i < num_servers; i++) { + auto server = &servers[i]; + Status s = server->stream->Finish(); + result->add_server_success(s.ok()); + if (!s.ok()) { + gpr_log(GPR_ERROR, "Server %zu had an error %s", i, + s.error_message().c_str()); + } } delete[] servers; @@ -429,8 +485,9 @@ std::unique_ptr RunScenario( return result; } -void RunQuit() { +bool RunQuit() { // Get client, server lists + bool result = true; auto workers = get_workers("QPS_WORKERS"); for (size_t i = 0; i < workers.size(); i++) { auto stub = WorkerService::NewStub( @@ -438,8 +495,14 @@ void RunQuit() { Void dummy; grpc::ClientContext ctx; ctx.set_fail_fast(false); - GPR_ASSERT(stub->QuitWorker(&ctx, dummy, &dummy).ok()); + Status s = stub->QuitWorker(&ctx, dummy, &dummy); + if (!s.ok()) { + gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s", i, + s.error_message().c_str()); + result = false; + } } + return result; } } // namespace testing diff --git a/test/cpp/qps/driver.h b/test/cpp/qps/driver.h index 3a5cf138f11..93f4370cafa 100644 --- a/test/cpp/qps/driver.h +++ b/test/cpp/qps/driver.h @@ -47,7 +47,7 @@ std::unique_ptr RunScenario( const grpc::testing::ServerConfig& server_config, size_t num_servers, int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count); -void RunQuit(); +bool RunQuit(); } // namespace testing } // namespace grpc diff --git a/test/cpp/qps/gen_build_yaml.py b/test/cpp/qps/gen_build_yaml.py index 34b81514411..4ff4e44b8b2 100755 --- a/test/cpp/qps/gen_build_yaml.py +++ 
b/test/cpp/qps/gen_build_yaml.py @@ -45,9 +45,10 @@ import performance.scenario_config as scenario_config def _scenario_json_string(scenario_json): # tweak parameters to get fast test times - scenario_json['warmup_seconds'] = 1 + scenario_json['warmup_seconds'] = 0 scenario_json['benchmark_seconds'] = 1 - return json.dumps(scenario_config.remove_nonproto_fields(scenario_json)) + scenarios_json = {'scenarios': [scenario_config.remove_nonproto_fields(scenario_json)]} + return json.dumps(scenarios_json) def threads_of_type(scenario_json, path): d = scenario_json @@ -72,8 +73,7 @@ print yaml.dump({ { 'name': 'json_run_localhost', 'shortname': 'json_run_localhost:%s' % scenario_json['name'], - 'args': ['--scenario_json', - pipes.quote(_scenario_json_string(scenario_json))], + 'args': ['--scenarios_json', _scenario_json_string(scenario_json)], 'ci_platforms': ['linux', 'mac', 'posix', 'windows'], 'platforms': ['linux', 'mac', 'posix', 'windows'], 'flaky': False, @@ -81,7 +81,8 @@ print yaml.dump({ 'boringssl': True, 'defaults': 'boringssl', 'cpu_cost': guess_cpu(scenario_json), - 'exclude_configs': [] + 'exclude_configs': [], + 'timeout_seconds': 3*60 } for scenario_json in scenario_config.CXXLanguage().scenarios() ] diff --git a/test/cpp/qps/json_run_localhost.cc b/test/cpp/qps/json_run_localhost.cc index 6545dc2917d..74e40fbf1a9 100644 --- a/test/cpp/qps/json_run_localhost.cc +++ b/test/cpp/qps/json_run_localhost.cc @@ -75,7 +75,7 @@ int main(int argc, char** argv) { for (int i = 1; i < argc; i++) { args.push_back(argv[i]); } - SubProcess(args).Join(); + GPR_ASSERT(SubProcess(args).Join() == 0); for (auto it = jobs.begin(); it != jobs.end(); ++it) { (*it)->Interrupt(); diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index f5d739f893a..1524ebbc389 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -53,7 +53,7 @@ DEFINE_bool(quit, false, "Quit the workers"); namespace grpc { namespace testing { -static void QpsDriver() { +static bool QpsDriver() { grpc::string json; bool scfile = (FLAGS_scenarios_file != ""); @@ -81,13 +81,13 @@ static void QpsDriver() { } else if (scjson) { json = FLAGS_scenarios_json.c_str(); } else if (FLAGS_quit) { - RunQuit(); - return; + return RunQuit(); } // Parse into an array of scenarios Scenarios scenarios; ParseJson(json.c_str(), "grpc.testing.Scenarios", &scenarios); + bool success = true; // Make sure that there is at least some valid scenario here GPR_ASSERT(scenarios.scenarios_size() > 0); @@ -109,7 +109,15 @@ static void QpsDriver() { GetReporter()->ReportQPSPerCore(*result); GetReporter()->ReportLatency(*result); GetReporter()->ReportTimes(*result); + + for (int i = 0; success && i < result->client_success_size(); i++) { + success = result->client_success(i); + } + for (int i = 0; success && i < result->server_success_size(); i++) { + success = result->server_success(i); + } } + return success; } } // namespace testing @@ -118,7 +126,7 @@ static void QpsDriver() { int main(int argc, char **argv) { grpc::testing::InitBenchmark(&argc, &argv, true); - grpc::testing::QpsDriver(); + bool ok = grpc::testing::QpsDriver(); - return 0; + return ok ? 
0 : 1; } diff --git a/test/cpp/qps/qps_worker.cc b/test/cpp/qps/qps_worker.cc index f514e23e854..d3e53fe14a6 100644 --- a/test/cpp/qps/qps_worker.cc +++ b/test/cpp/qps/qps_worker.cc @@ -33,7 +33,6 @@ #include "test/cpp/qps/qps_worker.h" -#include #include #include #include @@ -124,11 +123,12 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service { GRPC_OVERRIDE { InstanceGuard g(this); if (!g.Acquired()) { - return Status(StatusCode::RESOURCE_EXHAUSTED, ""); + return Status(StatusCode::RESOURCE_EXHAUSTED, "Client worker busy"); } ScopedProfile profile("qps_client.prof", false); Status ret = RunClientBody(ctx, stream); + gpr_log(GPR_INFO, "RunClient: Returning"); return ret; } @@ -137,11 +137,12 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service { GRPC_OVERRIDE { InstanceGuard g(this); if (!g.Acquired()) { - return Status(StatusCode::RESOURCE_EXHAUSTED, ""); + return Status(StatusCode::RESOURCE_EXHAUSTED, "Server worker busy"); } ScopedProfile profile("qps_server.prof", false); Status ret = RunServerBody(ctx, stream); + gpr_log(GPR_INFO, "RunServer: Returning"); return ret; } @@ -154,7 +155,7 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service { Status QuitWorker(ServerContext* ctx, const Void*, Void*) GRPC_OVERRIDE { InstanceGuard g(this); if (!g.Acquired()) { - return Status(StatusCode::RESOURCE_EXHAUSTED, ""); + return Status(StatusCode::RESOURCE_EXHAUSTED, "Quitting worker busy"); } worker_->MarkDone(); @@ -197,33 +198,38 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service { ServerReaderWriter* stream) { ClientArgs args; if (!stream->Read(&args)) { - return Status(StatusCode::INVALID_ARGUMENT, ""); + return Status(StatusCode::INVALID_ARGUMENT, "Couldn't read args"); } if (!args.has_setup()) { - return Status(StatusCode::INVALID_ARGUMENT, ""); + return Status(StatusCode::INVALID_ARGUMENT, "Invalid setup arg"); } gpr_log(GPR_INFO, "RunClientBody: about to create client"); auto client = CreateClient(args.setup()); if (!client) { - return Status(StatusCode::INVALID_ARGUMENT, ""); + return Status(StatusCode::INVALID_ARGUMENT, "Couldn't create client"); } gpr_log(GPR_INFO, "RunClientBody: client created"); ClientStatus status; if (!stream->Write(status)) { - return Status(StatusCode::UNKNOWN, ""); + return Status(StatusCode::UNKNOWN, "Client couldn't report init status"); } gpr_log(GPR_INFO, "RunClientBody: creation status reported"); while (stream->Read(&args)) { gpr_log(GPR_INFO, "RunClientBody: Message read"); if (!args.has_mark()) { gpr_log(GPR_INFO, "RunClientBody: Message is not a mark!"); - return Status(StatusCode::INVALID_ARGUMENT, ""); + return Status(StatusCode::INVALID_ARGUMENT, "Invalid mark"); } *status.mutable_stats() = client->Mark(args.mark().reset()); - stream->Write(status); + if (!stream->Write(status)) { + return Status(StatusCode::UNKNOWN, "Client couldn't respond to mark"); + } gpr_log(GPR_INFO, "RunClientBody: Mark response given"); } + gpr_log(GPR_INFO, "RunClientBody: Awaiting Threads Completion"); + client->AwaitThreadsCompletion(); + gpr_log(GPR_INFO, "RunClientBody: Returning"); return Status::OK; } @@ -232,10 +238,10 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service { ServerReaderWriter* stream) { ServerArgs args; if (!stream->Read(&args)) { - return Status(StatusCode::INVALID_ARGUMENT, ""); + return Status(StatusCode::INVALID_ARGUMENT, "Couldn't read server args"); } if (!args.has_setup()) { - return Status(StatusCode::INVALID_ARGUMENT, ""); + return 
Status(StatusCode::INVALID_ARGUMENT, "Bad server creation args"); } if (server_port_ != 0) { args.mutable_setup()->set_port(server_port_); @@ -243,24 +249,26 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service { gpr_log(GPR_INFO, "RunServerBody: about to create server"); auto server = CreateServer(args.setup()); if (!server) { - return Status(StatusCode::INVALID_ARGUMENT, ""); + return Status(StatusCode::INVALID_ARGUMENT, "Couldn't create server"); } gpr_log(GPR_INFO, "RunServerBody: server created"); ServerStatus status; status.set_port(server->port()); status.set_cores(server->cores()); if (!stream->Write(status)) { - return Status(StatusCode::UNKNOWN, ""); + return Status(StatusCode::UNKNOWN, "Server couldn't report init status"); } gpr_log(GPR_INFO, "RunServerBody: creation status reported"); while (stream->Read(&args)) { gpr_log(GPR_INFO, "RunServerBody: Message read"); if (!args.has_mark()) { gpr_log(GPR_INFO, "RunServerBody: Message not a mark!"); - return Status(StatusCode::INVALID_ARGUMENT, ""); + return Status(StatusCode::INVALID_ARGUMENT, "Invalid mark"); } *status.mutable_stats() = server->Mark(args.mark().reset()); - stream->Write(status); + if (!stream->Write(status)) { + return Status(StatusCode::UNKNOWN, "Server couldn't respond to mark"); + } gpr_log(GPR_INFO, "RunServerBody: Mark response given"); } diff --git a/test/cpp/qps/server_async.cc b/test/cpp/qps/server_async.cc index c9954d0d02d..dea87463312 100644 --- a/test/cpp/qps/server_async.cc +++ b/test/cpp/qps/server_async.cc @@ -102,7 +102,7 @@ class AsyncQpsServerTest : public Server { auto process_rpc_bound = std::bind(process_rpc, config.payload_config(), _1, _2); - for (int i = 0; i < 10000 / num_threads; i++) { + for (int i = 0; i < 15000; i++) { for (int j = 0; j < num_threads; j++) { if (request_unary_function) { auto request_unary = @@ -123,21 +123,24 @@ class AsyncQpsServerTest : public Server { for (int i = 0; i < num_threads; i++) { shutdown_state_.emplace_back(new PerThreadShutdownState()); - } - for (int i = 0; i < num_threads; i++) { threads_.emplace_back(&AsyncQpsServerTest::ThreadFunc, this, i); } } ~AsyncQpsServerTest() { for (auto ss = shutdown_state_.begin(); ss != shutdown_state_.end(); ++ss) { - (*ss)->set_shutdown(); + std::lock_guard lock((*ss)->mutex); + (*ss)->shutdown = true; + } + // TODO (vpai): Remove this deadline and allow Shutdown to finish properly + auto deadline = std::chrono::system_clock::now() + std::chrono::seconds(3); + server_->Shutdown(deadline); + for (auto cq = srv_cqs_.begin(); cq != srv_cqs_.end(); ++cq) { + (*cq)->Shutdown(); } - server_->Shutdown(); for (auto thr = threads_.begin(); thr != threads_.end(); thr++) { thr->join(); } for (auto cq = srv_cqs_.begin(); cq != srv_cqs_.end(); ++cq) { - (*cq)->Shutdown(); bool ok; void *got_tag; while ((*cq)->Next(&got_tag, &ok)) @@ -150,22 +153,24 @@ class AsyncQpsServerTest : public Server { } private: - void ThreadFunc(int rank) { + void ThreadFunc(int thread_idx) { // Wait until work is available or we are shutting down bool ok; void *got_tag; - while (srv_cqs_[rank]->Next(&got_tag, &ok)) { + while (srv_cqs_[thread_idx]->Next(&got_tag, &ok)) { ServerRpcContext *ctx = detag(got_tag); // The tag is a pointer to an RPC context to invoke - const bool still_going = ctx->RunNextState(ok); - if (!shutdown_state_[rank]->shutdown()) { - // this RPC context is done, so refresh it - if (!still_going) { - ctx->Reset(); - } - } else { + // Proceed while holding a lock to make sure that + // this thread isn't supposed to shut 
down + std::lock_guard l(shutdown_state_[thread_idx]->mutex); + if (shutdown_state_[thread_idx]->shutdown) { return; } + const bool still_going = ctx->RunNextState(ok); + // if this RPC context is done, refresh it + if (!still_going) { + ctx->Reset(); + } } return; } @@ -333,24 +338,12 @@ class AsyncQpsServerTest : public Server { ServiceType async_service_; std::forward_list contexts_; - class PerThreadShutdownState { - public: - PerThreadShutdownState() : shutdown_(false) {} - - bool shutdown() const { - std::lock_guard lock(mutex_); - return shutdown_; - } - - void set_shutdown() { - std::lock_guard lock(mutex_); - shutdown_ = true; - } - - private: - mutable std::mutex mutex_; - bool shutdown_; + struct PerThreadShutdownState { + mutable std::mutex mutex; + bool shutdown; + PerThreadShutdownState() : shutdown(false) {} }; + std::vector> shutdown_state_; }; diff --git a/test/cpp/util/grpc_cli.cc b/test/cpp/util/grpc_cli.cc index c52e48bae65..53529da782c 100644 --- a/test/cpp/util/grpc_cli.cc +++ b/test/cpp/util/grpc_cli.cc @@ -34,18 +34,21 @@ /* A command line tool to talk to a grpc server. Example of talking to grpc interop server: - grpc_cli call localhost:50051 UnaryCall src/proto/grpc/testing/test.proto \ - "response_size:10" --enable_ssl=false + grpc_cli call localhost:50051 UnaryCall "response_size:10" \ + --protofiles=src/proto/grpc/testing/test.proto --enable_ssl=false Options: - 1. --proto_path, if your proto file is not under current working directory, + 1. --protofiles, use this flag to provide a proto file if the server does + does not have the reflection service. + 2. --proto_path, if your proto file is not under current working directory, use this flag to provide a search root. It should work similar to the - counterpart in protoc. - 2. --metadata specifies metadata to be sent to the server, such as: + counterpart in protoc. This option is valid only when protofiles is + provided. + 3. --metadata specifies metadata to be sent to the server, such as: --metadata="MyHeaderKey1:Value1:MyHeaderKey2:Value2" - 3. --enable_ssl, whether to use tls. - 4. --use_auth, if set to true, attach a GoogleDefaultCredentials to the call - 3. --input_binary_file, a file containing the serialized request. The file + 4. --enable_ssl, whether to use tls. + 5. --use_auth, if set to true, attach a GoogleDefaultCredentials to the call + 6. --input_binary_file, a file containing the serialized request. The file can be generated by calling something like: protoc --proto_path=src/proto/grpc/testing/ \ --encode=grpc.testing.SimpleRequest \ @@ -53,7 +56,7 @@ < input.txt > input.bin If this is used and no proto file is provided in the argument list, the method string has to be exact in the form of /package.service/method. - 4. --output_binary_file, a file to write binary format response into, it can + 7. 
--output_binary_file, a file to write binary format response into, it can be later decoded using protoc: protoc --proto_path=src/proto/grpc/testing/ \ --decode=grpc.testing.SimpleResponse \ @@ -61,6 +64,7 @@ < output.bin > output.txt */ +#include #include #include #include @@ -86,6 +90,8 @@ DEFINE_string(output_binary_file, "", DEFINE_string(metadata, "", "Metadata to send to server, in the form of key1:val1:key2:val2"); DEFINE_string(proto_path, ".", "Path to look for the proto file."); +// TODO(zyc): support a list of input proto files +DEFINE_string(protofiles, "", "Name of the proto file."); void ParseMetadataFlag( std::multimap* client_metadata) { @@ -129,35 +135,61 @@ void PrintMetadata(const T& m, const grpc::string& message) { int main(int argc, char** argv) { grpc::testing::InitTest(&argc, &argv, true); - if (argc < 4 || argc == 5 || grpc::string(argv[1]) != "call") { + if (argc < 4 || grpc::string(argv[1]) != "call") { std::cout << "Usage: grpc_cli call server_host:port method_name " << "[proto file] [text format request] []" << std::endl; + return 1; } - grpc::string file_name; grpc::string request_text; grpc::string server_address(argv[2]); grpc::string method_name(argv[3]); std::unique_ptr parser; grpc::string serialized_request_proto; - if (argc == 6) { - file_name = argv[4]; - // TODO(yangg) read from stdin as well? - request_text = argv[5]; + if (argc == 5) { + request_text = argv[4]; + } + + std::shared_ptr creds; + if (!FLAGS_enable_ssl) { + creds = grpc::InsecureChannelCredentials(); + } else { + if (FLAGS_use_auth) { + creds = grpc::GoogleDefaultCredentials(); + } else { + creds = grpc::SslCredentials(grpc::SslCredentialsOptions()); + } } + std::shared_ptr channel = + grpc::CreateChannel(server_address, creds); if (request_text.empty() && FLAGS_input_binary_file.empty()) { - std::cout << "Missing input. Use text format input or " - << "--input_binary_file for serialized request" << std::endl; - return 1; - } else if (!request_text.empty()) { - parser.reset(new grpc::testing::ProtoFileParser(FLAGS_proto_path, file_name, - method_name)); + if (isatty(STDIN_FILENO)) { + std::cout << "reading request message from stdin..." 
<< std::endl; + } + std::stringstream input_stream; + input_stream << std::cin.rdbuf(); + request_text = input_stream.str(); + } + + if (!request_text.empty()) { + if (!FLAGS_protofiles.empty()) { + parser.reset(new grpc::testing::ProtoFileParser( + FLAGS_proto_path, FLAGS_protofiles, method_name)); + } else { + parser.reset(new grpc::testing::ProtoFileParser(channel, method_name)); + } method_name = parser->GetFullMethodName(); if (parser->HasError()) { return 1; } + + if (!FLAGS_input_binary_file.empty()) { + std::cout + << "warning: request given in argv, ignoring --input_binary_file" + << std::endl; + } } if (parser) { @@ -175,19 +207,6 @@ int main(int argc, char** argv) { } std::cout << "connecting to " << server_address << std::endl; - std::shared_ptr creds; - if (!FLAGS_enable_ssl) { - creds = grpc::InsecureChannelCredentials(); - } else { - if (FLAGS_use_auth) { - creds = grpc::GoogleDefaultCredentials(); - } else { - creds = grpc::SslCredentials(grpc::SslCredentialsOptions()); - } - } - std::shared_ptr channel = - grpc::CreateChannel(server_address, creds); - grpc::string serialized_response_proto; std::multimap client_metadata; std::multimap server_initial_metadata, @@ -219,7 +238,7 @@ int main(int argc, char** argv) { } } else { std::cout << "Rpc failed with status code " << s.error_code() - << " error message " << s.error_message() << std::endl; + << ", error message: " << s.error_message() << std::endl; } return 0; diff --git a/test/cpp/util/proto_file_parser.cc b/test/cpp/util/proto_file_parser.cc index 25aec329eb3..5b0d925e1c3 100644 --- a/test/cpp/util/proto_file_parser.cc +++ b/test/cpp/util/proto_file_parser.cc @@ -95,9 +95,48 @@ ProtoFileParser::ProtoFileParser(const grpc::string& proto_path, dynamic_factory_.reset( new google::protobuf::DynamicMessageFactory(importer_->pool())); + std::vector service_desc_list; + for (int i = 0; i < file_desc->service_count(); i++) { + service_desc_list.push_back(file_desc->service(i)); + } + InitProtoFileParser(method, service_desc_list); +} + +ProtoFileParser::ProtoFileParser(std::shared_ptr channel, + const grpc::string& method) + : has_error_(false), + desc_db_(new grpc::ProtoReflectionDescriptorDatabase(channel)), + desc_pool_(new google::protobuf::DescriptorPool(desc_db_.get())) { + std::vector service_list; + if (!desc_db_->GetServices(&service_list)) { + LogError( + "Failed to get services from the server, " + "it may not have the reflection service.\n" + "Please try to use the --protofiles option to provide a proto file."); + } + if (has_error_) { + return; + } + dynamic_factory_.reset( + new google::protobuf::DynamicMessageFactory(desc_pool_.get())); + + std::vector service_desc_list; + for (auto it = service_list.begin(); it != service_list.end(); it++) { + service_desc_list.push_back(desc_pool_->FindServiceByName(*it)); + } + InitProtoFileParser(method, service_desc_list); +} + +ProtoFileParser::~ProtoFileParser() {} + +void ProtoFileParser::InitProtoFileParser( + const grpc::string& method, + const std::vector + service_desc_list) { const google::protobuf::MethodDescriptor* method_descriptor = nullptr; - for (int i = 0; !method_descriptor && i < file_desc->service_count(); i++) { - const auto* service_desc = file_desc->service(i); + for (auto it = service_desc_list.begin(); it != service_desc_list.end(); + it++) { + const auto* service_desc = *it; for (int j = 0; j < service_desc->method_count(); j++) { const auto* method_desc = service_desc->method(j); if (MethodNameMatch(method_desc->full_name(), method)) { @@ -130,8 
+169,6 @@ ProtoFileParser::ProtoFileParser(const grpc::string& proto_path, dynamic_factory_->GetPrototype(method_descriptor->output_type())->New()); } -ProtoFileParser::~ProtoFileParser() {} - grpc::string ProtoFileParser::GetSerializedProto( const grpc::string& text_format_proto, bool is_request) { grpc::string serialized; @@ -143,7 +180,7 @@ grpc::string ProtoFileParser::GetSerializedProto( LogError("Failed to parse text format to proto."); return ""; } - ok = request_prototype_->SerializeToString(&serialized); + ok = msg->SerializeToString(&serialized); if (!ok) { LogError("Failed to serialize proto."); return ""; diff --git a/test/cpp/util/proto_file_parser.h b/test/cpp/util/proto_file_parser.h index 46cdd665038..b442d77db98 100644 --- a/test/cpp/util/proto_file_parser.h +++ b/test/cpp/util/proto_file_parser.h @@ -38,8 +38,10 @@ #include #include +#include #include "src/compiler/config.h" +#include "test/cpp/util/proto_reflection_descriptor_database.h" namespace grpc { namespace testing { @@ -53,6 +55,9 @@ class ProtoFileParser { // even just Method. It will log an error if there is ambiguity. ProtoFileParser(const grpc::string& proto_path, const grpc::string& file_name, const grpc::string& method); + + ProtoFileParser(std::shared_ptr channel, + const grpc::string& method); ~ProtoFileParser(); grpc::string GetFullMethodName() const { return full_method_name_; } @@ -68,12 +73,18 @@ class ProtoFileParser { void LogError(const grpc::string& error_msg); private: + void InitProtoFileParser( + const grpc::string& method, + const std::vector services); + bool has_error_; grpc::string request_text_; grpc::string full_method_name_; google::protobuf::compiler::DiskSourceTree source_tree_; std::unique_ptr error_printer_; std::unique_ptr importer_; + std::unique_ptr desc_db_; + std::unique_ptr desc_pool_; std::unique_ptr dynamic_factory_; std::unique_ptr request_prototype_; std::unique_ptr response_prototype_; diff --git a/test/cpp/util/proto_reflection_descriptor_database.cc b/test/cpp/util/proto_reflection_descriptor_database.cc index 25b720aee0a..8fd466feb06 100644 --- a/test/cpp/util/proto_reflection_descriptor_database.cc +++ b/test/cpp/util/proto_reflection_descriptor_database.cc @@ -53,10 +53,20 @@ ProtoReflectionDescriptorDatabase::ProtoReflectionDescriptorDatabase( std::shared_ptr channel) : stub_(ServerReflection::NewStub(channel)) {} -ProtoReflectionDescriptorDatabase::~ProtoReflectionDescriptorDatabase() {} +ProtoReflectionDescriptorDatabase::~ProtoReflectionDescriptorDatabase() { + if (stream_) { + stream_->WritesDone(); + Status status = stream_->Finish(); + if (!status.ok()) { + gpr_log(GPR_ERROR, + "ServerReflectionInfo rpc failed. 
Error code: %d, details: %s", + (int)status.error_code(), status.error_message().c_str()); + } + } +} bool ProtoReflectionDescriptorDatabase::FindFileByName( - const string& filename, google::protobuf::FileDescriptorProto* output) { + const string& filename, protobuf::FileDescriptorProto* output) { if (cached_db_.FindFileByName(filename, output)) { return true; } @@ -101,7 +111,7 @@ bool ProtoReflectionDescriptorDatabase::FindFileByName( } bool ProtoReflectionDescriptorDatabase::FindFileContainingSymbol( - const string& symbol_name, google::protobuf::FileDescriptorProto* output) { + const string& symbol_name, protobuf::FileDescriptorProto* output) { if (cached_db_.FindFileContainingSymbol(symbol_name, output)) { return true; } @@ -148,7 +158,7 @@ bool ProtoReflectionDescriptorDatabase::FindFileContainingSymbol( bool ProtoReflectionDescriptorDatabase::FindFileContainingExtension( const string& containing_type, int field_number, - google::protobuf::FileDescriptorProto* output) { + protobuf::FileDescriptorProto* output) { if (cached_db_.FindFileContainingExtension(containing_type, field_number, output)) { return true; @@ -276,10 +286,10 @@ bool ProtoReflectionDescriptorDatabase::GetServices( return false; } -const google::protobuf::FileDescriptorProto +const protobuf::FileDescriptorProto ProtoReflectionDescriptorDatabase::ParseFileDescriptorProtoResponse( const std::string& byte_fd_proto) { - google::protobuf::FileDescriptorProto file_desc_proto; + protobuf::FileDescriptorProto file_desc_proto; file_desc_proto.ParseFromString(byte_fd_proto); return file_desc_proto; } @@ -287,7 +297,7 @@ ProtoReflectionDescriptorDatabase::ParseFileDescriptorProtoResponse( void ProtoReflectionDescriptorDatabase::AddFileFromResponse( const grpc::reflection::v1alpha::FileDescriptorResponse& response) { for (int i = 0; i < response.file_descriptor_proto_size(); ++i) { - const google::protobuf::FileDescriptorProto file_proto = + const protobuf::FileDescriptorProto file_proto = ParseFileDescriptorProtoResponse(response.file_descriptor_proto(i)); if (known_files_.find(file_proto.name()) == known_files_.end()) { known_files_.insert(file_proto.name()); diff --git a/test/cpp/util/proto_reflection_descriptor_database.h b/test/cpp/util/proto_reflection_descriptor_database.h index 99c00675bb3..eb7cf4907db 100644 --- a/test/cpp/util/proto_reflection_descriptor_database.h +++ b/test/cpp/util/proto_reflection_descriptor_database.h @@ -38,19 +38,19 @@ #include #include -#include -#include -#include +// GRPC_NO_GENERATED_CODE indicates generated pb files should not be used +#ifdef GRPC_NO_GENERATED_CODE +#include "src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h" +#else #include +#endif // GRPC_NO_GENERATED_CODE #include - namespace grpc { // ProtoReflectionDescriptorDatabase takes a stub of ServerReflection and // provides the methods defined by DescriptorDatabase interfaces. It can be used // to feed a DescriptorPool instance. -class ProtoReflectionDescriptorDatabase - : public google::protobuf::DescriptorDatabase { +class ProtoReflectionDescriptorDatabase : public protobuf::DescriptorDatabase { public: explicit ProtoReflectionDescriptorDatabase( std::unique_ptr stub); @@ -65,14 +65,13 @@ class ProtoReflectionDescriptorDatabase // Find a file by file name. Fills in in *output and returns true if found. // Otherwise, returns false, leaving the contents of *output undefined. 
bool FindFileByName(const string& filename, - google::protobuf::FileDescriptorProto* output) - GRPC_OVERRIDE; + protobuf::FileDescriptorProto* output) GRPC_OVERRIDE; // Find the file that declares the given fully-qualified symbol name. // If found, fills in *output and returns true, otherwise returns false // and leaves *output undefined. bool FindFileContainingSymbol(const string& symbol_name, - google::protobuf::FileDescriptorProto* output) + protobuf::FileDescriptorProto* output) GRPC_OVERRIDE; // Find the file which defines an extension extending the given message type @@ -81,7 +80,7 @@ class ProtoReflectionDescriptorDatabase // must be a fully-qualified type name. bool FindFileContainingExtension( const string& containing_type, int field_number, - google::protobuf::FileDescriptorProto* output) GRPC_OVERRIDE; + protobuf::FileDescriptorProto* output) GRPC_OVERRIDE; // Finds the tag numbers used by all known extensions of // extendee_type, and appends them to output in an undefined @@ -102,7 +101,7 @@ class ProtoReflectionDescriptorDatabase grpc::reflection::v1alpha::ServerReflectionResponse> ClientStream; - const google::protobuf::FileDescriptorProto ParseFileDescriptorProtoResponse( + const protobuf::FileDescriptorProto ParseFileDescriptorProtoResponse( const std::string& byte_fd_proto); void AddFileFromResponse( @@ -123,7 +122,7 @@ class ProtoReflectionDescriptorDatabase std::unordered_map> cached_extension_numbers_; std::mutex stream_mutex_; - google::protobuf::SimpleDescriptorDatabase cached_db_; + protobuf::SimpleDescriptorDatabase cached_db_; }; } // namespace grpc diff --git a/test/cpp/util/slice_test.cc b/test/cpp/util/slice_test.cc index de7ff031ab8..45799ae157c 100644 --- a/test/cpp/util/slice_test.cc +++ b/test/cpp/util/slice_test.cc @@ -68,6 +68,16 @@ TEST_F(SliceTest, Empty) { CheckSlice(empty_slice, ""); } +TEST_F(SliceTest, Cslice) { + gpr_slice s = gpr_slice_from_copied_string(kContent); + Slice spp(s, Slice::STEAL_REF); + CheckSlice(spp, kContent); + gpr_slice c_slice = spp.c_slice(); + EXPECT_EQ(GPR_SLICE_START_PTR(s), GPR_SLICE_START_PTR(c_slice)); + EXPECT_EQ(GPR_SLICE_END_PTR(s), GPR_SLICE_END_PTR(c_slice)); + gpr_slice_unref(c_slice); +} + } // namespace } // namespace grpc diff --git a/test/distrib/python/run_distrib_test.sh b/test/distrib/python/run_distrib_test.sh index 8a983bc2488..0c2154838ff 100755 --- a/test/distrib/python/run_distrib_test.sh +++ b/test/distrib/python/run_distrib_test.sh @@ -33,34 +33,47 @@ set -ex cd $(dirname $0) # Pick up the source dist archive whatever its version is +SDIST_ARCHIVES=$EXTERNAL_GIT_ROOT/input_artifacts/grpcio-*.tar.gz BDIST_ARCHIVES=$EXTERNAL_GIT_ROOT/input_artifacts/grpcio-*.whl +TOOLS_SDIST_ARCHIVES=$EXTERNAL_GIT_ROOT/input_artifacts/grpcio_tools-*.tar.gz TOOLS_BDIST_ARCHIVES=$EXTERNAL_GIT_ROOT/input_artifacts/grpcio_tools-*.whl -if [ ! -f ${SDIST_ARCHIVE} ] -then - echo "Archive ${SDIST_ARCHIVE} does not exist." 
- exit 1 -fi +function make_virtualenv() { + virtualenv $1 + $1/bin/python -m pip install --upgrade six pip + $1/bin/python -m pip install cython +} -PYTHON=python2 -PIP=pip2 -which $PYTHON || PYTHON=python -which $PIP || PIP=pip +function at_least_one_installs() { + for file in "$@"; do + if python -m pip install $file; then + return 0 + fi + done + return -1 +} -# TODO(jtattermusch): this shouldn't be required -# TODO(jtattermusch): run the command twice to workaround docker-on-overlay -# issue https://github.com/docker/docker/issues/12327 -# (first attempt will fail when using docker with overlayFS) -${PIP} install --upgrade six pip || ${PIP} install --upgrade six pip +make_virtualenv bdist_test +make_virtualenv sdist_test -# At least one of the bdist packages has to succeed (whichever one matches the -# test machine, anyway). -for bdist in ${BDIST_ARCHIVES} ${TOOLS_BDIST_ARCHIVES}; do - ($PYTHON -m pip install $bdist) || true -done +# +# Install our distributions in order of dependencies +# + +(source bdist_test/bin/activate && at_least_one_installs ${BDIST_ARCHIVES}) +(source bdist_test/bin/activate && at_least_one_installs ${TOOLS_BDIST_ARCHIVES}) + +(source sdist_test/bin/activate && at_least_one_installs ${SDIST_ARCHIVES}) +(source sdist_test/bin/activate && at_least_one_installs ${TOOLS_SDIST_ARCHIVES}) + +# +# Test our distributions +# # TODO(jtattermusch): add a .proto file to the distribtest, generate python # code from it and then use the generated code from distribtest.py -$PYTHON -m grpc.tools.protoc +(source bdist_test/bin/activate && python -m grpc.tools.protoc --help) +(source sdist_test/bin/activate && python -m grpc.tools.protoc --help) -$PYTHON distribtest.py +(source bdist_test/bin/activate && python distribtest.py) +(source sdist_test/bin/activate && python distribtest.py) diff --git a/tools/cmake/gRPCConfig.cmake.in b/tools/cmake/gRPCConfig.cmake.in new file mode 100644 index 00000000000..48f06745798 --- /dev/null +++ b/tools/cmake/gRPCConfig.cmake.in @@ -0,0 +1,7 @@ +# Depend packages +@_gRPC_FIND_ZLIB@ +@_gRPC_FIND_PROTOBUF@ +@_gRPC_FIND_SSL@ + +# Targets +include(${CMAKE_CURRENT_LIST_DIR}/gRPCTargets.cmake) diff --git a/tools/cmake/gRPCConfigVersion.cmake.in b/tools/cmake/gRPCConfigVersion.cmake.in new file mode 100644 index 00000000000..f3c19fd403a --- /dev/null +++ b/tools/cmake/gRPCConfigVersion.cmake.in @@ -0,0 +1,11 @@ +set(PACKAGE_VERSION "@PACKAGE_VERSION@") + +# Check whether the requested PACKAGE_FIND_VERSION is compatible +if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + set(PACKAGE_VERSION_COMPATIBLE TRUE) + if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_EXACT TRUE) + endif() +endif() diff --git a/tools/distrib/python/docgen.py b/tools/distrib/python/docgen.py index 72c65ad14a8..15bd8d855f6 100755 --- a/tools/distrib/python/docgen.py +++ b/tools/distrib/python/docgen.py @@ -70,8 +70,9 @@ environment.update({ }) subprocess_arguments_list = [ - {'args': ['make'], 'cwd': PROJECT_ROOT}, {'args': ['virtualenv', VIRTUALENV_DIR], 'env': environment}, + {'args': [VIRTUALENV_PIP_PATH, 'install', '--upgrade', 'pip'], + 'env': environment}, {'args': [VIRTUALENV_PIP_PATH, 'install', '-r', REQUIREMENTS_PATH], 'env': environment}, {'args': [VIRTUALENV_PYTHON_PATH, SETUP_PATH, 'build'], 'env': environment}, diff --git a/tools/distrib/python/grpcio_tools/README.rst b/tools/distrib/python/grpcio_tools/README.rst index 8946a1d5b31..55521d17bb1 100644 --- 
a/tools/distrib/python/grpcio_tools/README.rst +++ b/tools/distrib/python/grpcio_tools/README.rst @@ -122,6 +122,7 @@ Help, I ... third_party/protobuf/src/google/protobuf/stubs/mathlimits.h:173:31: note: in expansion of macro 'SIGNED_INT_MAX' static const Type kPosMax = SIGNED_INT_MAX(Type); \\ ^ + And your toolchain is GCC (at the time of this writing, up through at least GCC 6.0), this is probably a bug where GCC chokes on constant expressions when the :code:`-fwrapv` flag is specified. You should consider setting your @@ -136,3 +137,42 @@ Given protobuf include directories :code:`$INCLUDE`, an output directory :: $ python -m grpc.tools.protoc -I$INCLUDE --python_out=$OUTPUT --grpc_python_out=$OUTPUT $PROTO_FILES + +To use as a build step in distutils-based projects, you may use the provided +command class in your :code:`setup.py`: + +:: + + setuptools.setup( + # ... + cmdclass={ + 'build_proto_modules': grpc.tools.command.BuildPackageProtos, + } + # ... + ) + +Invocation of the command will walk the project tree and transpile every +:code:`.proto` file into a :code:`_pb2.py` file in the same directory. + +Note that this particular approach requires :code:`grpcio-tools` to be +installed on the machine before the setup script is invoked (i.e. no +combination of :code:`setup_requires` or :code:`install_requires` will provide +access to :code:`grpc.tools.command.BuildPackageProtos` if it isn't already +installed). One way to work around this can be found in our +:code:`grpcio-health-checking` +`package `_: + +:: + + class BuildPackageProtos(setuptools.Command): + """Command to generate project *_pb2.py modules from proto files.""" + # ... + def run(self): + from grpc.tools import command + command.build_package_protos(self.distribution.package_dir['']) + +Now including :code:`grpcio-tools` in :code:`setup_requires` will provide the +command on-setup as desired. + +For more information on command classes, consult :code:`distutils` and +:code:`setuptools` documentation. diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/command.py b/tools/distrib/python/grpcio_tools/grpc/tools/command.py index ccf38b7d569..25200998357 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/command.py +++ b/tools/distrib/python/grpcio_tools/grpc/tools/command.py @@ -35,7 +35,26 @@ import setuptools from grpc.tools import protoc -class BuildProtoModules(setuptools.Command): +def build_package_protos(package_root): + proto_files = [] + inclusion_root = os.path.abspath(package_root) + for root, _, files in os.walk(inclusion_root): + for filename in files: + if filename.endswith('.proto'): + proto_files.append(os.path.abspath(os.path.join(root, filename))) + + for proto_file in proto_files: + command = [ + 'grpc.tools.protoc', + '--proto_path={}'.format(inclusion_root), + '--python_out={}'.format(inclusion_root), + '--grpc_python_out={}'.format(inclusion_root), + ] + [proto_file] + if protoc.main(command) != 0: + sys.stderr.write('warning: {} failed'.format(command)) + + +class BuildPackageProtos(setuptools.Command): """Command to generate project *_pb2.py modules from proto files.""" description = 'build grpc protobuf modules' @@ -52,19 +71,4 @@ class BuildProtoModules(setuptools.Command): # directory is provided as an 'include' directory. We assume it's the '' key # to `self.distribution.package_dir` (and get a key error if it's not # there). 
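    # run() below is now a thin wrapper around the module-level
    # build_package_protos() helper added above, which is what lets other
    # projects (see the grpcio-health-checking example in the README)
    # invoke the proto-generation step directly from their own setup.py.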
- proto_files = [] - inclusion_root = os.path.abspath(self.distribution.package_dir['']) - for root, _, files in os.walk(inclusion_root): - for filename in files: - if filename.endswith('.proto'): - proto_files.append(os.path.abspath(os.path.join(root, filename))) - - for proto_file in proto_files: - command = [ - 'grpc.tools.protoc', - '--proto_path={}'.format(inclusion_root), - '--python_out={}'.format(inclusion_root), - '--grpc_python_out={}'.format(inclusion_root), - ] + [proto_file] - if protoc.main(command) != 0: - sys.stderr.write('warning: {} failed'.format(command)) + build_package_protos(self.distribution.package_dir['']) diff --git a/tools/distrib/python/grpcio_tools/grpc_version.py b/tools/distrib/python/grpcio_tools/grpc_version.py index 4b1e7fcd584..79c40717ddf 100644 --- a/tools/distrib/python/grpcio_tools/grpc_version.py +++ b/tools/distrib/python/grpcio_tools/grpc_version.py @@ -29,4 +29,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!! -VERSION='0.16.0.dev0' +VERSION='1.1.0.dev0' diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py index e025158a82b..bb1f1cf085b 100644 --- a/tools/distrib/python/grpcio_tools/setup.py +++ b/tools/distrib/python/grpcio_tools/setup.py @@ -28,11 +28,13 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from distutils import extension +from distutils import util import errno import os import os.path import pkg_resources import platform +import re import shlex import shutil import sys @@ -51,15 +53,50 @@ import grpc_version PY3 = sys.version_info.major == 3 +# Environment variable to determine whether or not the Cython extension should +# *use* Cython or use the generated C files. Note that this requires the C files +# to have been generated by building first *with* Cython support. +BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False) + # There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are # entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support. # We use these environment variables to thus get around that without locking # ourselves in w.r.t. the multitude of operating systems this ought to build on. -# By default we assume a GCC-like compiler. -EXTRA_COMPILE_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_CFLAGS', - '-fno-wrapv -frtti -std=c++11')) -EXTRA_LINK_ARGS = shlex.split(os.environ.get('GRPC_PYTHON_LDFLAGS', - '-lpthread')) +# We can also use these variables as a way to inject environment-specific +# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a +# reasonable default. +EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None) +EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None) +if EXTRA_ENV_COMPILE_ARGS is None: + EXTRA_ENV_COMPILE_ARGS = '-fno-wrapv -frtti -std=c++11' + if 'win32' in sys.platform: + # We use define flags here and don't directly add to DEFINE_MACROS below to + # ensure that the expert user/builder has a way of turning it off (via the + # envvars) without adding yet more GRPC-specific envvars. 
+ # See https://sourceforge.net/p/mingw-w64/bugs/363/ + if '32' in platform.architecture()[0]: + EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s' + else: + EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64' +if EXTRA_ENV_LINK_ARGS is None: + EXTRA_ENV_LINK_ARGS = '-lpthread' + if 'win32' in sys.platform: + # TODO(atash) check if this is actually safe to just import and call on + # non-Windows (to avoid breaking import style) + from distutils.cygwinccompiler import get_msvcr + msvcr = get_msvcr()[0] + EXTRA_ENV_LINK_ARGS += ( + ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} ' + '-static'.format(msvcr=msvcr)) +EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS) +EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS) + +CC_FILES = [ + os.path.normpath(cc_file) for cc_file in protoc_lib_deps.CC_FILES] +PROTO_FILES = [ + os.path.normpath(proto_file) for proto_file in protoc_lib_deps.PROTO_FILES] +CC_INCLUDE = os.path.normpath(protoc_lib_deps.CC_INCLUDE) +PROTO_INCLUDE = os.path.normpath(protoc_lib_deps.PROTO_INCLUDE) GRPC_PYTHON_TOOLS_PACKAGE = 'grpc.tools' GRPC_PYTHON_PROTO_RESOURCES_NAME = '_proto' @@ -76,14 +113,18 @@ if 'darwin' in sys.platform and PY3: if mac_target and (pkg_resources.parse_version(mac_target) < pkg_resources.parse_version('10.9.0')): os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9' + os.environ['_PYTHON_HOST_PLATFORM'] = re.sub( + r'macosx-[0-9]+\.[0-9]+-(.+)', + r'macosx-10.9-\1', + util.get_platform()) def package_data(): tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace('.', os.path.sep) proto_resources_path = os.path.join(tools_path, GRPC_PYTHON_PROTO_RESOURCES_NAME) proto_files = [] - for proto_file in protoc_lib_deps.PROTO_FILES: - source = os.path.join(protoc_lib_deps.PROTO_INCLUDE, proto_file) + for proto_file in PROTO_FILES: + source = os.path.join(PROTO_INCLUDE, proto_file) target = os.path.join(proto_resources_path, proto_file) relative_target = os.path.join(GRPC_PYTHON_PROTO_RESOURCES_NAME, proto_file) try: @@ -97,44 +138,47 @@ def package_data(): proto_files.append(relative_target) return {GRPC_PYTHON_TOOLS_PACKAGE: proto_files} -def protoc_ext_module(): - plugin_sources = [ - 'grpc/tools/main.cc', - 'grpc_root/src/compiler/python_generator.cc'] + [ - os.path.join(protoc_lib_deps.CC_INCLUDE, cc_file) - for cc_file in protoc_lib_deps.CC_FILES] +def extension_modules(): + if BUILD_WITH_CYTHON: + plugin_sources = [os.path.join('grpc', 'tools', '_protoc_compiler.pyx')] + else: + plugin_sources = [os.path.join('grpc', 'tools', '_protoc_compiler.cpp')] + plugin_sources += [ + os.path.join('grpc', 'tools', 'main.cc'), + os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc')] + [ + os.path.join(CC_INCLUDE, cc_file) + for cc_file in CC_FILES] plugin_ext = extension.Extension( name='grpc.tools._protoc_compiler', - sources=['grpc/tools/_protoc_compiler.pyx'] + plugin_sources, + sources=plugin_sources, include_dirs=[ '.', 'grpc_root', - 'grpc_root/include', - protoc_lib_deps.CC_INCLUDE, + os.path.join('grpc_root', 'include'), + CC_INCLUDE, ], language='c++', define_macros=list(DEFINE_MACROS), extra_compile_args=list(EXTRA_COMPILE_ARGS), extra_link_args=list(EXTRA_LINK_ARGS), ) - return plugin_ext - -def maybe_cythonize(exts): - from Cython import Build - return Build.cythonize(exts) + extensions = [plugin_ext] + if BUILD_WITH_CYTHON: + from Cython import Build + return Build.cythonize(extensions) + else: + return extensions setuptools.setup( name='grpcio_tools', version=grpc_version.VERSION, 
license='3-clause BSD', - ext_modules=maybe_cythonize([ - protoc_ext_module(), - ]), + ext_modules=extension_modules(), packages=setuptools.find_packages('.'), namespace_packages=['grpc'], install_requires=[ 'protobuf>=3.0.0a3', - 'grpcio>=0.14.0', + 'grpcio>=0.15.0', ], package_data=package_data(), ) diff --git a/tools/distrib/python/make_grpcio_tools.py b/tools/distrib/python/make_grpcio_tools.py index fd9b38b084b..adf58445afe 100755 --- a/tools/distrib/python/make_grpcio_tools.py +++ b/tools/distrib/python/make_grpcio_tools.py @@ -29,12 +29,18 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from __future__ import print_function + +import errno +import filecmp +import glob import os import os.path import shutil import subprocess import sys import traceback +import uuid DEPS_FILE_CONTENT=""" # Copyright 2016, Google Inc. @@ -124,20 +130,88 @@ def get_deps(): proto_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT)) return deps_file_content +def long_path(path): + if os.name == 'nt': + return '\\\\?\\' + path + else: + return path + +def atomic_file_copy(src, dst): + """Based on the lock-free-whack-a-mole algorithm, depending on filesystem + renaming being atomic. Described at http://stackoverflow.com/a/28090883. + """ + try: + if filecmp.cmp(src, dst): + return + except: + pass + dst_dir = os.path.abspath(os.path.dirname(dst)) + dst_base = os.path.basename(dst) + this_id = str(uuid.uuid4()).replace('.', '-') + temporary_file = os.path.join(dst_dir, '{}.{}.tmp'.format(dst_base, this_id)) + mole_file = os.path.join(dst_dir, '{}.{}.mole.tmp'.format(dst_base, this_id)) + mole_pattern = os.path.join(dst_dir, '{}.*.mole.tmp'.format(dst_base)) + src = long_path(src) + dst = long_path(dst) + temporary_file = long_path(temporary_file) + mole_file = long_path(mole_file) + mole_pattern = long_path(mole_pattern) + shutil.copy2(src, temporary_file) + try: + os.rename(temporary_file, mole_file) + except: + print('Error moving temporary file {} to {}'.format(temporary_file, mole_file), file=sys.stderr) + print('while trying to copy file {} to {}'.format(src, dst), file=sys.stderr) + raise + for other_file in glob.glob(mole_pattern): + other_id = other_file.split('.')[-3] + if this_id == other_id: + pass + elif this_id < other_id: + try: + os.remove(other_file) + except: + pass + else: + try: + os.remove(mole_file) + except: + pass + this_id = other_id + mole_file = other_file + try: + if filecmp.cmp(src, dst): + try: + os.remove(mole_file) + except: + pass + return + except: + pass + try: + os.rename(mole_file, dst) + except: + pass + def main(): os.chdir(GRPC_ROOT) - for tree in [GRPC_PYTHON_PROTOBUF, - GRPC_PYTHON_PROTOC_PLUGINS, - GRPC_PYTHON_INCLUDE]: - try: - shutil.rmtree(tree) - except Exception as _: - pass - shutil.copytree(GRPC_PROTOBUF, GRPC_PYTHON_PROTOBUF) - shutil.copytree(GRPC_PROTOC_PLUGINS, GRPC_PYTHON_PROTOC_PLUGINS) - shutil.copytree(GRPC_INCLUDE, GRPC_PYTHON_INCLUDE) + for source, target in [ + (GRPC_PROTOBUF, GRPC_PYTHON_PROTOBUF), + (GRPC_PROTOC_PLUGINS, GRPC_PYTHON_PROTOC_PLUGINS), + (GRPC_INCLUDE, GRPC_PYTHON_INCLUDE)]: + for source_dir, _, files in os.walk(source): + target_dir = os.path.abspath(os.path.join(target, os.path.relpath(source_dir, source))) + try: + os.makedirs(target_dir) + except OSError as error: + if error.errno != errno.EEXIST: + raise + for relative_file in files: + source_file = os.path.abspath(os.path.join(source_dir, relative_file)) + target_file = 
os.path.abspath(os.path.join(target_dir, relative_file)) + atomic_file_copy(source_file, target_file) try: protoc_lib_deps_content = get_deps() diff --git a/tools/dockerfile/distribtest/python_arch_x64/Dockerfile b/tools/dockerfile/distribtest/python_arch_x64/Dockerfile index 2f79cc3017a..dff72eee97a 100644 --- a/tools/dockerfile/distribtest/python_arch_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_arch_x64/Dockerfile @@ -33,4 +33,4 @@ RUN pacman --noconfirm -Syy RUN pacman --noconfirm -S openssl RUN pacman --noconfirm -S python2 RUN pacman --noconfirm -S python2-pip - +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_centos6_x64/Dockerfile b/tools/dockerfile/distribtest/python_centos6_x64/Dockerfile index d4f473792e1..967450156cd 100644 --- a/tools/dockerfile/distribtest/python_centos6_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_centos6_x64/Dockerfile @@ -44,3 +44,5 @@ RUN curl https://bootstrap.pypa.io/get-pip.py | python - # "which" command required by python's run_distrib_test.sh RUN yum install -y which + +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_centos7_x64/Dockerfile b/tools/dockerfile/distribtest/python_centos7_x64/Dockerfile index ca64fa7bea9..0127fe1e28b 100644 --- a/tools/dockerfile/distribtest/python_centos7_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_centos7_x64/Dockerfile @@ -32,4 +32,4 @@ FROM centos:7 RUN yum install -y python RUN yum install -y epel-release RUN yum install -y python-pip - +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_fedora20_x64/Dockerfile b/tools/dockerfile/distribtest/python_fedora20_x64/Dockerfile index 8b0f769c263..3d3636e43da 100644 --- a/tools/dockerfile/distribtest/python_fedora20_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_fedora20_x64/Dockerfile @@ -35,3 +35,5 @@ RUN yum clean all && yum update -y && yum install -y python python-pip # Trying twice makes it work fine. # https://github.com/docker/docker/issues/10180 RUN pip install --upgrade six || pip install --upgrade six + +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_fedora21_x64/Dockerfile b/tools/dockerfile/distribtest/python_fedora21_x64/Dockerfile index fcbe053f1fc..0b1b6aeb350 100644 --- a/tools/dockerfile/distribtest/python_fedora21_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_fedora21_x64/Dockerfile @@ -40,3 +40,5 @@ RUN yum clean all && yum update -y && yum install -y python python-pip # Trying twice makes it work fine. 
# https://github.com/docker/docker/issues/10180 RUN pip2 install --upgrade six || pip2 install --upgrade six + +RUN pip2 install virtualenv diff --git a/tools/dockerfile/distribtest/python_fedora22_x64/Dockerfile b/tools/dockerfile/distribtest/python_fedora22_x64/Dockerfile index ddcacb4257d..4d75034c158 100644 --- a/tools/dockerfile/distribtest/python_fedora22_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_fedora22_x64/Dockerfile @@ -30,3 +30,4 @@ FROM fedora:22 RUN yum clean all && yum update -y && yum install -y python python-pip +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_fedora23_x64/Dockerfile b/tools/dockerfile/distribtest/python_fedora23_x64/Dockerfile index d45195e509c..a1bc9ba8d6e 100644 --- a/tools/dockerfile/distribtest/python_fedora23_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_fedora23_x64/Dockerfile @@ -30,3 +30,4 @@ FROM fedora:23 RUN yum clean all && yum update -y && yum install -y python python-pip +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_jessie_x64/Dockerfile b/tools/dockerfile/distribtest/python_jessie_x64/Dockerfile index 83df4ed4fae..7dc32a088ed 100644 --- a/tools/dockerfile/distribtest/python_jessie_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_jessie_x64/Dockerfile @@ -30,3 +30,4 @@ FROM debian:jessie RUN apt-get update && apt-get install -y python python-pip +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_jessie_x86/Dockerfile b/tools/dockerfile/distribtest/python_jessie_x86/Dockerfile index 19addb29120..04c1402e722 100644 --- a/tools/dockerfile/distribtest/python_jessie_x86/Dockerfile +++ b/tools/dockerfile/distribtest/python_jessie_x86/Dockerfile @@ -31,6 +31,8 @@ FROM 32bit/debian:jessie RUN apt-get update && apt-get install -y python python-pip +RUN pip install virtualenv + # docker is running on a 64-bit machine, so we need to # override "uname -m" to report i686 instead of x86_64, otherwise # python will choose a wrong binary package to install. 
diff --git a/tools/dockerfile/distribtest/python_opensuse_x64/Dockerfile b/tools/dockerfile/distribtest/python_opensuse_x64/Dockerfile index fe1406be98c..27159c72e33 100644 --- a/tools/dockerfile/distribtest/python_opensuse_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_opensuse_x64/Dockerfile @@ -38,3 +38,5 @@ RUN zypper --non-interactive install which # Without this, pip won't be able to connect to # https://pypi.python.org/simple/ RUN zypper --non-interactive install ca-certificates-mozilla + +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_ubuntu1204_x64/Dockerfile b/tools/dockerfile/distribtest/python_ubuntu1204_x64/Dockerfile index 4068fbe2bad..7a8c91b79bd 100644 --- a/tools/dockerfile/distribtest/python_ubuntu1204_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_ubuntu1204_x64/Dockerfile @@ -30,3 +30,5 @@ FROM ubuntu:12.04 RUN apt-get update -y && apt-get install -y python python-pip + +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_ubuntu1404_x64/Dockerfile b/tools/dockerfile/distribtest/python_ubuntu1404_x64/Dockerfile index 0858fb0c064..65189a44de9 100644 --- a/tools/dockerfile/distribtest/python_ubuntu1404_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_ubuntu1404_x64/Dockerfile @@ -30,3 +30,5 @@ FROM ubuntu:14.04 RUN apt-get update -y && apt-get install -y python python-pip + +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_ubuntu1504_x64/Dockerfile b/tools/dockerfile/distribtest/python_ubuntu1504_x64/Dockerfile index ed6ffddbec7..abf36c4a245 100644 --- a/tools/dockerfile/distribtest/python_ubuntu1504_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_ubuntu1504_x64/Dockerfile @@ -30,3 +30,5 @@ FROM ubuntu:15.04 RUN apt-get update -y && apt-get install -y python python-pip + +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_ubuntu1510_x64/Dockerfile b/tools/dockerfile/distribtest/python_ubuntu1510_x64/Dockerfile index 9e3e0c260f6..6e862d203b2 100644 --- a/tools/dockerfile/distribtest/python_ubuntu1510_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_ubuntu1510_x64/Dockerfile @@ -30,3 +30,5 @@ FROM ubuntu:15.10 RUN apt-get update -y && apt-get install -y python python-pip + +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_ubuntu1604_x64/Dockerfile b/tools/dockerfile/distribtest/python_ubuntu1604_x64/Dockerfile index 5098da8a26f..59f4feab553 100644 --- a/tools/dockerfile/distribtest/python_ubuntu1604_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_ubuntu1604_x64/Dockerfile @@ -30,3 +30,5 @@ FROM ubuntu:16.04 RUN apt-get update -y && apt-get install -y python python-pip + +RUN pip install virtualenv diff --git a/tools/dockerfile/distribtest/python_wheezy_x64/Dockerfile b/tools/dockerfile/distribtest/python_wheezy_x64/Dockerfile index 66165ee9296..bc8816d3050 100644 --- a/tools/dockerfile/distribtest/python_wheezy_x64/Dockerfile +++ b/tools/dockerfile/distribtest/python_wheezy_x64/Dockerfile @@ -30,3 +30,5 @@ FROM debian:wheezy RUN apt-get update -y && apt-get install -y python python-pip + +RUN pip install virtualenv diff --git a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh index eab7611b3f3..462c65ab5e9 100755 --- a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh +++ b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh @@ -31,7 +31,7 @@ set -e # directories to run against 
-DIRS="src/core/lib src/core/ext src/cpp test/core test/cpp include" +DIRS="src/core/lib src/core/ext src/cpp test/core test/cpp include src/compiler" # file matching patterns to check GLOB="*.h *.c *.cc" diff --git a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile index 150dde4f212..e3d52f0cb53 100644 --- a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================ # C# dependencies diff --git a/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile index bbd903e2696..aa77d5f1272 100644 --- a/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================= # C++ dependencies RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean diff --git a/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile index ec71a53c2d3..05e963d1e67 100644 --- a/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile @@ -32,5 +32,20 @@ FROM golang:1.5 # Using login shell removes Go from path, so we add it. RUN ln -s /usr/local/go/bin/go /usr/local/bin +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + # Define the default command. CMD ["bash"] diff --git a/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile index ec71a53c2d3..05e963d1e67 100644 --- a/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile @@ -32,5 +32,20 @@ FROM golang:1.5 # Using login shell removes Go from path, so we add it. 
RUN ln -s /usr/local/go/bin/go /usr/local/bin +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + # Define the default command. CMD ["bash"] diff --git a/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile index 252c9bc9286..b5fe54f9918 100644 --- a/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile @@ -47,6 +47,21 @@ ENV JAVA_HOME /usr/lib/jvm/java-8-oracle ENV PATH $PATH:$JAVA_HOME/bin +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + # Trigger download of as many Gradle artifacts as possible. RUN git clone --recursive --depth 1 https://github.com/grpc/grpc-java.git && \ diff --git a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile index be07094cd27..d9a75018295 100644 --- a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================== # Node dependencies diff --git a/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile index af83ee61646..0d6171c1705 100644 --- a/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_php/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2015, Google Inc. +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -83,12 +83,6 @@ RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc" # Install dependencies -RUN /bin/bash -l -c "echo 'deb http://packages.dotdeb.org wheezy-php55 all' \ - >> /etc/apt/sources.list.d/dotdeb.list" -RUN /bin/bash -l -c "echo 'deb-src http://packages.dotdeb.org wheezy-php55 all' \ - >> /etc/apt/sources.list.d/dotdeb.list" -RUN wget http://www.dotdeb.org/dotdeb.gpg -O- | apt-key add - - RUN apt-get update && apt-get install -y \ git php5 php5-dev phpunit unzip @@ -113,11 +107,6 @@ RUN /bin/bash -l -c "rvm all do gem install ronn rake" RUN curl -sS https://getcomposer.org/installer | php RUN mv composer.phar /usr/local/bin/composer -# As an attempt to work around #4212, try to prefetch Protobuf-PHP dependency -# into composer cache to prevent "composer install" from cloning on each build. 
-RUN git clone --mirror https://github.com/stanley-cheung/Protobuf-PHP.git \ - /root/.composer/cache/vcs/git-github.com-stanley-cheung-Protobuf-PHP.git/ - # Download the patched PHP protobuf so that PHP gRPC clients can be generated # from proto3 schemas. RUN git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php @@ -129,3 +118,4 @@ RUN /bin/bash -l -c "rvm use ruby-2.1 \ # Define the default command. CMD ["bash"] + diff --git a/tools/dockerfile/interoptest/grpc_interop_php7/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_php7/Dockerfile new file mode 100644 index 00000000000..be8f25f8ffa --- /dev/null +++ b/tools/dockerfile/interoptest/grpc_interop_php7/Dockerfile @@ -0,0 +1,125 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +FROM debian:jessie + +#================= +# PHP7 dependencies + +# Install Git and basic packages. 
+RUN apt-get update && apt-get install -y \ + autoconf \ + automake \ + build-essential \ + ccache \ + curl \ + git \ + libcurl4-openssl-dev \ + libgmp-dev \ + libgmp3-dev \ + libssl-dev \ + libtool \ + libxml2-dev \ + pkg-config \ + re2c \ + time \ + unzip \ + wget \ + zip && apt-get clean + +# Install other dependencies +RUN ln -sf /usr/include/x86_64-linux-gnu/gmp.h /usr/include/gmp.h +RUN wget http://ftp.gnu.org/gnu/bison/bison-2.6.4.tar.gz -O /var/local/bison-2.6.4.tar.gz +RUN cd /var/local \ + && tar -zxvf bison-2.6.4.tar.gz \ + && cd /var/local/bison-2.6.4 \ + && ./configure \ + && make \ + && make install + +# Compile PHP7 from source +RUN git clone https://github.com/php/php-src /var/local/git/php-src +RUN cd /var/local/git/php-src \ + && git checkout PHP-7.0.9 \ + && ./buildconf --force \ + && ./configure \ + --with-gmp \ + --with-openssl \ + --with-zlib \ + && make \ + && make install + +#================== +# Ruby dependencies + +# Install rvm +RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 +RUN \curl -sSL https://get.rvm.io | bash -s stable + +# Install Ruby 2.1 +RUN /bin/bash -l -c "rvm install ruby-2.1" +RUN /bin/bash -l -c "rvm use --default ruby-2.1" +RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc" +RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc" +RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc" +RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc" + +# Prepare ccache +RUN ln -s /usr/bin/ccache /usr/local/bin/gcc +RUN ln -s /usr/bin/ccache /usr/local/bin/g++ +RUN ln -s /usr/bin/ccache /usr/local/bin/cc +RUN ln -s /usr/bin/ccache /usr/local/bin/c++ +RUN ln -s /usr/bin/ccache /usr/local/bin/clang +RUN ln -s /usr/bin/ccache /usr/local/bin/clang++ + + +RUN mkdir /var/local/jenkins + +# ronn: a ruby tool used to convert markdown to man pages, used during the +# install of Protobuf extensions +# +# rake: a ruby version of make used to build the PHP Protobuf extension +RUN /bin/bash -l -c "rvm all do gem install ronn rake" + +# Install composer +RUN curl -sS https://getcomposer.org/installer | php +RUN mv composer.phar /usr/local/bin/composer + +# Download the patched PHP protobuf so that PHP gRPC clients can be generated +# from proto3 schemas. +RUN git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php + +RUN /bin/bash -l -c "rvm use ruby-2.1 \ + && cd /var/local/git/protobuf-php \ + && rvm all do rake pear:package version=1.0 \ + && pear install Protobuf-1.0.tgz" + +# Define the default command. +CMD ["bash"] + diff --git a/examples/python/helloworld/run_codegen.sh b/tools/dockerfile/interoptest/grpc_interop_php7/build_interop.sh similarity index 70% rename from examples/python/helloworld/run_codegen.sh rename to tools/dockerfile/interoptest/grpc_interop_php7/build_interop.sh index 34224e5c418..261dded2821 100755 --- a/examples/python/helloworld/run_codegen.sh +++ b/tools/dockerfile/interoptest/grpc_interop_php7/build_interop.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2015, Google Inc. +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,6 +27,26 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Builds PHP interop server and client in a base image. 
+set -ex + +mkdir -p /var/local/git +git clone --recursive /var/local/jenkins/grpc /var/local/git/grpc + +# copy service account keys if available +cp -r /var/local/jenkins/service_account $HOME || true + +cd /var/local/git/grpc +rvm --default use ruby-2.1 + +# gRPC core and protobuf need to be installed +make install + +(cd src/php/ext/grpc && phpize && ./configure && make) + +(cd third_party/protobuf && make install) + +(cd src/php && composer install) -# Runs the protoc with gRPC plugin to generate protocol messages and gRPC stubs. -python -m grpc.tools.protoc -I../../protos --python_out=. --grpc_python_out=. ../../protos/helloworld.proto +(cd src/php && protoc-gen-php -i tests/interop/ -o tests/interop/ tests/interop/test.proto) diff --git a/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile index 8e7319c200b..10a88916ada 100644 --- a/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile @@ -76,7 +76,7 @@ RUN apt-get update && apt-get install -y \ # Install Python packages from PyPI RUN pip install pip --upgrade RUN pip install virtualenv -RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 # Prepare ccache RUN ln -s /usr/bin/ccache /usr/local/bin/gcc diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile index 88b513032ac..dae64e5c8cb 100644 --- a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================== # Ruby dependencies diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile index 823fe948fb2..81e3fdc3804 100644 --- a/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile +++ b/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + # Prepare ccache RUN ln -s /usr/bin/ccache /usr/local/bin/gcc RUN ln -s /usr/bin/ccache /usr/local/bin/g++ diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile index 556a26ee139..e082da648b3 100644 --- a/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile +++ b/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build 
profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + # Prepare ccache RUN ln -s /usr/bin/ccache /usr/local/bin/gcc RUN ln -s /usr/bin/ccache /usr/local/bin/g++ diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile index 2a875f59f19..1e2b7d8c671 100644 --- a/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile +++ b/tools/dockerfile/stress_test/grpc_interop_stress_go/Dockerfile @@ -34,6 +34,21 @@ RUN apt-get update && apt-get install -y python-pip && apt-get clean RUN pip install --upgrade google-api-python-client +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + # Using login shell removes Go from path, so we add it. RUN ln -s /usr/local/go/bin/go /usr/local/bin diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile index 69bef1480c8..0c17ff595ef 100644 --- a/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile +++ b/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + # Prepare ccache RUN ln -s /usr/bin/ccache /usr/local/bin/gcc RUN ln -s /usr/bin/ccache /usr/local/bin/g++ diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile index 0738e95e9bc..0594f69a5b8 100644 --- a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile +++ b/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================== # Node dependencies diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile index 3092bd955e2..0716be5a9dd 100644 --- a/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile +++ b/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile @@ -63,6 +63,21 @@ RUN 
apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================== # Ruby dependencies @@ -88,12 +103,6 @@ RUN pip install --upgrade google-api-python-client # Install dependencies -RUN /bin/bash -l -c "echo 'deb http://packages.dotdeb.org wheezy-php55 all' \ - >> /etc/apt/sources.list.d/dotdeb.list" -RUN /bin/bash -l -c "echo 'deb-src http://packages.dotdeb.org wheezy-php55 all' \ - >> /etc/apt/sources.list.d/dotdeb.list" -RUN wget http://www.dotdeb.org/dotdeb.gpg -O- | apt-key add - - RUN apt-get update && apt-get install -y \ git php5 php5-dev phpunit unzip @@ -118,11 +127,6 @@ RUN /bin/bash -l -c "rvm all do gem install ronn rake" RUN curl -sS https://getcomposer.org/installer | php RUN mv composer.phar /usr/local/bin/composer -# As an attempt to work around #4212, try to prefetch Protobuf-PHP dependency -# into composer cache to prevent "composer install" from cloning on each build. -RUN git clone --mirror https://github.com/stanley-cheung/Protobuf-PHP.git \ - /root/.composer/cache/vcs/git-github.com-stanley-cheung-Protobuf-PHP.git/ - # Download the patched PHP protobuf so that PHP gRPC clients can be generated # from proto3 schemas. RUN git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile index ee6249d381e..20d2d3f57be 100644 --- a/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile +++ b/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile @@ -93,7 +93,7 @@ RUN apt-get update && apt-get install -y \ # Install Python packages from PyPI RUN pip install pip --upgrade RUN pip install virtualenv -RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 RUN pip install coverage diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile index 36b54ddafe3..f459153fe54 100644 --- a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile +++ b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + # Prepare ccache RUN ln -s /usr/bin/ccache /usr/local/bin/gcc RUN ln -s /usr/bin/ccache /usr/local/bin/g++ diff --git a/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile b/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile index 98515aa5d73..25c6fe6ec6a 100644 --- a/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile +++ b/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update 
&& apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================ # C# dependencies diff --git a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile index 150dde4f212..e3d52f0cb53 100644 --- a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================ # C# dependencies diff --git a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile index a8aa74dd0e0..67cee19914d 100644 --- a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================= # C++ dependencies RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean diff --git a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile index abd3e42f266..bee0849c67f 100644 --- a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile +++ b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================= # C++ dependencies RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean diff --git a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile index 5ef25e80b47..2b3f4af3e6d 100644 --- a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile +++ b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN 
apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================= # C++ dependencies RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean diff --git a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile index c65fc619778..2d282276d31 100644 --- a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile +++ b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================= # C++ dependencies RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean diff --git a/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile b/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile index 9d5dd52c18f..c25033387f9 100644 --- a/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile +++ b/tools/dockerfile/test/cxx_wheezy_x64/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================= # C++ dependencies RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean diff --git a/tools/dockerfile/test/fuzzer/Dockerfile b/tools/dockerfile/test/fuzzer/Dockerfile index 3ac134ad7da..bd04f07cea5 100644 --- a/tools/dockerfile/test/fuzzer/Dockerfile +++ b/tools/dockerfile/test/fuzzer/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================= # C++ dependencies RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean diff --git a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile index bd7728580fd..13f7c10f92f 100644 --- a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile @@ -100,12 +100,6 @@ RUN /bin/bash -l -c "nvm alias default 4" # Install dependencies -RUN /bin/bash -l -c "echo 'deb http://packages.dotdeb.org wheezy-php55 all' \ - >> /etc/apt/sources.list.d/dotdeb.list" -RUN /bin/bash -l 
-c "echo 'deb-src http://packages.dotdeb.org wheezy-php55 all' \ - >> /etc/apt/sources.list.d/dotdeb.list" -RUN wget http://www.dotdeb.org/dotdeb.gpg -O- | apt-key add - - RUN apt-get update && apt-get install -y \ git php5 php5-dev phpunit unzip @@ -137,7 +131,7 @@ RUN apt-get update && apt-get install -y \ # Install Python packages from PyPI RUN pip install pip --upgrade RUN pip install virtualenv -RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 # Prepare ccache RUN ln -s /usr/bin/ccache /usr/local/bin/gcc diff --git a/tools/dockerfile/test/node_jessie_x64/Dockerfile b/tools/dockerfile/test/node_jessie_x64/Dockerfile index be07094cd27..d9a75018295 100644 --- a/tools/dockerfile/test/node_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/node_jessie_x64/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================== # Node dependencies diff --git a/tools/dockerfile/test/php7_jessie_x64/Dockerfile b/tools/dockerfile/test/php7_jessie_x64/Dockerfile new file mode 100644 index 00000000000..221338956ef --- /dev/null +++ b/tools/dockerfile/test/php7_jessie_x64/Dockerfile @@ -0,0 +1,105 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +FROM debian:jessie + +#================= +# PHP7 dependencies + +# Install Git and basic packages. 
+RUN apt-get update && apt-get install -y \ + autoconf \ + automake \ + build-essential \ + ccache \ + curl \ + git \ + libcurl4-openssl-dev \ + libgmp-dev \ + libgmp3-dev \ + libssl-dev \ + libtool \ + libxml2-dev \ + pkg-config \ + re2c \ + time \ + unzip \ + wget \ + zip && apt-get clean + +# Install other dependencies +RUN ln -sf /usr/include/x86_64-linux-gnu/gmp.h /usr/include/gmp.h +RUN wget http://ftp.gnu.org/gnu/bison/bison-2.6.4.tar.gz -O /var/local/bison-2.6.4.tar.gz +RUN cd /var/local \ + && tar -zxvf bison-2.6.4.tar.gz \ + && cd /var/local/bison-2.6.4 \ + && ./configure \ + && make \ + && make install + +# Compile PHP7 from source +RUN git clone https://github.com/php/php-src /var/local/git/php-src +RUN cd /var/local/git/php-src \ + && git checkout PHP-7.0.9 \ + && ./buildconf --force \ + && ./configure \ + --with-gmp \ + --with-openssl \ + --with-zlib \ + && make \ + && make install + +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + +# Prepare ccache +RUN ln -s /usr/bin/ccache /usr/local/bin/gcc +RUN ln -s /usr/bin/ccache /usr/local/bin/g++ +RUN ln -s /usr/bin/ccache /usr/local/bin/cc +RUN ln -s /usr/bin/ccache /usr/local/bin/c++ +RUN ln -s /usr/bin/ccache /usr/local/bin/clang +RUN ln -s /usr/bin/ccache /usr/local/bin/clang++ + + +RUN mkdir /var/local/jenkins + +# Define the default command. +CMD ["bash"] diff --git a/tools/dockerfile/test/php_jessie_x64/Dockerfile b/tools/dockerfile/test/php_jessie_x64/Dockerfile index e477295722a..17ea36b76c4 100644 --- a/tools/dockerfile/test/php_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/php_jessie_x64/Dockerfile @@ -63,17 +63,26 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================= # PHP dependencies # Install dependencies -RUN /bin/bash -l -c "echo 'deb http://packages.dotdeb.org wheezy-php55 all' \ - >> /etc/apt/sources.list.d/dotdeb.list" -RUN /bin/bash -l -c "echo 'deb-src http://packages.dotdeb.org wheezy-php55 all' \ - >> /etc/apt/sources.list.d/dotdeb.list" -RUN wget http://www.dotdeb.org/dotdeb.gpg -O- | apt-key add - - RUN apt-get update && apt-get install -y \ git php5 php5-dev phpunit unzip diff --git a/tools/dockerfile/test/python_jessie_x64/Dockerfile b/tools/dockerfile/test/python_jessie_x64/Dockerfile index 8e7319c200b..10a88916ada 100644 --- a/tools/dockerfile/test/python_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/python_jessie_x64/Dockerfile @@ -76,7 +76,7 @@ RUN apt-get update && apt-get install -y \ # Install Python packages from PyPI RUN pip install pip --upgrade RUN pip install virtualenv -RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 # Prepare ccache RUN ln -s /usr/bin/ccache /usr/local/bin/gcc diff --git a/tools/dockerfile/test/python_pyenv_x64/Dockerfile 
b/tools/dockerfile/test/python_pyenv_x64/Dockerfile new file mode 100644 index 00000000000..abb5f3c89b3 --- /dev/null +++ b/tools/dockerfile/test/python_pyenv_x64/Dockerfile @@ -0,0 +1,112 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +FROM debian:jessie + +# Install Git and basic packages. +RUN apt-get update && apt-get install -y \ + autoconf \ + autotools-dev \ + build-essential \ + bzip2 \ + ccache \ + curl \ + gcc \ + gcc-multilib \ + git \ + golang \ + gyp \ + lcov \ + libc6 \ + libc6-dbg \ + libc6-dev \ + libgtest-dev \ + libtool \ + make \ + perl \ + strace \ + python-dev \ + python-setuptools \ + python-yaml \ + telnet \ + unzip \ + wget \ + zip && apt-get clean + +#================ +# Build profiling +RUN apt-get update && apt-get install -y time && apt-get clean + +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + +# Install dependencies for pyenv +RUN apt-get update && apt-get install -y \ + libbz2-dev \ + libncurses5-dev \ + libncursesw5-dev \ + libreadline-dev \ + libsqlite3-dev \ + libssl-dev \ + llvm \ + mercurial \ + zlib1g-dev && apt-get clean + +# Install Pyenv and dev Python versions 3.5 and 3.6 +RUN curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash +RUN pyenv update +RUN pyenv install 3.5-dev +RUN pyenv install 3.6-dev +RUN pyenv local 3.5-dev 3.6-dev + +# Prepare ccache +RUN ln -s /usr/bin/ccache /usr/local/bin/gcc +RUN ln -s /usr/bin/ccache /usr/local/bin/g++ +RUN ln -s /usr/bin/ccache /usr/local/bin/cc +RUN ln -s /usr/bin/ccache /usr/local/bin/c++ +RUN ln -s /usr/bin/ccache /usr/local/bin/clang +RUN ln -s /usr/bin/ccache /usr/local/bin/clang++ + + +RUN mkdir /var/local/jenkins + +# Define the default command. 
+CMD ["bash"] diff --git a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile index 88b513032ac..dae64e5c8cb 100644 --- a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #================== # Ruby dependencies diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile index 70a32c5586a..f4b4831a649 100644 --- a/tools/dockerfile/test/sanity/Dockerfile +++ b/tools/dockerfile/test/sanity/Dockerfile @@ -63,6 +63,21 @@ RUN apt-get update && apt-get install -y \ # Build profiling RUN apt-get update && apt-get install -y time && apt-get clean +#==================== +# Python dependencies + +# Install dependencies + +RUN apt-get update && apt-get install -y \ + python-all-dev \ + python3-all-dev \ + python-pip + +# Install Python packages from PyPI +RUN pip install pip --upgrade +RUN pip install virtualenv +RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0 + #======================== # Sanity test dependencies RUN apt-get update && apt-get install -y \ diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++ index de7acd7777e..a2415e1217c 100644 --- a/tools/doxygen/Doxyfile.c++ +++ b/tools/doxygen/Doxyfile.c++ @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.16.0-dev +PROJECT_NUMBER = 1.1.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 76bb3b6c59e..945298b964c 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.16.0-dev +PROJECT_NUMBER = 1.1.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core index 53ae4e4cf4b..e631c962b3d 100644 --- a/tools/doxygen/Doxyfile.core +++ b/tools/doxygen/Doxyfile.core @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = 0.16.0-dev +PROJECT_NUMBER = 1.1.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index b846237689e..62cf5cc8c7a 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.16.0-dev +PROJECT_NUMBER = 1.1.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a @@ -796,6 +796,7 @@ src/core/lib/channel/channel_stack_builder.h \ src/core/lib/channel/compress_filter.h \ src/core/lib/channel/connected_channel.h \ src/core/lib/channel/context.h \ +src/core/lib/channel/handshaker.h \ src/core/lib/channel/http_client_filter.h \ src/core/lib/channel/http_server_filter.h \ src/core/lib/compression/algorithm_metadata.h \ @@ -951,6 +952,7 @@ src/core/lib/channel/channel_stack.c \ src/core/lib/channel/channel_stack_builder.c \ src/core/lib/channel/compress_filter.c \ src/core/lib/channel/connected_channel.c \ +src/core/lib/channel/handshaker.c \ src/core/lib/channel/http_client_filter.c \ src/core/lib/channel/http_server_filter.c \ src/core/lib/compression/compression.c \ diff --git a/tools/gcp/stress_test/stress_test_utils.py b/tools/gcp/stress_test/stress_test_utils.py index b821fc8fcc1..be50af31845 100755 --- a/tools/gcp/stress_test/stress_test_utils.py +++ b/tools/gcp/stress_test/stress_test_utils.py @@ -121,7 +121,7 @@ class BigQueryHelper: if not page['jobComplete']: print('TIMEOUT ERROR: The query %s timed out. Current timeout value is' ' %d msec. 
Returning False (i.e assuming there are no failures)' - ) % (query, timeoout_msec) + ) % (query, timeout_msec) return False num_failures = int(page['totalRows']) diff --git a/tools/profiling/latency_profile/profile_analyzer.py b/tools/profiling/latency_profile/profile_analyzer.py index dad0712d402..48b8e9b950f 100755 --- a/tools/profiling/latency_profile/profile_analyzer.py +++ b/tools/profiling/latency_profile/profile_analyzer.py @@ -43,6 +43,7 @@ TIME_FROM_SCOPE_START = object() TIME_TO_SCOPE_END = object() TIME_FROM_STACK_START = object() TIME_TO_STACK_END = object() +TIME_FROM_LAST_IMPORTANT = object() argp = argparse.ArgumentParser(description='Process output of basic_prof builds') @@ -78,10 +79,14 @@ class ScopeBuilder(object): self.call_stack_builder.lines.append(line_item) def finish(self, line): - assert line['tag'] == self.top_line.tag, 'expected %s, got %s; thread=%s; t0=%f t1=%f' % (self.top_line.tag, line['tag'], line['thd'], self.top_line.start_time, line['t']) + assert line['tag'] == self.top_line.tag, ( + 'expected %s, got %s; thread=%s; t0=%f t1=%f' % + (self.top_line.tag, line['tag'], line['thd'], self.top_line.start_time, + line['t'])) final_time_stamp = line['t'] assert self.top_line.end_time is None self.top_line.end_time = final_time_stamp + self.top_line.important = self.top_line.important or line['imp'] assert SELF_TIME not in self.top_line.times self.top_line.times[SELF_TIME] = final_time_stamp - self.top_line.start_time for line in self.call_stack_builder.lines[self.first_child_pos:]: @@ -101,9 +106,14 @@ class CallStackBuilder(object): start_time = self.lines[0].start_time end_time = self.lines[0].end_time self.signature = self.signature.hexdigest() + last_important = start_time for line in self.lines: line.times[TIME_FROM_STACK_START] = line.start_time - start_time line.times[TIME_TO_STACK_END] = end_time - line.end_time + line.times[TIME_FROM_LAST_IMPORTANT] = line.start_time - last_important + if line.important: + last_important = line.end_time + last_important = end_time def add(self, line): line_type = line['type'] @@ -113,7 +123,9 @@ class CallStackBuilder(object): self.stk.append(ScopeBuilder(self, line)) return False elif line_type == '}': - assert self.stk, 'expected non-empty stack for closing %s; thread=%s; t=%f' % (line['tag'], line['thd'], line['t']) + assert self.stk, ( + 'expected non-empty stack for closing %s; thread=%s; t=%f' % + (line['tag'], line['thd'], line['t'])) self.stk.pop().finish(line) if not self.stk: self.finish() @@ -216,9 +228,16 @@ def time_format(idx): return '' return ent +BANNER = { + 'simple': 'Count: %(count)d', + 'html': '
<h1>Count: %(count)d</h1>
' +} + FORMAT = [ ('TAG', lambda line: '..'*line.indent + tidy_tag(line.tag)), ('LOC', lambda line: '%s:%d' % (line.filename[line.filename.rfind('/')+1:], line.fileline)), + ('IMP', lambda line: '*' if line.important else ''), + ('FROM_IMP', time_format(TIME_FROM_LAST_IMPORTANT)), ('FROM_STACK_START', time_format(TIME_FROM_STACK_START)), ('SELF', time_format(SELF_TIME)), ('TO_STACK_END', time_format(TIME_TO_STACK_END)), @@ -227,11 +246,6 @@ FORMAT = [ ('TO_SCOPE_END', time_format(TIME_TO_SCOPE_END)), ] -BANNER = { - 'simple': 'Count: %(count)d', - 'html': '
<h1>Count: %(count)d</h1>
' -} - if args.fmt == 'html': print '' print '' diff --git a/tools/run_tests/artifact_targets.py b/tools/run_tests/artifact_targets.py index bd1269ceb72..8550ee7b848 100644 --- a/tools/run_tests/artifact_targets.py +++ b/tools/run_tests/artifact_targets.py @@ -30,17 +30,20 @@ """Definition of targets to build artifacts.""" +import os.path +import sys + import jobset def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={}, - flake_retries=0, timeout_retries=0): + flake_retries=0, timeout_retries=0, timeout_seconds=30*60): """Creates jobspec for a task running under docker.""" environ = environ.copy() environ['RUN_COMMAND'] = shell_command docker_args=[] - for k,v in environ.iteritems(): + for k,v in environ.items(): docker_args += ['-e', '%s=%s' % (k, v)] docker_env = {'DOCKERFILE_DIR': dockerfile_dir, 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh', @@ -49,20 +52,20 @@ def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={}, cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args, environ=docker_env, shortname='build_artifact.%s' % (name), - timeout_seconds=30*60, + timeout_seconds=timeout_seconds, flake_retries=flake_retries, timeout_retries=timeout_retries) return jobspec def create_jobspec(name, cmdline, environ=None, shell=False, - flake_retries=0, timeout_retries=0): + flake_retries=0, timeout_retries=0, timeout_seconds=30*60): """Creates jobspec.""" jobspec = jobset.JobSpec( cmdline=cmdline, environ=environ, shortname='build_artifact.%s' % (name), - timeout_seconds=30*60, + timeout_seconds=timeout_seconds, flake_retries=flake_retries, timeout_retries=timeout_retries, shell=shell) @@ -76,27 +79,30 @@ _ARCH_FLAG_MAP = { 'x64': '-m64' } -python_version_arch_map = { - 'x86': 'Python27_32bits', - 'x64': 'Python27' +python_windows_version_arch_map = { + ('x86', '2.7'): 'Python27_32bits', + ('x64', '2.7'): 'Python27', + ('x86', '3.4'): 'Python34_32bits', + ('x64', '3.4'): 'Python34', } class PythonArtifact: """Builds Python artifacts.""" - def __init__(self, platform, arch, manylinux_build=None): + def __init__(self, platform, arch, python_version, manylinux_build=None): if manylinux_build: - self.name = 'python_%s_%s_%s' % (platform, arch, manylinux_build) + self.name = 'python%s_%s_%s_%s' % (python_version, platform, arch, manylinux_build) else: - self.name = 'python_%s_%s' % (platform, arch) + self.name = 'python%s_%s_%s' % (python_version, platform, arch) self.platform = platform self.arch = arch - self.labels = ['artifact', 'python', platform, arch] - self.python_version = python_version_arch_map[arch] + self.labels = ['artifact', 'python', python_version, platform, arch] + self.python_version = python_version + self.python_windows_prefix = python_windows_version_arch_map[arch, python_version] self.manylinux_build = manylinux_build def pre_build_jobspecs(self): - return [] + return [] def build_jobspec(self): environ = {} @@ -107,26 +113,27 @@ class PythonArtifact: # special places... environ['PYTHON'] = '/opt/python/{}/bin/python'.format(self.manylinux_build) environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.manylinux_build) - # Our docker image has all the prerequisites pip-installed already. - environ['SKIP_PIP_INSTALL'] = '1' # Platform autodetection for the manylinux1 image breaks so we set the # defines ourselves. # TODO(atash) get better platform-detection support in core so we don't # need to do this manually... 
environ['CFLAGS'] = '-DGPR_MANYLINUX1=1' + environ['BUILD_HEALTH_CHECKING'] = 'TRUE' + environ['BUILD_MANYLINUX_WHEEL'] = 'TRUE' return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_python_manylinux_%s' % self.arch, 'tools/run_tests/build_artifact_python.sh', - environ=environ) + environ=environ, + timeout_seconds=60*60) elif self.platform == 'windows': return create_jobspec(self.name, ['tools\\run_tests\\build_artifact_python.bat', - self.python_version, + self.python_windows_prefix, '32' if self.arch == 'x86' else '64' ], shell=True) else: - environ['SKIP_PIP_INSTALL'] = 'TRUE' + environ['PYTHON'] = 'python{}'.format(self.python_version) return create_jobspec(self.name, ['tools/run_tests/build_artifact_python.sh'], environ=environ) @@ -323,13 +330,18 @@ def targets(): for Cls in (CSharpExtArtifact, NodeExtArtifact, ProtocArtifact) for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')] + - [PythonArtifact('linux', 'x86', 'cp27-cp27m'), - PythonArtifact('linux', 'x86', 'cp27-cp27mu'), - PythonArtifact('linux', 'x64', 'cp27-cp27m'), - PythonArtifact('linux', 'x64', 'cp27-cp27mu'), - PythonArtifact('macos', 'x64'), - PythonArtifact('windows', 'x86'), - PythonArtifact('windows', 'x64'), + [PythonArtifact('linux', 'x86', '2.7', 'cp27-cp27m'), + PythonArtifact('linux', 'x86', '2.7', 'cp27-cp27mu'), + PythonArtifact('linux', 'x64', '2.7', 'cp27-cp27m'), + PythonArtifact('linux', 'x64', '2.7', 'cp27-cp27mu'), + PythonArtifact('macos', 'x64', '2.7'), + PythonArtifact('windows', 'x86', '2.7'), + PythonArtifact('windows', 'x64', '2.7'), + PythonArtifact('linux', 'x86', '3.4', 'cp34-cp34m'), + PythonArtifact('linux', 'x64', '3.4', 'cp34-cp34m'), + PythonArtifact('macos', 'x64', '3.4'), + PythonArtifact('windows', 'x86', '3.4'), + PythonArtifact('windows', 'x64', '3.4'), RubyArtifact('linux', 'x86'), RubyArtifact('linux', 'x64'), RubyArtifact('macos', 'x64'), diff --git a/tools/run_tests/build_artifact_python.bat b/tools/run_tests/build_artifact_python.bat index 7c8c2aa12d7..074a3c67819 100644 --- a/tools/run_tests/build_artifact_python.bat +++ b/tools/run_tests/build_artifact_python.bat @@ -34,29 +34,6 @@ pip install --upgrade six pip install --upgrade setuptools pip install -rrequirements.txt -@rem Because this is windows and *everything seems to hate Windows* we have to -@rem set all of these flags ourselves because Python won't help us (see the -@rem setup.py of the grpcio_tools project). -set GRPC_PYTHON_CFLAGS=-fno-wrapv -frtti -std=c++11 - -@rem See https://sourceforge.net/p/mingw-w64/bugs/363/ -if %2 == 32 ( - set GRPC_PYTHON_CFLAGS=%GRPC_PYTHON_CFLAGS% -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s -) else ( - set GRPC_PYTHON_CFLAGS=%GRPC_PYTHON_CFLAGS% -D_ftime=_ftime64 -D_timeb=__timeb64 -) - -@rem Further confusing things, MSYS2's mingw64 tries to dynamically link -@rem libgcc, libstdc++, and winpthreads. We have to override this or our -@rem extensions end up linking to MSYS2 DLLs, which the normal Python on -@rem Windows user won't have... and ON TOP OF THIS, there's MinGW's GCC default -@rem behavior of linking msvcrt.dll as the C runtime library, which we need to -@rem override so that Python's distutils doesn't link us against multiple C -@rem runtimes. -python -c "from distutils.cygwinccompiler import get_msvcr; print(get_msvcr()[0])" > temp.txt -set /p PYTHON_MSVCR= 10: - print 'Killing all QPS workers.' + print('Killing all QPS workers.') for job in jobs: job.kill() retries += 1 time.sleep(3) - print 'All QPS workers finished.' 
+ print('All QPS workers finished.') argp = argparse.ArgumentParser(description='Run performance tests.') diff --git a/tools/run_tests/run_stress_tests.py b/tools/run_tests/run_stress_tests.py index e42ee24ffb0..de4a22877ca 100755 --- a/tools/run_tests/run_stress_tests.py +++ b/tools/run_tests/run_stress_tests.py @@ -29,6 +29,8 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Run stress test in C++""" +from __future__ import print_function + import argparse import atexit import dockerjob @@ -93,7 +95,7 @@ def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None): # turn environ into -e docker args if environ: - for k, v in environ.iteritems(): + for k, v in environ.items(): docker_cmdline += ['-e', '%s=%s' % (k, v)] # set working directory @@ -140,7 +142,7 @@ def cloud_to_cloud_jobspec(language, '--num_channels_per_server=%s' % num_channels_per_server, '--metrics_port=%s' % metrics_port ])) - print cmdline + print(cmdline) cwd = language.client_cwd environ = language.global_env() if docker_image: @@ -287,7 +289,7 @@ try: (server_host, server_port) = server[1].split(':') server_addresses[server_name] = (server_host, server_port) - for server_name, server_address in server_addresses.iteritems(): + for server_name, server_address in server_addresses.items(): (server_host, server_port) = server_address for language in languages: test_job = cloud_to_cloud_jobspec( @@ -302,7 +304,7 @@ try: jobs.append(test_job) if not jobs: - print 'No jobs to run.' + print('No jobs to run.') for image in docker_images.itervalues(): dockerjob.remove_image(image, skip_nonexistent=True) sys.exit(1) @@ -317,12 +319,12 @@ try: finally: # Check if servers are still running. - for server, job in server_jobs.iteritems(): + for server, job in server_jobs.items(): if not job.is_running(): - print 'Server "%s" has exited prematurely.' % server + print('Server "%s" has exited prematurely.' 
% server) dockerjob.finish_jobs([j for j in server_jobs.itervalues()]) for image in docker_images.itervalues(): - print 'Removing docker image %s' % image + print('Removing docker image %s' % image) dockerjob.remove_image(image) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index b0e20698bde..542415d9085 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -30,6 +30,8 @@ """Run tests in parallel.""" +from __future__ import print_function + import argparse import ast import collections @@ -39,6 +41,7 @@ import json import multiprocessing import os import os.path +import pipes import platform import random import re @@ -48,7 +51,7 @@ import sys import tempfile import traceback import time -import urllib2 +from six.moves import urllib import uuid import jobset @@ -60,7 +63,9 @@ _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) os.chdir(_ROOT) -_FORCE_ENVIRON_FOR_WRAPPERS = {} +_FORCE_ENVIRON_FOR_WRAPPERS = { + 'GRPC_VERBOSITY': 'DEBUG', +} _POLLING_STRATEGIES = { @@ -72,6 +77,9 @@ def platform_string(): return jobset.platform_string() +_DEFAULT_TIMEOUT_SECONDS = 5 * 60 + + # SimpleConfig: just compile with CONFIG=config, and run the binary to test class Config(object): @@ -84,7 +92,7 @@ class Config(object): self.tool_prefix = tool_prefix self.timeout_multiplier = timeout_multiplier - def job_spec(self, cmdline, timeout_seconds=5*60, + def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS, shortname=None, environ={}, cpu_cost=1.0, flaky=False): """Construct a jobset.JobSpec for a test under this config @@ -93,7 +101,7 @@ class Config(object): would like to run """ actual_environ = self.environ.copy() - for k, v in environ.iteritems(): + for k, v in environ.items(): actual_environ[k] = v return jobset.JobSpec(cmdline=self.tool_prefix + cmdline, shortname=shortname, @@ -158,8 +166,9 @@ class CLanguage(object): for polling_strategy in polling_strategies: env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH': _ROOT + '/src/core/lib/tsi/test_creds/ca.pem', - 'GRPC_POLL_STRATEGY': polling_strategy} - shortname_ext = '' if polling_strategy=='all' else ' polling=%s' % polling_strategy + 'GRPC_POLL_STRATEGY': polling_strategy, + 'GRPC_VERBOSITY': 'DEBUG'} + shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy if self.config.build_config in target['exclude_configs']: continue if self.platform == 'windows': @@ -190,28 +199,26 @@ class CLanguage(object): assert line[1] == ' ' test = base + line.strip() cmdline = [binary] + ['--gtest_filter=%s' % test] - out.append(self.config.job_spec(cmdline, [binary], - shortname='%s:%s %s' % (binary, test, shortname_ext), + out.append(self.config.job_spec(cmdline, + shortname='%s --gtest_filter=%s %s' % (binary, test, shortname_ext), cpu_cost=target['cpu_cost'], environ=env)) else: cmdline = [binary] + target['args'] - out.append(self.config.job_spec(cmdline, [binary], - shortname=' '.join(cmdline) + shortname_ext, + out.append(self.config.job_spec(cmdline, + shortname=' '.join( + pipes.quote(arg) + for arg in cmdline) + + shortname_ext, cpu_cost=target['cpu_cost'], flaky=target.get('flaky', False), + timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS), environ=env)) elif self.args.regex == '.*' or self.platform == 'windows': - print '\nWARNING: binary not found, skipping', binary + print('\nWARNING: binary not found, skipping', binary) return sorted(out) def make_targets(self): - test_regex = self.args.regex - if 
self.platform != 'windows' and self.args.regex != '.*': - # use the regex to minimize the number of things to build - return [os.path.basename(target['name']) - for target in get_c_tests(False, self.test_lang) - if re.search(test_regex, '/' + target['name'])] if self.platform == 'windows': # don't build tools on windows just yet return ['buildtests_%s' % self.make_target] @@ -374,6 +381,42 @@ class PhpLanguage(object): return 'php' +class Php7Language(object): + + def configure(self, config, args): + self.config = config + self.args = args + _check_compiler(self.args.compiler, ['default']) + + def test_specs(self): + return [self.config.job_spec(['src/php/bin/run_tests.sh'], None, + environ=_FORCE_ENVIRON_FOR_WRAPPERS)] + + def pre_build_steps(self): + return [] + + def make_targets(self): + return ['static_c', 'shared_c'] + + def make_options(self): + return [] + + def build_steps(self): + return [['tools/run_tests/build_php.sh']] + + def post_tests_steps(self): + return [['tools/run_tests/post_tests_php.sh']] + + def makefile_name(self): + return 'Makefile' + + def dockerfile_dir(self): + return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch) + + def __str__(self): + return 'php7' + + class PythonConfig(collections.namedtuple('PythonConfig', [ 'name', 'build', 'run'])): """Tuple of commands (named s.t. 'what it says on the tin' applies)""" @@ -393,7 +436,7 @@ class PythonLanguage(object): return [self.config.job_spec( config.run, timeout_seconds=5*60, - environ=dict(environment.items() + + environ=dict(list(environment.items()) + [('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]), shortname='%s.test.%s' % (config.name, suite_name),) for suite_name in tests_json @@ -418,7 +461,10 @@ class PythonLanguage(object): return 'Makefile' def dockerfile_dir(self): - return 'tools/dockerfile/test/python_jessie_%s' % _docker_arch_suffix(self.args.arch) + return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch)) + + def python_manager_name(self): + return 'pyenv' if self.args.compiler in ['python3.5', 'python3.6'] else 'jessie' def _get_pythons(self, args): if args.arch == 'x86': @@ -453,6 +499,8 @@ class PythonLanguage(object): shell + runner + [os.path.join(name, venv_relative_python[0])]) python27_config = python_config_generator(name='py27', major='2', minor='7', bits=bits) python34_config = python_config_generator(name='py34', major='3', minor='4', bits=bits) + python35_config = python_config_generator(name='py35', major='3', minor='5', bits=bits) + python36_config = python_config_generator(name='py36', major='3', minor='6', bits=bits) if args.compiler == 'default': if os.name == 'nt': return (python27_config,) @@ -462,6 +510,10 @@ class PythonLanguage(object): return (python27_config,) elif args.compiler == 'python3.4': return (python34_config,) + elif args.compiler == 'python3.5': + return (python35_config,) + elif args.compiler == 'python3.6': + return (python36_config,) else: raise Exception('Compiler %s not supported.' 
% args.compiler) @@ -662,7 +714,7 @@ class ObjCLanguage(object): return [] def make_targets(self): - return ['grpc_objective_c_plugin', 'interop_server'] + return ['interop_server'] def make_options(self): return [] @@ -733,6 +785,7 @@ _LANGUAGES = { 'c': CLanguage('c', 'c'), 'node': NodeLanguage(), 'php': PhpLanguage(), + 'php7': Php7Language(), 'python': PythonLanguage(), 'ruby': RubyLanguage(), 'csharp': CSharpLanguage(), @@ -755,7 +808,7 @@ def _windows_arch_option(arch): elif arch == 'x64': return '/p:Platform=x64' else: - print 'Architecture %s not supported.' % arch + print('Architecture %s not supported.' % arch) sys.exit(1) @@ -773,11 +826,11 @@ def _check_arch_option(arch): elif runtime_arch == '32bit' and arch == 'x86': return else: - print 'Architecture %s does not match current runtime architecture.' % arch + print('Architecture %s does not match current runtime architecture.' % arch) sys.exit(1) else: if args.arch != 'default': - print 'Architecture %s not supported on current platform.' % args.arch + print('Architecture %s not supported on current platform.' % args.arch) sys.exit(1) @@ -791,7 +844,7 @@ def _windows_build_bat(compiler): elif compiler == 'vs2010': return 'vsprojects\\build_vs2010.bat' else: - print 'Compiler %s not supported.' % compiler + print('Compiler %s not supported.' % compiler) sys.exit(1) @@ -805,7 +858,7 @@ def _windows_toolset_option(compiler): elif compiler == 'vs2010': return '/p:PlatformToolset=v100' else: - print 'Compiler %s not supported.' % compiler + print('Compiler %s not supported.' % compiler) sys.exit(1) @@ -816,7 +869,7 @@ def _docker_arch_suffix(arch): elif arch == 'x86': return 'x86' else: - print 'Architecture %s not supported with current settings.' % arch + print('Architecture %s not supported with current settings.' % arch) sys.exit(1) @@ -893,7 +946,7 @@ argp.add_argument('--compiler', 'gcc4.4', 'gcc4.6', 'gcc4.9', 'gcc5.3', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'vs2010', 'vs2013', 'vs2015', - 'python2.7', 'python3.4', + 'python2.7', 'python3.4', 'python3.5', 'python3.6', 'node0.12', 'node4', 'node5', 'coreclr'], default='default', @@ -932,7 +985,7 @@ for spec in args.update_submodules: branch = spec[1] cwd = 'third_party/%s' % submodule def git(cmd, cwd=cwd): - print 'in %s: git %s' % (cwd, cmd) + print('in %s: git %s' % (cwd, cmd)) subprocess.check_call('git %s' % cmd, cwd=cwd, shell=True) git('fetch') git('checkout %s' % branch) @@ -943,8 +996,8 @@ if need_to_regenerate_projects: if jobset.platform_string() == 'linux': subprocess.check_call('tools/buildgen/generate_projects.sh', shell=True) else: - print 'WARNING: may need to regenerate projects, but since we are not on' - print ' Linux this step is being skipped. Compilation MAY fail.' + print('WARNING: may need to regenerate projects, but since we are not on') + print(' Linux this step is being skipped. Compilation MAY fail.') # grab config @@ -971,18 +1024,18 @@ for l in languages: language_make_options=[] if any(language.make_options() for language in languages): if not 'gcov' in args.config and len(languages) != 1: - print 'languages with custom make options cannot be built simultaneously with other languages' + print('languages with custom make options cannot be built simultaneously with other languages') sys.exit(1) else: language_make_options = next(iter(languages)).make_options() if args.use_docker: if not args.travis: - print 'Seen --use_docker flag, will run tests under docker.' 
- print - print 'IMPORTANT: The changes you are testing need to be locally committed' - print 'because only the committed changes in the current branch will be' - print 'copied to the docker environment.' + print('Seen --use_docker flag, will run tests under docker.') + print('') + print('IMPORTANT: The changes you are testing need to be locally committed') + print('because only the committed changes in the current branch will be') + print('copied to the docker environment.') time.sleep(5) dockerfile_dirs = set([l.dockerfile_dir() for l in languages]) @@ -1066,7 +1119,7 @@ build_steps = list(set( for l in languages for cmdline in l.pre_build_steps())) if make_targets: - make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.iteritems()) + make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items()) build_steps.extend(set(make_commands)) build_steps.extend(set( jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None) @@ -1081,6 +1134,18 @@ runs_per_test = args.runs_per_test forever = args.forever +def _shut_down_legacy_server(legacy_server_port): + try: + version = int(urllib.request.urlopen( + 'http://localhost:%d/version_number' % legacy_server_port, + timeout=10).read()) + except: + pass + else: + urllib.request.urlopen( + 'http://localhost:%d/quitquitquit' % legacy_server_port).read() + + def _shut_down_legacy_server(legacy_server_port): try: version = int(urllib2.urlopen( @@ -1099,29 +1164,29 @@ def _start_port_server(port_server_port): # if not running ==> start a new one # otherwise, leave it up try: - version = int(urllib2.urlopen( + version = int(urllib.request.urlopen( 'http://localhost:%d/version_number' % port_server_port, timeout=10).read()) - print 'detected port server running version %d' % version + print('detected port server running version %d' % version) running = True except Exception as e: - print 'failed to detect port server: %s' % sys.exc_info()[0] - print e.strerror + print('failed to detect port server: %s' % sys.exc_info()[0]) + print(e.strerror) running = False if running: current_version = int(subprocess.check_output( [sys.executable, os.path.abspath('tools/run_tests/port_server.py'), 'dump_version'])) - print 'my port server is version %d' % current_version + print('my port server is version %d' % current_version) running = (version >= current_version) if not running: - print 'port_server version mismatch: killing the old one' - urllib2.urlopen('http://localhost:%d/quitquitquit' % port_server_port).read() + print('port_server version mismatch: killing the old one') + urllib.request.urlopen('http://localhost:%d/quitquitquit' % port_server_port).read() time.sleep(1) if not running: fd, logfile = tempfile.mkstemp() os.close(fd) - print 'starting port_server, with log file %s' % logfile + print('starting port_server, with log file %s' % logfile) args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'), '-p', '%d' % port_server_port, '-l', logfile] env = dict(os.environ) @@ -1147,34 +1212,34 @@ def _start_port_server(port_server_port): waits = 0 while True: if waits > 10: - print 'killing port server due to excessive start up waits' + print('killing port server due to excessive start up waits') port_server.kill() if port_server.poll() is not None: - print 'port_server failed to start' + print('port_server failed to start') # try one final time: maybe 
another build managed to start one time.sleep(1) try: - urllib2.urlopen('http://localhost:%d/get' % port_server_port, + urllib.request.urlopen('http://localhost:%d/get' % port_server_port, timeout=1).read() - print 'last ditch attempt to contact port server succeeded' + print('last ditch attempt to contact port server succeeded') break except: traceback.print_exc() port_log = open(logfile, 'r').read() - print port_log + print(port_log) sys.exit(1) try: - urllib2.urlopen('http://localhost:%d/get' % port_server_port, + urllib.request.urlopen('http://localhost:%d/get' % port_server_port, timeout=1).read() - print 'port server is up and ready' + print('port server is up and ready') break except socket.timeout: - print 'waiting for port_server: timeout' + print('waiting for port_server: timeout') traceback.print_exc(); time.sleep(1) waits += 1 - except urllib2.URLError: - print 'waiting for port_server: urlerror' + except urllib.error.URLError: + print('waiting for port_server: urlerror') traceback.print_exc(); time.sleep(1) waits += 1 @@ -1271,8 +1336,6 @@ def _build_and_run( jobset.message( 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs), do_newline=True) - else: - jobset.message('PASSED', k, do_newline=True) finally: for antagonist in antagonists: antagonist.kill() diff --git a/tools/run_tests/sanity/check_sources_and_headers.py b/tools/run_tests/sanity/check_sources_and_headers.py index c028499ca63..28c1dc46d7f 100755 --- a/tools/run_tests/sanity/check_sources_and_headers.py +++ b/tools/run_tests/sanity/check_sources_and_headers.py @@ -55,7 +55,8 @@ def target_has_header(target, name): for dep in target['deps']: if target_has_header(get_target(dep), name): return True - if name == 'src/core/lib/profiling/stap_probes.h': + if name in ['src/core/lib/profiling/stap_probes.h', + 'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h']: return True return False diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index 2295371f4af..a6485dca6e9 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -1818,22 +1818,6 @@ "third_party": false, "type": "target" }, - { - "deps": [ - "gpr", - "gpr_test_util", - "grpc", - "grpc_test_util" - ], - "headers": [], - "language": "c", - "name": "workqueue_test", - "src": [ - "test/core/iomgr/workqueue_test.c" - ], - "third_party": false, - "type": "target" - }, { "deps": [ "gpr", @@ -2149,6 +2133,7 @@ "gpr_test_util", "grpc", "grpc++", + "grpc++_reflection", "grpc++_test_config", "grpc++_test_util", "grpc_cli_libs", @@ -4376,26 +4361,32 @@ { "deps": [ "grpc++", - "grpc++_codegen_proto" + "grpc++_reflection_proto" ], "headers": [ "include/grpc++/ext/proto_server_reflection_plugin.h", - "include/grpc++/ext/reflection.grpc.pb.h", - "include/grpc++/ext/reflection.pb.h", "src/cpp/ext/proto_server_reflection.h" ], "language": "c++", "name": "grpc++_reflection", "src": [ "include/grpc++/ext/proto_server_reflection_plugin.h", - "include/grpc++/ext/reflection.grpc.pb.h", - "include/grpc++/ext/reflection.pb.h", "src/cpp/ext/proto_server_reflection.cc", "src/cpp/ext/proto_server_reflection.h", - "src/cpp/ext/proto_server_reflection_plugin.cc", - "src/cpp/ext/reflection.grpc.pb.cc", - "src/cpp/ext/reflection.pb.cc" + "src/cpp/ext/proto_server_reflection_plugin.cc" + ], + "third_party": false, + "type": "lib" + }, + { + "deps": [], + "headers": [ + "src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h", + "src/proto/grpc/reflection/v1alpha/reflection.pb.h" ], + 
"language": "c++", + "name": "grpc++_reflection_codegen", + "src": [], "third_party": false, "type": "lib" }, @@ -4476,11 +4467,13 @@ { "deps": [ "grpc++", + "grpc++_reflection", "grpc_plugin_support" ], "headers": [ "test/cpp/util/cli_call.h", - "test/cpp/util/proto_file_parser.h" + "test/cpp/util/proto_file_parser.h", + "test/cpp/util/proto_reflection_descriptor_database.h" ], "language": "c++", "name": "grpc_cli_libs", @@ -4488,7 +4481,9 @@ "test/cpp/util/cli_call.cc", "test/cpp/util/cli_call.h", "test/cpp/util/proto_file_parser.cc", - "test/cpp/util/proto_file_parser.h" + "test/cpp/util/proto_file_parser.h", + "test/cpp/util/proto_reflection_descriptor_database.cc", + "test/cpp/util/proto_reflection_descriptor_database.h" ], "third_party": false, "type": "lib" @@ -5714,6 +5709,7 @@ "src/core/lib/channel/compress_filter.h", "src/core/lib/channel/connected_channel.h", "src/core/lib/channel/context.h", + "src/core/lib/channel/handshaker.h", "src/core/lib/channel/http_client_filter.h", "src/core/lib/channel/http_server_filter.h", "src/core/lib/compression/algorithm_metadata.h", @@ -5807,6 +5803,8 @@ "src/core/lib/channel/connected_channel.c", "src/core/lib/channel/connected_channel.h", "src/core/lib/channel/context.h", + "src/core/lib/channel/handshaker.c", + "src/core/lib/channel/handshaker.h", "src/core/lib/channel/http_client_filter.c", "src/core/lib/channel/http_client_filter.h", "src/core/lib/channel/http_server_filter.c", @@ -6769,5 +6767,24 @@ ], "third_party": false, "type": "filegroup" + }, + { + "deps": [ + "grpc++_codegen_proto" + ], + "headers": [ + "include/grpc++/ext/reflection.grpc.pb.h", + "include/grpc++/ext/reflection.pb.h" + ], + "language": "c++", + "name": "grpc++_reflection_proto", + "src": [ + "include/grpc++/ext/reflection.grpc.pb.h", + "include/grpc++/ext/reflection.pb.h", + "src/cpp/ext/reflection.grpc.pb.cc", + "src/cpp/ext/reflection.pb.cc" + ], + "third_party": false, + "type": "filegroup" } ] diff --git a/tools/run_tests/task_runner.py b/tools/run_tests/task_runner.py index b42aa17cbbe..2e3fa443b96 100755 --- a/tools/run_tests/task_runner.py +++ b/tools/run_tests/task_runner.py @@ -30,6 +30,8 @@ """Runs selected gRPC test/build tasks.""" +from __future__ import print_function + import argparse import atexit import jobset @@ -111,7 +113,7 @@ build_jobs = [] for target in targets: build_jobs.append(target.build_jobspec()) if not build_jobs: - print 'Nothing to build.' 
+ print('Nothing to build.') sys.exit(1) jobset.message('START', 'Building targets.', do_newline=True) diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json index abd25afa3ee..d5e6f50703a 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/tests.json @@ -1935,25 +1935,6 @@ "windows" ] }, - { - "args": [], - "ci_platforms": [ - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "flaky": false, - "gtest": false, - "language": "c", - "name": "workqueue_test", - "platforms": [ - "linux", - "mac", - "posix" - ] - }, { "args": [], "ci_platforms": [ @@ -27762,8 +27743,8 @@ }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_generic_async_streaming_ping_pong_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}" ], "boringssl": true, "ci_platforms": [ @@ -27784,12 +27765,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_secure" + "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_secure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_async_streaming_ping_pong_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": 
{}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}" ], "boringssl": true, "ci_platforms": [ @@ -27810,12 +27792,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_secure" + "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_secure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_async_unary_ping_pong_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}" ], "boringssl": true, "ci_platforms": [ @@ -27836,12 +27819,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_secure" + "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_secure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_sync_unary_ping_pong_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": 
\"foo.test.google.fr\"}, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"SYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_unary_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"SYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}" ], "boringssl": true, "ci_platforms": [ @@ -27862,12 +27846,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_secure" + "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_secure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" ], "boringssl": true, "ci_platforms": [ @@ -27888,12 +27873,13 @@ "posix", "windows" ], - "shortname": 
"json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_secure" + "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_secure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" ], "boringssl": true, "ci_platforms": [ @@ -27914,12 +27900,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_secure" + "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_secure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": 
{\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" ], "boringssl": true, "ci_platforms": [ @@ -27940,12 +27927,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_secure" + "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_secure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" ], "boringssl": true, "ci_platforms": [ @@ -27966,12 +27954,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_secure" + "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_secure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_generic_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": 
\"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}" ], "boringssl": true, "ci_platforms": [ @@ -27992,12 +27981,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_insecure" + "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_insecure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}" ], "boringssl": true, "ci_platforms": [ @@ -28018,12 +28008,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_insecure" + "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_insecure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_async_unary_ping_pong_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": 
\"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}" ], "boringssl": true, "ci_platforms": [ @@ -28044,12 +28035,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_insecure" + "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_insecure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_sync_unary_ping_pong_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"SYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_unary_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"SYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}]}" ], "boringssl": true, "ci_platforms": [ @@ -28070,12 +28062,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_insecure" + "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_insecure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, 
\"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" ], "boringssl": true, "ci_platforms": [ @@ -28096,12 +28089,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_insecure" + "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_insecure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" ], "boringssl": true, "ci_platforms": [ @@ -28122,12 +28116,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_insecure" + "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_insecure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": 
{\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" ], "boringssl": true, "ci_platforms": [ @@ -28148,12 +28143,13 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_insecure" + "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_insecure", + "timeout_seconds": 180 }, { "args": [ - "--scenario_json", - "'{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 1, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'" + "--scenarios_json", + "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" ], "boringssl": true, "ci_platforms": [ @@ -28174,7 +28170,8 @@ "posix", "windows" ], - "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_insecure" + "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_insecure", + "timeout_seconds": 180 }, { "args": [ diff --git a/vsprojects/vcxproj/grpc++_reflection_codegen/grpc++_reflection_codegen.vcxproj 
new file mode 100644
index 00000000000..d9e10c2d37e
--- /dev/null
+++ b/vsprojects/vcxproj/grpc++_reflection_codegen/grpc++_reflection_codegen.vcxproj
@@ -0,0 +1,168 @@
+
+
+
+ Debug
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ Win32
+
+
+ Release
+ x64
+
+
+
+ {C8A925BF-4373-D85D-60AE-96CDCBBF33F2}
+ true
+ $(SolutionDir)IntDir\$(MSBuildProjectName)\
+
+
+
+ v100
+
+
+ v110
+
+
+ v120
+
+
+ v140
+
+
+ StaticLibrary
+ true
+ Unicode
+
+
+ StaticLibrary
+ false
+ true
+ Unicode
+
+
+
+
+
+
+
+
+
+
+
+ grpc++_reflection_codegen
+
+
+ grpc++_reflection_codegen
+
+
+
+ NotUsing
+ Level3
+ Disabled
+ WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)
+ true
+ MultiThreadedDebug
+ true
+ None
+ false
+
+
+ Windows
+ true
+ false
+
+
+
+
+
+ NotUsing
+ Level3
+ Disabled
+ WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)
+ true
+ MultiThreadedDebug
+ true
+ None
+ false
+
+
+ Windows
+ true
+ false
+
+
+
+
+
+ NotUsing
+ Level3
+ MaxSpeed
+ WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)
+ true
+ true
+ true
+ MultiThreaded
+ true
+ None
+ false
+
+
+ Windows
+ true
+ false
+ true
+ true
+
+
+
+
+
+ NotUsing
+ Level3
+ MaxSpeed
+ WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)
+ true
+ true
+ true
+ MultiThreaded
+ true
+ None
+ false
+
+
+ Windows
+ true
+ false
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
+
+
+
+
diff --git a/vsprojects/vcxproj/grpc++_reflection_codegen/grpc++_reflection_codegen.vcxproj.filters b/vsprojects/vcxproj/grpc++_reflection_codegen/grpc++_reflection_codegen.vcxproj.filters
new file mode 100644
index 00000000000..577dcc77d8f
--- /dev/null
+++ b/vsprojects/vcxproj/grpc++_reflection_codegen/grpc++_reflection_codegen.vcxproj.filters
@@ -0,0 +1,27 @@
+
+
+
+
+ src\proto\grpc\reflection\v1alpha
+
+
+
+
+
+ {d6f45d49-92db-00f7-3dd4-e53f5768d80c}
+
+
+ {32b951f4-cef1-24a3-ffb9-bb229f0cdd6a}
+
+
+ {8fdcb9f3-4d86-2f49-5c15-c92e0e0f4fba}
+
+
+ {098a074c-f3de-2840-8009-1a3840af1efc}
+
+
+ {219ff371-7d3a-130c-5792-be36514a4e98}
+
+
+
+
diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj b/vsprojects/vcxproj/grpc/grpc.vcxproj
index a9e96dab462..33485d3dc91 100644
--- a/vsprojects/vcxproj/grpc/grpc.vcxproj
+++ b/vsprojects/vcxproj/grpc/grpc.vcxproj
@@ -305,6 +305,7 @@
+
@@ -468,6 +469,8 @@
+
+
diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters
index be77e53f457..a21d873b88f 100644
--- a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters
@@ -19,6 +19,9 @@
 src\core\lib\channel
+
+ src\core\lib\channel
+
 src\core\lib\channel
@@ -671,6 +674,9 @@
 src\core\lib\channel
+
+ src\core\lib\channel
+
 src\core\lib\channel
diff --git a/vsprojects/vcxproj/grpc_cli_libs/grpc_cli_libs.vcxproj b/vsprojects/vcxproj/grpc_cli_libs/grpc_cli_libs.vcxproj
index 39cb1e0cb58..d25c692e3e8 100644
--- a/vsprojects/vcxproj/grpc_cli_libs/grpc_cli_libs.vcxproj
+++ b/vsprojects/vcxproj/grpc_cli_libs/grpc_cli_libs.vcxproj
@@ -149,14 +149,20 @@
+
+
+
+
+ {5F575402-3F89-5D1A-6910-9DB8BF5D2BAB}
+
 {C187A093-A0FE-489D-A40A-6E33DE0F9FEB}
diff --git a/vsprojects/vcxproj/grpc_cli_libs/grpc_cli_libs.vcxproj.filters b/vsprojects/vcxproj/grpc_cli_libs/grpc_cli_libs.vcxproj.filters
index 55ef18bf306..4add8ed5e13 100644
--- a/vsprojects/vcxproj/grpc_cli_libs/grpc_cli_libs.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc_cli_libs/grpc_cli_libs.vcxproj.filters
@@ -7,6 +7,9 @@
 test\cpp\util
+
+ test\cpp\util
+
@@ -15,6 +18,9 @@
 test\cpp\util
+
+ test\cpp\util
+
diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj
index 1cfe06aec6d..3a3610d524d 100644
--- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj
+++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj
@@ -294,6 +294,7 @@
+
@@ -435,6 +436,8 @@
+
+
diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters
index c33c6650e70..bec089e860e 100644
--- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters
@@ -22,6 +22,9 @@
 src\core\lib\channel
+
+ src\core\lib\channel
+
 src\core\lib\channel
@@ -578,6 +581,9 @@
 src\core\lib\channel
+
+ src\core\lib\channel
+
 src\core\lib\channel
diff --git a/vsprojects/vcxproj/test/grpc_cli/grpc_cli.vcxproj b/vsprojects/vcxproj/test/grpc_cli/grpc_cli.vcxproj
index cd844d15794..9c8cdc54c25 100644
--- a/vsprojects/vcxproj/test/grpc_cli/grpc_cli.vcxproj
+++ b/vsprojects/vcxproj/test/grpc_cli/grpc_cli.vcxproj
@@ -173,6 +173,9 @@
 {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B}
+
+ {5F575402-3F89-5D1A-6910-9DB8BF5D2BAB}
+
 {C187A093-A0FE-489D-A40A-6E33DE0F9FEB}