Mirror of https://github.com/opencv/opencv.git
Commit 0fbd58bef9
324 changed files with 34970 additions and 9791 deletions
@@ -0,0 +1,163 @@
name: PR:4.x U20

on:
  pull_request:
    branches:
      - 4.x

env:
  EXTRA_CMAKE_OPTIONS: '-DBUILD_DOCS=ON -DPYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON -DENABLE_CCACHE=OFF'
  OPENCV_TEST_DATA_PATH: '/opencv_extra/testdata'
  OPENCV_DOCKER_WORKDIR: '/__w/opencv/opencv'
  PR_AUTHOR: ${{ github.event.pull_request.user.login }}
  SOURCE_BRANCH_NAME: ${{ github.head_ref }}
  TARGET_BRANCH_NAME: ${{ github.base_ref }}
  ANT_HOME: '/usr/share/ant'
  PYTHONPATH: /opencv-build/python_loader:$PYTHONPATH

jobs:
  BuildAndTest:
    runs-on: ubuntu-20.04
    defaults:
      run:
        shell: bash
    container:
      image: quay.io/asenyaev/opencv-ubuntu:20.04
    steps:
    - name: PR info
      run: |
        echo "PR Author: ${{ env.PR_AUTHOR }}"
        echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}"
        echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}"
    - name: Clean
      run: find . -mindepth 1 -delete
    - name: Fetch opencv
      uses: actions/checkout@v3
      with:
        repository: opencv/opencv
        ref: ${{ env.TARGET_BRANCH_NAME }}
        fetch-depth: 0
    - name: Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch
      run: |
        cd ${{ env.OPENCV_DOCKER_WORKDIR }}
        git config --global --add safe.directory ${{ env.OPENCV_DOCKER_WORKDIR }}
        git config user.email "opencv.ci"
        git config user.name "opencv.ci"
        git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}"
    - name: Clone opencv_extra
      run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} --depth 1 https://github.com/opencv/opencv_extra.git /opencv_extra
    - name: Configure OpenCV
      run: |
        cd /opencv-build
        cmake -G Ninja ${{ env.EXTRA_CMAKE_OPTIONS }} ${{ env.OPENCV_DOCKER_WORKDIR }}
    - name: Build OpenCV
      run: |
        cd /opencv-build
        ninja
    - name: Accuracy:calib3d
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_calib3d
    - name: Accuracy:core
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_core
    - name: Accuracy:dnn
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_dnn
    - name: Accuracy:features2d
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_features2d
    - name: Accuracy:flann
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_flann
    - name: Accuracy:gapi
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_gapi
    - name: Accuracy:highgui
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_highgui
    - name: Accuracy:imgcodecs
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_imgcodecs
    - name: Accuracy:imgproc
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_imgproc
    - name: Accuracy:ml
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_ml
    - name: Accuracy:objdetect
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_objdetect
    - name: Accuracy:photo
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_photo
    - name: Accuracy:stitching
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_stitching
    - name: Accuracy:video
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_video
    - name: Accuracy:videoio
      run: cd /opencv-build && xvfb-run -a bin/opencv_test_videoio
    - name: Performance:calib3d
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_calib3d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:core
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_core --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:dnn
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_dnn --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:features2d
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_features2d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:gapi
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_gapi --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:imgcodecs
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_imgcodecs --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:imgproc
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_imgproc --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:objdetect
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_objdetect --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:photo
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_photo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:stitching
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_stitching --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:video
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_video --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Performance:videoio
      run: cd /opencv-build && xvfb-run -a bin/opencv_perf_videoio --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
    - name: Python3
      run: |
        cd ${{ env.OPENCV_DOCKER_WORKDIR }}/modules/python/test
        python3 ./test.py --repo ../../../ -v
    - name: Java
      run: cd /opencv-build && xvfb-run -a python3 ${{ env.OPENCV_DOCKER_WORKDIR }}/modules/ts/misc/run.py . -a -t java
    - name: Save Unit Test Results
      uses: actions/upload-artifact@v3
      if: always()
      with:
        name: junit-html
        path: /opencv-build/java_test/testResults/junit-noframes.html
    - name: Pylint
      run: cd /opencv-build && cmake --build . --config release --target check_pylint -- -j4

  BuildContrib:
    runs-on: ubuntu-20.04
    defaults:
      run:
        shell: bash
    container:
      image: quay.io/asenyaev/opencv-ubuntu:20.04
    steps:
    - name: PR info
      run: |
        echo "PR Author: ${{ env.PR_AUTHOR }}"
        echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}"
        echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}"
    - name: Clean
      run: find . -mindepth 1 -delete
    - name: Fetch opencv
      uses: actions/checkout@v3
      with:
        repository: opencv/opencv
        ref: ${{ env.TARGET_BRANCH_NAME }}
        fetch-depth: 0
    - name: Merge opencv with a test branch
      run: |
        cd ${{ env.OPENCV_DOCKER_WORKDIR }}
        git config --global --add safe.directory ${{ env.OPENCV_DOCKER_WORKDIR }}
        git config user.email "opencv.ci"
        git config user.name "opencv.ci"
        git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}"
    - name: Clone opencv_contrib
      run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} --depth 1 https://github.com/opencv/opencv_contrib.git /opencv_contrib
    - name: Configure OpenCV Contrib
      run: |
        cd /opencv-contrib-build
        cmake -G Ninja ${{ env.EXTRA_CMAKE_OPTIONS }} -DOPENCV_EXTRA_MODULES_PATH=/opencv_contrib/modules ${{ env.OPENCV_DOCKER_WORKDIR }}
    - name: Build OpenCV Contrib
      run: |
        cd /opencv-contrib-build
        ninja
@@ -0,0 +1,104 @@
name: TIM-VX Backend

on:
  pull_request:
    branches: [ 4.x ]
    types: [ labeled, opened, synchronize, reopened ]

jobs:
  x86-simulator-build-test:
    runs-on: ubuntu-20.04
    # Docker image from https://hub.docker.com/r/yuentau/ocv_ubuntu
    container: docker.io/yuentau/ocv_ubuntu:20.04
    env:
      PR_AUTHOR: ${{ github.event.pull_request.user.login }}
      SOURCE_BRANCH_NAME: ${{ github.head_ref }}
      TARGET_BRANCH_NAME: ${{ github.base_ref }}
    steps:
    - name: info
      run: |
        echo "PR Author: ${{ env.PR_AUTHOR }}"
        echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}"
        echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}"
    - name: clean
      shell: bash
      run: find . -mindepth 1 -delete
    - name: fetch opencv
      uses: actions/checkout@v3
      with:
        repository: opencv/opencv
        ref: ${{ env.TARGET_BRANCH_NAME }}
        fetch-depth: 0
        path: opencv
    - name: merge opencv with test branch
      shell: bash
      run: |
        cd opencv
        git config user.email "opencv.ci"
        git config user.name "opencv.ci"
        git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}" --allow-unrelated-histories
    - name: configure
      run: |
        cmake -B build -DWITH_TIMVX=ON -DCMAKE_INSTALL_PREFIX=./install -DBUILD_SHARED_LIBS=ON -DBUILD_PERF_TESTS=ON -DBUILD_TESTS=ON -DBUILD_EXAMPLES=OFF -DBUILD_DOCS=OFF -DWITH_OPENCL=OFF opencv
    - name: build
      run: cmake --build build --target install -j $(nproc)

  khadas-vim3-tests:
    if: contains(github.event.pull_request.labels.*.name, 'category:dnn_timvx')
    concurrency:
      group: khadas-vim3
      cancel-in-progress: false
    runs-on: [self-hosted, Linux, ARM64, khadas-vim3]
    env:
      PR_AUTHOR: ${{ github.event.pull_request.user.login }}
      SOURCE_BRANCH_NAME: ${{ github.head_ref }}
      TARGET_BRANCH_NAME: ${{ github.base_ref }}
    steps:
    - name: info
      run: |
        echo "PR Author: ${{ env.PR_AUTHOR }}"
        echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}"
        echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}"
    - name: clean
      shell: bash
      run: find . -mindepth 1 -delete
    - name: fetch opencv
      uses: actions/checkout@v3
      with:
        repository: opencv/opencv
        ref: ${{ env.TARGET_BRANCH_NAME }}
        fetch-depth: 0
        path: opencv
    - name: merge opencv with test branch
      shell: bash
      run: |
        cd opencv
        git config user.email "opencv.ci"
        git config user.name "opencv.ci"
        git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv" "${{ env.SOURCE_BRANCH_NAME }}" --allow-unrelated-histories
    - name: fetch opencv_extra
      uses: actions/checkout@v3
      with:
        repository: opencv/opencv_extra
        path: opencv_extra
    - name: merge opencv_extra with test branch
      shell: bash
      run: |
        RET=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}")
        if [[ ! -z "$RET" ]]; then
          cd opencv_extra
          git config user.email "opencv.ci"
          git config user.name "opencv.ci"
          git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}" --allow-unrelated-histories
        else
          echo "no merge since ${{ env.PR_AUTHOR }}/opencv_extra does not have branch ${{ env.SOURCE_BRANCH_NAME }}"
        fi
    - name: configure
      run: |
        cmake -B build -D CMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=./install -DWITH_TIMVX=ON -DWITH_OPENCL=OFF -DWITH_EIGEN=OFF opencv
    - name: build
      run: cmake --build build --target opencv_test_dnn -j 4
    - name: unit tests for int8 layers
      run: |
        OPENCV_TEST_DATA_PATH=./opencv_extra/testdata ./build/bin/opencv_test_dnn --gtest_filter="Test_Int8_layers.*/1"
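For context, the int8 DNN tests above exercise the TIM-VX integration end to end. Below is a minimal C++ sketch of how an application would route inference through that backend; it assumes the DNN_BACKEND_TIMVX backend and DNN_TARGET_NPU target enums introduced by this integration, and the model and image file names are placeholders, not files shipped with this patch.

// Minimal sketch: run a quantized model through the TIM-VX backend on an NPU
// (e.g. the Khadas VIM3 used by the job above). Enum names are assumptions
// from this integration; file names are placeholders.
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    cv::Mat img = cv::imread("input.jpg");                   // placeholder input image
    if (img.empty()) { std::cerr << "no input image\n"; return 1; }

    cv::dnn::Net net = cv::dnn::readNet("model_int8.onnx");  // placeholder quantized model
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_TIMVX);    // route supported layers to TIM-VX
    net.setPreferableTarget(cv::dnn::DNN_TARGET_NPU);        // execute on the NPU

    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(224, 224));
    net.setInput(blob);
    cv::Mat out = net.forward();
    std::cout << "output elements: " << out.total() << std::endl;
    return 0;
}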
@@ -0,0 +1,73 @@
set(TIMVX_COMMIT_HASH "1d9c7ab941b3d8d9c4d28d80058402725731e3d6")
set(OCV_TIMVX_DIR "${OpenCV_BINARY_DIR}/3rdparty/libtim-vx")
set(OCV_TIMVX_SOURCE_PATH "${OCV_TIMVX_DIR}/TIM-VX-${TIMVX_COMMIT_HASH}")

# Download TIM-VX source code
if(EXISTS "${OCV_TIMVX_SOURCE_PATH}")
  message(STATUS "TIM-VX: Use cache of TIM-VX source code at ${OCV_TIMVX_SOURCE_PATH}")
  set(TIMVX_FOUND ON)
else()
  set(OCV_TIMVX_FILENAME "${TIMVX_COMMIT_HASH}.zip")
  set(OCV_TIMVX_URL "https://github.com/VeriSilicon/TIM-VX/archive/")
  set(timvx_zip_md5sum 92619cc4498014ac7a09834d5e33ebd5)

  ocv_download(FILENAME ${OCV_TIMVX_FILENAME}
               HASH ${timvx_zip_md5sum}
               URL "${OCV_TIMVX_URL}"
               DESTINATION_DIR "${OCV_TIMVX_DIR}"
               ID "TIM-VX"
               STATUS res
               UNPACK RELATIVE_URL)
  if(res)
    set(TIMVX_FOUND ON)
    message(STATUS "TIM-VX: Source code downloaded at ${OCV_TIMVX_SOURCE_PATH}.")
  else()
    set(TIMVX_FOUND OFF)
    message(STATUS "TIM-VX: Failed to download source code from github. Turning off TIMVX_FOUND")
    return()
  endif()
endif()

# Set the VIVANTE SDK, especially for x86_64, which comes along with the TIM-VX source code
if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64)
  set(VIVANTE_SDK_DIR "${OCV_TIMVX_SOURCE_PATH}/prebuilt-sdk/x86_64_linux")
  message(STATUS "TIM-VX: Build from source using prebuilt x86_64 VIVANTE SDK.")
endif()

# Verify that the requested VIVANTE SDK libraries are all found
find_vivante_sdk_libs(missing ${VIVANTE_SDK_DIR})
if(missing)
  message(STATUS "TIM-VX: Failed to find ${missing} in ${VIVANTE_SDK_DIR}/lib. Turning off TIMVX_VIV_FOUND")
  set(TIMVX_VIV_FOUND OFF)
else()
  message(STATUS "TIM-VX: dependent VIVANTE SDK libraries are found at ${VIVANTE_SDK_DIR}/lib.")
  set(TIMVX_VIV_FOUND ON)
endif()

if(TIMVX_VIV_FOUND)
  # vars used by TIM-VX CMake scripts
  set(EXTERNAL_VIV_SDK "${VIVANTE_SDK_DIR}" CACHE INTERNAL "" FORCE)
  set(VIV_SDK_DRIVER_PREFIX "lib" CACHE INTERNAL "" FORCE)
endif()

if(TIMVX_FOUND AND TIMVX_VIV_FOUND)
  set(BUILD_TIMVX ON)
else()
  return()
endif()

if(BUILD_TIMVX)
  set(HAVE_TIMVX 1)

  ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-parameter -Wstrict-prototypes -Wundef -Wsign-compare -Wmissing-prototypes -Wmissing-declarations -Wstrict-aliasing -Wunused-but-set-variable -Wmaybe-uninitialized -Wshadow -Wsuggest-override -Wswitch)
  ocv_warnings_disable(CMAKE_CXX_FLAGS -Wunused-parameter -Wstrict-prototypes -Wundef -Wsign-compare -Wunused-but-set-variable -Wshadow -Wsuggest-override -Wmissing-declarations -Wswitch)

  set(TIMVX_INC_DIR "${OCV_TIMVX_SOURCE_PATH}/include" CACHE INTERNAL "TIM-VX include directory")
  if(EXISTS "${OCV_TIMVX_SOURCE_PATH}/CMakeLists.txt")
    add_subdirectory("${OCV_TIMVX_SOURCE_PATH}" "${OCV_TIMVX_DIR}/build")
  else()
    message(WARNING "TIM-VX: Missing 'CMakeLists.txt' in the source code: ${OCV_TIMVX_SOURCE_PATH}")
  endif()
  ocv_install_target(tim-vx EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
  set(TIMVX_LIB "tim-vx")
endif()
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,69 @@
set(TIMVX_INSTALL_DIR "" CACHE PATH "Path to libtim-vx installation")
set(VIVANTE_SDK_DIR "" CACHE PATH "Path to VIVANTE SDK needed by TIM-VX.")
set(VIVANTE_SDK_LIB_CANDIDATES "OpenVX;VSC;GAL;ArchModelSw;NNArchPerf" CACHE STRING "VIVANTE SDK library candidates")

# Ensure VIVANTE SDK library candidates are present in the given search path
function(find_vivante_sdk_libs _viv_notfound _viv_search_path)
  foreach(one ${VIVANTE_SDK_LIB_CANDIDATES})
    # NO_DEFAULT_PATH is used to ensure the VIVANTE SDK libs come from a single source only
    find_library(VIV_${one}_LIB ${one} PATHS "${_viv_search_path}/lib" NO_DEFAULT_PATH)
    if(NOT VIV_${one}_LIB)
      list(APPEND _viv_notfound_list ${one})
    endif()
  endforeach()
  set(${_viv_notfound} ${_viv_notfound_list} PARENT_SCOPE)
endfunction()
# Default value for VIVANTE_SDK_DIR: /usr
if(NOT VIVANTE_SDK_DIR)
  set(VIVANTE_SDK_DIR "/usr")
endif()
# The environment variable VIVANTE_SDK_DIR overrides the one set in this script
if(DEFINED ENV{VIVANTE_SDK_DIR})
  set(VIVANTE_SDK_DIR $ENV{VIVANTE_SDK_DIR})
  message(STATUS "TIM-VX: Load VIVANTE_SDK_DIR from system environment: ${VIVANTE_SDK_DIR}")
endif()


# Compile against a pre-installed TIM-VX, or compile TIM-VX together with OpenCV from source
if(TIMVX_INSTALL_DIR AND NOT BUILD_TIMVX)
  message(STATUS "TIM-VX: Use binaries at ${TIMVX_INSTALL_DIR}")
  set(BUILD_TIMVX OFF)

  set(TIMVX_INC_DIR "${TIMVX_INSTALL_DIR}/include" CACHE INTERNAL "TIM-VX include directory")
  find_library(TIMVX_LIB "tim-vx" PATHS "${TIMVX_INSTALL_DIR}/lib")
  if(TIMVX_LIB)
    set(TIMVX_FOUND ON)
  else()
    set(TIMVX_FOUND OFF)
  endif()

  # Verify that the requested VIVANTE SDK libraries are all found
  find_vivante_sdk_libs(missing ${VIVANTE_SDK_DIR})
  if(missing)
    message(STATUS "TIM-VX: Failed to find ${missing} in ${VIVANTE_SDK_DIR}/lib. Turning off TIMVX_VIV_FOUND")
    set(TIMVX_VIV_FOUND OFF)
  else()
    message(STATUS "TIM-VX: dependent VIVANTE SDK libraries are found at ${VIVANTE_SDK_DIR}/lib.")
    set(TIMVX_VIV_FOUND ON)
  endif()
else()
  message(STATUS "TIM-VX: Build from source")
  include("${OpenCV_SOURCE_DIR}/3rdparty/libtim-vx/tim-vx.cmake")
endif()

if(TIMVX_FOUND AND TIMVX_VIV_FOUND)
  set(HAVE_TIMVX 1)

  message(STATUS "TIM-VX: Found TIM-VX includes: ${TIMVX_INC_DIR}")
  message(STATUS "TIM-VX: Found TIM-VX library: ${TIMVX_LIB}")
  set(TIMVX_LIBRARY ${TIMVX_LIB})
  set(TIMVX_INCLUDE_DIR ${TIMVX_INC_DIR})

  message(STATUS "TIM-VX: Found VIVANTE SDK libraries: ${VIVANTE_SDK_DIR}/lib")
  link_directories(${VIVANTE_SDK_DIR}/lib)
endif()

MARK_AS_ADVANCED(
  TIMVX_INC_DIR
  TIMVX_LIB
)
@@ -0,0 +1,91 @@
# Gitlab-style mirror
# CMake scripts look for opencv/opencv_3rdparty,
# OAID/Tengine, 01org/tbb (oneAPI/oneTBB), and opencv/ade
# at the location given by OPENCV_DOWNLOAD_MIRROR_URL
ocv_update(OPENCV_DOWNLOAD_MIRROR_URL "")

######
# Download via commit id
######
# Tengine
ocv_update(TENGINE_PKG_MD5_CUSTOM "")
ocv_update(TENGINE_PKG_MD5_ORIGINAL 23f61ebb1dd419f1207d8876496289c5) # same as tengine_md5sum for TENGINE commit of e89cf8870de2ff0a80cfe626c0b52b2a16fb302e
# NVIDIA_OPTICAL_FLOW
ocv_update(NVIDIA_OPTICAL_FLOW_PKG_MD5_GITCODE "")
ocv_update(NVIDIA_OPTICAL_FLOW_PKG_MD5_ORIGINAL a73cd48b18dcc0cc8933b30796074191)
# TIM-VX
ocv_update(TIM-VX_PKG_MD5_GITCODE "")
ocv_update(TIM-VX_PKG_MD5_ORIGINAL 92619cc4498014ac7a09834d5e33ebd5)

######
# Download from release page
#####
# TBB
ocv_update(TBB_RELEASE_CUSTOM "")
ocv_update(TBB_PKG_NAME_CUSTOM "")
ocv_update(TBB_PKG_MD5_CUSTOM "")
ocv_update(TBB_PKG_MD5_ORIGINAL 5af6f6c2a24c2043e62e47205e273b1f) # same as OPENCV_TBB_RELEASE_MD5 for TBB release of v2020.2
# ADE
ocv_update(ADE_RELEASE_CUSTOM "")
ocv_update(ADE_PKG_NAME_CUSTOM "")
ocv_update(ADE_PKG_MD5_CUSTOM "")
ocv_update(ADE_PKG_MD5_ORIGINAL b624b995ec9c439cbc2e9e6ee940d3a2) # same as ade_md5 for ADE release of v0.1.1f

macro(ocv_download_url_custom_usercontent OWNER)
  string(REPLACE "/" ";" DL_URL_split ${DL_URL})
  list(GET DL_URL_split 5 __COMMIT_ID)
  list(GET DL_URL_split 6 __PKG_NAME)
  set(DL_URL "https://${OPENCV_DOWNLOAD_MIRROR_URL}/${OWNER}/opencv_3rdparty/-/raw/${__COMMIT_ID}/${__PKG_NAME}/")
endmacro()
macro(ocv_download_url_custom_archive_commit_id)
  if("m${${DL_ID}_PKG_MD5_CUSTOM}" STREQUAL "m")
    message(WARNING "ocv_download: specify ${DL_ID}_PKG_MD5_CUSTOM to download ${DL_ID} from custom source.")
  elseif(${DL_ID}_PKG_MD5_ORIGINAL STREQUAL "${DL_HASH}")
    string(REPLACE "/" ";" DL_URL_split ${DL_URL})
    list(GET DL_URL_split 3 __OWNER)
    list(GET DL_URL_split 4 __REPO_NAME)
    set(DL_URL "https://${OPENCV_DOWNLOAD_MIRROR_URL}/${__OWNER}/${__REPO_NAME}/-/archive/")
    set(DL_HASH "${${DL_ID}_PKG_MD5_CUSTOM}")
  else()
    message(WARNING "No information about mirrors for downloading ${DL_FILENAME} from URL='${DL_URL}' and MD5=${DL_HASH}.")
  endif()
endmacro()
macro(ocv_download_url_custom_archive_release)
  if("m${${DL_ID}_RELEASE_CUSTOM}" STREQUAL "m")
    message(WARNING "ocv_download: specify ${DL_ID}_RELEASE_CUSTOM to download ${DL_ID} from custom source.")
    return()
  endif()
  if("m${${DL_ID}_PKG_NAME_CUSTOM}" STREQUAL "m")
    message(WARNING "ocv_download: specify ${DL_ID}_PKG_NAME_CUSTOM to download ${DL_ID} from custom source.")
    return()
  endif()
  if("m${${DL_ID}_PKG_MD5_CUSTOM}" STREQUAL "m")
    message(WARNING "ocv_download: specify ${DL_ID}_PKG_MD5_CUSTOM to download ${DL_ID} from custom source.")
    return()
  endif()
  string(REPLACE "/" ";" DL_URL_split ${DL_URL})
  list(GET DL_URL_split 3 __OWNER)
  list(GET DL_URL_split 4 __REPO_NAME)
  set(DL_URL "https://${OPENCV_DOWNLOAD_MIRROR_URL}/${__OWNER}/${__REPO_NAME}/-/archive/${${DL_ID}_RELEASE_CUSTOM}/${__REPO_NAME}-")
  set(DL_HASH "${${DL_ID}_PKG_MD5_CUSTOM}")
endmacro()

if("m${OPENCV_DOWNLOAD_MIRROR_URL}" STREQUAL "m")
  message(WARNING "ocv_download: specify OPENCV_DOWNLOAD_MIRROR_URL to use custom mirror.")
else()
  if((DL_ID STREQUAL "FFMPEG") OR (DL_ID STREQUAL "IPPICV") OR (DL_ID STREQUAL "data") OR (DL_ID STREQUAL "xfeatures2d/boostdesc") OR (DL_ID STREQUAL "xfeatures2d/vgg"))
    ocv_download_url_custom_usercontent(opencv)
  elseif(DL_ID STREQUAL "wechat_qrcode")
    ocv_download_url_gitcode_usercontent(WeChatCV)
  elseif((DL_ID STREQUAL "TENGINE") OR (DL_ID STREQUAL "NVIDIA_OPTICAL_FLOW") OR (DL_ID STREQUAL "TIM-VX"))
    ocv_download_url_custom_archive_commit_id()
  elseif(DL_ID STREQUAL "TBB")
    ocv_download_url_custom_archive_release()
    set(OPENCV_TBB_SUBDIR "${TBB_PKG_NAME_CUSTOM}" PARENT_SCOPE)
  elseif(DL_ID STREQUAL "ADE")
    ocv_download_url_custom_archive_release()
    set(ade_subdir "${ADE_PKG_NAME_CUSTOM}" PARENT_SCOPE)
  else()
    message(STATUS "ocv_download: Unknown download ID ${DL_ID} for using mirror ${OPENCV_DOWNLOAD_MIRROR_URL}. Use original source instead.")
  endif()
endif()
@@ -0,0 +1,86 @@
######
# Download via commit id
######
# Tengine
ocv_update(TENGINE_PKG_MD5_GITCODE 1b5908632b557275cd6e85b0c03f9690)
ocv_update(TENGINE_PKG_MD5_ORIGINAL 23f61ebb1dd419f1207d8876496289c5) # same as tengine_md5sum for TENGINE commit of e89cf8870de2ff0a80cfe626c0b52b2a16fb302e
# NVIDIA_OPTICAL_FLOW
ocv_update(NVIDIA_OPTICAL_FLOW_PKG_MD5_GITCODE 8d5b7eeb24d6ca9c6bcfdff4196d5b47)
ocv_update(NVIDIA_OPTICAL_FLOW_PKG_MD5_ORIGINAL a73cd48b18dcc0cc8933b30796074191)
# TIM-VX
ocv_update(TIM-VX_PKG_MD5_GITCODE 3f2a548b40b170668aaa60d4f60ba40b)
ocv_update(TIM-VX_PKG_MD5_ORIGINAL 92619cc4498014ac7a09834d5e33ebd5)

######
# Download from release page
#####
# TBB
ocv_update(TBB_RELEASE_GITCODE "v2020.2")
ocv_update(TBB_PKG_NAME_GITCODE "tbb-${TBB_RELEASE_GITCODE}")
ocv_update(TBB_PKG_MD5_GITCODE 4eeafdf16a90cb66e39a31c8d6c6804e)
ocv_update(TBB_PKG_MD5_ORIGINAL 5af6f6c2a24c2043e62e47205e273b1f) # same as OPENCV_TBB_RELEASE_MD5 for TBB release of v2020.2
# ADE
ocv_update(ADE_RELEASE_GITCODE "v0.1.1f")
ocv_update(ADE_PKG_NAME_GITCODE "ade-${ADE_RELEASE_GITCODE}")
ocv_update(ADE_PKG_MD5_GITCODE c12909e0ccfa93138c820ba91ff37b3c)
ocv_update(ADE_PKG_MD5_ORIGINAL b624b995ec9c439cbc2e9e6ee940d3a2) # same as ade_md5 for ADE release of v0.1.1f

#
# Replace download links for packages in opencv/opencv_3rdparty:
# 1. Extract the commit id and package name from DL_URL.
# 2. Put them into the placeholders of the new DL_URL.
#
macro(ocv_download_url_gitcode_usercontent OWNER)
  string(REPLACE "/" ";" DL_URL_split ${DL_URL})
  list(GET DL_URL_split 5 __COMMIT_ID)
  list(GET DL_URL_split 6 __PKG_NAME)
  set(DL_URL "https://gitcode.net/${OWNER}/opencv_3rdparty/-/raw/${__COMMIT_ID}/")
  if(__PKG_NAME)
    set(DL_URL "${DL_URL}${__PKG_NAME}/")
  endif()
endmacro()
#
# Replace download links and checksums for archives/releases in other repositories:
# 1. Check whether the versions match. If they do not, download from GitHub instead.
# 2. Extract the repo owner and repo name from DL_URL.
# 3. Put the repo owner and repo name into the placeholders of the new DL_URL.
# 4. Replace DL_HASH with the checksum of the archive served by gitcode.net.
#
macro(ocv_download_url_gitcode_archive_commit_id)
  if(DL_HASH STREQUAL "${${DL_ID}_PKG_MD5_ORIGINAL}")
    string(REPLACE "/" ";" DL_URL_split ${DL_URL})
    list(GET DL_URL_split 3 __OWNER)
    list(GET DL_URL_split 4 __REPO_NAME)
    set(DL_URL "https://gitcode.net/mirrors/${__OWNER}/${__REPO_NAME}/-/archive/")
    set(DL_HASH "${${DL_ID}_PKG_MD5_GITCODE}")
  else()
    message(WARNING "Package ${DL_ID} from mirror gitcode.net is outdated and will be downloaded from github.com instead.")
  endif()
endmacro()
macro(ocv_download_url_gitcode_archive_release)
  if(DL_HASH STREQUAL "${${DL_ID}_PKG_MD5_ORIGINAL}")
    string(REPLACE "/" ";" DL_URL_split ${DL_URL})
    list(GET DL_URL_split 3 __OWNER)
    list(GET DL_URL_split 4 __REPO_NAME)
    set(DL_URL "https://gitcode.net/${__OWNER}/${__REPO_NAME}/-/archive/${${DL_ID}_RELEASE_GITCODE}/${__REPO_NAME}-")
    set(DL_HASH "${${DL_ID}_PKG_MD5_GITCODE}")
  else()
    message(WARNING "Package ${DL_ID} from mirror gitcode.net is outdated and will be downloaded from github.com instead.")
  endif()
endmacro()

if((DL_ID STREQUAL "FFMPEG") OR (DL_ID STREQUAL "IPPICV") OR (DL_ID STREQUAL "data") OR (DL_ID STREQUAL "xfeatures2d/boostdesc") OR (DL_ID STREQUAL "xfeatures2d/vgg"))
  ocv_download_url_gitcode_usercontent(opencv)
elseif(DL_ID STREQUAL "wechat_qrcode")
  ocv_download_url_gitcode_usercontent(mirrors/WeChatCV)
elseif((DL_ID STREQUAL "TENGINE") OR (DL_ID STREQUAL "NVIDIA_OPTICAL_FLOW") OR (DL_ID STREQUAL "TIM-VX"))
  ocv_download_url_gitcode_archive_commit_id()
elseif(DL_ID STREQUAL "TBB")
  ocv_download_url_gitcode_archive_release()
  set(OPENCV_TBB_SUBDIR "${TBB_PKG_NAME_GITCODE}" PARENT_SCOPE)
elseif(DL_ID STREQUAL "ADE")
  ocv_download_url_gitcode_archive_release()
  set(ade_subdir "${ADE_PKG_NAME_GITCODE}" PARENT_SCOPE)
else()
  message(STATUS "ocv_download: Unknown download ID ${DL_ID} for using mirror gitcode.net. Use original source instead.")
endif()
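The mirror macros above (both the custom and gitcode.net variants) rely on fixed token positions after splitting DL_URL on '/': a raw user-content URL yields the commit id at index 5 and the package name at index 6, while a GitHub archive URL yields the owner at index 3 and the repository at index 4. The small standalone C++ sketch below only illustrates that indexing convention; the sample URLs and the split helper are made up for the demonstration and are not part of the build system.

// Standalone illustration of the '/'-split indexing used by the mirror macros.
// Sample URLs are illustrative; index 1 is the empty token after "https:".
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static std::vector<std::string> split(const std::string& s, char sep)
{
    std::vector<std::string> out;
    std::stringstream ss(s);
    std::string tok;
    while (std::getline(ss, tok, sep))
        out.push_back(tok);              // keeps the empty token after "https:"
    return out;
}

int main()
{
    // 0:"https:" 1:"" 2:host 3:owner 4:repo 5:commit 6:package
    auto raw = split("https://raw.githubusercontent.com/opencv/opencv_3rdparty/<commit>/ippicv/", '/');
    std::cout << "commit=" << raw[5] << " package=" << raw[6] << "\n";

    // 0:"https:" 1:"" 2:host 3:owner 4:repo 5:"archive"
    auto archive = split("https://github.com/VeriSilicon/TIM-VX/archive/", '/');
    std::cout << "owner=" << archive[3] << " repo=" << archive[4] << "\n";
    return 0;
}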
@@ -0,0 +1,44 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "perf_precomp.hpp"

namespace opencv_test { namespace {

using PerfIntType = perf::TestBaseWithParam<std::tuple<int>>;
PERF_TEST_P(PerfIntType, fisheye_undistortPoints,
            (testing::Values(1e2, 1e3, 1e4)))
{
    const cv::Size imageSize(1280, 800);

    /* Set camera matrix */
    const cv::Matx33d K(558.478087865323,                0, 620.458515360843,
                                       0, 560.506767351568, 381.939424848348,
                                       0,                0,                1);

    /* Set distortion coefficients */
    Mat D(1, 4, CV_64F);
    theRNG().fill(D, RNG::UNIFORM, -1.e-5, 1.e-5);

    int pointsNumber = std::get<0>(GetParam());

    /* Create two-channel points matrix */
    cv::Mat xy[2] = {};
    xy[0].create(pointsNumber, 1, CV_64F);
    theRNG().fill(xy[0], cv::RNG::UNIFORM, 0, imageSize.width); // x
    xy[1].create(pointsNumber, 1, CV_64F);
    theRNG().fill(xy[1], cv::RNG::UNIFORM, 0, imageSize.height); // y

    cv::Mat points;
    merge(xy, 2, points);

    /* Use a fixed iteration count so only the C++ code is benchmarked, not algorithm convergence */
    TermCriteria termCriteria(TermCriteria::MAX_ITER, 10, 0);

    Mat undistortedPoints;
    TEST_CYCLE() fisheye::undistortPoints(points, undistortedPoints, K, D, noArray(), noArray(), termCriteria);

    SANITY_CHECK_NOTHING();
}

}} // namespace
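Outside the perf harness, the same call pattern can be used directly. A minimal sketch follows, assuming the cv::fisheye::undistortPoints overload that accepts a TermCriteria (the one exercised by the test above); the camera matrix, distortion coefficients, and points are made up for the example.

// Plain (non-perf) usage of the call exercised above; all numbers are made up.
#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    const cv::Matx33d K(558.5,   0.0, 620.5,
                          0.0, 560.5, 382.0,
                          0.0,   0.0,   1.0);
    const cv::Vec4d D(0.0, 0.0, 0.0, 0.0);          // zero distortion for the sketch

    std::vector<cv::Point2d> distorted = { {640.0, 400.0}, {100.0, 50.0} };
    std::vector<cv::Point2d> undistorted;

    // Explicit termination criteria, as in the perf test above.
    cv::TermCriteria criteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 10, 1e-8);
    cv::fisheye::undistortPoints(distorted, undistorted, K, D,
                                 cv::noArray(), cv::noArray(), criteria);

    std::cout << "first undistorted point: " << undistorted[0] << std::endl;
    return 0;
}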
@@ -0,0 +1,195 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef OPENCV_DNN_CUDA4DNN_CSL_CUDNN_RECURRENT_HPP
#define OPENCV_DNN_CUDA4DNN_CSL_CUDNN_RECURRENT_HPP

#include "cudnn.hpp"
#include <cudnn.h>


namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cudnn {

/** Move-only RAII wrapper around a cuDNN dropout descriptor.
 */
class DropoutDescriptor
{
public:
    DropoutDescriptor() noexcept = default;
    DropoutDescriptor(const DropoutDescriptor &) = delete;
    DropoutDescriptor(DropoutDescriptor &&other) noexcept : descriptor{other.descriptor}
    {
        states = std::move(other.states);
        other.descriptor = nullptr;
    }

    /** Creates a dropout descriptor for the given cuDNN handle and dropout probability.
     */
    DropoutDescriptor(const Handle &handle, float dropout)
    {
        CUDA4DNN_CHECK_CUDNN(cudnnCreateDropoutDescriptor(&descriptor));

        // we need additional memory for the dropout descriptor
        size_t stateSize;
        CUDA4DNN_CHECK_CUDNN(cudnnDropoutGetStatesSize(handle.get(), &stateSize));
        states.reset(stateSize);

        try
        {
            auto seed = 1234ull; // Pick a seed.
            CUDA4DNN_CHECK_CUDNN(cudnnSetDropoutDescriptor(descriptor, handle.get(), dropout,
                                                           states.get().get(), stateSize, seed));
        }
        catch (...)
        {
            CUDA4DNN_CHECK_CUDNN(cudnnDestroyDropoutDescriptor(descriptor));
            throw;
        }
    }

    ~DropoutDescriptor() noexcept
    {
        if (descriptor)
        {
            CUDA4DNN_CHECK_CUDNN(cudnnDestroyDropoutDescriptor(descriptor));
        }
    }

    DropoutDescriptor &operator=(const DropoutDescriptor &) = delete;
    DropoutDescriptor &operator=(DropoutDescriptor &&other) noexcept
    {
        descriptor = other.descriptor;
        states = std::move(other.states);
        other.descriptor = nullptr;
        return *this;
    };

    cudnnDropoutDescriptor_t get() const noexcept { return descriptor; }

private:
    cudnnDropoutDescriptor_t descriptor{nullptr};

    using value_type = typename ManagedPtr<char>::element_type;
    ManagedPtr<value_type> states;
};

/** Move-only RAII wrapper around a cuDNN RNN descriptor.
 */
template<class T>
class RNNDescriptor
{
public:
    enum class RNNMode
    {
        RNN_RELU,
        RNN_TANH,
        LSTM,
        GRU
    };

    RNNDescriptor() noexcept = default;
    RNNDescriptor(const RNNDescriptor &) = delete;
    RNNDescriptor(RNNDescriptor &&other) noexcept : descriptor{other.descriptor}
    {
        other.descriptor = nullptr;
    }

    /** Creates an RNN descriptor for the given mode, sizes, direction and dropout descriptor.
     */
    RNNDescriptor(const Handle &handle, RNNMode mode, int hidden_size, int num_layers,
                  bool bidirectional, const DropoutDescriptor &dropoutDesc)
    {
        CUDA4DNN_CHECK_CUDNN(cudnnCreateRNNDescriptor(&descriptor));
        const auto rnn_mode = [mode] {
            switch (mode)
            {
            case RNNMode::RNN_RELU:
                return CUDNN_RNN_RELU;
            case RNNMode::RNN_TANH:
                return CUDNN_RNN_TANH;
            case RNNMode::LSTM:
                return CUDNN_LSTM;
            case RNNMode::GRU:
                return CUDNN_GRU;
            default:
                return CUDNN_LSTM;
            }
        }();

        try
        {
            CUDA4DNN_CHECK_CUDNN(cudnnSetRNNDescriptor_v6(
                handle.get(), descriptor, hidden_size, num_layers, dropoutDesc.get(),
                CUDNN_LINEAR_INPUT, bidirectional ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL,
                rnn_mode,
                algo, //CUDNN_RNN_ALGO_STANDARD,
                detail::get_data_type<T>()));
        }
        catch (...)
        {
            CUDA4DNN_CHECK_CUDNN(cudnnDestroyRNNDescriptor(descriptor));
            throw;
        }
    }

    ~RNNDescriptor() noexcept
    {
        if (descriptor)
        {
            CUDA4DNN_CHECK_CUDNN(cudnnDestroyRNNDescriptor(descriptor));
        }
    }

    RNNDescriptor &operator=(const RNNDescriptor &) = delete;
    RNNDescriptor &operator=(RNNDescriptor &&other) noexcept
    {
        descriptor = other.descriptor;
        other.descriptor = nullptr;
        return *this;
    };

    cudnnRNNDescriptor_t get() const noexcept { return descriptor; }

private:
    cudnnRNNDescriptor_t descriptor{nullptr};
    cudnnRNNMode_t mode{CUDNN_LSTM};
    // only one algorithm is supported for now
    cudnnRNNAlgo_t algo{CUDNN_RNN_ALGO_STANDARD};
};

template<class T>
size_t getRNNWorkspaceSize(const Handle &handle, const RNNDescriptor<T> &rnnDesc,
                           const int seqLength, const TensorDescriptorsArray<T> &inputDesc)
{
    size_t workSize;
    CUDA4DNN_CHECK_CUDNN(cudnnGetRNNWorkspaceSize(handle.get(), rnnDesc.get(), seqLength,
                                                  inputDesc.get().data(), &workSize));
    return workSize;
}

template<class T>
void LSTMForward(const Handle &handle, const RNNDescriptor<T> &rnnDesc,
                 const FilterDescriptor<T> &filterDesc, DevicePtr<const T> filterPtr,
                 const TensorDescriptorsArray<T> &inputDesc, DevicePtr<const T> inputPtr,
                 const TensorDescriptor<T> &initialHDesc, DevicePtr<const T> initialH,
                 const TensorDescriptor<T> &initialCDesc, DevicePtr<const T> initialC,
                 const int seqLength, const TensorDescriptorsArray<T> &outputDesc,
                 DevicePtr<T> yOutputPtr, DevicePtr<T> ycOutputPtr, WorkspaceInstance workspace)
{
    CV_Assert(handle);

    CUDA4DNN_CHECK_CUDNN(cudnnRNNForwardInference(handle.get(), rnnDesc.get(), seqLength,
                         inputDesc.get().data(), inputPtr.get(), // input sequence
                         initialHDesc.get(), initialH.get(),
                         initialCDesc.get(), initialC.get(), // hidden
                         filterDesc.get(), filterPtr.get(), // weights
                         outputDesc.get().data(), yOutputPtr.get(), // output
                         nullptr, nullptr,
                         initialCDesc.get(), ycOutputPtr.get(),
                         static_cast<void*>(workspace.get()), workspace.size_in_bytes()));
}

}}}}} /* namespace cv::dnn::cuda4dnn::csl::cudnn */

#endif //OPENCV_DNN_CUDA4DNN_CSL_CUDNN_RECURRENT_HPP
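Both descriptor classes above follow the same move-only RAII pattern around a raw cuDNN handle: create in the constructor, destroy in the destructor, forbid copying, and transfer ownership on move. The self-contained sketch below illustrates just that pattern; the fake_* functions stand in for the cudnnCreate*/cudnnDestroy* calls so the sketch compiles without CUDA, and it is not part of the patch.

// Generic move-only RAII wrapper mirroring DropoutDescriptor/RNNDescriptor above.
// fake_create/fake_destroy are stand-ins for the real cuDNN create/destroy calls.
#include <cstdio>
#include <utility>

struct fake_desc { int value; };                       // stands in for an opaque cuDNN descriptor
fake_desc* fake_create()               { return new fake_desc{0}; }
void       fake_destroy(fake_desc* d)  { delete d; }

class Descriptor
{
public:
    Descriptor() noexcept = default;
    Descriptor(const Descriptor&) = delete;            // non-copyable
    Descriptor(Descriptor&& other) noexcept : handle{other.handle}
    {
        other.handle = nullptr;                        // source no longer owns the handle
    }

    explicit Descriptor(bool create) { if (create) handle = fake_create(); }

    ~Descriptor() noexcept { if (handle) fake_destroy(handle); }

    Descriptor& operator=(const Descriptor&) = delete;
    Descriptor& operator=(Descriptor&& other) noexcept
    {
        if (handle) fake_destroy(handle);              // release any handle held so far
        handle = other.handle;
        other.handle = nullptr;
        return *this;
    }

    fake_desc* get() const noexcept { return handle; }

private:
    fake_desc* handle{nullptr};
};

int main()
{
    Descriptor a(true);
    Descriptor b = std::move(a);                       // ownership moves; a.get() becomes nullptr
    std::printf("a=%p b=%p\n", static_cast<void*>(a.get()), static_cast<void*>(b.get()));
    return 0;
}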
@@ -0,0 +1,97 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef OPENCV_DNN_SRC_CUDA4DNN_PRIMITIVES_CELLS_HPP
#define OPENCV_DNN_SRC_CUDA4DNN_PRIMITIVES_CELLS_HPP

#include "../../op_cuda.hpp"

#include "../csl/cudnn.hpp"
#include "../csl/tensor_ops.hpp"
#include "../csl/cudnn/recurrent.hpp"

namespace cv { namespace dnn { namespace cuda4dnn {

    struct RNNConfiguration
    {
        int seqLength;
        int numLayers;
        int hiddenSize;
        int inputSize;
        int miniBatch;
        bool bidirectional;
    };

    template<class T>
    class LSTMOp final : public CUDABackendNode
    {
    public:
        using wrapper_type = GetCUDABackendWrapperType<T>;

        LSTMOp(csl::Stream stream_, csl::cudnn::Handle handle, const Mat& filters, const Mat& h0,
               const Mat& c0, const RNNConfiguration& config)
            : stream(std::move(stream_))
        {
            typename csl::LSTM<T>::params_type params{
                {filters.total(), 1, 1}, // reshape
                config.seqLength,
                config.numLayers,
                config.hiddenSize,
                config.inputSize,
                config.miniBatch,
                config.bidirectional,
                0.0, /* dropout */
                csl::cudnn::RNNDescriptor<T>::RNNMode::LSTM
            };

            lstm = csl::LSTM<T>(handle, params);
            auto correct_shape_filters = filters.reshape(1, {static_cast<int>(filters.total()), 1, 1});
            filtersTensor = csl::makeTensorHeader<T>(correct_shape_filters);
            csl::copyMatToTensor<T>(correct_shape_filters, filtersTensor, stream);

            h0Tensor = csl::makeTensorHeader<T>(h0);
            csl::copyMatToTensor<T>(h0, h0Tensor, stream);

            c0Tensor = csl::makeTensorHeader<T>(c0);
            csl::copyMatToTensor<T>(c0, c0Tensor, stream);

            csl::WorkspaceBuilder builder;
            builder.require<T>(lstm.get_workspace_memory_in_bytes());
        }

        void forward(const std::vector<cv::Ptr<BackendWrapper>>& inputs,
                     const std::vector<cv::Ptr<BackendWrapper>>& outputs,
                     csl::Workspace& workspace) override
        {
            CV_Assert(inputs.size() == 1 && !outputs.empty());

            auto input_wrapper = inputs[0].dynamicCast<wrapper_type>();
            auto input = input_wrapper->getView();

            auto y_output_wrapper = outputs[0].dynamicCast<wrapper_type>();
            auto y_output = y_output_wrapper->getSpan();

            Ptr<wrapper_type> yc_output_wrapper = outputs.size() == 2 ? outputs[1].dynamicCast<wrapper_type>() : Ptr<wrapper_type>();
            csl::TensorSpan<T> yc_output = yc_output_wrapper.empty() ? csl::TensorSpan<T>() : yc_output_wrapper->getSpan();

            csl::WorkspaceAllocator allocator(workspace);
            lstm.inference(input, y_output, yc_output, filtersTensor, h0Tensor, c0Tensor, allocator.get_instance());
        }

        std::size_t get_workspace_memory_in_bytes() const noexcept override
        {
            return lstm.get_workspace_memory_in_bytes();
        }

    private:
        csl::LSTM<T> lstm;
        csl::Stream stream;
        csl::Tensor<T> filtersTensor;
        csl::Tensor<T> h0Tensor;
        csl::Tensor<T> c0Tensor;
    };

}}} /* namespace cv::dnn::cuda4dnn */
#endif //OPENCV_DNN_SRC_CUDA4DNN_PRIMITIVES_CELLS_HPP
File diff suppressed because it is too large
@@ -0,0 +1,67 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "precomp.hpp"

#include "dnn_common.hpp"
#include <opencv2/core/utils/configuration.private.hpp>

namespace cv {
namespace dnn {
CV__DNN_INLINE_NS_BEGIN


size_t getParam_DNN_NETWORK_DUMP()
{
    static size_t DNN_NETWORK_DUMP = utils::getConfigurationParameterSizeT("OPENCV_DNN_NETWORK_DUMP", 0);
    return DNN_NETWORK_DUMP;
}

// This option is useful when running under Valgrind to detect memory errors
bool getParam_DNN_DISABLE_MEMORY_OPTIMIZATIONS()
{
    static bool DNN_DISABLE_MEMORY_OPTIMIZATIONS = utils::getConfigurationParameterBool("OPENCV_DNN_DISABLE_MEMORY_OPTIMIZATIONS", false);
    return DNN_DISABLE_MEMORY_OPTIMIZATIONS;
}

#ifdef HAVE_OPENCL
bool getParam_DNN_OPENCL_ALLOW_ALL_DEVICES()
{
    static bool DNN_OPENCL_ALLOW_ALL_DEVICES = utils::getConfigurationParameterBool("OPENCV_DNN_OPENCL_ALLOW_ALL_DEVICES", false);
    return DNN_OPENCL_ALLOW_ALL_DEVICES;
}
#endif

int getParam_DNN_BACKEND_DEFAULT()
{
    static int PARAM_DNN_BACKEND_DEFAULT = (int)utils::getConfigurationParameterSizeT("OPENCV_DNN_BACKEND_DEFAULT",
#ifdef HAVE_INF_ENGINE
        (size_t)DNN_BACKEND_INFERENCE_ENGINE
#else
        (size_t)DNN_BACKEND_OPENCV
#endif
    );
    return PARAM_DNN_BACKEND_DEFAULT;
}

// Additional checks (slows down execution!)
bool getParam_DNN_CHECK_NAN_INF()
{
    static bool DNN_CHECK_NAN_INF = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF", false);
    return DNN_CHECK_NAN_INF;
}
bool getParam_DNN_CHECK_NAN_INF_DUMP()
{
    static bool DNN_CHECK_NAN_INF_DUMP = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF_DUMP", false);
    return DNN_CHECK_NAN_INF_DUMP;
}
bool getParam_DNN_CHECK_NAN_INF_RAISE_ERROR()
{
    static bool DNN_CHECK_NAN_INF_RAISE_ERROR = utils::getConfigurationParameterBool("OPENCV_DNN_CHECK_NAN_INF_RAISE_ERROR", false);
    return DNN_CHECK_NAN_INF_RAISE_ERROR;
}


CV__DNN_INLINE_NS_END
}} // namespace cv::dnn
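Each getter above caches its configuration parameter in a function-local static, so the environment (for example OPENCV_DNN_CHECK_NAN_INF) is consulted once and the result is reused for the rest of the process. The sketch below is a simplified illustration of that caching pattern only; it uses std::getenv and a crude parser in place of OpenCV's utils::getConfigurationParameterBool and is not part of the patch.

// Simplified illustration of the function-local-static caching used by the
// getParam_* helpers above; std::getenv replaces the OpenCV configuration API.
#include <cstdlib>
#include <cstring>
#include <cstdio>

static bool readBoolEnv(const char* name, bool defaultValue)
{
    const char* v = std::getenv(name);
    if (!v) return defaultValue;
    return std::strcmp(v, "0") != 0;       // crude parse: anything but "0" counts as true here
}

bool getParam_CHECK_NAN_INF_demo()
{
    // Evaluated once on first call, then cached for the rest of the process.
    static bool value = readBoolEnv("OPENCV_DNN_CHECK_NAN_INF", false);
    return value;
}

int main()
{
    std::printf("check NaN/Inf: %s\n", getParam_CHECK_NAN_INF_demo() ? "on" : "off");
    return 0;
}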
@@ -0,0 +1,93 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "precomp.hpp"


namespace cv {
namespace dnn {
CV__DNN_INLINE_NS_BEGIN


Net readNet(const String& _model, const String& _config, const String& _framework)
{
    String framework = toLowerCase(_framework);
    String model = _model;
    String config = _config;
    const std::string modelExt = model.substr(model.rfind('.') + 1);
    const std::string configExt = config.substr(config.rfind('.') + 1);
    if (framework == "caffe" || modelExt == "caffemodel" || configExt == "caffemodel" || modelExt == "prototxt" || configExt == "prototxt")
    {
        if (modelExt == "prototxt" || configExt == "caffemodel")
            std::swap(model, config);
        return readNetFromCaffe(config, model);
    }
    if (framework == "tensorflow" || modelExt == "pb" || configExt == "pb" || modelExt == "pbtxt" || configExt == "pbtxt")
    {
        if (modelExt == "pbtxt" || configExt == "pb")
            std::swap(model, config);
        return readNetFromTensorflow(model, config);
    }
    if (framework == "torch" || modelExt == "t7" || modelExt == "net" || configExt == "t7" || configExt == "net")
    {
        return readNetFromTorch(model.empty() ? config : model);
    }
    if (framework == "darknet" || modelExt == "weights" || configExt == "weights" || modelExt == "cfg" || configExt == "cfg")
    {
        if (modelExt == "cfg" || configExt == "weights")
            std::swap(model, config);
        return readNetFromDarknet(config, model);
    }
    if (framework == "dldt" || modelExt == "bin" || configExt == "bin" || modelExt == "xml" || configExt == "xml")
    {
        if (modelExt == "xml" || configExt == "bin")
            std::swap(model, config);
        return readNetFromModelOptimizer(config, model);
    }
    if (framework == "onnx" || modelExt == "onnx")
    {
        return readNetFromONNX(model);
    }
    CV_Error(Error::StsError, "Cannot determine an origin framework of files: " + model + (config.empty() ? "" : ", " + config));
}

Net readNet(const String& _framework, const std::vector<uchar>& bufferModel,
            const std::vector<uchar>& bufferConfig)
{
    String framework = toLowerCase(_framework);
    if (framework == "caffe")
        return readNetFromCaffe(bufferConfig, bufferModel);
    else if (framework == "tensorflow")
        return readNetFromTensorflow(bufferModel, bufferConfig);
    else if (framework == "darknet")
        return readNetFromDarknet(bufferConfig, bufferModel);
    else if (framework == "torch")
        CV_Error(Error::StsNotImplemented, "Reading Torch models from buffers");
    else if (framework == "dldt")
        return readNetFromModelOptimizer(bufferConfig, bufferModel);
    CV_Error(Error::StsError, "Cannot determine an origin framework with a name " + framework);
}

Net readNetFromModelOptimizer(const String& xml, const String& bin)
{
    return Net::readFromModelOptimizer(xml, bin);
}

Net readNetFromModelOptimizer(const std::vector<uchar>& bufferCfg, const std::vector<uchar>& bufferModel)
{
    return Net::readFromModelOptimizer(bufferCfg, bufferModel);
}

Net readNetFromModelOptimizer(
    const uchar* bufferModelConfigPtr, size_t bufferModelConfigSize,
    const uchar* bufferWeightsPtr, size_t bufferWeightsSize)
{
    return Net::readFromModelOptimizer(
        bufferModelConfigPtr, bufferModelConfigSize,
        bufferWeightsPtr, bufferWeightsSize);
}


CV__DNN_INLINE_NS_END
}} // namespace cv::dnn
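The dispatch above means callers usually only need recognizable file extensions; readNet() picks the importer and swaps model/config arguments when they are passed in the "wrong" order. A brief usage sketch follows; the file names are placeholders, so the calls are wrapped in a try/catch to handle the files being absent.

// Letting readNet() pick the importer from the file extension.
// File names are placeholders, not files shipped with this patch.
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    try
    {
        // ONNX: a single file, the extension alone is enough.
        cv::dnn::Net onnxNet = cv::dnn::readNet("model.onnx");

        // Caffe: argument order does not matter, readNet() swaps them as needed.
        cv::dnn::Net caffeNet = cv::dnn::readNet("deploy.prototxt", "weights.caffemodel");

        std::cout << "ONNX net empty: " << onnxNet.empty()
                  << ", Caffe net empty: " << caffeNet.empty() << std::endl;
    }
    catch (const cv::Exception& e)
    {
        std::cerr << "readNet failed: " << e.what() << std::endl;  // e.g. placeholder files are absent
    }
    return 0;
}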
@@ -0,0 +1,158 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "precomp.hpp"

#include <opencv2/imgproc.hpp>


namespace cv {
namespace dnn {
CV__DNN_INLINE_NS_BEGIN


Mat blobFromImage(InputArray image, double scalefactor, const Size& size,
                  const Scalar& mean, bool swapRB, bool crop, int ddepth)
{
    CV_TRACE_FUNCTION();
    Mat blob;
    blobFromImage(image, blob, scalefactor, size, mean, swapRB, crop, ddepth);
    return blob;
}

void blobFromImage(InputArray image, OutputArray blob, double scalefactor,
                   const Size& size, const Scalar& mean, bool swapRB, bool crop, int ddepth)
{
    CV_TRACE_FUNCTION();
    std::vector<Mat> images(1, image.getMat());
    blobFromImages(images, blob, scalefactor, size, mean, swapRB, crop, ddepth);
}

Mat blobFromImages(InputArrayOfArrays images, double scalefactor, Size size,
                   const Scalar& mean, bool swapRB, bool crop, int ddepth)
{
    CV_TRACE_FUNCTION();
    Mat blob;
    blobFromImages(images, blob, scalefactor, size, mean, swapRB, crop, ddepth);
    return blob;
}

void blobFromImages(InputArrayOfArrays images_, OutputArray blob_, double scalefactor,
                    Size size, const Scalar& mean_, bool swapRB, bool crop, int ddepth)
{
    CV_TRACE_FUNCTION();
    CV_CheckType(ddepth, ddepth == CV_32F || ddepth == CV_8U, "Blob depth should be CV_32F or CV_8U");
    if (ddepth == CV_8U)
    {
        CV_CheckEQ(scalefactor, 1.0, "Scaling is not supported for CV_8U blob depth");
        CV_Assert(mean_ == Scalar() && "Mean subtraction is not supported for CV_8U blob depth");
    }

    std::vector<Mat> images;
    images_.getMatVector(images);
    CV_Assert(!images.empty());
    for (size_t i = 0; i < images.size(); i++)
    {
        Size imgSize = images[i].size();
        if (size == Size())
            size = imgSize;
        if (size != imgSize)
        {
            if (crop)
            {
                float resizeFactor = std::max(size.width / (float)imgSize.width,
                                              size.height / (float)imgSize.height);
                resize(images[i], images[i], Size(), resizeFactor, resizeFactor, INTER_LINEAR);
                Rect crop(Point(0.5 * (images[i].cols - size.width),
                                0.5 * (images[i].rows - size.height)),
                          size);
                images[i] = images[i](crop);
            }
            else
                resize(images[i], images[i], size, 0, 0, INTER_LINEAR);
        }
        if (images[i].depth() == CV_8U && ddepth == CV_32F)
            images[i].convertTo(images[i], CV_32F);
        Scalar mean = mean_;
        if (swapRB)
            std::swap(mean[0], mean[2]);

        images[i] -= mean;
        images[i] *= scalefactor;
    }

    size_t nimages = images.size();
    Mat image0 = images[0];
    int nch = image0.channels();
    CV_Assert(image0.dims == 2);
    if (nch == 3 || nch == 4)
    {
        int sz[] = { (int)nimages, nch, image0.rows, image0.cols };
        blob_.create(4, sz, ddepth);
        Mat blob = blob_.getMat();
        Mat ch[4];

        for (size_t i = 0; i < nimages; i++)
        {
            const Mat& image = images[i];
            CV_Assert(image.depth() == blob_.depth());
            nch = image.channels();
            CV_Assert(image.dims == 2 && (nch == 3 || nch == 4));
            CV_Assert(image.size() == image0.size());

            for (int j = 0; j < nch; j++)
                ch[j] = Mat(image.rows, image.cols, ddepth, blob.ptr((int)i, j));
            if (swapRB)
                std::swap(ch[0], ch[2]);
            split(image, ch);
        }
    }
    else
    {
        CV_Assert(nch == 1);
        int sz[] = { (int)nimages, 1, image0.rows, image0.cols };
        blob_.create(4, sz, ddepth);
        Mat blob = blob_.getMat();

        for (size_t i = 0; i < nimages; i++)
        {
            const Mat& image = images[i];
            CV_Assert(image.depth() == blob_.depth());
            nch = image.channels();
            CV_Assert(image.dims == 2 && (nch == 1));
            CV_Assert(image.size() == image0.size());

            image.copyTo(Mat(image.rows, image.cols, ddepth, blob.ptr((int)i, 0)));
        }
    }
}

void imagesFromBlob(const cv::Mat& blob_, OutputArrayOfArrays images_)
{
    CV_TRACE_FUNCTION();

    // A blob is a 4 dimensional matrix in floating point precision
    // blob_[0] = batchSize = nbOfImages
    // blob_[1] = nbOfChannels
    // blob_[2] = height
    // blob_[3] = width
    CV_Assert(blob_.depth() == CV_32F);
    CV_Assert(blob_.dims == 4);

    images_.create(cv::Size(1, blob_.size[0]), blob_.depth());

    std::vector<Mat> vectorOfChannels(blob_.size[1]);
    for (int n = 0; n < blob_.size[0]; ++n)
    {
        for (int c = 0; c < blob_.size[1]; ++c)
        {
            vectorOfChannels[c] = getPlane(blob_, n, c);
        }
        cv::merge(vectorOfChannels, images_.getMatRef(n));
    }
}


CV__DNN_INLINE_NS_END
}} // namespace cv::dnn
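As a quick illustration of the pre/post-processing pair implemented above, the sketch below builds an NCHW blob from one image and converts it back with imagesFromBlob; the input file name and the mean values are placeholders chosen for the demo.

// Round trip through the helpers defined above: image -> NCHW blob -> images.
// "input.jpg" is a placeholder; the mean values are arbitrary demo values.
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("input.jpg");
    if (img.empty()) { std::cerr << "no input image\n"; return 1; }

    // Resize to 224x224, subtract a per-channel mean, swap BGR->RGB, no center crop.
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(224, 224),
                                          cv::Scalar(104, 117, 123),
                                          /*swapRB=*/true, /*crop=*/false);
    std::cout << "blob dims: " << blob.dims
              << " (N=" << blob.size[0] << " C=" << blob.size[1]
              << " H=" << blob.size[2] << " W=" << blob.size[3] << ")\n";

    std::vector<cv::Mat> restored;
    cv::dnn::imagesFromBlob(blob, restored);   // one CV_32FC3 image per batch entry
    std::cout << "restored images: " << restored.size() << std::endl;
    return 0;
}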
Some files were not shown because too many files have changed in this diff.