Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Ref: pull/21981/head
Author: OpenCV Developers (3 years ago)
Commit: d9a444ca1a
25 changed files:
  1. .github/workflows/PR-4.x-ARM64.yaml (187 changes)
  2. .github/workflows/PR-4.x-U20.yaml (39 changes)
  3. .github/workflows/PR-4.x-W10.yaml (34 changes)
  4. 3rdparty/openvx/README.md (2 changes)
  5. doc/js_tutorials/js_imgproc/js_morphological_ops/js_morphological_ops.markdown (2 changes)
  6. doc/js_tutorials/js_setup/js_nodejs/js_nodejs.markdown (2 changes)
  7. doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown (4 changes)
  8. doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown (2 changes)
  9. doc/tutorials/imgproc/imgtrans/hough_circle/hough_circle.markdown (24 changes)
  10. doc/tutorials/imgproc/morph_lines_detection/morph_lines_detection.md (2 changes)
  11. modules/core/include/opencv2/core/mat.hpp (4 changes)
  12. modules/core/src/parallel.cpp (2 changes)
  13. modules/dnn/src/onnx/opencv-onnx.proto (2 changes)
  14. modules/imgcodecs/include/opencv2/imgcodecs.hpp (2 changes)
  15. modules/imgcodecs/src/grfmt_jpeg.cpp (2 changes)
  16. modules/imgcodecs/src/grfmt_tiff.cpp (2 changes)
  17. modules/imgcodecs/test/test_tiff.cpp (43 changes)
  18. modules/imgproc/include/opencv2/imgproc.hpp (12 changes)
  19. modules/imgproc/src/opencl/medianFilter.cl (2 changes)
  20. modules/imgproc/test/test_convhull.cpp (2 changes)
  21. modules/js/test/test_imgproc.js (2 changes)
  22. modules/ml/src/lr.cpp (2 changes)
  23. modules/video/src/kalman.cpp (2 changes)
  24. modules/videoio/src/cap_gstreamer.cpp (2 changes)
  25. modules/videoio/test/test_video_io.cpp (2 changes)

@@ -0,0 +1,187 @@
name: PR:4.x ARM64
# TODO: enable pipeline after 4.x update
on: workflow_dispatch
env:
EXTRA_CMAKE_OPTIONS: '-DBUILD_DOCS=ON -DPYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 -DBUILD_opencv_xfeatures2d=OFF -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON -DENABLE_CCACHE=OFF'
OPENCV_TEST_DATA_PATH: '/opencv_extra/testdata'
OPENCV_DOCKER_WORKDIR: '/__w/opencv/opencv'
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
PR_AUTHOR_FORK: ${{ github.event.pull_request.head.repo.full_name }}
SOURCE_BRANCH_NAME: ${{ github.head_ref }}
TARGET_BRANCH_NAME: ${{ github.base_ref }}
ANT_HOME: '/usr/share/ant'
PYTHONPATH: /opencv-build/python_loader:$PYTHONPATH
jobs:
BuildAndTest:
runs-on: opencv-cn-lin-arm64
defaults:
run:
shell: bash
container:
image: docker.io/yuentau/ocv_ubuntu:20.04-arm64
steps:
- name: PR info
run: |
echo "PR Author: ${{ env.PR_AUTHOR }}"
echo "PR Author fork: ${{ env.PR_AUTHOR_FORK }}"
echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}"
echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}"
- name: Clean
run: find . -mindepth 1 -delete
- name: Fetch opencv
uses: actions/checkout@v3
with:
repository: opencv/opencv
ref: ${{ env.TARGET_BRANCH_NAME }}
fetch-depth: 0
- name: Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch
run: |
cd ${{ env.OPENCV_DOCKER_WORKDIR }}
git config --global --add safe.directory ${{ env.OPENCV_DOCKER_WORKDIR }}
git config user.email "opencv.ci"
git config user.name "opencv.ci"
git pull -v "https://github.com/${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}"
- name: Clone opencv_extra
run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} https://github.com/opencv/opencv_extra.git /opencv_extra
- name: Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch
run: |
OPENCV_EXTRA_FORK=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}") || true
if [[ ! -z "$OPENCV_EXTRA_FORK" ]]; then
echo "Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch"
cd /opencv_extra
git config user.email "opencv.ci"
git config user.name "opencv.ci"
git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}"
else
echo "No merge since ${{ env.PR_AUTHOR }}/opencv_extra does not have branch ${{ env.SOURCE_BRANCH_NAME }}"
fi
- name: Configure OpenCV
run: |
cmake -G Ninja -B /opencv-build ${{ env.EXTRA_CMAKE_OPTIONS }} ${{ env.OPENCV_DOCKER_WORKDIR }}
- name: Build OpenCV
run: |
cd /opencv-build
ninja
- name: Accuracy:calib3d
run: cd /opencv-build && xvfb-run -a bin/opencv_test_calib3d
- name: Accuracy:core
run: cd /opencv-build && xvfb-run -a bin/opencv_test_core
- name: Accuracy:dnn
run: cd /opencv-build && xvfb-run -a bin/opencv_test_dnn
- name: Accuracy:features2d
run: cd /opencv-build && xvfb-run -a bin/opencv_test_features2d
- name: Accuracy:flann
run: cd /opencv-build && xvfb-run -a bin/opencv_test_flann
- name: Accuracy:highgui
run: cd /opencv-build && xvfb-run -a bin/opencv_test_highgui
- name: Accuracy:imgcodecs
run: cd /opencv-build && xvfb-run -a bin/opencv_test_imgcodecs
- name: Accuracy:imgproc
run: cd /opencv-build && xvfb-run -a bin/opencv_test_imgproc
- name: Accuracy:ml
run: cd /opencv-build && xvfb-run -a bin/opencv_test_ml
- name: Accuracy:objdetect
run: cd /opencv-build && xvfb-run -a bin/opencv_test_objdetect --gtest_filter="-Objdetect_QRCode_Close.regression/0:Objdetect_QRCode_Close.regression/4"
- name: Accuracy:photo
run: cd /opencv-build && xvfb-run -a bin/opencv_test_photo --gtest_filter="-Photo_CalibrateDebevec.regression"
- name: Accuracy:shape
run: cd /opencv-build && xvfb-run -a bin/opencv_test_shape
- name: Accuracy:stitching
run: cd /opencv-build && xvfb-run -a bin/opencv_test_stitching
- name: Accuracy:superres
run: cd /opencv-build && xvfb-run -a bin/opencv_test_superres
- name: Accuracy:video
run: cd /opencv-build && xvfb-run -a bin/opencv_test_video
- name: Accuracy:videoio
run: cd /opencv-build && xvfb-run -a bin/opencv_test_videoio
- name: Accuracy:videostab
run: cd /opencv-build && xvfb-run -a bin/opencv_test_videostab
- name: Performance:calib3d
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_calib3d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:core
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_core --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:dnn
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_dnn --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:features2d
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_features2d --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:imgcodecs
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_imgcodecs --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:imgproc
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_imgproc --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:objdetect
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_objdetect --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1 --gtest_filter="-Perf_Objdetect_QRCode.detect/2:Perf_Objdetect_QRCode_Multi.decodeMulti*:Perf_Objdetect_QRCode_Multi.detectMulti*"
- name: Performance:photo
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_photo --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:stitching
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_stitching --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:superres
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_superres --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:video
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_video --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Performance:videoio
run: cd /opencv-build && xvfb-run -a bin/opencv_perf_videoio --perf_impl=plain --perf_min_samples=1 --perf_force_samples=1 --perf_verify_sanity --skip_unstable=1
- name: Python3
run: |
cd ${{ env.OPENCV_DOCKER_WORKDIR }}/modules/python/test
python3 ./test.py --repo ../../../ -v
- name: Java
run: cd /opencv-build && xvfb-run -a python3 ${{ env.OPENCV_DOCKER_WORKDIR }}/modules/ts/misc/run.py . -a -t java
- name: Save Unit Test Results
uses: actions/upload-artifact@v3
if: always()
with:
name: junit-html
path: /opencv-build/java_test/testResults/junit-noframes.html
BuildContrib:
runs-on: opencv-cn-lin-arm64
defaults:
run:
shell: bash
container:
image: docker.io/yuentau/ocv_ubuntu:20.04-arm64
steps:
- name: PR info
run: |
echo "PR Author: ${{ env.PR_AUTHOR }}"
echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}"
echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}"
- name: Clean
run: find . -mindepth 1 -delete
- name: Fetch opencv
uses: actions/checkout@v3
with:
repository: opencv/opencv
ref: ${{ env.TARGET_BRANCH_NAME }}
fetch-depth: 0
- name: Merge opencv with a test branch
run: |
cd ${{ env.OPENCV_DOCKER_WORKDIR }}
git config --global --add safe.directory ${{ env.OPENCV_DOCKER_WORKDIR }}
git config user.email "opencv.ci"
git config user.name "opencv.ci"
git pull -v "https://github.com/${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}"
- name: Clone opencv_contrib
run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} https://github.com/opencv/opencv_contrib.git /opencv_contrib
- name: Merge opencv_contrib with ${{ env.SOURCE_BRANCH_NAME }} branch
run: |
OPENCV_CONTRIB_FORK=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv_contrib" "${{ env.SOURCE_BRANCH_NAME }}") || true
if [[ ! -z "$OPENCV_CONTRIB_FORK" ]]; then
echo "Merge opencv_contrib with ${{ env.SOURCE_BRANCH_NAME }} branch"
cd /opencv_contrib
git config user.email "opencv.ci"
git config user.name "opencv.ci"
git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv_contrib" "${{ env.SOURCE_BRANCH_NAME }}"
else
echo "No merge since ${{ env.PR_AUTHOR }}/opencv_contrib does not have branch ${{ env.SOURCE_BRANCH_NAME }}"
fi
- name: Configure OpenCV Contrib
run: |
cmake -G Ninja -B /opencv-contrib-build ${{ env.EXTRA_CMAKE_OPTIONS }} -DOPENCV_EXTRA_MODULES_PATH=/opencv_contrib/modules ${{ env.OPENCV_DOCKER_WORKDIR }}
- name: Build OpenCV Contrib
run: |
cd /opencv-contrib-build
ninja

@@ -6,24 +6,29 @@ on:
- 4.x
env:
EXTRA_CMAKE_OPTIONS: '-DBUILD_DOCS=ON -DPYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON -DENABLE_CCACHE=OFF'
OPENCV_TEST_DATA_PATH: '/opencv_extra/testdata'
OPENCV_DOCKER_WORKDIR: '/__w/opencv/opencv'
EXTRA_CMAKE_OPTIONS: '-DBUILD_DOCS=ON -DPYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 -DOPENCV_DOWNLOAD_PATH=/home/ci/binaries_cache -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON'
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
PR_AUTHOR_FORK: ${{ github.event.pull_request.head.repo.full_name }}
SOURCE_BRANCH_NAME: ${{ github.head_ref }}
TARGET_BRANCH_NAME: ${{ github.base_ref }}
ANT_HOME: '/usr/share/ant'
ANT_HOME: /usr/share/ant
GIT_CACHE_DOCKER: /home/ci/git_cache
PYTHONPATH: /opencv-build/python_loader:$PYTHONPATH
OPENCV_TEST_DATA_PATH: /opencv_extra/testdata
OPENCV_DOCKER_WORKDIR: /opencv
jobs:
BuildAndTest:
runs-on: ubuntu-20.04
runs-on: opencv-cn-lin-x86-64
defaults:
run:
shell: bash
container:
image: quay.io/asenyaev/opencv-ubuntu:20.04
volumes:
- /home/opencv-cn/git_cache:/home/ci/git_cache
- /home/opencv-cn/ci_cache/opencv:/home/ci/.ccache
- /home/opencv-cn/binaries_cache:/home/ci/binaries_cache
steps:
- name: PR info
run: |
@@ -31,13 +36,9 @@ jobs:
echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}"
echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}"
- name: Clean
run: find . -mindepth 1 -delete
run: find ${{ env.OPENCV_DOCKER_WORKDIR }} -mindepth 1 -delete
- name: Fetch opencv
uses: actions/checkout@v3
with:
repository: opencv/opencv
ref: ${{ env.TARGET_BRANCH_NAME }}
fetch-depth: 0
run: git clone --branch ${{ env.TARGET_BRANCH_NAME }} --reference ${{ env.GIT_CACHE_DOCKER }}/opencv.git https://github.com/opencv/opencv.git ${{ env.OPENCV_DOCKER_WORKDIR }}
- name: Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch
run: |
cd ${{ env.OPENCV_DOCKER_WORKDIR }}
@@ -46,7 +47,7 @@ jobs:
git config user.name "opencv.ci"
git pull -v "https://github.com/${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}"
- name: Clone opencv_extra
run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} https://github.com/opencv/opencv_extra.git /opencv_extra
run: git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} --reference ${{ env.GIT_CACHE_DOCKER }}/opencv_extra.git https://github.com/opencv/opencv_extra.git /opencv_extra
- name: Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch
run: |
OPENCV_EXTRA_FORK=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}") || true
@@ -137,12 +138,16 @@ jobs:
run: cd /opencv-build && cmake --build . --config release --target check_pylint -- -j4
BuildContrib:
runs-on: ubuntu-20.04
runs-on: opencv-cn-lin-x86-64
defaults:
run:
shell: bash
container:
image: quay.io/asenyaev/opencv-ubuntu:20.04
volumes:
- /home/opencv-cn/git_cache:/home/ci/git_cache
- /home/opencv-cn/ci_cache/opencv:/home/ci/.ccache
- /home/opencv-cn/binaries_cache:/home/ci/binaries_cache
steps:
- name: PR info
run: |
@@ -150,13 +155,9 @@ jobs:
echo "Source branch name: ${{ env.SOURCE_BRANCH_NAME }}"
echo "Target branch name: ${{ env.TARGET_BRANCH_NAME }}"
- name: Clean
run: find . -mindepth 1 -delete
run: find ${{ env.OPENCV_DOCKER_WORKDIR }} -mindepth 1 -delete
- name: Fetch opencv
uses: actions/checkout@v3
with:
repository: opencv/opencv
ref: ${{ env.TARGET_BRANCH_NAME }}
fetch-depth: 0
run: git clone --branch ${{ env.TARGET_BRANCH_NAME }} --reference ${{ env.GIT_CACHE_DOCKER }}/opencv.git https://github.com/opencv/opencv.git ${{ env.OPENCV_DOCKER_WORKDIR }}
- name: Merge opencv with a test branch
run: |
cd ${{ env.OPENCV_DOCKER_WORKDIR }}

@@ -6,14 +6,13 @@ on:
- 4.x
env:
EXTRA_CMAKE_OPTIONS: '-DCL_Z_OPTION=/Z7 -DOPENCV_DOWNLOAD_PATH=c:\Slave\workspace\binaries_cache -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON -DCMAKE_BUILD_TYPE=Release'
EXTRA_CMAKE_OPTIONS: '-DCL_Z_OPTION=/Z7 -DOPENCV_DOWNLOAD_PATH=%BINARIES_CACHE% -DBUILD_EXAMPLES=ON -DOPENCV_ENABLE_NONFREE=ON -DCMAKE_BUILD_TYPE=Release'
OPENCV_TEST_DATA_PATH: ${{ github.workspace }}\opencv_extra\testdata
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
PR_AUTHOR_FORK: ${{ github.event.pull_request.head.repo.full_name }}
SOURCE_BRANCH_NAME: ${{ github.head_ref }}
TARGET_BRANCH_NAME: ${{ github.base_ref }}
GTEST_FILTER_STRING: '-Samples.findFile:videoio/videocapture_acceleration.read/122:videoio/videocapture_acceleration.read/126'
GIT_CACHE: c:\Slave\git_cache
jobs:
BuildAndTest:
@@ -30,25 +29,25 @@ jobs:
- name: Clean
run: cd ${{ github.workspace }} && rm -rf *
- name: Fetch opencv
run: cd ${{ github.workspace }} && git clone --branch ${{ env.TARGET_BRANCH_NAME }} --reference ${{ env.GIT_CACHE }}\opencv.git https://github.com/opencv/opencv.git
run: cd ${{ github.workspace }} && git clone --branch ${{ env.TARGET_BRANCH_NAME }} --reference %GIT_CACHE%\opencv.git git@github.com:opencv/opencv.git
- name: Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch
run: |
cd ${{ github.workspace }}\opencv
git config user.email "opencv.ci"
git config user.name "opencv.ci"
git pull -v "https://github.com/${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}"
git pull -v "git@github.com:${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}"
- name: Fetch opencv_extra
run: cd ${{ github.workspace }} && git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} --reference ${{ env.GIT_CACHE }}\opencv_extra.git https://github.com/opencv/opencv_extra.git
run: cd ${{ github.workspace }} && git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} --reference %GIT_CACHE%\opencv_extra.git git@github.com:opencv/opencv_extra.git
- name: Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch
shell: bash
run: |
RET=$(git ls-remote --heads "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}") || true
if [[ ! -z "$RET" ]]; then
OPENCV_EXTRA_FORK=$(git ls-remote --heads "git@github.com:/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}") || true
if [[ ! -z "$OPENCV_EXTRA_FORK" ]]; then
echo "Merge opencv_extra with ${{ env.SOURCE_BRANCH_NAME }} branch"
cd opencv_extra
git config user.email "opencv.ci"
git config user.name "opencv.ci"
git pull -v "https://github.com/${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}"
git pull -v "git@github.com:${{ env.PR_AUTHOR }}/opencv_extra" "${{ env.SOURCE_BRANCH_NAME }}"
else
echo "No merge since ${{ env.PR_AUTHOR }}/opencv_extra does not have branch ${{ env.SOURCE_BRANCH_NAME }}"
fi
@@ -143,15 +142,28 @@ jobs:
- name: Clean
run: cd ${{ github.workspace }} && rm -rf *
- name: Fetch opencv
run: cd ${{ github.workspace }} && git clone --branch ${{ env.TARGET_BRANCH_NAME }} --reference ${{ env.GIT_CACHE }}\opencv.git https://github.com/opencv/opencv.git
run: cd ${{ github.workspace }} && git clone --branch ${{ env.TARGET_BRANCH_NAME }} --reference %GIT_CACHE%\opencv.git git@github.com:opencv/opencv.git
- name: Merge opencv with ${{ env.SOURCE_BRANCH_NAME }} branch
run: |
cd ${{ github.workspace }}\opencv
git config user.email "opencv.ci"
git config user.name "opencv.ci"
git pull -v "https://github.com/${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}"
git pull -v "git@github.com:${{ env.PR_AUTHOR_FORK }}" "${{ env.SOURCE_BRANCH_NAME }}"
- name: Fetch opencv_contrib
run: cd ${{ github.workspace }} && git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} --reference ${{ env.GIT_CACHE }}\opencv_contrib.git --depth 1 https://github.com/opencv/opencv_contrib.git
run: cd ${{ github.workspace }} && git clone --single-branch --branch ${{ env.TARGET_BRANCH_NAME }} --reference %GIT_CACHE%\opencv_contrib.git --depth 1 git@github.com:opencv/opencv_contrib.git
- name: Merge opencv_contrib with ${{ env.SOURCE_BRANCH_NAME }} branch
shell: bash
run: |
OPENCV_CONTRIB_FORK=$(git ls-remote --heads "git@github.com:${{ env.PR_AUTHOR }}/opencv_contrib" "${{ env.SOURCE_BRANCH_NAME }}") || true
if [[ ! -z "$OPENCV_CONTRIB_FORK" ]]; then
echo "Merge opencv_contrib with ${{ env.SOURCE_BRANCH_NAME }} branch"
cd /opencv_contrib
git config user.email "opencv.ci"
git config user.name "opencv.ci"
git pull -v "git@github.com:${{ env.PR_AUTHOR }}/opencv_contrib" "${{ env.SOURCE_BRANCH_NAME }}"
else
echo "No merge since ${{ env.PR_AUTHOR }}/opencv_contrib does not have branch ${{ env.SOURCE_BRANCH_NAME }}"
fi
- name: Configure OpenCV Contrib
run: |
mkdir ${{ github.workspace }}\opencv-contrib-build && cd ${{ github.workspace }}\opencv-contrib-build

@@ -77,7 +77,7 @@ E.g. external ref-counting is implemented for 1.0 version and native OpenVX one
Also there are some **C++ 11** features are used (e.g. rvalue ref-s) when their availability is detected at ***compile time***.
C++ exceptions are used for errors indication instead of return codes. There are two types of exceptions are defined: `RuntimeError` is thrown when OpenVX C call returned unsuccessful result and `WrapperError` is thrown when a problem is occured in the wrappers code. Both exception calsses are derived from `std::exception` (actually from its inheritants).
C++ exceptions are used for errors indication instead of return codes. There are two types of exceptions are defined: `RuntimeError` is thrown when OpenVX C call returned unsuccessful result and `WrapperError` is thrown when a problem is occurred in the wrappers code. Both exception calsses are derived from `std::exception` (actually from its inheritants).
The so called **OpenVX objects** (e.g. `vx_image`) are represented as C++ classes in wrappers.
All these classes use automatic ref-counting that allows development of exception-safe code.

@@ -52,7 +52,7 @@ Try it
### 2. Dilation
It is just opposite of erosion. Here, a pixel element is '1' if atleast one pixel under the kernel
It is just opposite of erosion. Here, a pixel element is '1' if at least one pixel under the kernel
is '1'. So it increases the white region in the image or size of foreground object increases.
Normally, in cases like noise removal, erosion is followed by dilation. Because, erosion removes
white noises, but it also shrinks our object. So we dilate it. Since noise is gone, they won't come
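As a side note (not part of the diff), the erosion-then-dilation sequence described above is morphological opening; a minimal Python sketch, with an assumed input file name and an arbitrary 5x5 kernel, could look like this:

```python
import cv2 as cv
import numpy as np

# Hypothetical input: a binary image with small white noise specks.
img = cv.imread('j_noisy.png', cv.IMREAD_GRAYSCALE)  # assumed file name
kernel = np.ones((5, 5), np.uint8)

eroded = cv.erode(img, kernel, iterations=1)      # removes white noise, but shrinks the object
restored = cv.dilate(eroded, kernel, iterations=1)  # grows the object back; the noise stays gone

# Equivalent single call: morphological opening
opened = cv.morphologyEx(img, cv.MORPH_OPEN, kernel)
```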

@@ -223,7 +223,7 @@ Before the example, is worth consider first how files are handled in emscripten
These C++ sources use standard APIs to access the filesystem and the implementation often ends up in system calls that read a file in the hard drive. Since JavaScript applications in the browser don't have access to the local filesystem, [emscripten emulates a standard filesystem](https://emscripten.org/docs/api_reference/Filesystem-API.html) so compiled C++ code works out of the box.
In the browser, this filesystem is emulated in memory while in Node.js there's also the possibility of using the local filesystem directly. This is often preferable since there's no need of copy file's content in memory. This section is explains how to do do just that, this is, configuring emscripten so files are accessed directly from our local filesystem and relative paths match files relative to the current local directory as expected.
In the browser, this filesystem is emulated in memory while in Node.js there's also the possibility of using the local filesystem directly. This is often preferable since there's no need of copy file's content in memory. This section explains how to do just that, this is, configuring emscripten so files are accessed directly from our local filesystem and relative paths match files relative to the current local directory as expected.
### The example ###

@@ -18,7 +18,7 @@ is sufficient to find the object exactly on the trainImage.
For that, we can use a function from calib3d module, ie **cv.findHomography()**. If we pass the set
of points from both the images, it will find the perspective transformation of that object. Then we
can use **cv.perspectiveTransform()** to find the object. It needs atleast four correct points to
can use **cv.perspectiveTransform()** to find the object. It needs at least four correct points to
find the transformation.
We have seen that there can be some possible errors while matching which may affect the result. To
@@ -64,7 +64,7 @@ for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
@endcode
Now we set a condition that atleast 10 matches (defined by MIN_MATCH_COUNT) are to be there to
Now we set a condition that at least 10 matches (defined by MIN_MATCH_COUNT) are to be there to
find the object. Otherwise simply show a message saying not enough matches are present.
If enough matches are found, we extract the locations of matched keypoints in both the images. They
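For reference, the step this hunk documents (keep the result only if at least MIN_MATCH_COUNT good matches exist, then estimate the homography and project the object outline) can be sketched in Python as below. This is an illustrative sketch, not the tutorial's exact code: it uses ORB instead of SIFT so it runs without opencv-contrib, and the image file names are assumptions.

```python
import cv2 as cv
import numpy as np

MIN_MATCH_COUNT = 10  # threshold used by the tutorial

img1 = cv.imread('box.png', cv.IMREAD_GRAYSCALE)            # query image (assumed file name)
img2 = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE)   # train image (assumed file name)

orb = cv.ORB_create(5000)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

matcher = cv.BFMatcher(cv.NORM_HAMMING)
good = [m for m, n in matcher.knnMatch(des1, des2, k=2) if m.distance < 0.7 * n.distance]

if len(good) >= MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)  # RANSAC rejects outliers
    h, w = img1.shape
    corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    outline = cv.perspectiveTransform(corners, M)  # object corners located in the scene image
    scene = cv.polylines(cv.cvtColor(img2, cv.COLOR_GRAY2BGR),
                         [np.int32(outline)], True, (0, 0, 255), 3)
else:
    print("Not enough matches found: %d/%d" % (len(good), MIN_MATCH_COUNT))
```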

@@ -48,7 +48,7 @@ Result:
### 2. Dilation
It is just opposite of erosion. Here, a pixel element is '1' if atleast one pixel under the kernel
It is just opposite of erosion. Here, a pixel element is '1' if at least one pixel under the kernel
is '1'. So it increases the white region in the image or size of foreground object increases.
Normally, in cases like noise removal, erosion is followed by dilation. Because, erosion removes
white noises, but it also shrinks our object. So we dilate it. Since noise is gone, they won't come

@@ -81,11 +81,11 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
@end_toggle
@add_toggle_java
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py load
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java load
@end_toggle
@add_toggle_python
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java load
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py load
@end_toggle
#### Convert it to grayscale:
@@ -95,11 +95,11 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
@end_toggle
@add_toggle_java
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py convert_to_gray
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java convert_to_gray
@end_toggle
@add_toggle_python
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java convert_to_gray
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py convert_to_gray
@end_toggle
#### Apply a Median blur to reduce noise and avoid false circle detection:
@@ -109,11 +109,11 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
@end_toggle
@add_toggle_java
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py reduce_noise
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java reduce_noise
@end_toggle
@add_toggle_python
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java reduce_noise
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py reduce_noise
@end_toggle
#### Proceed to apply Hough Circle Transform:
@@ -123,11 +123,11 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
@end_toggle
@add_toggle_java
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py houghcircles
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java houghcircles
@end_toggle
@add_toggle_python
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java houghcircles
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py houghcircles
@end_toggle
- with the arguments:
@@ -151,11 +151,11 @@ The image we used can be found [here](https://raw.githubusercontent.com/opencv/o
@end_toggle
@add_toggle_java
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py draw
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java draw
@end_toggle
@add_toggle_python
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java draw
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py draw
@end_toggle
You can see that we will draw the circle(s) on red and the center(s) with a small green dot
@@ -167,11 +167,11 @@ You can see that we will draw the circle(s) on red and the center(s) with a smal
@end_toggle
@add_toggle_java
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py display
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java display
@end_toggle
@add_toggle_python
@snippet samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java display
@snippet samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py display
@end_toggle
Result
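For readers following the snippet swaps above, the pipeline this tutorial walks through (grayscale conversion, median blur, Hough circle transform, drawing) can be sketched in Python as follows; the input file name and the parameter values are illustrative, not prescribed by the diff.

```python
import cv2 as cv
import numpy as np

src = cv.imread('smarties.png')                 # assumed input image
gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
gray = cv.medianBlur(gray, 5)                   # reduce noise to avoid false circle detection

rows = gray.shape[0]
circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, dp=1, minDist=rows / 8,
                          param1=100, param2=30, minRadius=1, maxRadius=30)

if circles is not None:
    for x, y, r in np.uint16(np.around(circles))[0, :]:
        cv.circle(src, (x, y), 1, (0, 255, 0), 3)   # small green dot at the center
        cv.circle(src, (x, y), r, (0, 0, 255), 3)   # red circle outline

cv.imwrite('circles_detected.png', src)
```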

@@ -130,7 +130,7 @@ Get image from [here](https://raw.githubusercontent.com/opencv/opencv/4.x/doc/tu
#### Output images
Now we are ready to apply morphological operations in order to extract the horizontal and vertical lines and as a consequence to separate the the music notes from the music sheet, but first let's initialize the output images that we will use for that reason:
Now we are ready to apply morphological operations in order to extract the horizontal and vertical lines and as a consequence to separate the music notes from the music sheet, but first let's initialize the output images that we will use for that reason:
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp init
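A compact Python sketch of what this tutorial builds up after initializing the output images: long horizontal and tall vertical structuring elements, then erode + dilate to keep only staff lines or note stems. The input name, binarization parameters, and kernel divisor are assumptions for illustration.

```python
import cv2 as cv

src = cv.imread('notes.png', cv.IMREAD_GRAYSCALE)         # assumed music-sheet image
# Binarize the inverted image so the ink becomes white foreground.
bw = cv.adaptiveThreshold(~src, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 15, -2)

horizontal = bw.copy()
vertical = bw.copy()

# Structuring elements: a long horizontal bar and a tall vertical bar.
h_kernel = cv.getStructuringElement(cv.MORPH_RECT, (horizontal.shape[1] // 30, 1))
v_kernel = cv.getStructuringElement(cv.MORPH_RECT, (1, vertical.shape[0] // 30))

# Opening (erode then dilate) keeps only structures shaped like the kernel.
horizontal = cv.dilate(cv.erode(horizontal, h_kernel), h_kernel)  # staff lines
vertical = cv.dilate(cv.erode(vertical, v_kernel), v_kernel)      # note stems and symbols
```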

@@ -1018,7 +1018,7 @@ public:
@param copyData Flag to specify whether the underlying data of the STL vector should be copied
to (true) or shared with (false) the newly constructed matrix. When the data is copied, the
allocated buffer is managed using Mat reference counting mechanism. While the data is shared,
the reference counter is NULL, and you should not deallocate the data until the matrix is not
the reference counter is NULL, and you should not deallocate the data until the matrix is
destructed.
*/
template<typename _Tp> explicit Mat(const std::vector<_Tp>& vec, bool copyData=false);
@@ -2276,7 +2276,7 @@ public:
std::reverse_iterator<const_iterator> rbegin() const;
std::reverse_iterator<const_iterator> rend() const;
//! template methods for for operation over all matrix elements.
//! template methods for operation over all matrix elements.
// the operations take care of skipping gaps in the end of rows (if any)
template<typename Functor> void forEach(const Functor& operation);
template<typename Functor> void forEach(const Functor& operation) const;

@@ -986,7 +986,7 @@ int getNumberOfCPUs_()
#endif
#if !defined(_WIN32) && !defined(__APPLE__)
#if !defined(_WIN32) && !defined(__APPLE__) && defined(_SC_NPROCESSORS_ONLN)
static unsigned cpu_count_sysconf = (unsigned)sysconf( _SC_NPROCESSORS_ONLN );
ncpus = minNonZero(ncpus, cpu_count_sysconf);
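The change above only guards the `sysconf` query with `defined(_SC_NPROCESSORS_ONLN)`, since not every platform defines that constant. Purely as an analogy (not part of OpenCV), the same defensive pattern in Python would be:

```python
import os
import cv2 as cv

# Query the online-CPU count only where the POSIX name is actually available.
if hasattr(os, "sysconf") and "SC_NPROCESSORS_ONLN" in os.sysconf_names:
    ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
else:
    ncpus = os.cpu_count() or 1

print(ncpus, cv.getNumberOfCPUs())  # OpenCV exposes its own detection as well
```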

@@ -61,7 +61,7 @@ enum Version {
// The version field is always serialized and we will use it to store the
// version that the graph is generated from. This helps us set up version
// control.
// For the IR, we are using simple numbers starting with with 0x00000001,
// For the IR, we are using simple numbers starting with 0x00000001,
// which was the version we published on Oct 10, 2017.
IR_VERSION_2017_10_10 = 0x0000000000000001;

@@ -301,7 +301,7 @@ CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst);
The function imencode compresses the image and stores it in the memory buffer that is resized to fit the
result. See cv::imwrite for the list of supported formats and flags description.
@param ext File extension that defines the output format.
@param ext File extension that defines the output format. Must include a leading period.
@param img Image to be written.
@param buf Output buffer resized to fit the compressed image.
@param params Format-specific parameters. See cv::imwrite and cv::ImwriteFlags.
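A minimal Python illustration of the leading-period requirement the documentation now spells out; the random test image and the JPEG quality value are arbitrary.

```python
import cv2 as cv
import numpy as np

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # arbitrary test image

# The extension must include the leading period: ".jpg", not "jpg".
ok, buf = cv.imencode('.jpg', img, [cv.IMWRITE_JPEG_QUALITY, 90])
assert ok

decoded = cv.imdecode(buf, cv.IMREAD_COLOR)  # round-trip the in-memory buffer back to an image
```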

@@ -602,9 +602,9 @@ bool JpegEncoder::write( const Mat& img, const std::vector<int>& params )
JpegErrorMgr jerr;
JpegDestination dest;
jpeg_create_compress(&cinfo);
cinfo.err = jpeg_std_error(&jerr.pub);
jerr.pub.error_exit = error_exit;
jpeg_create_compress(&cinfo);
if( !m_buf )
{

@@ -1121,7 +1121,7 @@ bool TiffEncoder::writeLibTiff( const std::vector<Mat>& img_vec, const std::vect
CV_TIFF_CHECK_CALL(TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, sample_format));
if (page_compression != COMPRESSION_NONE)
if (page_compression == COMPRESSION_LZW || page_compression == COMPRESSION_ADOBE_DEFLATE || page_compression == COMPRESSION_DEFLATE)
{
CV_TIFF_CHECK_CALL(TIFFSetField(tif, TIFFTAG_PREDICTOR, predictor));
}

@@ -359,6 +359,49 @@ TEST(Imgcodecs_Tiff, read_palette_color_image)
ASSERT_EQ(CV_8UC3, img.type());
}
TEST(Imgcodecs_Tiff, readWrite_predictor)
{
/* see issue #21871
*/
const uchar sample_data[160] = {
0xff, 0xff, 0xff, 0xff, 0x88, 0x88, 0xff, 0xff, 0x88, 0x88, 0xff, 0xff, 0xff, 0xff, 0xff, 0x88,
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
0xff, 0x00, 0x00, 0x44, 0xff, 0xff, 0x88, 0xff, 0x33, 0x00, 0x66, 0xff, 0xff, 0x88, 0x00, 0x44,
0x88, 0x00, 0x44, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x44, 0xff, 0xff, 0x11, 0x00, 0xff,
0x11, 0x00, 0x88, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xff,
0x11, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x33, 0x00, 0x88, 0xff, 0x00, 0x66, 0xff,
0x11, 0x00, 0x66, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x44, 0x33, 0x00, 0xff, 0xff,
0x88, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
0xff, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x33, 0x00, 0x00, 0x66, 0xff, 0xff,
0xff, 0xff, 0x88, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff
};
cv::Mat mat(10, 16, CV_8UC1, (void*)sample_data);
int methods[] = {
COMPRESSION_NONE, COMPRESSION_LZW,
COMPRESSION_PACKBITS, COMPRESSION_DEFLATE, COMPRESSION_ADOBE_DEFLATE
};
for (size_t i = 0; i < sizeof(methods) / sizeof(int); i++)
{
string out = cv::tempfile(".tif");
std::vector<int> params;
params.push_back(TIFFTAG_COMPRESSION);
params.push_back(methods[i]);
params.push_back(TIFFTAG_PREDICTOR);
params.push_back(PREDICTOR_HORIZONTAL);
EXPECT_NO_THROW(cv::imwrite(out, mat, params));
const Mat img = cv::imread(out, IMREAD_UNCHANGED);
ASSERT_FALSE(img.empty());
ASSERT_EQ(0, cv::norm(mat, img, cv::NORM_INF));
EXPECT_EQ(0, remove(out.c_str()));
}
}
//==================================================================================================

@@ -118,7 +118,7 @@ sophisticated [interpolation methods](http://en.wikipedia.org/wiki/Multivariate_
where a polynomial function is fit into some neighborhood of the computed pixel \f$(f_x(x,y),
f_y(x,y))\f$, and then the value of the polynomial at \f$(f_x(x,y), f_y(x,y))\f$ is taken as the
interpolated pixel value. In OpenCV, you can choose between several interpolation methods. See
resize for details.
#resize for details.
@note The geometrical transformations do not work with `CV_8S` or `CV_32S` images.
@@ -1576,7 +1576,7 @@ CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,
For every pixel \f$ (x, y) \f$ in the source image, the function calculates the sum of squares of those neighboring
pixel values which overlap the filter placed over the pixel \f$ (x, y) \f$.
The unnormalized square box filter can be useful in computing local image statistics such as the the local
The unnormalized square box filter can be useful in computing local image statistics such as the local
variance and standard deviation around the neighborhood of a pixel.
@param src input image
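As a sketch of the local-variance use case the corrected sentence mentions, variance can be computed as E[x^2] - E[x]^2 using a normalized box filter of the image and of its square; the window size and input name are arbitrary assumptions.

```python
import cv2 as cv
import numpy as np

img = cv.imread('input.png', cv.IMREAD_GRAYSCALE).astype(np.float32)  # assumed input

ksize = (7, 7)
mean = cv.boxFilter(img, cv.CV_32F, ksize)        # local mean (normalized by default)
mean_sq = cv.sqrBoxFilter(img, cv.CV_32F, ksize)  # local mean of squared pixel values

variance = np.maximum(mean_sq - mean * mean, 0)   # E[x^2] - E[x]^2, clipped at 0 for rounding
stddev = np.sqrt(variance)                        # local standard deviation map
```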
@@ -2345,7 +2345,7 @@ way:
resize(src, dst, Size(), 0.5, 0.5, interpolation);
@endcode
To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
enlarge an image, it will generally look best with c#INTER_CUBIC (slow) or #INTER_LINEAR
enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR
(faster but still looks OK).
@param src input image.
@@ -2437,7 +2437,7 @@ The function remap transforms the source image using the specified map:
where values of pixels with non-integer coordinates are computed using one of available
interpolation methods. \f$map_x\f$ and \f$map_y\f$ can be encoded as separate floating-point maps
in \f$map_1\f$ and \f$map_2\f$ respectively, or interleaved floating-point maps of \f$(x,y)\f$ in
\f$map_1\f$, or fixed-point maps created by using convertMaps. The reason you might want to
\f$map_1\f$, or fixed-point maps created by using #convertMaps. The reason you might want to
convert from floating to fixed-point representations of a map is that they can yield much faster
(\~2x) remapping operations. In the converted case, \f$map_1\f$ contains pairs (cvFloor(x),
cvFloor(y)) and \f$map_2\f$ contains indices in a table of interpolation coefficients.
@@ -2447,7 +2447,7 @@ This function cannot operate in-place.
@param src Source image.
@param dst Destination image. It has the same size as map1 and the same type as src .
@param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating point
CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point
representation to fixed-point for speed.
@param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
if map1 is (x,y) points), respectively.
@@ -2472,7 +2472,7 @@ options ( (map1.type(), map2.type()) \f$\rightarrow\f$ (dstmap1.type(), dstmap2.
supported:
- \f$\texttt{(CV_32FC1, CV_32FC1)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\f$. This is the
most frequently used conversion operation, in which the original floating-point maps (see remap )
most frequently used conversion operation, in which the original floating-point maps (see #remap)
are converted to a more compact and much faster fixed-point representation. The first output array
contains the rounded coordinates and the second array (created only when nninterpolation=false )
contains indices in the interpolation tables.
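The faster fixed-point path these docs describe can be sketched as follows; the horizontal-flip maps are just a placeholder transformation and the input name is assumed.

```python
import cv2 as cv
import numpy as np

src = cv.imread('input.png')                      # assumed input image
h, w = src.shape[:2]

# Floating-point maps: a simple horizontal flip as a stand-in transformation.
xs, ys = np.meshgrid(np.arange(w, dtype=np.float32), np.arange(h, dtype=np.float32))
map_x = (w - 1) - xs
map_y = ys

# Convert once to the compact fixed-point representation (CV_16SC2 + CV_16UC1) ...
fixed1, fixed2 = cv.convertMaps(map_x, map_y, cv.CV_16SC2)

# ... then reuse it for every remap call, which is typically much faster.
dst = cv.remap(src, fixed1, fixed2, cv.INTER_LINEAR)
```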

@@ -43,7 +43,7 @@
#ifdef USE_4OPT
//Utility macros for for 1,2,4 channel images:
//Utility macros for 1,2,4 channel images:
// - LOAD4/STORE4 - load/store 4-pixel groups from/to global memory
// - SHUFFLE4_3/SHUFFLE4_5 - rearrange scattered border/central pixels into regular 4-pixel variables

@@ -1015,7 +1015,7 @@ int CV_MinCircleTest::validate_test_results( int test_case_idx )
if( point_count >= 2 && (j < 2 || (j == 2 && cvTsDist(v[0],v[1]) < (radius-1)*2/eps)) )
{
ts->printf( cvtest::TS::LOG,
"There should be at at least 3 points near the circle boundary or 2 points on the diameter\n" );
"There should be at least 3 points near the circle boundary or 2 points on the diameter\n" );
code = cvtest::TS::FAIL_BAD_ACCURACY;
goto _exit_;
}

@@ -948,7 +948,7 @@ QUnit.test('test_filter', function(assert) {
cv.rotate(src, dst, cv.ROTATE_90_CLOCKWISE);
size = dst.size();
let size = dst.size();
assert.equal(size.height, 2, "ROTATE_HEIGHT");
assert.equal(size.width, 3, "ROTATE_WIGTH");

@@ -126,7 +126,7 @@ bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
int num_classes = (int) this->forward_mapper.size();
if(num_classes < 2)
{
CV_Error( CV_StsBadArg, "data should have atleast 2 classes" );
CV_Error( CV_StsBadArg, "data should have at least 2 classes" );
}
// add a column of ones to the data (bias/intercept term)

@@ -96,7 +96,7 @@ const Mat& KalmanFilter::predict(const Mat& control)
// P'(k) = temp1*At + Q
gemm(temp1, transitionMatrix, 1, processNoiseCov, 1, errorCovPre, GEMM_2_T);
// handle the case when there will be measurement before the next predict.
// handle the case when there will be no measurement before the next predict.
statePre.copyTo(statePost);
errorCovPre.copyTo(errorCovPost);
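The corrected comment is about calling predict() repeatedly without a correct() in between: copying the predicted state and covariance into the *Post members keeps the filter consistent for the next predict. A tiny Python illustration with a made-up 2-state constant-velocity model:

```python
import cv2 as cv
import numpy as np

kf = cv.KalmanFilter(2, 1)                                   # state: [position, velocity]
kf.transitionMatrix = np.array([[1, 1], [0, 1]], np.float32)
kf.measurementMatrix = np.array([[1, 0]], np.float32)
kf.processNoiseCov = np.eye(2, dtype=np.float32) * 1e-3
kf.measurementNoiseCov = np.array([[1e-1]], np.float32)
kf.errorCovPost = np.eye(2, dtype=np.float32)

kf.predict()                                  # no measurement arrives ...
kf.predict()                                  # ... so predict again; statePost was refreshed above
kf.correct(np.array([[2.0]], np.float32))     # a measurement finally becomes available
```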

@@ -475,7 +475,7 @@ bool GStreamerCapture::setAudioProperties(const cv::VideoCaptureParameters& para
/*!
* \brief CvCapture_GStreamer::grabFrame
* \return
* Grabs a sample from the pipeline, awaiting consumation by retreiveFrame.
* Grabs a sample from the pipeline, awaiting consumation by retrieveFrame.
* The pipeline is started if it was not running yet
*/
bool GStreamerCapture::grabFrame()

@@ -224,7 +224,7 @@ public:
if (cvtest::debugLevel > 0)
std::cout << "i = " << i << ": timestamp = " << timestamp << std::endl;
const double frame_period = 1000.f/bunny_param.getFps();
// NOTE: eps == frame_period, because videoCapture returns frame begining timestamp or frame end
// NOTE: eps == frame_period, because videoCapture returns frame beginning timestamp or frame end
// timestamp depending on codec and back-end. So the first frame has timestamp 0 or frame_period.
EXPECT_NEAR(timestamp, i*frame_period, frame_period) << "i=" << i;
}
