Merge remote-tracking branch 'refs/remotes/opencv/master' into deepflow_acceleration

pull/745/head
sbokov 8 years ago
commit d2548239bb
  1. 30
      .github/ISSUE_TEMPLATE.md
  2. 9
      .github/PULL_REQUEST_TEMPLATE.md
  3. 2
      .travis.yml
  4. 2
      CONTRIBUTING.md
  5. 7
      README.md
  6. 70
      modules/README.md
  7. 2
      modules/aruco/include/opencv2/aruco.hpp
  8. 2
      modules/aruco/samples/calibrate_camera.cpp
  9. 4
      modules/aruco/samples/calibrate_camera_charuco.cpp
  10. 2
      modules/aruco/samples/create_board.cpp
  11. 4
      modules/aruco/samples/create_board_charuco.cpp
  12. 4
      modules/aruco/samples/create_diamond.cpp
  13. 4
      modules/aruco/samples/detect_board_charuco.cpp
  14. 4
      modules/aruco/samples/detect_diamonds.cpp
  15. 7
      modules/aruco/samples/detector_params.yml
  16. 2
      modules/aruco/src/aruco.cpp
  17. 11
      modules/aruco/tutorials/aruco_board_detection/aruco_board_detection.markdown
  18. BIN
      modules/aruco/tutorials/aruco_board_detection/images/board.jpg
  19. 10
      modules/aruco/tutorials/aruco_calibration/aruco_calibration.markdown
  20. 15
      modules/aruco/tutorials/aruco_detection/aruco_detection.markdown
  21. BIN
      modules/aruco/tutorials/aruco_detection/images/marker23.jpg
  22. 15
      modules/aruco/tutorials/charuco_detection/charuco_detection.markdown
  23. BIN
      modules/aruco/tutorials/charuco_detection/images/board.jpg
  24. BIN
      modules/aruco/tutorials/charuco_detection/images/charucoboard.jpg
  25. 13
      modules/aruco/tutorials/charuco_diamond_detection/charuco_diamond_detection.markdown
  26. 4
      modules/dnn/CMakeLists.txt
  27. 8
      modules/dnn/tutorials/tutorial_dnn_build.markdown
  28. 4
      modules/face/data/cascades/haarcascade_mcs_eyepair_big.xml
  29. 4
      modules/face/data/cascades/haarcascade_mcs_eyepair_small.xml
  30. 4
      modules/face/data/cascades/haarcascade_mcs_leftear.xml
  31. 4
      modules/face/data/cascades/haarcascade_mcs_lefteye.xml
  32. 4
      modules/face/data/cascades/haarcascade_mcs_mouth.xml
  33. 4
      modules/face/data/cascades/haarcascade_mcs_nose.xml
  34. 4
      modules/face/data/cascades/haarcascade_mcs_rightear.xml
  35. 4
      modules/face/data/cascades/haarcascade_mcs_righteye.xml
  36. 4
      modules/face/data/cascades/haarcascade_mcs_upperbody.xml
  37. 19
      modules/optflow/include/opencv2/optflow.hpp
  38. 20
      modules/optflow/src/dis_flow.cpp
  39. 2
      modules/structured_light/CMakeLists.txt
  40. 11
      modules/structured_light/include/opencv2/structured_light/graycodepattern.hpp
  41. 10
      modules/structured_light/src/graycodepattern.cpp
  42. 2
      modules/text/include/opencv2/text.hpp
  43. 2
      modules/text/include/opencv2/text/erfilter.hpp
  44. 14
      modules/text/include/opencv2/text/ocr.hpp
  45. 10
      modules/text/src/erfilter.cpp
  46. 2
      modules/text/src/ocr_hmm_decoder.cpp
  47. 2
      modules/tracking/include/opencv2/tracking/onlineMIL.hpp
  48. 2
      modules/tracking/src/onlineMIL.cpp
  49. 4
      modules/tracking/tutorials/tutorial_introduction_to_tracker.markdown
  50. 2
      modules/xfeatures2d/src/sift.cpp
  51. 1
      modules/ximgproc/README.md
  52. 10
      modules/ximgproc/doc/ximgproc.bib
  53. 2
      modules/ximgproc/include/opencv2/ximgproc.hpp
  54. 67
      modules/ximgproc/include/opencv2/ximgproc/paillou_filter.hpp
  55. 107
      modules/ximgproc/samples/paillou_demo.cpp
  56. 2
      modules/ximgproc/samples/structured_edge_detection.cpp
  57. 486
      modules/ximgproc/src/paillou_filter.cpp
  58. 2
      modules/ximgproc/tutorials/disparity_filtering.markdown
  59. 2
      modules/xobjdetect/tools/waldboost_detector/CMakeLists.txt

@ -0,0 +1,30 @@
<!--
If you have a question rather than reporting a bug please go to http://answers.opencv.org where you get much faster responses.
If you need further assistance please read [How To Contribute](https://github.com/opencv/opencv/wiki/How_to_contribute).
This is a template helping you to create an issue which can be processed as quickly as possible. This is the bug reporting section for the OpenCV library.
-->
##### System information (version)
<!-- Example
- OpenCV => 3.1
- Operating System / Platform => Windows 64 Bit
- Compiler => Visual Studio 2015
-->
- OpenCV => :grey_question:
- Operating System / Platform => :grey_question:
- Compiler => :grey_question:
##### Detailed description
<!-- your description -->
##### Steps to reproduce
<!-- to add code example fence it with triple backticks and optional file extension
```.cpp
// C++ code example
```
or attach as .txt or .zip file
-->

@ -0,0 +1,9 @@
<!-- Please use this line to close one or multiple issues when this pullrequest gets merged
You can add another line right under the first one:
resolves #1234
resolves #1235
-->
### This pullrequest changes
<!-- Please describe what your pullrequest is changing -->

@ -4,7 +4,7 @@ compiler:
- clang
before_script:
- cd ../
- git clone https://github.com/Itseez/opencv.git
- git clone https://github.com/opencv/opencv.git
- mkdir build-opencv
- cd build-opencv
- cmake -DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib/modules ../opencv

@ -1,3 +1,3 @@
## Contributing guidelines
All guidelines for contributing to the OpenCV repository can be found at [`How to contribute guideline`](https://github.com/Itseez/opencv/wiki/How_to_contribute).
All guidelines for contributing to the OpenCV repository can be found at [`How to contribute guideline`](https://github.com/opencv/opencv/wiki/How_to_contribute).

@ -13,7 +13,8 @@ provides production quality support for this module.
### How to build OpenCV with extra modules
You can build OpenCV, so it will include the modules from this repository.
You can build OpenCV, so it will include the modules from this repository. Contrib modules are under constant development and it is recommended to use them alongside the master branch or latest releases of OpenCV.
Here is the CMake command for you:
```
@ -46,10 +47,12 @@ If you prefer using the gui version of cmake (cmake-gui), then, you can add `ope
7. build the `opencv` core with the method you chose (make and make install if you chose Unix makfile at step 6)
8. to run, linker flags to contrib modules will need to be added to use them in your code/IDE. For example to use the aruco module, "-lopencv_aruco" flag will be added.
### Update the repository documentation
In order to keep a clean overview containing all contributed modules the following files need to be created/adapted.
1. Update the README.md file under the modules folder. Here you add your model with a single line description.
2. Add a README.md inside your own module folder. This README explains which functionality (seperate functions) is available, links to the corresponding samples and explains in somewhat more detail what the module is expected to do. If any extra requirements are needed to build the module without problems, add them here also.
2. Add a README.md inside your own module folder. This README explains which functionality (seperate functions) is available, links to the corresponding samples and explains in somewhat more detail what the module is expected to do. If any extra requirements are needed to build the module without problems, add them here also.

@ -1,59 +1,71 @@
An overview of the contrib modules and a small explanation
----------------------------------------------------------
An overview of the opencv_contrib modules
-----------------------------------------
This list gives an overview of all modules available inside the contrib repository.
These are also the correct names for disabling the building of a specific module by adding
To turn off building one of these module repositories, set the names in bold below to <reponame>
```
$ cmake -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules -D BUILD_opencv_reponame=OFF <opencv_source_directory>
$ cmake -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules -D BUILD_opencv_<reponame>=OFF <opencv_source_directory>
```
1. **opencv_adas**: Advanced Driver Assistance Systems module with Forward Collision Warning.
1. **aruco**: ArUco and ChArUco Markers -- Augmented reality ArUco marker and "ChARUco" markers where ArUco markers embedded inside the white areas of the checker board.
2. **opencv_bgsegm**: Improved Adaptive Background Mixture Model for Real-time Tracking / Visual Tracking of Human Visitors under Variable-Lighting Conditions.
2. **bgsegm**: Background Segmentation -- Improved Adaptive Background Mixture Model and use for real time human tracking under Variable-Lighting Conditions.
3. **opencv_bioinspired**: Biologically inspired vision models and derivated tools.
3. **bioinspired**: Biological Vision -- Biologically inspired vision model: minimize noise and luminance variance, transient event segmentation, high dynamic range tone mapping methods.
4. **opencv_ ccalib**: Custom Calibration Pattern for 3D reconstruction.
4. **ccalib**: Custom Calibration -- Patterns for 3D reconstruction, omnidirectional camera calibration, random pattern calibration and multi-camera calibration.
5. **opencv_cvv**: GUI for Interactive Visual Debugging of Computer Vision Programs.
5. **cnn_3dobj**: Deep Object Recognition and Pose -- Uses Caffe Deep Neural Net library to build, train and test a CNN model of visual object recognition and pose.
6. **opencv_datasets**: Interface for interfacing with existing computer vision databases.
6. **contrib_world**: opencv_contrib holder -- contrib_world is the module that when built, contains all other opencv_contrib modules. It may be used for the more convenient redistribution of opencv binaries.
7. **opencv_datasettools**: Tools for working with different datasets.
7. **cvv**: Computer Vision Debugger -- Simple code that you can add to your program that pops up a GUI allowing you to interactively and visually debug computer vision programs.
8. **opencv_face**: Recently added face recognition software which is not yet stabilized.
8. **datasets**: Datasets Reader -- Code for reading existing computer vision databases and samples of using the readers to train, test and run using that dataset's data.
9. **opencv_latentsvm**: Implementation of the LatentSVM detector algorithm.
9. **dnn**: Deep Neural Networks (DNNs) -- This module can read in image recogniton networks trained in the Caffe neural netowrk library and run them efficiently on CPU.
10. **opencv_line_descriptor**: Binary descriptors for lines extracted from an image.
10. **dnns_easily_fooled**: Subvert DNNs -- This code can use the activations in a network to fool the networks into recognizing something else.
11. **opencv_matlab**: OpenCV Matlab Code Generator.
11. **dpm**: Deformable Part Model -- Felzenszwalb's Cascade with deformable parts object recognition code.
12. **opencv_optflow**: Optical Flow Algorithms for tracking points.
12. **face**: Face Recognition -- Face recognition techniques: Eigen, Fisher and Local Binary Pattern Histograms LBPH methods.
13. **opencv_reg**: Image Registration module.
13. **fuzzy**: Fuzzy Logic in Vision -- Fuzzy logic image transform and inverse; Fuzzy image processing.
14. **opencv_rgbd**: RGB-Depth Processing module.
14. **hdf**: Hierarchical Data Storage -- This module contains I/O routines for Hierarchical Data Format: https://en.m.wikipedia.org/wiki/Hierarchical_Data_Format meant to store large amounts of data.
15. **opencv_saliency**: Saliency API, understanding where humans focus given a scene.
15. **line_descriptor**: Line Segment Extract and Match -- Methods of extracting, describing and latching line segments using binary descriptors.
16. **opencv_surface_matching**: Surface Matching Algorithm Through 3D Features.
16. **matlab**: Matlab Interface -- OpenCV Matlab Mex wrapper code generator for certain opencv core modules.
17. **opencv_text**: Scene Text Detection and Recognition in Natural Scene Images.
17. **optflow**: Optical Flow -- Algorithms for running and evaluating deepflow, simpleflow, sparsetodenseflow and motion templates (silhouette flow).
18. **opencv_tracking**: Long-term optical tracking API.
18. **plot**: Plotting -- The plot module allows you to easily plot data in 1D or 2D.
19. **opencv_xfeatures2d**: Extra 2D Features Framework containing experimental and non-free 2D feature algorithms.
19. **reg**: Image Registration -- Pixels based image registration for precise alignment. Follows the paper "Image Alignment and Stitching: A Tutorial", by Richard Szeliski.
20. **opencv_ximgproc**: Extended Image Processing: Structured Forests / Domain Transform Filter / Guided Filter / Adaptive Manifold Filter / Joint Bilateral Filter / Superpixels.
20. **rgbd**: RGB-Depth Processing module -- Linemod 3D object recognition; Fast surface normals and 3D plane finding. 3D visual odometry
21. **opencv_xobjdetect**: Integral Channel Features Detector Framework.
21. **saliency**: Saliency API -- Where humans would look in a scene. Has routines for static, motion and "objectness" saliency.
22. **opencv_xphoto**: Additional photo processing algorithms: Color balance / Denoising / Inpainting.
22. **sfm**: Structure from Motion -- This module contains algorithms to perform 3d reconstruction from 2d images. The core of the module is a light version of Libmv.
23. **opencv_stereo**: Stereo Correspondence done with different descriptors: Census / CS-Census / MCT / BRIEF / MV.
23. **stereo**: Stereo Correspondence -- Stereo matching done with different descriptors: Census / CS-Census / MCT / BRIEF / MV.
24. **opencv_hdf**: Hierarchical Data Format I/O.
24. **structured_light**: Structured Light Use -- How to generate and project gray code patterns and use them to find dense depth in a scene.
25. **opencv_fuzzy**: New module focused on the fuzzy image processing.
25. **surface_matching**: Point Pair Features -- Implements 3d object detection and localization using multimodal point pair features.
26. **text**: Visual Text Matching -- In a visual scene, detect text, segment words and recognise the text.
27. **tracking**: Vision Based Object Tracking -- Use and/or evaluate one of 5 different visual object tracking techniques.
28. **xfeatures2d**: Features2D extra -- Extra 2D Features Framework containing experimental and non-free 2D feature detector/descriptor algorithms. SURF, SIFT, BRIEF, Censure, Freak, LUCID, Daisy, Self-similar.
29. **ximgproc**: Extended Image Processing -- Structured Forests / Domain Transform Filter / Guided Filter / Adaptive Manifold Filter / Joint Bilateral Filter / Superpixels.
30. **xobjdetect**: Boosted 2D Object Detection -- Uses a Waldboost cascade and local binary patterns computed as integral features for 2D object detection.
31. **xphoto**: Extra Computational Photography -- Additional photo processing algorithms: Color balance / Denoising / Inpainting.

@ -374,7 +374,7 @@ CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, P
*/
CV_EXPORTS_W void refineDetectedMarkers(
InputArray image, Ptr<Board> &board, InputOutputArrayOfArrays detectedCorners,
InputOutputArray detectedIds, InputOutputArray rejectedCorners,
InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners,
InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),
float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true,
OutputArray recoveredIdxs = noArray(), const Ptr<DetectorParameters> &parameters = DetectorParameters::create());

@ -58,7 +58,7 @@ const char* about =
const char* keys =
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{l | | Marker side lenght (in meters) }"
"{l | | Marker side length (in meters) }"
"{s | | Separation between two consecutive markers in the grid (in meters) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "

@ -57,8 +57,8 @@ const char* about =
const char* keys =
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{sl | | Square side lenght (in pixels) }"
"{ml | | Marker side lenght (in pixels) }"
"{sl | | Square side length (in meters) }"
"{ml | | Marker side length (in meters) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"

@ -48,7 +48,7 @@ const char* keys =
"{@outfile |<none> | Output image }"
"{w | | Number of markers in X direction }"
"{h | | Number of markers in Y direction }"
"{l | | Marker side lenght (in pixels) }"
"{l | | Marker side length (in pixels) }"
"{s | | Separation between two consecutive markers in the grid (in pixels)}"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "

@ -48,8 +48,8 @@ const char* keys =
"{@outfile |<none> | Output image }"
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{sl | | Square side lenght (in pixels) }"
"{ml | | Marker side lenght (in pixels) }"
"{sl | | Square side length (in pixels) }"
"{ml | | Marker side length (in pixels) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"

@ -49,8 +49,8 @@ namespace {
const char* about = "Create a ChArUco marker image";
const char* keys =
"{@outfile |<none> | Output image }"
"{sl | | Square side lenght (in pixels) }"
"{ml | | Marker side lenght (in pixels) }"
"{sl | | Square side length (in pixels) }"
"{ml | | Marker side length (in pixels) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"

@ -51,8 +51,8 @@ const char* about = "Pose estimation using a ChArUco board";
const char* keys =
"{w | | Number of squares in X direction }"
"{h | | Number of squares in Y direction }"
"{sl | | Square side lenght (in pixels) }"
"{ml | | Marker side lenght (in pixels) }"
"{sl | | Square side length (in meters) }"
"{ml | | Marker side length (in meters) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"

@ -49,8 +49,8 @@ using namespace cv;
namespace {
const char* about = "Detect ChArUco markers";
const char* keys =
"{sl | | Square side lenght (in pixels) }"
"{ml | | Marker side lenght (in pixels) }"
"{sl | | Square side length (in meters) }"
"{ml | | Marker side length (in meters) }"
"{d | | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"

@ -1,5 +1,8 @@
%YAML:1.0
nmarkers: 1024
adaptiveThreshWinSizeMin: 3
adaptiveThreshWinSizeMax: 23
adaptiveThreshWinSizeStep: 10
adaptiveThreshWinSize: 21
adaptiveThreshConstant: 7
minMarkerPerimeterRate: 0.03
@ -8,6 +11,8 @@ polygonalApproxAccuracyRate: 0.05
minCornerDistance: 10.0
minDistanceToBorder: 3
minMarkerDistance: 10.0
minMarkerDistanceRate: 0.05
doCornerRefinement: false
cornerRefinementWinSize: 5
cornerRefinementMaxIterations: 30
cornerRefinementMinAccuracy: 0.1
@ -15,3 +20,5 @@ markerBorderBits: 1
perspectiveRemovePixelPerCell: 8
perspectiveRemoveIgnoredMarginPerCell: 0.13
maxErroneousBitsInBorderRate: 0.04
minOtsuStdDev: 5.0
errorCorrectionRate: 0.6

@ -1099,7 +1099,7 @@ static void _projectUndetectedMarkers(Ptr<Board> &_board, InputOutputArrayOfArra
*/
void refineDetectedMarkers(InputArray _image, Ptr<Board> &_board,
InputOutputArrayOfArrays _detectedCorners, InputOutputArray _detectedIds,
InputOutputArray _rejectedCorners, InputArray _cameraMatrix,
InputOutputArrayOfArrays _rejectedCorners, InputArray _cameraMatrix,
InputArray _distCoeffs, float minRepDistance, float errorCorrectionRate,
bool checkAllOrders, OutputArray _recoveredIdxs,
const Ptr<DetectorParameters> &_params) {

@ -155,6 +155,11 @@ The output image will be something like this:
A full working example of board creation is included in the ```create_board.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
"_output path_/aboard.png" -w=5 -h=7 -l=100 -s=10 -d=10
```
Finally, a full example of board detection:
``` c++
@ -204,6 +209,12 @@ Sample video:
A full working example is included in the ```detect_board.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
-c="_path_"/calib.txt" "_path_/aboard.png" -w=5 -h=7 -l=100 -s=10 -d=10
```
Refine marker detection
-----

Binary file not shown.

Before

Width:  |  Height:  |  Size: 79 KiB

After

Width:  |  Height:  |  Size: 11 KiB

@ -61,6 +61,11 @@ Finally, the ```calibrationFlags``` parameter determines some of the options for
A full working example is included in the ```calibrate_camera_charuco.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
_output path_" -dp="_path_/detector_params.yml" -w=5 -h=7 -sl=0.04 -ml=0.02 -d=10
```
Calibration with ArUco Boards
@ -100,3 +105,8 @@ The rest of parameters are the same than in ```calibrateCameraCharuco()```, exce
any ```Board``` object.
A full working example is included in the ```calibrate_camera.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
"_path_/calib.txt" -w=5 -h=7 -l=100 -s=10 -d=10
```

@ -103,6 +103,10 @@ The generated image is:
A full working example is included in the ```create_marker.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
"/Users/Sarthak/Dropbox/OpenCV_GSoC/marker.png" -d=10 -id=1
```
Marker Detection
------
@ -226,6 +230,11 @@ output vector of rejected candidates.
A full working example is included in the ```detect_markers.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
-c="_path_/calib.txt" -d=10
```
Pose Estimation
@ -331,6 +340,12 @@ Sample video:
A full working example is included in the ```detect_markers.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
-c="_path_/calib.txt" -d=10
```
Selecting a dictionary
------

Binary file not shown.

Before

Width:  |  Height:  |  Size: 4.7 KiB

After

Width:  |  Height:  |  Size: 1.1 KiB

@ -78,6 +78,11 @@ The output image will be something like this:
A full working example is included in the ```create_board_charuco.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
"_ output path_/chboard.png" -w=5 -h=7 -sl=200 -ml=120 -d=10
```
ChArUco Board Detection
------
@ -240,6 +245,11 @@ Sample video:
A full working example is included in the ```detect_board_charuco.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
-c="_path_/calib.txt" -dp="_path_/detector_params.yml" -w=5 -h=7 -sl=0.04 -ml=0.02 -d=10
```
ChArUco Pose Estimation
------
@ -312,3 +322,8 @@ A full example of ChArUco detection with pose estimation:
```
A full working example is included in the ```detect_board_charuco.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
"_path_/calib.txt" -dp="_path_/detector_params.yml" -w=5 -h=7 -sl=0.04 -ml=0.02 -d=10
```

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 28 KiB

@ -60,6 +60,10 @@ The image produced will be:
A full working example is included in the ```create_diamond.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
"_path_/mydiamond.png" -sl=200 -ml=120 -d=10 -ids=45,68,28,74
```
ChArUco Diamond Detection
------
@ -116,6 +120,10 @@ The result is the same that the one produced by ```drawDetectedMarkers()```, but
A full working example is included in the ```detect_diamonds.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
-c="_path_/calib.txt" -dp="_path_/detector_params.yml" -sl=0.04 -ml=0.02 -d=10
```
ChArUco Diamond Pose Estimation
------
@ -159,3 +167,8 @@ Sample video:
@endhtmlonly
A full working example is included in the ```detect_diamonds.cpp``` inside the module samples folder.
Note: The samples now take input via commandline via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like
``` c++
-c="_output path_/calib.txt" -dp="_path_/detector_params.yml" -sl=0.04 -ml=0.02 -d=10
```

@ -1,6 +1,8 @@
cmake_minimum_required(VERSION 2.8)
if(APPLE_FRAMEWORK OR WINRT)
if(APPLE_FRAMEWORK OR WINRT
OR AARCH64 # protobuf doesn't know this platform
)
ocv_module_disable(dnn)
endif()

@ -3,7 +3,7 @@ Build opencv_contrib with dnn module {#tutorial_dnn_build}
Introduction
------------
opencv_dnn module is placed in the secondary [opencv_contrib](https://github.com/Itseez/opencv_contrib) repository,
opencv_dnn module is placed in the secondary [opencv_contrib](https://github.com/opencv/opencv_contrib) repository,
which isn't distributed in binary form, therefore you need to build it manually.
To do this you need to have installed: [CMake](http://www.cmake.org/download), git, and build system (*gcc* with *make* for Linux or *MS Visual Studio* for Windows)
@ -12,12 +12,12 @@ Steps
-----
-# Make any directory, for example **opencv_root**
-# Clone [opencv](https://github.com/Itseez/opencv) and [opencv_contrib](https://github.com/Itseez/opencv_contrib) repos to the **opencv_root**.
-# Clone [opencv](https://github.com/opencv/opencv) and [opencv_contrib](https://github.com/opencv/opencv_contrib) repos to the **opencv_root**.
You can do it in terminal like here:
@code
cd opencv_root
git clone https://github.com/Itseez/opencv
git clone https://github.com/Itseez/opencv_contrib
git clone https://github.com/opencv/opencv
git clone https://github.com/opencv/opencv_contrib
@endcode
-# Run [CMake-gui] and set source and build directories:

@ -122,8 +122,8 @@ Section 8 – Interpretation.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>45</height>
<width>11</width>
<height>11</height>
<width>45</width>
<stageParams>
<maxWeakCount>85</maxWeakCount></stageParams>
<featureParams>

@ -121,8 +121,8 @@ Section 8 – Interpretation.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>22</height>
<width>5</width>
<height>5</height>
<width>22</width>
<stageParams>
<maxWeakCount>133</maxWeakCount></stageParams>
<featureParams>

@ -120,8 +120,8 @@ Section 8 – Interpretation.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>12</height>
<width>20</width>
<height>20</height>
<width>12</width>
<stageParams>
<maxWeakCount>65</maxWeakCount></stageParams>
<featureParams>

@ -121,8 +121,8 @@ Section 8 – Interpretation.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>18</height>
<width>12</width>
<height>12</height>
<width>18</width>
<stageParams>
<maxWeakCount>279</maxWeakCount></stageParams>
<featureParams>

@ -122,8 +122,8 @@ Section 8 – Interpretation.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>25</height>
<width>15</width>
<height>15</height>
<width>25</width>
<stageParams>
<maxWeakCount>218</maxWeakCount></stageParams>
<featureParams>

@ -121,8 +121,8 @@ Section 8 – Interpretation.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>18</height>
<width>15</width>
<height>15</height>
<width>18</width>
<stageParams>
<maxWeakCount>377</maxWeakCount></stageParams>
<featureParams>

@ -121,8 +121,8 @@ Section 8 – Interpretation.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>12</height>
<width>20</width>
<height>20</height>
<width>12</width>
<stageParams>
<maxWeakCount>61</maxWeakCount></stageParams>
<featureParams>

@ -121,8 +121,8 @@ Section 8 – Interpretation.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>18</height>
<width>12</width>
<height>12</height>
<width>18</width>
<stageParams>
<maxWeakCount>415</maxWeakCount></stageParams>
<featureParams>

@ -119,8 +119,8 @@ Section 8 – Interpretation.
<opencv_storage>
<cascade type_id="opencv-cascade-classifier"><stageType>BOOST</stageType>
<featureType>HAAR</featureType>
<height>22</height>
<width>20</width>
<height>20</height>
<width>22</width>
<stageParams>
<maxWeakCount>334</maxWeakCount></stageParams>
<featureParams>

@ -302,6 +302,25 @@ public:
/** @copybrief getGradientDescentIterations @see getGradientDescentIterations */
CV_WRAP virtual void setVariationalRefinementIterations(int val) = 0;
/** @brief Weight of the smoothness term
@see setVariationalRefinementAlpha */
CV_WRAP virtual float getVariationalRefinementAlpha() const = 0;
/** @copybrief getVariationalRefinementAlpha @see getVariationalRefinementAlpha */
CV_WRAP virtual void setVariationalRefinementAlpha(float val) = 0;
/** @brief Weight of the color constancy term
@see setVariationalRefinementDelta */
CV_WRAP virtual float getVariationalRefinementDelta() const = 0;
/** @copybrief getVariationalRefinementDelta @see getVariationalRefinementDelta */
CV_WRAP virtual void setVariationalRefinementDelta(float val) = 0;
/** @brief Weight of the gradient constancy term
@see setVariationalRefinementGamma */
CV_WRAP virtual float getVariationalRefinementGamma() const = 0;
/** @copybrief getVariationalRefinementGamma @see getVariationalRefinementGamma */
CV_WRAP virtual void setVariationalRefinementGamma(float val) = 0;
/** @brief Whether to use mean-normalization of patches when computing patch distance. It is turned on
by default as it typically provides a noticeable quality boost because of increased robustness to
illumination variations. Turn it off if you are certain that your sequence doesn't contain any changes

@ -65,6 +65,9 @@ class DISOpticalFlowImpl : public DISOpticalFlow
int patch_stride;
int grad_descent_iter;
int variational_refinement_iter;
float variational_refinement_alpha;
float variational_refinement_gamma;
float variational_refinement_delta;
bool use_mean_normalization;
bool use_spatial_propagation;
@ -84,6 +87,13 @@ class DISOpticalFlowImpl : public DISOpticalFlow
void setGradientDescentIterations(int val) { grad_descent_iter = val; }
int getVariationalRefinementIterations() const { return variational_refinement_iter; }
void setVariationalRefinementIterations(int val) { variational_refinement_iter = val; }
float getVariationalRefinementAlpha() const { return variational_refinement_alpha; }
void setVariationalRefinementAlpha(float val) { variational_refinement_alpha = val; }
float getVariationalRefinementDelta() const { return variational_refinement_delta; }
void setVariationalRefinementDelta(float val) { variational_refinement_delta = val; }
float getVariationalRefinementGamma() const { return variational_refinement_gamma; }
void setVariationalRefinementGamma(float val) { variational_refinement_gamma = val; }
bool getUseMeanNormalization() const { return use_mean_normalization; }
void setUseMeanNormalization(bool val) { use_mean_normalization = val; }
bool getUseSpatialPropagation() const { return use_spatial_propagation; }
@ -161,6 +171,10 @@ DISOpticalFlowImpl::DISOpticalFlowImpl()
patch_stride = 4;
grad_descent_iter = 16;
variational_refinement_iter = 5;
variational_refinement_alpha = 20.f;
variational_refinement_gamma = 10.f;
variational_refinement_delta = 5.f;
border_size = 16;
use_mean_normalization = true;
use_spatial_propagation = true;
@ -234,9 +248,9 @@ void DISOpticalFlowImpl::prepareBuffers(Mat &I0, Mat &I1)
spatialGradient(I0s[i], I0xs[i], I0ys[i]);
Ux[i].create(cur_rows, cur_cols);
Uy[i].create(cur_rows, cur_cols);
variational_refinement_processors[i]->setAlpha(20.0f);
variational_refinement_processors[i]->setDelta(5.0f);
variational_refinement_processors[i]->setGamma(10.0f);
variational_refinement_processors[i]->setAlpha(variational_refinement_alpha);
variational_refinement_processors[i]->setDelta(variational_refinement_delta);
variational_refinement_processors[i]->setGamma(variational_refinement_gamma);
variational_refinement_processors[i]->setSorIterations(5);
variational_refinement_processors[i]->setFixedPointIterations(variational_refinement_iter);
}

@ -1,2 +1,2 @@
set(the_description "Structured Light API")
ocv_define_module(structured_light opencv_core opencv_calib3d opencv_imgproc opencv_highgui opencv_features2d opencv_rgbd OPTIONAL opencv_viz)
ocv_define_module(structured_light opencv_core opencv_calib3d opencv_imgproc opencv_highgui opencv_features2d opencv_rgbd OPTIONAL opencv_viz WRAP python java)

@ -43,6 +43,7 @@
#define __OPENCV_GRAY_CODE_PATTERN_HPP__
#include "opencv2/core.hpp"
#include "opencv2/structured_light/structured_light.hpp"
namespace cv {
namespace structured_light {
@ -72,22 +73,22 @@ class CV_EXPORTS_W GrayCodePattern : public StructuredLightPattern
* @param width Projector's width. Default value is 1024.
* @param height Projector's height. Default value is 768.
*/
struct CV_EXPORTS_W_SIMPLE Params
struct CV_EXPORTS Params
{
CV_WRAP
Params();
CV_PROP_RW
int width;
CV_PROP_RW
int height;
};
/** @brief Constructor
@param parameters GrayCodePattern parameters GrayCodePattern::Params: the width and the height of the projector.
*/
CV_WRAP
static Ptr<GrayCodePattern> create( const GrayCodePattern::Params &parameters = GrayCodePattern::Params() );
// alias for scripting
CV_WRAP
static Ptr<GrayCodePattern> create( int width, int height );
/** @brief Get the number of pattern images needed for the graycode pattern.
*
* @return The number of pattern images needed for the graycode pattern.

@ -471,5 +471,15 @@ Ptr<GrayCodePattern> GrayCodePattern::create( const GrayCodePattern::Params& par
return makePtr<GrayCodePattern_Impl>( params );
}
// Creates the GrayCodePattern instance
// alias for scripting
Ptr<GrayCodePattern> GrayCodePattern::create( int width, int height )
{
Params params;
params.width = width;
params.height = height;
return makePtr<GrayCodePattern_Impl>( params );
}
}
}

@ -92,7 +92,7 @@ grouping horizontally aligned text, and the method proposed by Lluis Gomez and D
in [Gomez13][Gomez14] for grouping arbitrary oriented text (see erGrouping).
To see the text detector at work, have a look at the textdetection demo:
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/textdetection.cpp>
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/textdetection.cpp>
@defgroup text_recognize Scene Text Recognition
@}

@ -345,7 +345,7 @@ single vector\<Point\>, the function separates them in two different vectors (th
ERStats where extracted from two different channels).
An example of MSERsToERStats in use can be found in the text detection webcam_demo:
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>
*/
CV_EXPORTS void MSERsToERStats(InputArray image, std::vector<std::vector<Point> > &contours,
std::vector<std::vector<ERStat> > &regions);

@ -81,10 +81,10 @@ Notice that it is compiled only when tesseract-ocr is correctly installed.
@note
- (C++) An example of OCRTesseract recognition combined with scene text detection can be found
at the end_to_end_recognition demo:
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/end_to_end_recognition.cpp>
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/end_to_end_recognition.cpp>
- (C++) Another example of OCRTesseract recognition combined with scene text detection can be
found at the webcam_demo:
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>
*/
class CV_EXPORTS_W OCRTesseract : public BaseOCR
{
@ -152,7 +152,7 @@ enum decoder_mode
@note
- (C++) An example on using OCRHMMDecoder recognition combined with scene text detection can
be found at the webcam_demo sample:
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp>
*/
class CV_EXPORTS_W OCRHMMDecoder : public BaseOCR
{
@ -165,7 +165,7 @@ public:
The default character classifier and feature extractor can be loaded using the utility function
loadOCRHMMClassifierNM and KNN model provided in
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/OCRHMM_knn_model_data.xml.gz>.
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRHMM_knn_model_data.xml.gz>.
*/
class CV_EXPORTS_W ClassifierCallback
{
@ -321,7 +321,7 @@ CV_EXPORTS_W Ptr<OCRHMMDecoder::ClassifierCallback> loadOCRHMMClassifierCNN(cons
* The function calculate frequency statistics of character pairs from the given lexicon and fills the output transition_probabilities_table with them. The transition_probabilities_table can be used as input in the OCRHMMDecoder::create() and OCRBeamSearchDecoder::create() methods.
* @note
* - (C++) An alternative would be to load the default generic language transition table provided in the text module samples folder (created from ispell 42869 english words list) :
* <https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/OCRHMM_transitions_table.xml>
* <https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRHMM_transitions_table.xml>
**/
CV_EXPORTS void createOCRHMMTransitionsTable(std::string& vocabulary, std::vector<std::string>& lexicon, OutputArray transition_probabilities_table);
@ -335,7 +335,7 @@ CV_EXPORTS_W Mat createOCRHMMTransitionsTable(const String& vocabulary, std::vec
@note
- (C++) An example on using OCRBeamSearchDecoder recognition combined with scene text detection can
be found at the demo sample:
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/word_recognition.cpp>
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/word_recognition.cpp>
*/
class CV_EXPORTS_W OCRBeamSearchDecoder : public BaseOCR
{
@ -348,7 +348,7 @@ public:
The default character classifier and feature extractor can be loaded using the utility function
loadOCRBeamSearchClassifierCNN with all its parameters provided in
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/OCRBeamSearch_CNN_model_data.xml.gz>.
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRBeamSearch_CNN_model_data.xml.gz>.
*/
class CV_EXPORTS_W ClassifierCallback
{

@ -2820,12 +2820,12 @@ bool guo_hall_thinning(const Mat1b & img, Mat& skeleton)
p8 = (skeleton.data[row * skeleton.cols + col-1]) > 0;
p9 = (skeleton.data[(row-1) * skeleton.cols + col-1]) > 0;
int C = (!p2 & (p3 | p4)) + (!p4 & (p5 | p6)) +
(!p6 & (p7 | p8)) + (!p8 & (p9 | p2));
int N1 = (p9 | p2) + (p3 | p4) + (p5 | p6) + (p7 | p8);
int N2 = (p2 | p3) + (p4 | p5) + (p6 | p7) + (p8 | p9);
int C = (!p2 && (p3 || p4)) + (!p4 && (p5 || p6)) +
(!p6 && (p7 || p8)) + (!p8 && (p9 || p2));
int N1 = (p9 || p2) + (p3 || p4) + (p5 || p6) + (p7 || p8);
int N2 = (p2 || p3) + (p4 || p5) + (p6 || p7) + (p8 || p9);
int N = N1 < N2 ? N1 : N2;
int m = iter == 0 ? ((p6 | p7 | !p9) & p8) : ((p2 | p3 | !p5) & p4);
int m = iter == 0 ? ((p6 || p7 || !p9) && p8) : ((p2 || p3 || !p5) && p4);
if ((C == 1) && (N >= 2) && (N <= 3) && (m == 0))
{

@ -1206,7 +1206,7 @@ the output transition_probabilities_table with them.
The transition_probabilities_table can be used as input in the OCRHMMDecoder::create() and OCRBeamSearchDecoder::create() methods.
@note
- (C++) An alternative would be to load the default generic language transition table provided in the text module samples folder (created from ispell 42869 english words list) :
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/OCRHMM_transitions_table.xml>
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRHMM_transitions_table.xml>
*/
void createOCRHMMTransitionsTable(string& vocabulary, vector<string>& lexicon, OutputArray _transitions)
{

@ -54,8 +54,6 @@ namespace cv
//TODO based on the original implementation
//http://vision.ucsd.edu/~bbabenko/project_miltrack.shtml
#define sign(s) ((s > 0 ) ? 1 : ((s<0) ? -1 : 0))
class ClfOnlineStump;
class CV_EXPORTS ClfMilBoost

@ -42,6 +42,8 @@
#include "precomp.hpp"
#include "opencv2/tracking/onlineMIL.hpp"
#define sign(s) ((s > 0 ) ? 1 : ((s<0) ? -1 : 0))
template<class T> class SortableElementRev
{
public:

@ -28,8 +28,8 @@ Explanation
as shown in help. In the help, it means that the image files are numbered with 4 digits
(e.g. the file naming will be 0001.jpg, 0002.jpg, and so on).
You can find video samples in Itseez/opencv_extra/testdata/cv/tracking
<https://github.com/Itseez/opencv_extra/tree/master/testdata/cv/tracking>
You can find video samples in opencv_extra/testdata/cv/tracking
<https://github.com/opencv/opencv_extra/tree/master/testdata/cv/tracking>
-# **Declares the required variables**

@ -579,7 +579,7 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc
float hist_width = SIFT_DESCR_SCL_FCTR * scl;
int radius = cvRound(hist_width * 1.4142135623730951f * (d + 1) * 0.5f);
// Clip the radius to the diagonal of the image to avoid autobuffer too large exception
radius = std::min(radius, (int) sqrt((double) img.cols*img.cols + img.rows*img.rows));
radius = std::min(radius, (int) sqrt(((double) img.cols)*img.cols + ((double) img.rows)*img.rows));
cos_t /= hist_width;
sin_t /= hist_width;

@ -9,3 +9,4 @@ Extended Image Processing
6. Superpixels
7. Graph segmentation
8. Selective search from segmentation
10. Paillou Filter

@ -166,3 +166,13 @@
year={2014},
organization={IEEE}
}
@article{paillou1997detecting,
title={Detecting step edges in noisy SAR images: a new linear operator},
author={Paillou, Philippe},
journal={IEEE transactions on geoscience and remote sensing},
volume={35},
number={1},
pages={191--196},
year={1997}
}

@ -48,6 +48,8 @@
#include "ximgproc/weighted_median_filter.hpp"
#include "ximgproc/slic.hpp"
#include "ximgproc/lsc.hpp"
#include "ximgproc/paillou_filter.hpp"
/** @defgroup ximgproc Extended Image Processing
@{

@ -0,0 +1,67 @@
/*
* By downloading, copying, installing or using the software you agree to this license.
* If you do not agree to this license, do not download, install,
* copy or use the software.
*
*
* License Agreement
* For Open Source Computer Vision Library
* (3 - clause BSD License)
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met :
*
* *Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and / or other materials provided with the distribution.
*
* * Neither the names of the copyright holders nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided by the copyright holders and contributors "as is" and
* any express or implied warranties, including, but not limited to, the implied
* warranties of merchantability and fitness for a particular purpose are disclaimed.
* In no event shall copyright holders or contributors be liable for any direct,
* indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort(including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*/
#ifndef __OPENCV_PAILLOUFILTER_HPP__
#define __OPENCV_PAILLOUFILTER_HPP__
#ifdef __cplusplus
#include <opencv2/core.hpp>
namespace cv {
namespace ximgproc {
//! @addtogroup ximgproc_filters
//! @{
/**
* @brief Applies Paillou filter to an image.
*
* For more details about this implementation, please see @cite paillou1997detecting
*
* @param op Source 8-bit or 16-bit image, 1-channel or 3-channel image.
* @param _dst result CV_32F image with the same number of channels as op.
* @param omega double see paper
* @param alpha double see paper
*
* @sa GradientPaillouX, GradientPaillouY
*/
CV_EXPORTS void GradientPaillouY(InputArray op, OutputArray _dst, double alpha, double omega);
CV_EXPORTS void GradientPaillouX(InputArray op, OutputArray _dst, double alpha, double omega);
}
}
#endif
#endif

@ -0,0 +1,107 @@
/*
* By downloading, copying, installing or using the software you agree to this license.
* If you do not agree to this license, do not download, install,
* copy or use the software.
*
*
* License Agreement
* For Open Source Computer Vision Library
* (3 - clause BSD License)
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met :
*
* *Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and / or other materials provided with the distribution.
*
* * Neither the names of the copyright holders nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided by the copyright holders and contributors "as is" and
* any express or implied warranties, including, but not limited to, the implied
* warranties of merchantability and fitness for a particular purpose are disclaimed.
* In no event shall copyright holders or contributors be liable for any direct,
* indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort(including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*/
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/ximgproc.hpp>
#include "opencv2/ximgproc/paillou_filter.hpp"
using namespace cv;
using namespace cv::ximgproc;
#include <iostream>
using namespace std;
int aa = 100, ww = 10;
Mat dx, dy;
UMat img;
const char* window_name = "Gradient Modulus";
/**
 * @brief Linearly rescale a floating point image to [0,255] (joint range over
 * all channels, so relative channel intensities are preserved) and show it.
 * @param x image to display
 * @param s window name
 */
static void DisplayImage(Mat x,string s)
{
    vector<Mat> sx;
    split(x, sx);
    // Size the min/max buffers to the actual channel count: the original
    // hard-coded 3, which lets the zero-initialized padding entries skew the
    // joint range for 1-channel images and overflows for >3 channels.
    vector<double> minVal(sx.size()), maxVal(sx.size());
    for (int i = 0; i < static_cast<int>(sx.size()); i++)
    {
        minMaxLoc(sx[i], &minVal[i], &maxVal[i]);
    }
    // Collapse the per-channel extrema into a single joint range.
    maxVal[0] = *max_element(maxVal.begin(), maxVal.end());
    minVal[0] = *min_element(minVal.begin(), minVal.end());
    Mat uc;
    // Map [min,max] -> [0,255]. NOTE(review): divides by zero for a perfectly
    // constant image — confirm such input cannot reach this demo helper.
    x.convertTo(uc, CV_8U,255/(maxVal[0]-minVal[0]),-255*minVal[0]/(maxVal[0]-minVal[0]));
    imshow(s, uc);
}
/**
* @function paillouFilter
* @brief Trackbar callback
*/
static void PaillouFilter(int, void*)
{
Mat dst;
double a=aa/100.0,w=ww/100.0;
Mat rx,ry;
GradientPaillouX(img,rx,a,w);
GradientPaillouY(img,ry,a,w);
DisplayImage(rx, "Gx");
DisplayImage(ry, "Gy");
add(rx.mul(rx),ry.mul(ry),dst);
sqrt(dst,dst);
DisplayImage(dst, window_name );
}
/**
 * @brief Demo entry point: loads the image given on the command line, shows it,
 * and lets the user tune the Paillou filter's a/omega parameters via trackbars.
 * @return 0 on success, 1 if no valid image was supplied.
 */
int main(int argc, char* argv[])
{
    if (argc==2)
        imread(argv[1]).copyTo(img);
    if (img.empty())
    {
        cout << "File not found or empty image\n";
        // Bail out: the original fell through here, and the GUI calls below
        // fail on an empty image.
        return 1;
    }
    imshow("Original",img);
    namedWindow( window_name, WINDOW_AUTOSIZE );
    /// Create a Trackbar for user to enter threshold
    createTrackbar( "a:",window_name, &aa, 400, PaillouFilter );
    createTrackbar( "w:", window_name, &ww, 400, PaillouFilter );
    PaillouFilter(0,NULL);
    waitKey();
    return 0;
}

@ -1,7 +1,7 @@
/**************************************************************************************
The structered edge demo requires you to provide a model.
This model can be found at the opencv_extra repository on Github on the following link:
https://github.com/Itseez/opencv_extra/blob/master/testdata/cv/ximgproc/model.yml.gz
https://github.com/opencv/opencv_extra/blob/master/testdata/cv/ximgproc/model.yml.gz
***************************************************************************************/
#include <opencv2/ximgproc.hpp>

@ -0,0 +1,486 @@
#include "precomp.hpp"
#include "opencv2/highgui.hpp"
#include <math.h>
#include <vector>
#include <iostream>
namespace cv {
namespace ximgproc {
/*
If you use this code please cite this @cite paillou1997detecting
Detecting step edges in noisy SAR images: a new linear operator IEEE Transactions on Geoscience and Remote Sensing (Volume:35 , Issue: 1 ) 1997
*/
// Column-wise pass of the Paillou Y-gradient: runs the causal (top-down) and
// anticausal (bottom-up) recursive filters of @cite paillou1997detecting down
// each image column in the half-open column range handed to operator().
class ParallelGradientPaillouYCols: public ParallelLoopBody
{
private:
    Mat &img;      // source image; operator() asserts CV_8U/CV_16S/CV_16U depth
    Mat &dst;      // destination, CV_32F, same size as img
    double a;      // filter parameter alpha (see paper)
    double w;      // filter parameter omega (see paper)
    bool verbose;  // when true, each worker logs its assigned range
public:
    ParallelGradientPaillouYCols(Mat& imgSrc, Mat &d,double aa,double ww):
        img(imgSrc),
        dst(d),
        a(aa),
        w(ww),
        verbose(false)
    {}
    // Enable/disable per-worker progress logging.
    void Verbose(bool b){verbose=b;}
    // Process columns [range.start, range.end).
    virtual void operator()(const Range& range) const
    {
        CV_Assert(img.depth()==CV_8UC1 || img.depth()==CV_16SC1 || img.depth()==CV_16UC1);
        CV_Assert(dst.depth()==CV_32FC1);
        if (verbose)
            std::cout << getThreadNum()<<"# :Start from row " << range.start << " to " << range.end-1<<" ("<<range.end-range.start<<" loops)" << std::endl;
        float *f2;
        // Scratch buffers sized for the longer image dimension.
        int tailleSequence=(img.rows>img.cols)?img.rows:img.cols;
        Mat matYp(1,tailleSequence,CV_64FC1), matYm(1,tailleSequence,CV_64FC1);
        double *yp=matYp.ptr<double>(0), *ym=matYm.ptr<double>(0);
        int rows=img.rows,cols=img.cols;
        // Recursion coefficients — Equation 12 p193 of the paper.
        double b1=-2*exp(-a)*cosh(w);
        double a1=2*exp(-a)*cosh(w)-exp(-2*a)-1;
        double b2=exp(-2*a);
        // Same recursion repeated per supported pixel depth; pointers step by
        // `cols` to walk down a column of row-major data.
        switch(img.depth()){
        case CV_8U :
            for (int j=range.start;j<range.end;j++)
            {
                // Causal (top-down) pass — Equation 26 p194
                uchar *c1 = img.ptr(0)+j;
                f2 = dst.ptr<float>(0)+j;
                double border=*c1;  // first pixel replicated as boundary value
                yp[0] = *c1 ;
                c1+=cols;
                yp[1] = *c1 - b1*yp[0]-b2*border;
                c1+=cols;
                for (int i=2;i<rows;i++,c1+=cols)
                    yp[i] = *c1-b1*yp[i-1]-b2*yp[i-2];
                // Anticausal (bottom-up) pass — Equation 27 p194
                c1 = img.ptr(rows-1)+j;
                border=*c1;
                ym[rows - 1] = *c1;
                c1 -= cols;
                ym[rows-2] =*c1 - b1*ym[rows-1];
                c1 -= cols;
                for (int i=rows-3;i>=0;i--,c1-=cols)
                    ym[i]=*c1-b1*ym[i+1]-b2*ym[i+2];
                // Combine both passes into the derivative — Equation 25 p193
                for (int i=0;i<rows;i++,f2+=cols)
                    *f2 = (float)(a1*(ym[i]-yp[i]));
            }
            break;
        case CV_16S :
            for (int j = range.start; j<range.end; j++)
            {
                // Causal pass — Equation 26 p194
                short *c1 = img.ptr<short>(0) + j;
                f2 = dst.ptr<float>(0) + j;
                double border = *c1;
                yp[0] = *c1;
                c1 += cols;
                yp[1] = *c1 - b1*yp[0] - b2*border;
                c1 += cols;
                for (int i = 2; i<rows; i++, c1 += cols)
                    yp[i] = *c1 - b1*yp[i - 1] - b2*yp[i - 2];
                // Anticausal pass — Equation 27 p194
                c1 = img.ptr<short>(rows - 1) + j;
                border = *c1;
                ym[rows - 1] = *c1;
                c1 -= cols;
                ym[rows - 2] = *c1 - b1*ym[rows - 1];
                c1 -= cols;
                for (int i = rows - 3; i >= 0; i--, c1 -= cols)
                    ym[i] = *c1 - b1*ym[i + 1] - b2*ym[i + 2];
                // Combine — Equation 25 p193
                for (int i = 0; i<rows; i++, f2 += cols)
                    *f2 = (float)(a1*(ym[i] - yp[i]));
            }
            break;
        case CV_16U :
            for (int j = range.start; j<range.end; j++)
            {
                // Causal pass — Equation 26 p194
                ushort *c1 = img.ptr<ushort>(0) + j;
                f2 = dst.ptr<float>(0) + j;
                double border = *c1;
                yp[0] = *c1;
                c1 += cols;
                yp[1] = *c1 - b1*yp[0] - b2*border;
                c1 += cols;
                for (int i = 2; i<rows; i++, c1 += cols)
                    yp[i] = *c1 - b1*yp[i - 1] - b2*yp[i - 2];
                // Anticausal pass — Equation 27 p194
                c1 = img.ptr<ushort>(rows - 1) + j;
                border = *c1;
                ym[rows - 1] = *c1;
                c1 -= cols;
                ym[rows - 2] = *c1 - b1*ym[rows - 1];
                c1 -= cols;
                for (int i = rows - 3; i >= 0; i--, c1 -= cols)
                    ym[i] = *c1 - b1*ym[i + 1] - b2*ym[i + 2];
                // Combine — Equation 25 p193
                for (int i = 0; i<rows; i++, f2 += cols)
                    *f2 = (float)(a1*(ym[i] - yp[i]));
            }
            break;
        default :
            // Unsupported depth: silently skip (asserted above in debug builds).
            return ;
        }
    };
    // No-op assignment: reference members cannot be reseated; required so the
    // class is usable where CopyAssignable is expected.
    ParallelGradientPaillouYCols& operator=(const ParallelGradientPaillouYCols &) {
        return *this;
    };
};
// Row-wise pass of the Paillou Y-gradient: applies the causal/anticausal
// smoothing recursion of @cite paillou1997detecting along each row of the
// CV_32F intermediate produced by the column pass.
class ParallelGradientPaillouYRows: public ParallelLoopBody
{
private:
    Mat &img;      // intermediate image from the column pass; must be CV_32F
    Mat &dst;      // destination, CV_32F, same size as img
    double a;      // filter parameter alpha (see paper)
    double w;      // filter parameter omega (see paper)
    bool verbose;  // when true, each worker logs its assigned range
public:
    ParallelGradientPaillouYRows(Mat& imgSrc, Mat &d,double aa,double ww):
        img(imgSrc),
        dst(d),
        a(aa),
        w(ww),
        verbose(false)
    {}
    // Enable/disable per-worker progress logging.
    void Verbose(bool b){verbose=b;}
    // Process rows [range.start, range.end).
    virtual void operator()(const Range& range) const
    {
        CV_Assert(img.depth()==CV_32FC1);
        if (verbose)
            std::cout << getThreadNum()<<"# :Start from row " << range.start << " to " << range.end-1<<" ("<<range.end-range.start<<" loops)" << std::endl;
        float *iy,*iy0;
        // Scratch buffers sized for the longer image dimension.
        int tailleSequence=(img.rows>img.cols)?img.rows:img.cols;
        Mat matIym(1,tailleSequence,CV_64FC1), matIyp(1,tailleSequence,CV_64FC1);
        double *iym=matIym.ptr<double>(0), *iyp=matIyp.ptr<double>(0);
        int cols=img.cols;
        // Normalization constant — Equation 13 p193
        double d=(1-2*exp(-a)*cosh(w)+exp(-2*a))/(2*a*exp(-a)*sinh(w)+w*(1-exp(-2*a)));
        double c1=a*d;
        double c2=w*d;
        // Recursion coefficients — Equation 12 p193
        double b1=-2*exp(-a)*cosh(w);
        double b2=exp(-2*a);
        // Feed-forward coefficients — Equation 14 p193
        double a0p=c2;
        double a1p=(c1*sinh(w)-c2*cosh(w))*exp(-a);
        double a1m=a1p-c2*b1;
        double a2m=-c2*b2;
        for (int i=range.start;i<range.end;i++)
        {
            // Causal (left-to-right) pass over row i.
            iy0 = img.ptr<float>(i);
            int j=0;
            iyp[0] = a0p*iy0[0] ;
            iyp[1] = a0p*iy0[1] + a1p*iy0[0] - b1*iyp[0];
            iy0 += 2;
            for (j=2;j<cols;j++,iy0++)
                iyp[j] = a0p*iy0[0] + a1p*iy0[-1] - b1*iyp[j-1] - b2*iyp[j-2];
            // Anticausal (right-to-left) pass over row i.
            iy0 = img.ptr<float>(i)+cols-1;
            iym[cols-1] = 0;
            iy0--;
            iym[cols-2] = a1m*iy0[1] - b1*iym[cols-1];
            iy0--;
            for (j=cols-3;j>=0;j--,iy0--)
                iym[j] = a1m*iy0[1] + a2m*iy0[2] - b1*iym[j+1] - b2*iym[j+2];
            // Sum of both passes gives the smoothed output row.
            iy = dst.ptr<float>(i);
            for (j=0;j<cols;j++,iy++)
                *iy = (float)(iym[j]+iyp[j]);
        }
    };
    // No-op assignment: reference members cannot be reseated.
    ParallelGradientPaillouYRows& operator=(const ParallelGradientPaillouYRows &) {
        return *this;
    };
};
// Column-wise pass of the Paillou X-gradient: applies the causal/anticausal
// smoothing recursion of @cite paillou1997detecting down each column of the
// CV_32F intermediate produced by the row pass. Pointers step by `cols` to
// walk a column of row-major data.
class ParallelGradientPaillouXCols: public ParallelLoopBody
{
private:
    Mat &img;      // intermediate image from the row pass; must be CV_32F
    Mat &dst;      // destination, CV_32F, same size as img
    double a;      // filter parameter alpha (see paper)
    double w;      // filter parameter omega (see paper)
    bool verbose;  // when true, each worker logs its assigned range
public:
    ParallelGradientPaillouXCols(Mat& imgSrc, Mat &d,double aa,double ww):
        img(imgSrc),
        dst(d),
        a(aa),
        w(ww),
        verbose(false)
    {}
    // Enable/disable per-worker progress logging.
    void Verbose(bool b){verbose=b;}
    // Process columns [range.start, range.end).
    virtual void operator()(const Range& range) const
    {
        CV_Assert(img.depth()==CV_32FC1);
        if (verbose)
            std::cout << getThreadNum() << "# :Start from row " << range.start << " to " << range.end - 1 << " (" << range.end - range.start << " loops)" << std::endl;
        float *iy, *iy0;
        // Scratch buffers sized for the longer image dimension.
        int tailleSequence = (img.rows>img.cols) ? img.rows : img.cols;
        Mat matIym(1,tailleSequence,CV_64FC1), matIyp(1,tailleSequence,CV_64FC1);
        double *iym=matIym.ptr<double>(0), *iyp=matIyp.ptr<double>(0);
        int rows = img.rows,cols=img.cols;
        // Normalization constant — Equation 13 p193
        double d = (1 - 2 * exp(-a)*cosh(w) + exp(-2 * a)) / (2 * a*exp(-a)*sinh(w) + w*(1 - exp(-2 * a)));
        double c1 = a*d;
        double c2 = w*d;
        // Recursion coefficients — Equation 12 p193
        double b1 = -2 * exp(-a)*cosh(w);
        double b2 = exp(-2 * a);
        // Feed-forward coefficients — Equation 14 p193
        double a0p = c2;
        double a1p = (c1*sinh(w) - c2*cosh(w))*exp(-a);
        double a1m = a1p - c2*b1;
        double a2m = -c2*b2;
        for (int j = range.start; j<range.end; j++)
        {
            // Causal (top-down) pass over column j.
            iy0 = img.ptr<float>(0)+j;
            iyp[0] = a0p*iy0[0];
            iy0 +=cols;
            iyp[1] = a0p*iy0[0] + a1p*iy0[-cols] - b1*iyp[0];
            iy0 +=cols;
            for (int i = 2; i<rows; i++, iy0+=cols)
                iyp[i] = a0p*iy0[0] + a1p*iy0[-cols] - b1*iyp[i - 1] - b2*iyp[i - 2];
            // Anticausal (bottom-up) pass over column j.
            iy0 = img.ptr<float>(rows-1) + j;
            iym[rows - 1] = 0;
            iy0 -=cols;
            iym[rows - 2] = a1m*iy0[cols] - b1*iym[rows-1];
            iy0-=cols;
            for (int i = rows - 3; i >= 0; i--, iy0-=cols)
                iym[i] = a1m*iy0[cols] + a2m*iy0[2*cols] - b1*iym[i + 1] - b2*iym[i + 2];
            // Sum of both passes gives the smoothed output column.
            iy = dst.ptr<float>(0)+j;
            for (int i = 0; i<rows; i++, iy+=cols)
                *iy = (float)(iym[i] + iyp[i]);
        }
    };
    // No-op assignment: reference members cannot be reseated.
    ParallelGradientPaillouXCols& operator=(const ParallelGradientPaillouXCols &) {
        return *this;
    };
};
class ParallelGradientPaillouXRows: public ParallelLoopBody
{
private:
Mat &img;
Mat &im1;
double a;
double w;
bool verbose;
public:
ParallelGradientPaillouXRows(Mat& imgSrc, Mat &d,double aa,double ww):
img(imgSrc),
im1(d),
a(aa),
w(ww),
verbose(false)
{}
void Verbose(bool b){verbose=b;}
virtual void operator()(const Range& range) const
{
if (verbose)
std::cout << getThreadNum()<<"# :Start from row " << range.start << " to " << range.end-1<<" ("<<range.end-range.start<<" loops)" << std::endl;
float *f2;
int tailleSequence = (img.rows>img.cols) ? img.rows : img.cols;
Mat matYp(1,tailleSequence,CV_64FC1), matYm(1,tailleSequence,CV_64FC1);
double *yp=matYp.ptr<double>(0), *ym=matYm.ptr<double>(0);
int cols = img.cols;
// Equation 12 p193
double b1 = -2 * exp(-a)*cosh(w);
double a1 = 2 * exp(-a)*cosh(w) - exp(-2 * a) - 1;
double b2 = exp(-2 * a);
switch(img.depth()){
case CV_8U :
for (int i = range.start; i<range.end; i++)
{
// Equation 26 p194
uchar *c1 = img.ptr(i);
double border = *c1;
yp[0] = *c1;
c1++;
yp[1] = *c1 - b1*yp[0] - b2*border;
c1++;
for (int j = 2; j<cols; j++, c1++)
yp[j] = *c1 - b1*yp[j - 1] - b2*yp[j - 2];
// Equation 27 p194
c1 = img.ptr(i)+cols-1;
border = *c1;
ym[cols - 1] = *c1;
c1--;
ym[cols - 2] = *c1 - b1*ym[cols - 1];
c1--;
for (int j = cols - 3; j >= 0; j--, c1--)
ym[j] = *c1 - b1*ym[j + 1] - b2*ym[j + 2];
// Equation 25 p193
f2 = im1.ptr<float>(i);
for (int j = 0; j<cols; j++, f2 ++)
*f2 = (float)(a1*(ym[j] - yp[j]));
}
break;
case CV_8S :
for (int i = range.start; i<range.end; i++)
{
// Equation 26 p194
char *c1 = img.ptr<char>(i);
double border = *c1;
yp[0] = *c1;
c1++;
yp[1] = *c1 - b1*yp[0] - b2*border;
c1++;
for (int j = 2; j<cols; j++, c1++)
yp[j] = *c1 - b1*yp[j - 1] - b2*yp[j - 2];
// Equation 27 p194
c1 = img.ptr<char>(i)+cols-1;
border = *c1;
ym[cols - 1] = *c1;
c1--;
ym[cols - 2] = *c1 - b1*ym[cols - 1];
c1--;
for (int j = cols - 3; j >= 0; j--, c1--)
ym[j] = *c1 - b1*ym[j + 1] - b2*ym[j + 2];
// Equation 25 p193
f2 = im1.ptr<float>(i);
for (int j = 0; j<cols; j++, f2 ++)
*f2 = (float)(a1*(ym[j] - yp[j]));
}
break;
case CV_16S :
for (int i = range.start; i<range.end; i++)
{
// Equation 26 p194
short *c1 = img.ptr<short>(i);
f2 = im1.ptr<float>(i);
double border = *c1;
yp[0] = *c1;
c1++;
yp[1] = *c1 - b1*yp[0] - b2*border;
c1++;
for (int j = 2; j<cols; j++, c1++)
yp[j] = *c1 - b1*yp[j - 1] - b2*yp[j - 2];
// Equation 27 p194
c1 = img.ptr<short>(i) + cols - 1;
border = *c1;
ym[cols - 1] = *c1;
c1--;
ym[cols - 2] = *c1 - b1*ym[cols - 1];
c1--;
for (int j = cols - 3; j >= 0; j--, c1--)
ym[j] = *c1 - b1*ym[j + 1] - b2*ym[j + 2];
// Equation 25 p193
for (int j = 0; j<cols; j++, f2++)
*f2 = (float)(a1*(ym[i] - yp[i]));
}
break;
case CV_16U :
for (int i = range.start; i<range.end; i++)
{
// Equation 26 p194
ushort *c1 = img.ptr<ushort>(i);
f2 = im1.ptr<float>(i);
double border = *c1;
yp[0] = *c1;
c1++;
yp[1] = *c1 - b1*yp[0] - b2*border;
c1++;
for (int j = 2; j<cols; j++, c1++)
yp[j] = *c1 - b1*yp[j - 1] - b2*yp[j - 2];
// Equation 27 p194
c1 = img.ptr<ushort>(i) + cols - 1;
border = *c1;
ym[cols - 1] = *c1;
c1--;
ym[cols - 2] = *c1 - b1*ym[cols - 1];
c1--;
for (int j = cols - 3; j >= 0; j--, c1--)
ym[j] = *c1 - b1*ym[j + 1] - b2*ym[j + 2];
// Equation 25 p193
for (int j = 0; j<cols; j++, f2++)
*f2 = (float)(a1*(ym[i] - yp[i]));
}
break;
default :
return ;
}
};
ParallelGradientPaillouXRows& operator=(const ParallelGradientPaillouXRows &) {
return *this;
};
};
/**
 * @brief Computes the Paillou gradient of @p _op along the Y direction
 * (@cite paillou1997detecting): a column derivative pass followed by a row
 * pass, applied to each channel independently.
 * @param _op    source 8-bit or 16-bit image
 * @param _dst   destination CV_32F image, same size/channel count as _op
 * @param alpha  filter parameter alpha (see paper)
 * @param omega  filter parameter omega (see paper)
 */
void GradientPaillouY(InputArray _op, OutputArray _dst, double alpha, double omega)
{
    Mat tmp(_op.size(), CV_32FC(_op.channels()));
    _dst.create(_op.size(), CV_32FC(tmp.channels()));
    cv::Mat opSrc = _op.getMat();
    // Split everything into single-channel planes; each is filtered on its own.
    std::vector<Mat> srcPlanes, tmpPlanes, dstPlanes;
    split(opSrc, srcPlanes);
    split(tmp, tmpPlanes);
    split(_dst, dstPlanes);
    const int nbPlanes = static_cast<int>(srcPlanes.size());
    for (int c = 0; c < nbPlanes; c++)
    {
        const bool contiguous = srcPlanes[c].isContinuous() &&
                                tmpPlanes[c].isContinuous() &&
                                dstPlanes[c].isContinuous();
        if (!contiguous)
        {
            // The parallel passes rely on flat row-major addressing.
            std::cout << "PB";
            continue;
        }
        // Column pass into tmp, then row pass into the final plane; both
        // passes are parallelized over the image's columns/rows respectively.
        ParallelGradientPaillouYCols colPass(srcPlanes[c], tmpPlanes[c], alpha, omega);
        parallel_for_(Range(0, opSrc.cols), colPass, getNumThreads());
        ParallelGradientPaillouYRows rowPass(tmpPlanes[c], dstPlanes[c], alpha, omega);
        parallel_for_(Range(0, opSrc.rows), rowPass, getNumThreads());
    }
    merge(dstPlanes, _dst);
}
/**
 * @brief Computes the Paillou gradient of @p _op along the X direction
 * (@cite paillou1997detecting): a row derivative pass followed by a column
 * pass, applied to each channel independently.
 * @param _op    source 8-bit or 16-bit image
 * @param _dst   destination CV_32F image, same size/channel count as _op
 * @param alpha  filter parameter alpha (see paper)
 * @param omega  filter parameter omega (see paper)
 */
void GradientPaillouX(InputArray _op, OutputArray _dst, double alpha, double omega)
{
    Mat tmp(_op.size(), CV_32FC(_op.channels()));
    _dst.create(_op.size(), CV_32FC(tmp.channels()));
    Mat opSrc = _op.getMat();
    // Split everything into single-channel planes; each is filtered on its own.
    std::vector<Mat> srcPlanes, tmpPlanes, dstPlanes;
    split(opSrc, srcPlanes);
    split(tmp, tmpPlanes);
    split(_dst, dstPlanes);
    const int nbPlanes = static_cast<int>(srcPlanes.size());
    for (int c = 0; c < nbPlanes; c++)
    {
        const bool contiguous = srcPlanes[c].isContinuous() &&
                                tmpPlanes[c].isContinuous() &&
                                dstPlanes[c].isContinuous();
        if (!contiguous)
        {
            // The parallel passes rely on flat row-major addressing.
            std::cout << "PB";
            continue;
        }
        // Row pass into tmp, then column pass into the final plane; both
        // passes are parallelized over the image's rows/columns respectively.
        ParallelGradientPaillouXRows rowPass(srcPlanes[c], tmpPlanes[c], alpha, omega);
        parallel_for_(Range(0, opSrc.rows), rowPass, getNumThreads());
        ParallelGradientPaillouXCols colPass(tmpPlanes[c], dstPlanes[c], alpha, omega);
        parallel_for_(Range(0, opSrc.cols), colPass, getNumThreads());
    }
    merge(dstPlanes, _dst);
}
}
}

@ -27,7 +27,7 @@ Source Stereoscopic Image
Source Code
-----------
We will be using snippets from the example application, that can be downloaded [here ](https://github.com/Itseez/opencv_contrib/blob/master/modules/ximgproc/samples/disparity_filtering.cpp).
We will be using snippets from the example application, that can be downloaded [here ](https://github.com/opencv/opencv_contrib/blob/master/modules/ximgproc/samples/disparity_filtering.cpp).
Explanation
-----------

@ -19,7 +19,7 @@ file(GLOB ${the_target}_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
add_executable(${the_target} ${${the_target}_SOURCES})
target_link_libraries(${the_target} ${OPENCV_${the_target}_DEPS})
ocv_target_link_libraries(${the_target} ${OPENCV_${the_target}_DEPS})
set_target_properties(${the_target} PROPERTIES
DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"

Loading…
Cancel
Save