From 823dea726f260e07b47fb1faf446f4e35a255a6f Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Thu, 16 Jun 2016 18:21:26 +0300
Subject: [PATCH] migration: github.com/opencv/opencv_contrib

---
 .github/ISSUE_TEMPLATE.md                          |  2 +-
 .travis.yml                                        |  2 +-
 CONTRIBUTING.md                                    |  2 +-
 modules/dnn/tutorials/tutorial_dnn_build.markdown  |  8 ++++----
 modules/text/include/opencv2/text.hpp              |  2 +-
 modules/text/include/opencv2/text/erfilter.hpp     |  2 +-
 modules/text/include/opencv2/text/ocr.hpp          | 14 +++++++-------
 modules/text/src/ocr_hmm_decoder.cpp               |  2 +-
 .../tutorial_introduction_to_tracker.markdown      |  4 ++--
 .../ximgproc/samples/structured_edge_detection.cpp |  2 +-
 .../tutorials/disparity_filtering.markdown         |  2 +-
 11 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 4f1453a2f..e72c70d8c 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,6 +1,6 @@
diff --git a/.travis.yml b/.travis.yml
index 23b3aba4e..5c603efde 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,7 @@ compiler:
   - clang
 before_script:
   - cd ../
-  - git clone https://github.com/Itseez/opencv.git
+  - git clone https://github.com/opencv/opencv.git
   - mkdir build-opencv
   - cd build-opencv
   - cmake -DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib/modules ../opencv
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1e13c8996..318e9ac8f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,3 +1,3 @@
 ## Contributing guidelines
-All guidelines for contributing to the OpenCV repository can be found at [`How to contribute guideline`](https://github.com/Itseez/opencv/wiki/How_to_contribute).
+All guidelines for contributing to the OpenCV repository can be found at [`How to contribute guideline`](https://github.com/opencv/opencv/wiki/How_to_contribute).
diff --git a/modules/dnn/tutorials/tutorial_dnn_build.markdown b/modules/dnn/tutorials/tutorial_dnn_build.markdown
index d5fbc7f6a..36f0fecbf 100644
--- a/modules/dnn/tutorials/tutorial_dnn_build.markdown
+++ b/modules/dnn/tutorials/tutorial_dnn_build.markdown
@@ -3,7 +3,7 @@ Build opencv_contrib with dnn module {#tutorial_dnn_build}
 Introduction
 ------------
-opencv_dnn module is placed in the secondary [opencv_contrib](https://github.com/Itseez/opencv_contrib) repository,
+opencv_dnn module is placed in the secondary [opencv_contrib](https://github.com/opencv/opencv_contrib) repository,
 which isn't distributed in binary form, therefore you need to build it manually.
 To do this you need to have installed: [CMake](http://www.cmake.org/download), git, and build system
 (*gcc* with *make* for Linux or *MS Visual Studio* for Windows)
@@ -12,12 +12,12 @@ Steps
 -----
 -# Make any directory, for example **opencv_root**
--# Clone [opencv](https://github.com/Itseez/opencv) and [opencv_contrib](https://github.com/Itseez/opencv_contrib) repos to the **opencv_root**.
+-# Clone [opencv](https://github.com/opencv/opencv) and [opencv_contrib](https://github.com/opencv/opencv_contrib) repos to the **opencv_root**.
 You can do it in terminal like here:
 @code
 cd opencv_root
-git clone https://github.com/Itseez/opencv
-git clone https://github.com/Itseez/opencv_contrib
+git clone https://github.com/opencv/opencv
+git clone https://github.com/opencv/opencv_contrib
 @endcode
 -# Run [CMake-gui] and set source and build directories:
diff --git a/modules/text/include/opencv2/text.hpp b/modules/text/include/opencv2/text.hpp
index 591424cb4..945194a16 100644
--- a/modules/text/include/opencv2/text.hpp
+++ b/modules/text/include/opencv2/text.hpp
@@ -92,7 +92,7 @@ grouping horizontally aligned text, and the method proposed by Lluis Gomez and D
 in [Gomez13][Gomez14] for grouping arbitrary oriented text (see erGrouping).
 To see the text detector at work, have a look at the textdetection demo:
-
+
 @defgroup text_recognize Scene Text Recognition
 @}
diff --git a/modules/text/include/opencv2/text/erfilter.hpp b/modules/text/include/opencv2/text/erfilter.hpp
index d17ac8879..9303e128f 100644
--- a/modules/text/include/opencv2/text/erfilter.hpp
+++ b/modules/text/include/opencv2/text/erfilter.hpp
@@ -345,7 +345,7 @@ single vector\, the function separates them in two different vectors (th
 ERStats where extracted from two different channels).
 An example of MSERsToERStats in use can be found in the text detection webcam_demo:
-
+
 */
 CV_EXPORTS void MSERsToERStats(InputArray image, std::vector > &contours, std::vector > &regions);
diff --git a/modules/text/include/opencv2/text/ocr.hpp b/modules/text/include/opencv2/text/ocr.hpp
index 651934b0c..1261046cd 100644
--- a/modules/text/include/opencv2/text/ocr.hpp
+++ b/modules/text/include/opencv2/text/ocr.hpp
@@ -81,10 +81,10 @@ Notice that it is compiled only when tesseract-ocr is correctly installed.
 @note
 - (C++) An example of OCRTesseract recognition combined with scene text detection can be found at the end_to_end_recognition demo:
-
+
 - (C++) Another example of OCRTesseract recognition combined with scene text detection can be found at the webcam_demo:
-
+
 */
 class CV_EXPORTS_W OCRTesseract : public BaseOCR
 {
@@ -152,7 +152,7 @@ enum decoder_mode
 @note
 - (C++) An example on using OCRHMMDecoder recognition combined with scene text detection can be found at the webcam_demo sample:
-
+
 */
 class CV_EXPORTS_W OCRHMMDecoder : public BaseOCR
 {
@@ -165,7 +165,7 @@ public:
 The default character classifier and feature extractor can be loaded using the utility funtion loadOCRHMMClassifierNM and KNN model provided in
- .
+ .
 */
 class CV_EXPORTS_W ClassifierCallback
 {
@@ -321,7 +321,7 @@ CV_EXPORTS_W Ptr loadOCRHMMClassifierCNN(cons
 * The function calculate frequency statistics of character pairs from the given lexicon and fills the output transition_probabilities_table with them. The transition_probabilities_table can be used as input in the OCRHMMDecoder::create() and OCRBeamSearchDecoder::create() methods.
 * @note
 * - (C++) An alternative would be to load the default generic language transition table provided in the text module samples folder (created from ispell 42869 english words list) :
- *
+ *
 **/
 CV_EXPORTS void createOCRHMMTransitionsTable(std::string& vocabulary, std::vector& lexicon, OutputArray transition_probabilities_table);
@@ -335,7 +335,7 @@ CV_EXPORTS_W Mat createOCRHMMTransitionsTable(const String& vocabulary, std::vec
 @note
 - (C++) An example on using OCRBeamSearchDecoder recognition combined with scene text detection can be found at the demo sample:
-
+
 */
 class CV_EXPORTS_W OCRBeamSearchDecoder : public BaseOCR
 {
@@ -348,7 +348,7 @@ public:
 The default character classifier and feature extractor can be loaded using the utility funtion loadOCRBeamSearchClassifierCNN with all its parameters provided in
- .
+ .
 */
 class CV_EXPORTS_W ClassifierCallback
 {
diff --git a/modules/text/src/ocr_hmm_decoder.cpp b/modules/text/src/ocr_hmm_decoder.cpp
index 1076475f1..5b7a5aa31 100644
--- a/modules/text/src/ocr_hmm_decoder.cpp
+++ b/modules/text/src/ocr_hmm_decoder.cpp
@@ -1206,7 +1206,7 @@ the output transition_probabilities_table with them.
 The transition_probabilities_table can be used as input in the OCRHMMDecoder::create() and OCRBeamSearchDecoder::create() methods.
 @note
 - (C++) An alternative would be to load the default generic language transition table provided in the text module samples folder (created from ispell 42869 english words list) :
-
+
 */
 void createOCRHMMTransitionsTable(string& vocabulary, vector& lexicon, OutputArray _transitions)
 {
diff --git a/modules/tracking/tutorials/tutorial_introduction_to_tracker.markdown b/modules/tracking/tutorials/tutorial_introduction_to_tracker.markdown
index 500df0c1c..9e2b6eb89 100644
--- a/modules/tracking/tutorials/tutorial_introduction_to_tracker.markdown
+++ b/modules/tracking/tutorials/tutorial_introduction_to_tracker.markdown
@@ -28,8 +28,8 @@ Explanation
    as shown in help. In the help, it means that the image files are numbered with 4 digits
    (e.g. the file naming will be 0001.jpg, 0002.jpg, and so on).
-   You can find video samples in Itseez/opencv_extra/testdata/cv/tracking
-
+   You can find video samples in opencv_extra/testdata/cv/tracking
+
 -# **Declares the required variables**
diff --git a/modules/ximgproc/samples/structured_edge_detection.cpp b/modules/ximgproc/samples/structured_edge_detection.cpp
index 4c20d01a5..a11e9f102 100644
--- a/modules/ximgproc/samples/structured_edge_detection.cpp
+++ b/modules/ximgproc/samples/structured_edge_detection.cpp
@@ -1,7 +1,7 @@
 /**************************************************************************************
 The structered edge demo requires you to provide a model.
 This model can be found at the opencv_extra repository on Github on the following link:
-https://github.com/Itseez/opencv_extra/blob/master/testdata/cv/ximgproc/model.yml.gz
+https://github.com/opencv/opencv_extra/blob/master/testdata/cv/ximgproc/model.yml.gz
 ***************************************************************************************/
 #include
diff --git a/modules/ximgproc/tutorials/disparity_filtering.markdown b/modules/ximgproc/tutorials/disparity_filtering.markdown
index 4ca559f83..0248c6db4 100644
--- a/modules/ximgproc/tutorials/disparity_filtering.markdown
+++ b/modules/ximgproc/tutorials/disparity_filtering.markdown
@@ -27,7 +27,7 @@ Source Stereoscopic Image
 Source Code
 -----------
-We will be using snippets from the example application, that can be downloaded [here ](https://github.com/Itseez/opencv_contrib/blob/master/modules/ximgproc/samples/disparity_filtering.cpp).
+We will be using snippets from the example application, that can be downloaded [here ](https://github.com/opencv/opencv_contrib/blob/master/modules/ximgproc/samples/disparity_filtering.cpp).
 Explanation
 -----------
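
Every hunk in this patch performs the same one-for-one substitution of the GitHub organization name, github.com/Itseez to github.com/opencv. As a minimal sketch of how the same migration could be applied to any references left over in a working tree (an illustration only, not part of the commit; it assumes GNU grep and GNU sed are available and that the affected paths contain no spaces):

    # list text files that still reference the old organization name
    grep -rIl 'github.com/Itseez' . --exclude-dir=.git

    # rewrite the organization name in place, then inspect the result with `git diff` before committing
    grep -rIl 'github.com/Itseez' . --exclude-dir=.git | xargs sed -i 's|github.com/Itseez|github.com/opencv|g'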