diff --git a/modules/aruco/include/opencv2/aruco.hpp b/modules/aruco/include/opencv2/aruco.hpp index 56bfe982b..f0d34ed98 100644 --- a/modules/aruco/include/opencv2/aruco.hpp +++ b/modules/aruco/include/opencv2/aruco.hpp @@ -354,7 +354,7 @@ class CV_EXPORTS_W GridBoard : public Board { // number of markers in X and Y directions int _markersX, _markersY; - // marker side lenght (normally in meters) + // marker side length (normally in meters) float _markerLength; // separation between markers in the grid diff --git a/modules/aruco/src/apriltag_quad_thresh.cpp b/modules/aruco/src/apriltag_quad_thresh.cpp index fc75a3847..296309267 100644 --- a/modules/aruco/src/apriltag_quad_thresh.cpp +++ b/modules/aruco/src/apriltag_quad_thresh.cpp @@ -661,7 +661,7 @@ int fit_quad(const Ptr &_params, const Mat im, zarray_t *clu if (dot < 0) return 0; - // we now sort the points according to theta. This is a prepatory + // we now sort the points according to theta. This is a preparatory // step for segmenting them into four lines. if (1) { // zarray_sort(cluster, pt_compare_theta); diff --git a/modules/aruco/src/charuco.cpp b/modules/aruco/src/charuco.cpp index 5e5f01a9a..e1cf22437 100644 --- a/modules/aruco/src/charuco.cpp +++ b/modules/aruco/src/charuco.cpp @@ -616,7 +616,7 @@ void drawDetectedCornersCharuco(InputOutputArray _image, InputArray _charucoCorn /** * Check if a set of 3d points are enough for calibration. Z coordinate is ignored. 
- * Only axis paralel lines are considered + * Only axis parallel lines are considered */ static bool _arePointsEnoughForPoseEstimation(const vector< Point3f > &points) { diff --git a/modules/aruco/src/dictionary.cpp b/modules/aruco/src/dictionary.cpp index d00ec9e88..e76ee2e46 100644 --- a/modules/aruco/src/dictionary.cpp +++ b/modules/aruco/src/dictionary.cpp @@ -122,7 +122,7 @@ bool Dictionary::identify(const Mat &onlyBits, int &idx, int &rotation, } } - // if maxCorrection is fullfilled, return this one + // if maxCorrection is fulfilled, return this one if(currentMinDistance <= maxCorrectionRecalculed) { idx = m; rotation = currentRotation; diff --git a/modules/aruco/tutorials/charuco_detection/charuco_detection.markdown b/modules/aruco/tutorials/charuco_detection/charuco_detection.markdown index 849aee100..75e681631 100644 --- a/modules/aruco/tutorials/charuco_detection/charuco_detection.markdown +++ b/modules/aruco/tutorials/charuco_detection/charuco_detection.markdown @@ -32,7 +32,7 @@ This class, as the rest of ChArUco functionalities, are defined in: #include @endcode -To define a ```CharucoBoard```, it is necesary: +To define a ```CharucoBoard```, it is necessary: - Number of chessboard squares in X direction. - Number of chessboard squares in Y direction. diff --git a/modules/aruco/tutorials/charuco_diamond_detection/charuco_diamond_detection.markdown b/modules/aruco/tutorials/charuco_diamond_detection/charuco_diamond_detection.markdown index 829c3e859..c1a84df12 100644 --- a/modules/aruco/tutorials/charuco_diamond_detection/charuco_diamond_detection.markdown +++ b/modules/aruco/tutorials/charuco_diamond_detection/charuco_diamond_detection.markdown @@ -102,7 +102,7 @@ function and, for each diamond, the corners are represented in the same order th starting with the top-left corner. The second returned parameter, ```diamondIds```, contains all the ids of the returned diamond corners in ```diamondCorners```. 
Each id is actually an array of 4 integers that can be represented with ```Vec4i```. -The detected diamond can be visualized using the function ```drawDetectedDiamonds()``` which simply recieves the image and the diamond +The detected diamond can be visualized using the function ```drawDetectedDiamonds()``` which simply receives the image and the diamond corners and ids: @code{.cpp} diff --git a/modules/bioinspired/doc/retina.markdown b/modules/bioinspired/doc/retina.markdown index 0e0e13970..3caf359a0 100644 --- a/modules/bioinspired/doc/retina.markdown +++ b/modules/bioinspired/doc/retina.markdown @@ -134,7 +134,7 @@ For more information, refer to the following papers : - Please have a look at the reference work of Jeanny Herault that you can read in his book : @cite Herault2010 -This retina filter code includes the research contributions of phd/research collegues from which +This retina filter code includes the research contributions of phd/research colleagues from which code has been redrawn by the author : - take a look at the *retinacolor.hpp* module to discover Brice Chaix de Lavarene phD color diff --git a/modules/bioinspired/samples/OpenEXRimages_HDR_Retina_toneMapping.cpp b/modules/bioinspired/samples/OpenEXRimages_HDR_Retina_toneMapping.cpp index 9e0ed4384..f2cd420b6 100644 --- a/modules/bioinspired/samples/OpenEXRimages_HDR_Retina_toneMapping.cpp +++ b/modules/bioinspired/samples/OpenEXRimages_HDR_Retina_toneMapping.cpp @@ -75,7 +75,7 @@ static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, con cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY); } - // get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation + // get histogram density probability in order to cut values under above edges limits (here 5-95%)... 
useful for HDR pixel errors cancellation cv::Mat dst, hist; int histSize = 256; calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0); @@ -233,7 +233,7 @@ int main(int argc, char* argv[]) // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup" retina->write("RetinaDefaultParameters.xml"); - // desactivate Magnocellular pathway processing (motion information extraction) since it is not usefull here + // deactivate Magnocellular pathway processing (motion information extraction) since it is not useful here retina->activateMovingContoursProcessing(false); // declare retina output buffers diff --git a/modules/bioinspired/samples/cpp/OpenEXRimages_HDR_Retina_toneMapping.cpp b/modules/bioinspired/samples/cpp/OpenEXRimages_HDR_Retina_toneMapping.cpp index 5d30e9d37..28d817f33 100644 --- a/modules/bioinspired/samples/cpp/OpenEXRimages_HDR_Retina_toneMapping.cpp +++ b/modules/bioinspired/samples/cpp/OpenEXRimages_HDR_Retina_toneMapping.cpp @@ -74,7 +74,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY); } - // get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation + // get histogram density probability in order to cut values under above edges limits (here 5-95%)... 
useful for HDR pixel errors cancellation cv::Mat dst, hist; int histSize = 256; calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0); @@ -231,7 +231,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup" retina->write("RetinaDefaultParameters.xml"); - // desactivate Magnocellular pathway processing (motion information extraction) since it is not usefull here + // deactivate Magnocellular pathway processing (motion information extraction) since it is not useful here retina->activateMovingContoursProcessing(false); // declare retina output buffers diff --git a/modules/bioinspired/src/retina_ocl.cpp b/modules/bioinspired/src/retina_ocl.cpp index 15a19d52d..bba662fc3 100644 --- a/modules/bioinspired/src/retina_ocl.cpp +++ b/modules/bioinspired/src/retina_ocl.cpp @@ -1367,7 +1367,7 @@ void RetinaFilter::setGlobalParameters(const float OPLspatialResponse1, const fl _normalizeMagnoOutput_0_maxOutputValue = normalizeMagnoOutput_0_maxOutputValue; _maxOutputValue = maxOutputValue; _photoreceptorsPrefilter.setV0CompressionParameter(0.9f, maxInputValue, meanValue); - _photoreceptorsPrefilter.setLPfilterParameters(0, 0, 10, 3); // keeps low pass filter with low cut frequency in memory (usefull for the tone mapping function) + _photoreceptorsPrefilter.setLPfilterParameters(0, 0, 10, 3); // keeps low pass filter with low cut frequency in memory (useful for the tone mapping function) _ParvoRetinaFilter.setOPLandParvoFiltersParameters(0, OPLtemporalresponse1, OPLspatialResponse1, OPLassymetryGain, OPLtemporalresponse2, OPLspatialResponse2); _ParvoRetinaFilter.setV0CompressionParameter(0.9f, maxInputValue, meanValue); _MagnoRetinaFilter.setCoefficientsTable(LPfilterGain, LPfilterTemporalresponse, LPfilterSpatialResponse, MovingContoursExtractorCoefficient, 0, 2.0f * LPfilterSpatialResponse); @@ -1433,7 +1433,7 @@ bool 
RetinaFilter::runFilter(const UMat &imageInput, const bool useAdaptiveFilte if (_useParvoOutput) { - _ParvoRetinaFilter.normalizeGrayOutputCentredSigmoide(); // models the saturation of the cells, usefull for visualisation of the ON-OFF Parvo Output, Bipolar cells outputs do not change !!! + _ParvoRetinaFilter.normalizeGrayOutputCentredSigmoide(); // models the saturation of the cells, useful for visualisation of the ON-OFF Parvo Output, Bipolar cells outputs do not change !!! _ParvoRetinaFilter.centerReductImageLuminance(); // best for further spectrum analysis if (_normalizeParvoOutput_0_maxOutputValue) diff --git a/modules/ccalib/tutorials/omnidir_tutorial.markdown b/modules/ccalib/tutorials/omnidir_tutorial.markdown index 422c2cfb7..1a31e9431 100644 --- a/modules/ccalib/tutorials/omnidir_tutorial.markdown +++ b/modules/ccalib/tutorials/omnidir_tutorial.markdown @@ -153,7 +153,7 @@ The first step of stereo reconstruction is stereo rectification so that epipolar The API of stereo reconstruction for omnidrectional camera is ```omnidir::stereoReconstruct```. Here we use an example to show how it works. -First, calibrate a stereo pair of cameras as describe above and get parameters like ```K1```, ```D1```, ```xi1```, ```K2```, ```D2```, ```xi2```, ```rvec```, ```tvec```. Then read two images from the first and second camera respectively, for instance, ```image1``` and ```image2```, which are shown below. +First, calibrate a stereo pair of cameras as described above and get parameters like ```K1```, ```D1```, ```xi1```, ```K2```, ```D2```, ```xi2```, ```rvec```, ```tvec```. Then read two images from the first and second camera respectively, for instance, ```image1``` and ```image2```, which are shown below. 
![image](img/imgs.jpg) diff --git a/modules/cvv/src/controller/view_controller.hpp b/modules/cvv/src/controller/view_controller.hpp index 10d28aa14..5ac0e9fa8 100644 --- a/modules/cvv/src/controller/view_controller.hpp +++ b/modules/cvv/src/controller/view_controller.hpp @@ -191,13 +191,13 @@ class ViewController void showCallTab(size_t tabId); /** - * @brief Shows the tab and opens it if neccessary. + * @brief Shows the tab and opens it if necessary. * @param tabId id of the tab */ void showAndOpenCallTab(size_t tabId); /** - * @brief Opens the tab it if neccessary. + * @brief Opens the tab if necessary. * @param tabId id of the tab */ void openCallTab(size_t tabId); diff --git a/modules/cvv/src/extension_api/api.hpp b/modules/cvv/src/extension_api/api.hpp index 746fb3d39..b62f758ea 100644 --- a/modules/cvv/src/extension_api/api.hpp +++ b/modules/cvv/src/extension_api/api.hpp @@ -43,7 +43,7 @@ template void addMatchView(const QString name) using TabFactory = controller::TabFactory; /** * @brief Introduces a new call-type. - * @param factory A function that recieves a reference to a call and should + * @param factory A function that receives a reference to a call and should * return the appropriate * window. */ diff --git a/modules/cvv/src/gui/overview_group_subtable.hpp b/modules/cvv/src/gui/overview_group_subtable.hpp index 89299222e..0b5682802 100644 --- a/modules/cvv/src/gui/overview_group_subtable.hpp +++ b/modules/cvv/src/gui/overview_group_subtable.hpp @@ -72,7 +72,7 @@ class OverviewGroupSubtable : public QWidget /** * @brief Set the displayed rows. * @note This method does some optimisations to only fully rebuild all - * rows if neccessary. + * rows if necessary. 
* @param newGroup new group of rows that will be displayed */ void setRowGroup(stfl::ElementGroup &newGroup); diff --git a/modules/cvv/src/qtutil/matchview/cvvkeypoint.hpp b/modules/cvv/src/qtutil/matchview/cvvkeypoint.hpp index 6e57f71f4..fcf26a57f 100644 --- a/modules/cvv/src/qtutil/matchview/cvvkeypoint.hpp +++ b/modules/cvv/src/qtutil/matchview/cvvkeypoint.hpp @@ -93,7 +93,7 @@ class CVVKeyPoint : public QGraphicsObject,public cv::KeyPoint signals: /** - * @brief this signal will be emited when the imagepoint in the scene + * @brief this signal will be emitted when the imagepoint in the scene * has changed * @param visible it is true if this keypoint is in the visibleArea */ diff --git a/modules/cvv/src/qtutil/matchview/keypointmanagement.hpp b/modules/cvv/src/qtutil/matchview/keypointmanagement.hpp index ea2831082..4bc78b0e1 100644 --- a/modules/cvv/src/qtutil/matchview/keypointmanagement.hpp +++ b/modules/cvv/src/qtutil/matchview/keypointmanagement.hpp @@ -112,7 +112,7 @@ public slots: signals: /** - * @brief this signal will be emited when the selection was changed. + * @brief this signal will be emitted when the selection was changed. * it can be used for syncronisation with other selector */ void updateSelection(const std::vector &selection); diff --git a/modules/cvv/src/qtutil/matchview/keypointselectionselector.hpp b/modules/cvv/src/qtutil/matchview/keypointselectionselector.hpp index bcdb34382..e015961fb 100644 --- a/modules/cvv/src/qtutil/matchview/keypointselectionselector.hpp +++ b/modules/cvv/src/qtutil/matchview/keypointselectionselector.hpp @@ -11,7 +11,7 @@ namespace cvv{ namespace qtutil{ /** - * @brief this class can use diffrent KeyPointSelection + * @brief this class can use different KeyPointSelection * you can register functions which take a std::vector as argument. 
*/ class KeyPointSelectionSelector:public KeyPointSelection,public RegisterHelper>{ diff --git a/modules/cvv/src/qtutil/matchview/keypointsettingsselector.hpp b/modules/cvv/src/qtutil/matchview/keypointsettingsselector.hpp index 6c99099f4..2a099bbe9 100644 --- a/modules/cvv/src/qtutil/matchview/keypointsettingsselector.hpp +++ b/modules/cvv/src/qtutil/matchview/keypointsettingsselector.hpp @@ -11,7 +11,7 @@ namespace cvv{ namespace qtutil{ /** - * @brief this class can use diffrent KeyPointSettings + * @brief this class can use different KeyPointSettings * you can register functios which take a std::vector as argument. */ class KeyPointSettingsSelector:public KeyPointSettings, public RegisterHelper>{ diff --git a/modules/cvv/src/qtutil/matchview/keypointvaluechooser.hpp b/modules/cvv/src/qtutil/matchview/keypointvaluechooser.hpp index 4a050729a..917bd0223 100644 --- a/modules/cvv/src/qtutil/matchview/keypointvaluechooser.hpp +++ b/modules/cvv/src/qtutil/matchview/keypointvaluechooser.hpp @@ -11,7 +11,7 @@ namespace cvv{ namespace qtutil{ /** * @brief this widget contains a combobox with the attributes of an keypoint as entry. 
- * you cann call the method getChoosenValue which return the choosen value of the given keypoint + * you can call the method getChoosenValue which return the chosen value of the given keypoint */ class KeyPointValueChooser:public QWidget{ @@ -25,8 +25,8 @@ public: KeyPointValueChooser(QWidget *parent=nullptr); /** - * @brief returns the choosen value of the given keypoint - * @return the choosen value of the given keypoint + * @brief returns the chosen value of the given keypoint + * @return the chosen value of the given keypoint */ double getChoosenValue(cv::KeyPoint keypoint); diff --git a/modules/cvv/src/qtutil/matchview/matchmanagement.hpp b/modules/cvv/src/qtutil/matchview/matchmanagement.hpp index 1cf0bffd6..cad9d903e 100644 --- a/modules/cvv/src/qtutil/matchview/matchmanagement.hpp +++ b/modules/cvv/src/qtutil/matchview/matchmanagement.hpp @@ -110,7 +110,7 @@ public slots: signals: /** - * @brief this signal will be emited when the selection was changed. + * @brief this signal will be emitted when the selection was changed. * it can be used for syncronisation with other selector */ void updateSelection(const std::vector &selection); diff --git a/modules/cvv/src/qtutil/matchview/matchselectionselector.hpp b/modules/cvv/src/qtutil/matchview/matchselectionselector.hpp index d80a10a2f..898b0595c 100644 --- a/modules/cvv/src/qtutil/matchview/matchselectionselector.hpp +++ b/modules/cvv/src/qtutil/matchview/matchselectionselector.hpp @@ -11,7 +11,7 @@ namespace cvv{ namespace qtutil{ /** - * @brief this class can use diffrent MatchSelection + * @brief this class can use different MatchSelection * you can register functions which take a std::vector as argument. 
*/ class MatchSelectionSelector:public MatchSelection,public RegisterHelper>{ diff --git a/modules/cvv/src/qtutil/matchview/matchsettingsselector.hpp b/modules/cvv/src/qtutil/matchview/matchsettingsselector.hpp index 5c73e9e8f..0a527a93b 100644 --- a/modules/cvv/src/qtutil/matchview/matchsettingsselector.hpp +++ b/modules/cvv/src/qtutil/matchview/matchsettingsselector.hpp @@ -11,7 +11,7 @@ namespace cvv{ namespace qtutil{ /** - * @brief this class can use diffrent MatchSettings + * @brief this class can use different MatchSettings * you can register functios which take a std::vector as argument. */ class MatchSettingsSelector:public MatchSettings, public RegisterHelper>{ diff --git a/modules/cvv/src/qtutil/matchview/singlecolorkeypointpen.hpp b/modules/cvv/src/qtutil/matchview/singlecolorkeypointpen.hpp index 6467d01ea..6bd6dab57 100644 --- a/modules/cvv/src/qtutil/matchview/singlecolorkeypointpen.hpp +++ b/modules/cvv/src/qtutil/matchview/singlecolorkeypointpen.hpp @@ -13,7 +13,7 @@ namespace qtutil /** * This KeyPointPen return for all CVVKeyPoints the same Color, - * the Color can be choosen by an QColorDialog + * the Color can be chosen by an QColorDialog */ class SingleColorKeyPen : public KeyPointSettings diff --git a/modules/cvv/src/qtutil/matchview/singlecolormatchpen.hpp b/modules/cvv/src/qtutil/matchview/singlecolormatchpen.hpp index 244844695..f4120cb8f 100644 --- a/modules/cvv/src/qtutil/matchview/singlecolormatchpen.hpp +++ b/modules/cvv/src/qtutil/matchview/singlecolormatchpen.hpp @@ -16,7 +16,7 @@ namespace qtutil /** * This MatchPen return for all CVVMatches the same Color, - * the Color can be choosen by an QColorDialog + * the Color can be chosen by an QColorDialog */ class SingleColorMatchPen : public MatchSettings diff --git a/modules/cvv/src/qtutil/signalslot.hpp b/modules/cvv/src/qtutil/signalslot.hpp index d00140b5e..a41b6b586 100644 --- a/modules/cvv/src/qtutil/signalslot.hpp +++ b/modules/cvv/src/qtutil/signalslot.hpp @@ -44,7 +44,7 @@ 
class Signal : public QObject } signals: /** - * @brief The signal emited by emitSignal. + * @brief The signal emitted by emitSignal. */ void signal() const; }; @@ -158,7 +158,7 @@ class SignalMatRef : public QObject } signals: /** - * @brief The signal emited by emitSignal. + * @brief The signal emitted by emitSignal. */ void signal(cv::Mat &mat) const; }; diff --git a/modules/cvv/src/qtutil/util.hpp b/modules/cvv/src/qtutil/util.hpp index bd13dee27..f3e4f7180 100644 --- a/modules/cvv/src/qtutil/util.hpp +++ b/modules/cvv/src/qtutil/util.hpp @@ -73,9 +73,9 @@ QSet createStringSet(QString string); std::pair typeToQString(const cv::Mat &mat); /** - * @brief Returns a string descripton to a image conversion result. + * @brief Returns a string description of an image conversion result. * @param result The image conversion result. - * @return The descripton. + * @return The description. */ QString conversionResultToString(const ImageConversionResult &result); diff --git a/modules/cvv/src/stfl/stfl_engine.hpp b/modules/cvv/src/stfl/stfl_engine.hpp index 90c8ad9e0..bb1dc04be 100644 --- a/modules/cvv/src/stfl/stfl_engine.hpp +++ b/modules/cvv/src/stfl/stfl_engine.hpp @@ -47,7 +47,7 @@ template class STFLEngine /** * @brief Constructs (and initializes) a new engine. * - * Use this constructor only if you want to have controll about everything. + * Use this constructor only if you want to have control over everything. * Consider using the simple constructor in combination with the add* * commands instead. 
* diff --git a/modules/datasets/samples/tr_icdar_benchmark.cpp b/modules/datasets/samples/tr_icdar_benchmark.cpp index 32d9a89ff..c053b9237 100644 --- a/modules/datasets/samples/tr_icdar_benchmark.cpp +++ b/modules/datasets/samples/tr_icdar_benchmark.cpp @@ -280,7 +280,7 @@ int main(int argc, char *argv[]) ocr->run(grey, group_img, output, &boxes, &words, &confidences, OCR_LEVEL_WORD); output.erase(remove(output.begin(), output.end(), '\n'), output.end()); - //cout << "OCR output = \"" << output << "\" lenght = " << output.size() << endl; + //cout << "OCR output = \"" << output << "\" length = " << output.size() << endl; if (output.size() < 3) continue; diff --git a/modules/datasets/samples/tr_svt_benchmark.cpp b/modules/datasets/samples/tr_svt_benchmark.cpp index c108249f6..9d2e17659 100644 --- a/modules/datasets/samples/tr_svt_benchmark.cpp +++ b/modules/datasets/samples/tr_svt_benchmark.cpp @@ -233,7 +233,7 @@ int main(int argc, char *argv[]) ocr->run(group_img, output, &boxes, &words, &confidences, OCR_LEVEL_WORD); output.erase(remove(output.begin(), output.end(), '\n'), output.end()); - //cout << "OCR output = \"" << output << "\" lenght = " << output.size() << endl; + //cout << "OCR output = \"" << output << "\" length = " << output.size() << endl; if (output.size() < 3) continue; diff --git a/modules/face/include/opencv2/face.hpp b/modules/face/include/opencv2/face.hpp index 4f38c0cb1..b24c07e41 100644 --- a/modules/face/include/opencv2/face.hpp +++ b/modules/face/include/opencv2/face.hpp @@ -112,7 +112,7 @@ Here is an example of setting a threshold for the Eigenfaces method, when creati int num_components = 10; double threshold = 10.0; // Then if you want to have a cv::FaceRecognizer with a confidence threshold, -// create the concrete implementation with the appropiate parameters: +// create the concrete implementation with the appropriate parameters: Ptr model = EigenFaceRecognizer::create(num_components, threshold); @endcode diff --git 
a/modules/face/include/opencv2/face/face_alignment.hpp b/modules/face/include/opencv2/face/face_alignment.hpp index 612bce12c..e96081c77 100644 --- a/modules/face/include/opencv2/face/face_alignment.hpp +++ b/modules/face/include/opencv2/face/face_alignment.hpp @@ -23,7 +23,7 @@ public: unsigned long tree_depth; /// num_trees_per_cascade_level This stores number of trees fit per cascade level. unsigned long num_trees_per_cascade_level; - /// learning_rate stores the learning rate in gradient boosting, also reffered as shrinkage. + /// learning_rate stores the learning rate in gradient boosting, also referred as shrinkage. float learning_rate; /// oversampling_amount stores number of initialisations used to create training samples. unsigned long oversampling_amount; diff --git a/modules/face/include/opencv2/face/facemark_train.hpp b/modules/face/include/opencv2/face/facemark_train.hpp index 33ecb9464..d6e27e9fa 100644 --- a/modules/face/include/opencv2/face/facemark_train.hpp +++ b/modules/face/include/opencv2/face/facemark_train.hpp @@ -249,7 +249,7 @@ Ptr facemark = FacemarkLBF::create(); The typical pipeline for facemark detection is listed as follows: - (Non-mandatory) Set a user defined face detection using FacemarkTrain::setFaceDetector. - The facemark algorithms are desgined to fit the facial points into a face. + The facemark algorithms are designed to fit the facial points into a face. Therefore, the face information should be provided to the facemark algorithm. Some algorithms might provides a default face recognition function. However, the users might prefer to use their own face detector to obtains the best possible detection result. 
diff --git a/modules/fuzzy/tutorials/inpainting/inpainting.markdown b/modules/fuzzy/tutorials/inpainting/inpainting.markdown index 12cddf156..271785eab 100644 --- a/modules/fuzzy/tutorials/inpainting/inpainting.markdown +++ b/modules/fuzzy/tutorials/inpainting/inpainting.markdown @@ -72,7 +72,7 @@ Using the masks, we applied three different kind of corruption on the same input ![input1, input2 and input3](images/fuzzy_inp_input.jpg) -> Do not forget that in real life usage, images `input1`, `input2` and `input3` are created naturaly and used as the input directly. +> Do not forget that in real life usage, images `input1`, `input2` and `input3` are created naturally and used as the input directly. Declaration of output images follows. In the following lines, the method of inpainting is applied. Let me explain three different algorithms one by one. diff --git a/modules/hfs/include/opencv2/hfs.hpp b/modules/hfs/include/opencv2/hfs.hpp index 2d8441237..948f117c3 100644 --- a/modules/hfs/include/opencv2/hfs.hpp +++ b/modules/hfs/include/opencv2/hfs.hpp @@ -100,7 +100,7 @@ CV_WRAP virtual float getSpatialWeight() = 0; * above(the SLIC stage). It describes the size of each * superpixel when initializing SLIC. Every superpixel * approximately has \f$slicSpixelSize \times slicSpixelSize\f$ -* pixels in the begining. +* pixels in the beginning. 
*/ CV_WRAP virtual void setSlicSpixelSize(int n) = 0; CV_WRAP virtual int getSlicSpixelSize() = 0; diff --git a/modules/line_descriptor/include/opencv2/line_descriptor/descriptor.hpp b/modules/line_descriptor/include/opencv2/line_descriptor/descriptor.hpp index e4b6c335c..15e8fe7b4 100644 --- a/modules/line_descriptor/include/opencv2/line_descriptor/descriptor.hpp +++ b/modules/line_descriptor/include/opencv2/line_descriptor/descriptor.hpp @@ -214,7 +214,7 @@ class CV_EXPORTS_W BinaryDescriptor : public Algorithm @param parameters configuration parameters BinaryDescriptor::Params If no argument is provided, constructor sets default values (see comments in the code snippet in - previous section). Default values are strongly reccomended. + previous section). Default values are strongly recommended. */ BinaryDescriptor( const BinaryDescriptor::Params ¶meters = BinaryDescriptor::Params() ); diff --git a/modules/line_descriptor/src/binary_descriptor.cpp b/modules/line_descriptor/src/binary_descriptor.cpp index 4b7dc95d9..1468e79e9 100644 --- a/modules/line_descriptor/src/binary_descriptor.cpp +++ b/modules/line_descriptor/src/binary_descriptor.cpp @@ -807,7 +807,7 @@ int BinaryDescriptor::OctaveKeyLines( cv::Mat& image, ScaleLines &keyLines ) float diffNearThreshold = ( tempValue > 6 ) ? ( tempValue ) : 6; diffNearThreshold = ( diffNearThreshold < 12 ) ? 
diffNearThreshold : 12; - /* compute scaled lenght of current line */ + /* compute scaled length of current line */ dx = fabs( edLineVec_[octaveCount]->lineEndpoints_[lineCurId][0] - edLineVec_[octaveCount]->lineEndpoints_[lineCurId][2] ); //x1-x2 dy = fabs( edLineVec_[octaveCount]->lineEndpoints_[lineCurId][1] - edLineVec_[octaveCount]->lineEndpoints_[lineCurId][3] ); //y1-y2 length = scale[octaveCount] * sqrt( dx * dx + dy * dy ); diff --git a/modules/matlab/generator/filters.py b/modules/matlab/generator/filters.py index 745392e0b..f1266701c 100644 --- a/modules/matlab/generator/filters.py +++ b/modules/matlab/generator/filters.py @@ -18,7 +18,7 @@ def ninputs(fun): def outputs(args): '''Determines whether any of the given arguments is an output reference, and returns a list of only those elements. - In OpenCV, output references are preceeded by CV_OUT or has *OutputArray* type + In OpenCV, output references are preceded by CV_OUT or has *OutputArray* type ''' try: return [arg for arg in args['only'] if arg.O and not arg.I] diff --git a/modules/sfm/samples/trajectory_reconstruction.cpp b/modules/sfm/samples/trajectory_reconstruction.cpp index d2cadb1a4..4e694bea6 100644 --- a/modules/sfm/samples/trajectory_reconstruction.cpp +++ b/modules/sfm/samples/trajectory_reconstruction.cpp @@ -30,7 +30,7 @@ static void help() { << " \n" << " Each row corresponds to a different point.\n" << " \n" - << " f is the focal lenght in pixels. \n" + << " f is the focal length in pixels. \n" << " cx is the image principal point x coordinates in pixels. \n" << " cy is the image principal point y coordinates in pixels. 
\n" << "------------------------------------------------------------------\n\n" diff --git a/modules/sfm/src/libmv_light/libmv/multiview/homography.cc b/modules/sfm/src/libmv_light/libmv/multiview/homography.cc index 9a9d22b43..8816ef5aa 100644 --- a/modules/sfm/src/libmv_light/libmv/multiview/homography.cc +++ b/modules/sfm/src/libmv_light/libmv/multiview/homography.cc @@ -294,7 +294,7 @@ bool EstimateHomography2DFromCorrespondences( x2_normalized = x2; } - // Assume algebraic estiation always suceeds, + // Assume algebraic estimation always succeeds, Homography2DFromCorrespondencesLinear(x1_normalized, x2_normalized, H); // Denormalize the homography matrix. diff --git a/modules/sfm/src/libmv_light/libmv/multiview/panography.h b/modules/sfm/src/libmv_light/libmv/multiview/panography.h index 6e87bd713..04fad9efa 100644 --- a/modules/sfm/src/libmv_light/libmv/multiview/panography.h +++ b/modules/sfm/src/libmv_light/libmv/multiview/panography.h @@ -38,7 +38,7 @@ namespace libmv { // The 2-point algorithm solves for the rotation of the camera with a single // focal length (4 degrees of freedom). // -// Compute from 1 to 3 possible focal lenght for 2 point correspondences. +// Compute from 1 to 3 possible focal lengths for 2 point correspondences. // Suppose that the cameras share the same optical center and focal lengths: // // Image 1 => H*x = x' => Image 2 diff --git a/modules/sfm/src/libmv_light/libmv/multiview/projection.cc b/modules/sfm/src/libmv_light/libmv/multiview/projection.cc index f8bece3de..39239c67e 100644 --- a/modules/sfm/src/libmv_light/libmv/multiview/projection.cc +++ b/modules/sfm/src/libmv_light/libmv/multiview/projection.cc @@ -110,7 +110,7 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) { // Compute translation. Vec p(3); p << P(0, 3), P(1, 3), P(2, 3); - // TODO(pau) This sould be done by a SolveLinearSystem(A, b, &x) call. + // TODO(pau) This should be done by a SolveLinearSystem(A, b, &x) call. 
// TODO(keir) use the eigen LU solver syntax... Vec3 t = K.inverse() * p; diff --git a/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc b/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc index 2d07ad497..58006e72a 100644 --- a/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc +++ b/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc @@ -470,7 +470,7 @@ void EuclideanBundleCommonIntrinsics( // N-th element denotes whether track N is a constant zero-weigthed track. vector zero_weight_tracks_flags(tracks.MaxTrack() + 1, true); - // Residual blocks with 10 parameters are unwieldly with Ceres, so pack the + // Residual blocks with 10 parameters are unwieldy with Ceres, so pack the // intrinsics into a single block and rely on local parameterizations to // control which intrinsics are allowed to vary. double ceres_intrinsics[OFFSET_MAX]; diff --git a/modules/sfm/tutorials/sfm_scene reconstruction/sfm_scene_reconstruction.markdown b/modules/sfm/tutorials/sfm_scene reconstruction/sfm_scene_reconstruction.markdown index bf9ed7b23..6791b43c2 100644 --- a/modules/sfm/tutorials/sfm_scene reconstruction/sfm_scene_reconstruction.markdown +++ b/modules/sfm/tutorials/sfm_scene reconstruction/sfm_scene_reconstruction.markdown @@ -67,7 +67,7 @@ Finally, the obtained results will be shown in Viz. Usage and Results ----------------- -In order to run this sample we need to specify the path to the image paths files, the focal lenght of the camera in addition to the center projection coordinates (in pixels). +In order to run this sample we need to specify the path to the image paths files, the focal length of the camera in addition to the center projection coordinates (in pixels). **1. 
Middlebury temple** diff --git a/modules/surface_matching/include/opencv2/surface_matching/ppf_helpers.hpp b/modules/surface_matching/include/opencv2/surface_matching/ppf_helpers.hpp index 7b4de88de..8e02b15ef 100644 --- a/modules/surface_matching/include/opencv2/surface_matching/ppf_helpers.hpp +++ b/modules/surface_matching/include/opencv2/surface_matching/ppf_helpers.hpp @@ -59,7 +59,7 @@ namespace ppf_match_3d * @param [in] fileName The PLY model to read * @param [in] withNormals Flag wheather the input PLY contains normal information, * and whether it should be loaded or not - * @return Returns the matrix on successfull load + * @return Returns the matrix on successful load */ CV_EXPORTS_W Mat loadPLYSimple(const char* fileName, int withNormals = 0); diff --git a/modules/surface_matching/src/hash_murmur86.hpp b/modules/surface_matching/src/hash_murmur86.hpp index 0477d37ed..d45688664 100644 --- a/modules/surface_matching/src/hash_murmur86.hpp +++ b/modules/surface_matching/src/hash_murmur86.hpp @@ -261,7 +261,7 @@ uint32_t PMurHash32_Result(uint32_t h, uint32_t carry, uint32_t total_length) /*---------------------------------------------------------------------------*/ -/* Murmur3A compatable all-at-once */ +/* Murmur3A compatible all-at-once */ uint32_t PMurHash32(uint32_t seed, const void *key, int len) { uint32_t h1=seed, carry=0; diff --git a/modules/surface_matching/src/ppf_match_3d.cpp b/modules/surface_matching/src/ppf_match_3d.cpp index d0aef5e11..f8477a38f 100644 --- a/modules/surface_matching/src/ppf_match_3d.cpp +++ b/modules/surface_matching/src/ppf_match_3d.cpp @@ -244,7 +244,7 @@ void PPF3DDetector::trainModel(const Mat &PC) //printf("///////////////////// NEW REFERENCE ////////////////////////\n"); for (int j=0; j(j)); diff --git a/modules/text/include/opencv2/text/ocr.hpp b/modules/text/include/opencv2/text/ocr.hpp index b70b30af7..c1e0d1719 100644 --- a/modules/text/include/opencv2/text/ocr.hpp +++ b/modules/text/include/opencv2/text/ocr.hpp 
@@ -407,7 +407,7 @@ public: This way it hides the feature extractor and the classifier itself, so developers can write their own OCR code. - The default character classifier and feature extractor can be loaded using the utility funtion + The default character classifier and feature extractor can be loaded using the utility function loadOCRBeamSearchClassifierCNN with all its parameters provided in . */ diff --git a/modules/text/src/erfilter.cpp b/modules/text/src/erfilter.cpp index 8e01b8f98..2560b8b05 100644 --- a/modules/text/src/erfilter.cpp +++ b/modules/text/src/erfilter.cpp @@ -2868,8 +2868,8 @@ static float extract_features(Mat &grey, Mat& channel, vector &regions, Scene Text Extraction, arXiv:1407.7504 [cs.CV]. Gomez L. and Karatzas D.: Multi-script Text Extraction from Natural Scenes, ICDAR 2013. - \param _image Original RGB image from wich the regions were extracted. - \param _src Vector of sinle channel images CV_8UC1 from wich the regions were extracted. + \param _image Original RGB image from which the regions were extracted. + \param _src Vector of single channel images CV_8UC1 from which the regions were extracted. \param regions Vector of ER's retrieved from the ERFilter algorithm from each channel \param groups The output of the algorithm are stored in this parameter as list of indexes to provided regions. \param text_boxes The output of the algorithm are stored in this parameter as list of rectangles. @@ -3591,8 +3591,8 @@ bool sort_couples (Vec3i i,Vec3i j) { return (i[0] tracker = TrackerGOTURN::create(); //Load and init full ALOV300++ dataset with a given datasetID, as alternative you can use loadAnnotatedOnly(..) 
- //to load only frames with labled ground truth ~ every 5-th frame + //to load only frames with labelled ground truth ~ every 5-th frame Ptr dataset = TRACK_alov::create(); dataset->load(datasetRootPath); dataset->initDataset(datasetID); diff --git a/modules/tracking/samples/multitracker.cpp b/modules/tracking/samples/multitracker.cpp index a5bafae89..7b21334d5 100644 --- a/modules/tracking/samples/multitracker.cpp +++ b/modules/tracking/samples/multitracker.cpp @@ -6,12 +6,12 @@ * example_tracking_multitracker Bolt/img/%04d.jpg * example_tracking_multitracker faceocc2.webm KCF * - * Note: after the OpenCV libary is installed, + * Note: after the OpenCV library is installed, * please re-compile this code with "HAVE_OPENCV" parameter activated * to enable the high precission of fps computation *--------------------------------------------------*/ -/* after the OpenCV libary is installed +/* after the OpenCV library is installed * please uncomment the the line below and re-compile this code * to enable high precission of fps computation */ @@ -46,7 +46,7 @@ int main( int argc, char** argv ){ " example_tracking_multitracker Bolt/img/%04d.jpg\n" " example_tracking_multitracker faceocc2.webm MEDIANFLOW\n" " \n" - " Note: after the OpenCV libary is installed,\n" + " Note: after the OpenCV library is installed,\n" " please re-compile with the HAVE_OPENCV parameter activated\n" " to enable the high precission of fps computation.\n" << endl; diff --git a/modules/tracking/src/trackerCSRTUtils.cpp b/modules/tracking/src/trackerCSRTUtils.cpp index 15aa00a16..923bdd153 100644 --- a/modules/tracking/src/trackerCSRTUtils.cpp +++ b/modules/tracking/src/trackerCSRTUtils.cpp @@ -36,7 +36,7 @@ Mat gaussian_shaped_labels(const float sigma, const int w, const int h) y.at(i,j) = (float)exp((-0.5 / pow(sigma, 2)) * (pow((i+1-h2), 2) + pow((j+1-w2), 2))); } } - // wrap-arround with the circulat shifting + // wrap-around with the circular shifting y = circshift(y, -cvFloor(y.cols / 2), 
-cvFloor(y.rows / 2)); Mat yf; dft(y, yf, DFT_COMPLEX_OUTPUT); diff --git a/modules/tracking/tutorials/tutorial_customizing_cn_tracker.markdown b/modules/tracking/tutorials/tutorial_customizing_cn_tracker.markdown index 4d52e2d73..d3eb4b58c 100644 --- a/modules/tracking/tutorials/tutorial_customizing_cn_tracker.markdown +++ b/modules/tracking/tutorials/tutorial_customizing_cn_tracker.markdown @@ -61,7 +61,7 @@ If you need a more detailed information to use @ref cv::Tracker, please refer to -# **Defining the feature** - In this tutorial, the extracted feature is reponse of the Sobel filter in x and y direction. + In this tutorial, the extracted feature is response of the Sobel filter in x and y direction. Those Sobel filter responses are concatenated, resulting a feature with 2 channels. @snippet tracking/samples/tutorial_customizing_cn_tracker.cpp sobel diff --git a/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp b/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp index 4eeba5ea5..ca958c8b1 100644 --- a/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp +++ b/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp @@ -948,7 +948,7 @@ public: /** @brief Estimates cornerness for prespecified KeyPoints using the FAST algorithm @param image grayscale image where keypoints (corners) are detected. -@param keypoints keypoints which should be tested to fit the FAST criteria. Keypoints not beeing +@param keypoints keypoints which should be tested to fit the FAST criteria. Keypoints not being detected as corners are removed. @param threshold threshold on difference between intensity of the central pixel and pixels of a circle around this pixel. 
diff --git a/modules/xfeatures2d/src/boostdesc.cpp b/modules/xfeatures2d/src/boostdesc.cpp index 4ccc0c94c..3f7ee225f 100644 --- a/modules/xfeatures2d/src/boostdesc.cpp +++ b/modules/xfeatures2d/src/boostdesc.cpp @@ -138,7 +138,7 @@ protected: // patch size int m_patch_size; - // orient quantitiy + // orient quantity int m_orient_q; // patch scale factor diff --git a/modules/xfeatures2d/src/surf.cpp b/modules/xfeatures2d/src/surf.cpp index c81a56b3c..7629af7a3 100644 --- a/modules/xfeatures2d/src/surf.cpp +++ b/modules/xfeatures2d/src/surf.cpp @@ -7,7 +7,7 @@ * * There are still serveral lacks for this experimental implementation: * 1.The interpolation of sub-pixel mentioned in article was not implemented yet; - * 2.A comparision with original libSurf.so shows that the hessian detector is not a 100% match to their implementation; + * 2.A comparison with original libSurf.so shows that the hessian detector is not a 100% match to their implementation; * 3.Due to above reasons, I recommanded the original one for study and reuse; * * However, the speed of this implementation is something comparable to original one. diff --git a/modules/ximgproc/include/opencv2/ximgproc/ridgefilter.hpp b/modules/ximgproc/include/opencv2/ximgproc/ridgefilter.hpp index 185202bf8..7f21e730f 100644 --- a/modules/ximgproc/include/opencv2/ximgproc/ridgefilter.hpp +++ b/modules/ximgproc/include/opencv2/ximgproc/ridgefilter.hpp @@ -5,7 +5,7 @@ /* Ridge Detection Filter. OpenCV port by : Kushal Vyas (@kushalvyas), Venkatesh Vijaykumar(@venkateshvijaykumar) -Adapted from Niki Estner's explaination of RidgeFilter. +Adapted from Niki Estner's explanation of RidgeFilter. 
*/ #ifndef __OPENCV_XIMGPROC_RIDGEFILTER_HPP__ diff --git a/modules/ximgproc/include/opencv2/ximgproc/segmentation.hpp b/modules/ximgproc/include/opencv2/ximgproc/segmentation.hpp index 19d032bac..02346aa90 100644 --- a/modules/ximgproc/include/opencv2/ximgproc/segmentation.hpp +++ b/modules/ximgproc/include/opencv2/ximgproc/segmentation.hpp @@ -73,9 +73,9 @@ namespace cv { */ class CV_EXPORTS_W SelectiveSearchSegmentationStrategy : public Algorithm { public: - /** @brief Set a initial image, with a segementation. + /** @brief Set a initial image, with a segmentation. @param img The input image. Any number of channel can be provided - @param regions A segementation of the image. The parameter must be the same size of img. + @param regions A segmentation of the image. The parameter must be the same size of img. @param sizes The sizes of different regions @param image_id If not set to -1, try to cache pre-computations. If the same set og (img, regions, size) is used, the image_id need to be the same. */ diff --git a/modules/ximgproc/include/opencv2/ximgproc/slic.hpp b/modules/ximgproc/include/opencv2/ximgproc/slic.hpp index f9f125c54..1f789dfef 100644 --- a/modules/ximgproc/include/opencv2/ximgproc/slic.hpp +++ b/modules/ximgproc/include/opencv2/ximgproc/slic.hpp @@ -72,7 +72,7 @@ to efficiently generate compact, nearly uniform superpixels. The simplicity of a extremely easy to use a lone parameter specifies the number of superpixels and the efficiency of the algorithm makes it very practical. Several optimizations are available for SLIC class: -SLICO stands for "Zero parameter SLIC" and it is an optimization of baseline SLIC descibed in @cite Achanta2012. +SLICO stands for "Zero parameter SLIC" and it is an optimization of baseline SLIC described in @cite Achanta2012. MSLIC stands for "Manifold SLIC" and it is an optimization of baseline SLIC described in @cite Liu_2017_IEEE. 
 */ diff --git a/modules/ximgproc/src/graphsegmentation.cpp b/modules/ximgproc/src/graphsegmentation.cpp index 4a11a4324..77a8ed40f 100644 --- a/modules/ximgproc/src/graphsegmentation.cpp +++ b/modules/ximgproc/src/graphsegmentation.cpp @@ -219,7 +219,7 @@ namespace cv { // Sort edges std::sort(edges, edges + nb_edges); - // Create a set with all point (by default mapped to themselfs) + // Create a set with all points (by default mapped to themselves) *es = new PointSet(img_filtered.cols * img_filtered.rows); // Thresholds diff --git a/modules/ximgproc/src/seeds.cpp b/modules/ximgproc/src/seeds.cpp index c851f12a6..268564b3f 100644 --- a/modules/ximgproc/src/seeds.cpp +++ b/modules/ximgproc/src/seeds.cpp @@ -1104,7 +1104,7 @@ float SuperpixelSEEDSImpl::intersectConf(int level1, int label1A, int label1B, * - intersection B = * intersection of (level1, label1B) - (level2, label2) and (level2, label2) * where (level1, label1B) - (level2, label2) - * is the substraction of 2 histograms (-> delete_block method) + * is the subtraction of 2 histograms (-> delete_block method) * - returns the difference between the 2 intersections: intA - intB */ diff --git a/modules/xphoto/src/bm3d_denoising_transforms.hpp b/modules/xphoto/src/bm3d_denoising_transforms.hpp index 33eee25c4..ded5c53bb 100644 --- a/modules/xphoto/src/bm3d_denoising_transforms.hpp +++ b/modules/xphoto/src/bm3d_denoising_transforms.hpp @@ -49,7 +49,7 @@ namespace cv namespace xphoto { -// Following class contains interface of the tranform domain functions. +// Following class contains interface of the transform domain functions. template class Transform {