backport commit 8c09249352
pull/2375/head
Brian Wignall (5 years ago), committed by Alexander Alekhin
parent e70a4c423b
commit 078c45633d
  1. modules/aruco/include/opencv2/aruco.hpp (2 changed lines)
  2. modules/aruco/src/apriltag_quad_thresh.cpp (2 changed lines)
  3. modules/aruco/src/charuco.cpp (2 changed lines)
  4. modules/aruco/src/dictionary.cpp (2 changed lines)
  5. modules/aruco/tutorials/charuco_detection/charuco_detection.markdown (2 changed lines)
  6. modules/aruco/tutorials/charuco_diamond_detection/charuco_diamond_detection.markdown (2 changed lines)
  7. modules/bioinspired/doc/retina.markdown (2 changed lines)
  8. modules/bioinspired/samples/OpenEXRimages_HDR_Retina_toneMapping.cpp (4 changed lines)
  9. modules/bioinspired/samples/cpp/OpenEXRimages_HDR_Retina_toneMapping.cpp (4 changed lines)
  10. modules/bioinspired/src/retina_ocl.cpp (4 changed lines)
  11. modules/ccalib/tutorials/omnidir_tutorial.markdown (2 changed lines)
  12. modules/cvv/src/controller/view_controller.hpp (4 changed lines)
  13. modules/cvv/src/extension_api/api.hpp (2 changed lines)
  14. modules/cvv/src/gui/overview_group_subtable.hpp (2 changed lines)
  15. modules/cvv/src/qtutil/matchview/cvvkeypoint.hpp (2 changed lines)
  16. modules/cvv/src/qtutil/matchview/keypointmanagement.hpp (2 changed lines)
  17. modules/cvv/src/qtutil/matchview/keypointselectionselector.hpp (2 changed lines)
  18. modules/cvv/src/qtutil/matchview/keypointsettingsselector.hpp (2 changed lines)
  19. modules/cvv/src/qtutil/matchview/keypointvaluechooser.hpp (6 changed lines)
  20. modules/cvv/src/qtutil/matchview/matchmanagement.hpp (2 changed lines)
  21. modules/cvv/src/qtutil/matchview/matchselectionselector.hpp (2 changed lines)
  22. modules/cvv/src/qtutil/matchview/matchsettingsselector.hpp (2 changed lines)
  23. modules/cvv/src/qtutil/matchview/singlecolorkeypointpen.hpp (2 changed lines)
  24. modules/cvv/src/qtutil/matchview/singlecolormatchpen.hpp (2 changed lines)
  25. modules/cvv/src/qtutil/signalslot.hpp (4 changed lines)
  26. modules/cvv/src/qtutil/util.hpp (4 changed lines)
  27. modules/cvv/src/stfl/stfl_engine.hpp (2 changed lines)
  28. modules/datasets/samples/tr_icdar_benchmark.cpp (2 changed lines)
  29. modules/datasets/samples/tr_svt_benchmark.cpp (2 changed lines)
  30. modules/face/include/opencv2/face.hpp (2 changed lines)
  31. modules/face/include/opencv2/face/face_alignment.hpp (2 changed lines)
  32. modules/face/include/opencv2/face/facemark_train.hpp (2 changed lines)
  33. modules/fuzzy/tutorials/inpainting/inpainting.markdown (2 changed lines)
  34. modules/hfs/include/opencv2/hfs.hpp (2 changed lines)
  35. modules/line_descriptor/include/opencv2/line_descriptor/descriptor.hpp (2 changed lines)
  36. modules/line_descriptor/src/binary_descriptor.cpp (2 changed lines)
  37. modules/matlab/generator/filters.py (2 changed lines)
  38. modules/sfm/samples/trajectory_reconstruction.cpp (2 changed lines)
  39. modules/sfm/src/libmv_light/libmv/multiview/homography.cc (2 changed lines)
  40. modules/sfm/src/libmv_light/libmv/multiview/panography.h (2 changed lines)
  41. modules/sfm/src/libmv_light/libmv/multiview/projection.cc (2 changed lines)
  42. modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc (2 changed lines)
  43. modules/sfm/tutorials/sfm_scene reconstruction/sfm_scene_reconstruction.markdown (2 changed lines)
  44. modules/surface_matching/include/opencv2/surface_matching/ppf_helpers.hpp (2 changed lines)
  45. modules/surface_matching/src/hash_murmur86.hpp (2 changed lines)
  46. modules/surface_matching/src/ppf_match_3d.cpp (2 changed lines)
  47. modules/text/include/opencv2/text/ocr.hpp (2 changed lines)
  48. modules/text/src/erfilter.cpp (8 changed lines)
  49. modules/tracking/include/opencv2/tracking.hpp (2 changed lines)
  50. modules/tracking/samples/goturnTracker.cpp (2 changed lines)
  51. modules/tracking/samples/multitracker.cpp (6 changed lines)
  52. modules/tracking/src/trackerCSRTUtils.cpp (2 changed lines)
  53. modules/tracking/tutorials/tutorial_customizing_cn_tracker.markdown (2 changed lines)
  54. modules/xfeatures2d/include/opencv2/xfeatures2d.hpp (2 changed lines)
  55. modules/xfeatures2d/src/boostdesc.cpp (2 changed lines)
  56. modules/xfeatures2d/src/surf.cpp (2 changed lines)
  57. modules/ximgproc/include/opencv2/ximgproc/ridgefilter.hpp (2 changed lines)
  58. modules/ximgproc/include/opencv2/ximgproc/segmentation.hpp (4 changed lines)
  59. modules/ximgproc/include/opencv2/ximgproc/slic.hpp (2 changed lines)
  60. modules/ximgproc/src/graphsegmentation.cpp (2 changed lines)
  61. modules/ximgproc/src/seeds.cpp (2 changed lines)
  62. modules/xphoto/src/bm3d_denoising_transforms.hpp (2 changed lines)

@@ -354,7 +354,7 @@ class CV_EXPORTS_W GridBoard : public Board {
// number of markers in X and Y directions
int _markersX, _markersY;
-// marker side lenght (normally in meters)
+// marker side length (normally in meters)
float _markerLength;
// separation between markers in the grid

@@ -661,7 +661,7 @@ int fit_quad(const Ptr<DetectorParameters> &_params, const Mat im, zarray_t *clu
if (dot < 0)
return 0;
-// we now sort the points according to theta. This is a prepatory
+// we now sort the points according to theta. This is a preparatory
// step for segmenting them into four lines.
if (1) {
// zarray_sort(cluster, pt_compare_theta);

@@ -616,7 +616,7 @@ void drawDetectedCornersCharuco(InputOutputArray _image, InputArray _charucoCorn
/**
* Check if a set of 3d points are enough for calibration. Z coordinate is ignored.
-* Only axis paralel lines are considered
+* Only axis parallel lines are considered
*/
static bool _arePointsEnoughForPoseEstimation(const vector< Point3f > &points) {

@@ -122,7 +122,7 @@ bool Dictionary::identify(const Mat &onlyBits, int &idx, int &rotation,
}
}
-// if maxCorrection is fullfilled, return this one
+// if maxCorrection is fulfilled, return this one
if(currentMinDistance <= maxCorrectionRecalculed) {
idx = m;
rotation = currentRotation;

@@ -32,7 +32,7 @@ This class, as the rest of ChArUco functionalities, are defined in:
#include <opencv2/aruco/charuco.hpp>
@endcode
-To define a ```CharucoBoard```, it is necesary:
+To define a ```CharucoBoard```, it is necessary:
- Number of chessboard squares in X direction.
- Number of chessboard squares in Y direction.

@@ -102,7 +102,7 @@ function and, for each diamond, the corners are represented in the same order th
starting with the top-left corner. The second returned parameter, ```diamondIds```, contains all the ids of the returned
diamond corners in ```diamondCorners```. Each id is actually an array of 4 integers that can be represented with ```Vec4i```.
-The detected diamond can be visualized using the function ```drawDetectedDiamonds()``` which simply recieves the image and the diamond
+The detected diamond can be visualized using the function ```drawDetectedDiamonds()``` which simply receives the image and the diamond
corners and ids:
@code{.cpp}

@@ -134,7 +134,7 @@ For more information, refer to the following papers :
- Please have a look at the reference work of Jeanny Herault that you can read in his book : @cite Herault2010
-This retina filter code includes the research contributions of phd/research collegues from which
+This retina filter code includes the research contributions of phd/research colleagues from which
code has been redrawn by the author :
- take a look at the *retinacolor.hpp* module to discover Brice Chaix de Lavarene phD color

@@ -75,7 +75,7 @@ static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, con
cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
}
-// get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation
+// get histogram density probability in order to cut values under above edges limits (here 5-95%)... useful for HDR pixel errors cancellation
cv::Mat dst, hist;
int histSize = 256;
calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
@@ -233,7 +233,7 @@ int main(int argc, char* argv[])
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
retina->write("RetinaDefaultParameters.xml");
-// desactivate Magnocellular pathway processing (motion information extraction) since it is not usefull here
+// desactivate Magnocellular pathway processing (motion information extraction) since it is not useful here
retina->activateMovingContoursProcessing(false);
// declare retina output buffers

@@ -74,7 +74,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
}
-// get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation
+// get histogram density probability in order to cut values under above edges limits (here 5-95%)... useful for HDR pixel errors cancellation
cv::Mat dst, hist;
int histSize = 256;
calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
@@ -231,7 +231,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
retina->write("RetinaDefaultParameters.xml");
-// desactivate Magnocellular pathway processing (motion information extraction) since it is not usefull here
+// desactivate Magnocellular pathway processing (motion information extraction) since it is not useful here
retina->activateMovingContoursProcessing(false);
// declare retina output buffers

@@ -1367,7 +1367,7 @@ void RetinaFilter::setGlobalParameters(const float OPLspatialResponse1, const fl
_normalizeMagnoOutput_0_maxOutputValue = normalizeMagnoOutput_0_maxOutputValue;
_maxOutputValue = maxOutputValue;
_photoreceptorsPrefilter.setV0CompressionParameter(0.9f, maxInputValue, meanValue);
-_photoreceptorsPrefilter.setLPfilterParameters(0, 0, 10, 3); // keeps low pass filter with low cut frequency in memory (usefull for the tone mapping function)
+_photoreceptorsPrefilter.setLPfilterParameters(0, 0, 10, 3); // keeps low pass filter with low cut frequency in memory (useful for the tone mapping function)
_ParvoRetinaFilter.setOPLandParvoFiltersParameters(0, OPLtemporalresponse1, OPLspatialResponse1, OPLassymetryGain, OPLtemporalresponse2, OPLspatialResponse2);
_ParvoRetinaFilter.setV0CompressionParameter(0.9f, maxInputValue, meanValue);
_MagnoRetinaFilter.setCoefficientsTable(LPfilterGain, LPfilterTemporalresponse, LPfilterSpatialResponse, MovingContoursExtractorCoefficient, 0, 2.0f * LPfilterSpatialResponse);
@@ -1433,7 +1433,7 @@ bool RetinaFilter::runFilter(const UMat &imageInput, const bool useAdaptiveFilte
if (_useParvoOutput)
{
-_ParvoRetinaFilter.normalizeGrayOutputCentredSigmoide(); // models the saturation of the cells, usefull for visualisation of the ON-OFF Parvo Output, Bipolar cells outputs do not change !!!
+_ParvoRetinaFilter.normalizeGrayOutputCentredSigmoide(); // models the saturation of the cells, useful for visualisation of the ON-OFF Parvo Output, Bipolar cells outputs do not change !!!
_ParvoRetinaFilter.centerReductImageLuminance(); // best for further spectrum analysis
if (_normalizeParvoOutput_0_maxOutputValue)

@@ -153,7 +153,7 @@ The first step of stereo reconstruction is stereo rectification so that epipolar
The API of stereo reconstruction for omnidrectional camera is ```omnidir::stereoReconstruct```. Here we use an example to show how it works.
-First, calibrate a stereo pair of cameras as describe above and get parameters like ```K1```, ```D1```, ```xi1```, ```K2```, ```D2```, ```xi2```, ```rvec```, ```tvec```. Then read two images from the first and second camera respectively, for instance, ```image1``` and ```image2```, which are shown below.
+First, calibrate a stereo pair of cameras as described above and get parameters like ```K1```, ```D1```, ```xi1```, ```K2```, ```D2```, ```xi2```, ```rvec```, ```tvec```. Then read two images from the first and second camera respectively, for instance, ```image1``` and ```image2```, which are shown below.
![image](img/imgs.jpg)

@@ -191,13 +191,13 @@ class ViewController
void showCallTab(size_t tabId);
/**
-* @brief Shows the tab and opens it if neccessary.
+* @brief Shows the tab and opens it if necessary.
* @param tabId id of the tab
*/
void showAndOpenCallTab(size_t tabId);
/**
-* @brief Opens the tab it if neccessary.
+* @brief Opens the tab if necessary.
* @param tabId id of the tab
*/
void openCallTab(size_t tabId);

@@ -43,7 +43,7 @@ template <class MView> void addMatchView(const QString name)
using TabFactory = controller::TabFactory;
/**
* @brief Introduces a new call-type.
-* @param factory A function that recieves a reference to a call and should
+* @param factory A function that receives a reference to a call and should
* return the appropriate
* window.
*/

@@ -72,7 +72,7 @@ class OverviewGroupSubtable : public QWidget
/**
* @brief Set the displayed rows.
* @note This method does some optimisations to only fully rebuild all
-* rows if neccessary.
+* rows if necessary.
* @param newGroup new group of rows that will be displayed
*/
void setRowGroup(stfl::ElementGroup<OverviewTableRow> &newGroup);

@@ -93,7 +93,7 @@ class CVVKeyPoint : public QGraphicsObject,public cv::KeyPoint
signals:
/**
-* @brief this signal will be emited when the imagepoint in the scene
+* @brief this signal will be emitted when the imagepoint in the scene
* has changed
* @param visible it is true if this keypoint is in the visibleArea
*/

@@ -112,7 +112,7 @@ public slots:
signals:
/**
-* @brief this signal will be emited when the selection was changed.
+* @brief this signal will be emitted when the selection was changed.
* it can be used for syncronisation with other selector
*/
void updateSelection(const std::vector<cv::KeyPoint> &selection);

@@ -11,7 +11,7 @@
namespace cvv{ namespace qtutil{
/**
-* @brief this class can use diffrent KeyPointSelection
+* @brief this class can use different KeyPointSelection
* you can register functions which take a std::vector<cv::KeyPoint> as argument.
*/
class KeyPointSelectionSelector:public KeyPointSelection,public RegisterHelper<KeyPointSelection,std::vector<cv::KeyPoint>>{

@@ -11,7 +11,7 @@
namespace cvv{ namespace qtutil{
/**
-* @brief this class can use diffrent KeyPointSettings
+* @brief this class can use different KeyPointSettings
* you can register functios which take a std::vector<cv::DMatch> as argument.
*/
class KeyPointSettingsSelector:public KeyPointSettings, public RegisterHelper<KeyPointSettings,std::vector<cv::KeyPoint>>{

@@ -11,7 +11,7 @@ namespace cvv{ namespace qtutil{
/**
* @brief this widget contains a combobox with the attributes of an keypoint as entry.
-* you cann call the method getChoosenValue which return the choosen value of the given keypoint
+* you can call the method getChoosenValue which return the chosen value of the given keypoint
*/
class KeyPointValueChooser:public QWidget{
@@ -25,8 +25,8 @@ public:
KeyPointValueChooser(QWidget *parent=nullptr);
/**
-* @brief returns the choosen value of the given keypoint
-* @return the choosen value of the given keypoint
+* @brief returns the chosen value of the given keypoint
+* @return the chosen value of the given keypoint
*/
double getChoosenValue(cv::KeyPoint keypoint);

@@ -110,7 +110,7 @@ public slots:
signals:
/**
-* @brief this signal will be emited when the selection was changed.
+* @brief this signal will be emitted when the selection was changed.
* it can be used for syncronisation with other selector
*/
void updateSelection(const std::vector<cv::DMatch> &selection);

@@ -11,7 +11,7 @@
namespace cvv{ namespace qtutil{
/**
-* @brief this class can use diffrent MatchSelection
+* @brief this class can use different MatchSelection
* you can register functions which take a std::vector<cv::DMatch> as argument.
*/
class MatchSelectionSelector:public MatchSelection,public RegisterHelper<MatchSelection,std::vector<cv::DMatch>>{

@@ -11,7 +11,7 @@
namespace cvv{ namespace qtutil{
/**
-* @brief this class can use diffrent MatchSettings
+* @brief this class can use different MatchSettings
* you can register functios which take a std::vector<cv::DMatch> as argument.
*/
class MatchSettingsSelector:public MatchSettings, public RegisterHelper<MatchSettings,std::vector<cv::DMatch>>{

@@ -13,7 +13,7 @@ namespace qtutil
/**
* This KeyPointPen return for all CVVKeyPoints the same Color,
-* the Color can be choosen by an QColorDialog
+* the Color can be chosen by an QColorDialog
*/
class SingleColorKeyPen : public KeyPointSettings

@@ -16,7 +16,7 @@ namespace qtutil
/**
* This MatchPen return for all CVVMatches the same Color,
-* the Color can be choosen by an QColorDialog
+* the Color can be chosen by an QColorDialog
*/
class SingleColorMatchPen : public MatchSettings

@@ -44,7 +44,7 @@ class Signal : public QObject
}
signals:
/**
-* @brief The signal emited by emitSignal.
+* @brief The signal emitted by emitSignal.
*/
void signal() const;
};
@@ -158,7 +158,7 @@ class SignalMatRef : public QObject
}
signals:
/**
-* @brief The signal emited by emitSignal.
+* @brief The signal emitted by emitSignal.
*/
void signal(cv::Mat &mat) const;
};

@@ -73,9 +73,9 @@ QSet<QString> createStringSet(QString string);
std::pair<bool, QString> typeToQString(const cv::Mat &mat);
/**
-* @brief Returns a string descripton to a image conversion result.
+* @brief Returns a string description to a image conversion result.
* @param result The image conversion result.
-* @return The descripton.
+* @return The description.
*/
QString conversionResultToString(const ImageConversionResult &result);

@@ -47,7 +47,7 @@ template <typename Element> class STFLEngine
/**
* @brief Constructs (and initializes) a new engine.
*
-* Use this constructor only if you want to have controll about everything.
+* Use this constructor only if you want to have control about everything.
* Consider using the simple constructor in combination with the add*
* commands instead.
*

@@ -280,7 +280,7 @@ int main(int argc, char *argv[])
ocr->run(grey, group_img, output, &boxes, &words, &confidences, OCR_LEVEL_WORD);
output.erase(remove(output.begin(), output.end(), '\n'), output.end());
-//cout << "OCR output = \"" << output << "\" lenght = " << output.size() << endl;
+//cout << "OCR output = \"" << output << "\" length = " << output.size() << endl;
if (output.size() < 3)
continue;

@@ -233,7 +233,7 @@ int main(int argc, char *argv[])
ocr->run(group_img, output, &boxes, &words, &confidences, OCR_LEVEL_WORD);
output.erase(remove(output.begin(), output.end(), '\n'), output.end());
-//cout << "OCR output = \"" << output << "\" lenght = " << output.size() << endl;
+//cout << "OCR output = \"" << output << "\" length = " << output.size() << endl;
if (output.size() < 3)
continue;

@@ -112,7 +112,7 @@ Here is an example of setting a threshold for the Eigenfaces method, when creati
int num_components = 10;
double threshold = 10.0;
// Then if you want to have a cv::FaceRecognizer with a confidence threshold,
-// create the concrete implementation with the appropiate parameters:
+// create the concrete implementation with the appropriate parameters:
Ptr<FaceRecognizer> model = EigenFaceRecognizer::create(num_components, threshold);
@endcode

@@ -23,7 +23,7 @@ public:
unsigned long tree_depth;
/// num_trees_per_cascade_level This stores number of trees fit per cascade level.
unsigned long num_trees_per_cascade_level;
-/// learning_rate stores the learning rate in gradient boosting, also reffered as shrinkage.
+/// learning_rate stores the learning rate in gradient boosting, also referred as shrinkage.
float learning_rate;
/// oversampling_amount stores number of initialisations used to create training samples.
unsigned long oversampling_amount;

@@ -249,7 +249,7 @@ Ptr<Facemark> facemark = FacemarkLBF::create();
The typical pipeline for facemark detection is listed as follows:
- (Non-mandatory) Set a user defined face detection using FacemarkTrain::setFaceDetector.
-The facemark algorithms are desgined to fit the facial points into a face.
+The facemark algorithms are designed to fit the facial points into a face.
Therefore, the face information should be provided to the facemark algorithm.
Some algorithms might provides a default face recognition function.
However, the users might prefer to use their own face detector to obtains the best possible detection result.

@@ -72,7 +72,7 @@ Using the masks, we applied three different kind of corruption on the same input
![input1, input2 and input3](images/fuzzy_inp_input.jpg)
-> Do not forget that in real life usage, images `input1`, `input2` and `input3` are created naturaly and used as the input directly.
+> Do not forget that in real life usage, images `input1`, `input2` and `input3` are created naturally and used as the input directly.
Declaration of output images follows. In the following lines, the method of inpainting is applied. Let me explain three different algorithms one by one.

@@ -100,7 +100,7 @@ CV_WRAP virtual float getSpatialWeight() = 0;
* above(the SLIC stage). It describes the size of each
* superpixel when initializing SLIC. Every superpixel
* approximately has \f$slicSpixelSize \times slicSpixelSize\f$
-* pixels in the begining.
+* pixels in the beginning.
*/
CV_WRAP virtual void setSlicSpixelSize(int n) = 0;
CV_WRAP virtual int getSlicSpixelSize() = 0;

@@ -214,7 +214,7 @@ class CV_EXPORTS_W BinaryDescriptor : public Algorithm
@param parameters configuration parameters BinaryDescriptor::Params
If no argument is provided, constructor sets default values (see comments in the code snippet in
-previous section). Default values are strongly reccomended.
+previous section). Default values are strongly recommended.
*/
BinaryDescriptor( const BinaryDescriptor::Params &parameters = BinaryDescriptor::Params() );

@@ -807,7 +807,7 @@ int BinaryDescriptor::OctaveKeyLines( cv::Mat& image, ScaleLines &keyLines )
float diffNearThreshold = ( tempValue > 6 ) ? ( tempValue ) : 6;
diffNearThreshold = ( diffNearThreshold < 12 ) ? diffNearThreshold : 12;
-/* compute scaled lenght of current line */
+/* compute scaled length of current line */
dx = fabs( edLineVec_[octaveCount]->lineEndpoints_[lineCurId][0] - edLineVec_[octaveCount]->lineEndpoints_[lineCurId][2] ); //x1-x2
dy = fabs( edLineVec_[octaveCount]->lineEndpoints_[lineCurId][1] - edLineVec_[octaveCount]->lineEndpoints_[lineCurId][3] ); //y1-y2
length = scale[octaveCount] * sqrt( dx * dx + dy * dy );

@@ -18,7 +18,7 @@ def ninputs(fun):
def outputs(args):
'''Determines whether any of the given arguments is an output
reference, and returns a list of only those elements.
-In OpenCV, output references are preceeded by CV_OUT or has *OutputArray* type
+In OpenCV, output references are preceded by CV_OUT or has *OutputArray* type
'''
try:
return [arg for arg in args['only'] if arg.O and not arg.I]

@@ -30,7 +30,7 @@ static void help() {
<< " \n"
<< " Each row corresponds to a different point.\n"
<< " \n"
-<< " f is the focal lenght in pixels. \n"
+<< " f is the focal length in pixels. \n"
<< " cx is the image principal point x coordinates in pixels. \n"
<< " cy is the image principal point y coordinates in pixels. \n"
<< "------------------------------------------------------------------\n\n"

@@ -294,7 +294,7 @@ bool EstimateHomography2DFromCorrespondences(
x2_normalized = x2;
}
-// Assume algebraic estiation always suceeds,
+// Assume algebraic estiation always succeeds,
Homography2DFromCorrespondencesLinear(x1_normalized, x2_normalized, H);
// Denormalize the homography matrix.

@@ -38,7 +38,7 @@ namespace libmv {
// The 2-point algorithm solves for the rotation of the camera with a single
// focal length (4 degrees of freedom).
//
-// Compute from 1 to 3 possible focal lenght for 2 point correspondences.
+// Compute from 1 to 3 possible focal length for 2 point correspondences.
// Suppose that the cameras share the same optical center and focal lengths:
//
// Image 1 => H*x = x' => Image 2

@@ -110,7 +110,7 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
// Compute translation.
Vec p(3);
p << P(0, 3), P(1, 3), P(2, 3);
-// TODO(pau) This sould be done by a SolveLinearSystem(A, b, &x) call.
+// TODO(pau) This should be done by a SolveLinearSystem(A, b, &x) call.
// TODO(keir) use the eigen LU solver syntax...
Vec3 t = K.inverse() * p;

@@ -470,7 +470,7 @@ void EuclideanBundleCommonIntrinsics(
// N-th element denotes whether track N is a constant zero-weigthed track.
vector<bool> zero_weight_tracks_flags(tracks.MaxTrack() + 1, true);
-// Residual blocks with 10 parameters are unwieldly with Ceres, so pack the
+// Residual blocks with 10 parameters are unwieldy with Ceres, so pack the
// intrinsics into a single block and rely on local parameterizations to
// control which intrinsics are allowed to vary.
double ceres_intrinsics[OFFSET_MAX];

@@ -67,7 +67,7 @@ Finally, the obtained results will be shown in Viz.
Usage and Results
-----------------
-In order to run this sample we need to specify the path to the image paths files, the focal lenght of the camera in addition to the center projection coordinates (in pixels).
+In order to run this sample we need to specify the path to the image paths files, the focal length of the camera in addition to the center projection coordinates (in pixels).
**1. Middlebury temple**

@@ -59,7 +59,7 @@ namespace ppf_match_3d
* @param [in] fileName The PLY model to read
* @param [in] withNormals Flag wheather the input PLY contains normal information,
* and whether it should be loaded or not
-* @return Returns the matrix on successfull load
+* @return Returns the matrix on successful load
*/
CV_EXPORTS_W Mat loadPLYSimple(const char* fileName, int withNormals = 0);

@@ -261,7 +261,7 @@ uint32_t PMurHash32_Result(uint32_t h, uint32_t carry, uint32_t total_length)
/*---------------------------------------------------------------------------*/
-/* Murmur3A compatable all-at-once */
+/* Murmur3A compatible all-at-once */
uint32_t PMurHash32(uint32_t seed, const void *key, int len)
{
uint32_t h1=seed, carry=0;

@@ -244,7 +244,7 @@ void PPF3DDetector::trainModel(const Mat &PC)
//printf("///////////////////// NEW REFERENCE ////////////////////////\n");
for (int j=0; j<numRefPoints; j++)
{
-// cannnot compute the ppf with myself
+// cannot compute the ppf with myself
if (i!=j)
{
const Vec3f p2(sampled.ptr<float>(j));

@@ -407,7 +407,7 @@ public:
This way it hides the feature extractor and the classifier itself, so developers can write
their own OCR code.
-The default character classifier and feature extractor can be loaded using the utility funtion
+The default character classifier and feature extractor can be loaded using the utility function
loadOCRBeamSearchClassifierCNN with all its parameters provided in
<https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRBeamSearch_CNN_model_data.xml.gz>.
*/

@@ -2868,8 +2868,8 @@ static float extract_features(Mat &grey, Mat& channel, vector<ERStat> &regions,
Scene Text Extraction, arXiv:1407.7504 [cs.CV].
Gomez L. and Karatzas D.: Multi-script Text Extraction from Natural Scenes, ICDAR 2013.
-\param _image Original RGB image from wich the regions were extracted.
-\param _src Vector of sinle channel images CV_8UC1 from wich the regions were extracted.
+\param _image Original RGB image from which the regions were extracted.
+\param _src Vector of sinle channel images CV_8UC1 from which the regions were extracted.
\param regions Vector of ER's retrieved from the ERFilter algorithm from each channel
\param groups The output of the algorithm are stored in this parameter as list of indexes to provided regions.
\param text_boxes The output of the algorithm are stored in this parameter as list of rectangles.
@@ -3591,8 +3591,8 @@ bool sort_couples (Vec3i i,Vec3i j) { return (i[0]<j[0]); }
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
Neumann L., Matas J.: A method for text localization and detection, ACCV 2010
-\param _img Original RGB image from wich the regions were extracted.
-\param _src Vector of sinle channel images CV_8UC1 from wich the regions were extracted.
+\param _img Original RGB image from which the regions were extracted.
+\param _src Vector of sinle channel images CV_8UC1 from which the regions were extracted.
\param regions Vector of ER's retrieved from the ERFilter algorithm from each channel
\param out_groups The output of the algorithm are stored in this parameter as list of indexes to provided regions.
\param out_boxes The output of the algorithm are stored in this parameter as list of rectangles.

@@ -62,7 +62,7 @@ There are three main components: the TrackerSampler, the TrackerFeatureSet and t
first component is the object that computes the patches over the frame based on the last target
location. The TrackerFeatureSet is the class that manages the Features, is possible plug many kind
of these (HAAR, HOG, LBP, Feature2D, etc). The last component is the internal representation of the
-target, it is the appearence model. It stores all state candidates and compute the trajectory (the
+target, it is the appearance model. It stores all state candidates and compute the trajectory (the
most likely target states). The class TrackerTargetState represents a possible state of the target.
The TrackerSampler and the TrackerFeatureSet are the visual representation of the target, instead
the TrackerModel is the statistical model.

@@ -143,7 +143,7 @@ int main(int argc, char *argv[])
Ptr<Tracker> tracker = TrackerGOTURN::create();
//Load and init full ALOV300++ dataset with a given datasetID, as alternative you can use loadAnnotatedOnly(..)
-//to load only frames with labled ground truth ~ every 5-th frame
+//to load only frames with labelled ground truth ~ every 5-th frame
Ptr<cv::datasets::TRACK_alov> dataset = TRACK_alov::create();
dataset->load(datasetRootPath);
dataset->initDataset(datasetID);

@@ -6,12 +6,12 @@
* example_tracking_multitracker Bolt/img/%04d.jpg
* example_tracking_multitracker faceocc2.webm KCF
*
-* Note: after the OpenCV libary is installed,
+* Note: after the OpenCV library is installed,
* please re-compile this code with "HAVE_OPENCV" parameter activated
* to enable the high precission of fps computation
*--------------------------------------------------*/
-/* after the OpenCV libary is installed
+/* after the OpenCV library is installed
* please uncomment the the line below and re-compile this code
* to enable high precission of fps computation
*/
@@ -46,7 +46,7 @@ int main( int argc, char** argv ){
" example_tracking_multitracker Bolt/img/%04d.jpg\n"
" example_tracking_multitracker faceocc2.webm MEDIANFLOW\n"
" \n"
-" Note: after the OpenCV libary is installed,\n"
+" Note: after the OpenCV library is installed,\n"
" please re-compile with the HAVE_OPENCV parameter activated\n"
" to enable the high precission of fps computation.\n"
<< endl;

@@ -36,7 +36,7 @@ Mat gaussian_shaped_labels(const float sigma, const int w, const int h)
y.at<float>(i,j) = (float)exp((-0.5 / pow(sigma, 2)) * (pow((i+1-h2), 2) + pow((j+1-w2), 2)));
}
}
-// wrap-arround with the circulat shifting
+// wrap-around with the circulat shifting
y = circshift(y, -cvFloor(y.cols / 2), -cvFloor(y.rows / 2));
Mat yf;
dft(y, yf, DFT_COMPLEX_OUTPUT);

@@ -61,7 +61,7 @@ If you need a more detailed information to use @ref cv::Tracker, please refer to
-# **Defining the feature**
-In this tutorial, the extracted feature is reponse of the Sobel filter in x and y direction.
+In this tutorial, the extracted feature is response of the Sobel filter in x and y direction.
Those Sobel filter responses are concatenated, resulting a feature with 2 channels.
@snippet tracking/samples/tutorial_customizing_cn_tracker.cpp sobel

@@ -948,7 +948,7 @@ public:
/** @brief Estimates cornerness for prespecified KeyPoints using the FAST algorithm
@param image grayscale image where keypoints (corners) are detected.
-@param keypoints keypoints which should be tested to fit the FAST criteria. Keypoints not beeing
+@param keypoints keypoints which should be tested to fit the FAST criteria. Keypoints not being
detected as corners are removed.
@param threshold threshold on difference between intensity of the central pixel and pixels of a
circle around this pixel.

@@ -138,7 +138,7 @@ protected:
// patch size
int m_patch_size;
-// orient quantitiy
+// orient quantity
int m_orient_q;
// patch scale factor

@@ -7,7 +7,7 @@
*
* There are still serveral lacks for this experimental implementation:
* 1.The interpolation of sub-pixel mentioned in article was not implemented yet;
-* 2.A comparision with original libSurf.so shows that the hessian detector is not a 100% match to their implementation;
+* 2.A comparison with original libSurf.so shows that the hessian detector is not a 100% match to their implementation;
* 3.Due to above reasons, I recommanded the original one for study and reuse;
*
* However, the speed of this implementation is something comparable to original one.

@@ -5,7 +5,7 @@
/*
Ridge Detection Filter.
OpenCV port by : Kushal Vyas (@kushalvyas), Venkatesh Vijaykumar(@venkateshvijaykumar)
-Adapted from Niki Estner's explaination of RidgeFilter.
+Adapted from Niki Estner's explanation of RidgeFilter.
*/
#ifndef __OPENCV_XIMGPROC_RIDGEFILTER_HPP__

@@ -73,9 +73,9 @@ namespace cv {
*/
class CV_EXPORTS_W SelectiveSearchSegmentationStrategy : public Algorithm {
public:
-/** @brief Set a initial image, with a segementation.
+/** @brief Set a initial image, with a segmentation.
@param img The input image. Any number of channel can be provided
-@param regions A segementation of the image. The parameter must be the same size of img.
+@param regions A segmentation of the image. The parameter must be the same size of img.
@param sizes The sizes of different regions
@param image_id If not set to -1, try to cache pre-computations. If the same set og (img, regions, size) is used, the image_id need to be the same.
*/

@@ -72,7 +72,7 @@ to efficiently generate compact, nearly uniform superpixels. The simplicity of a
extremely easy to use a lone parameter specifies the number of superpixels and the efficiency of
the algorithm makes it very practical.
Several optimizations are available for SLIC class:
-SLICO stands for "Zero parameter SLIC" and it is an optimization of baseline SLIC descibed in @cite Achanta2012.
+SLICO stands for "Zero parameter SLIC" and it is an optimization of baseline SLIC described in @cite Achanta2012.
MSLIC stands for "Manifold SLIC" and it is an optimization of baseline SLIC described in @cite Liu_2017_IEEE.
*/

@@ -219,7 +219,7 @@ namespace cv {
// Sort edges
std::sort(edges, edges + nb_edges);
-// Create a set with all point (by default mapped to themselfs)
+// Create a set with all point (by default mapped to themselves)
*es = new PointSet(img_filtered.cols * img_filtered.rows);
// Thresholds

@@ -1104,7 +1104,7 @@ float SuperpixelSEEDSImpl::intersectConf(int level1, int label1A, int label1B,
* - intersection B =
* intersection of (level1, label1B) - (level2, label2) and (level2, label2)
* where (level1, label1B) - (level2, label2)
-* is the substraction of 2 histograms (-> delete_block method)
+* is the subtraction of 2 histograms (-> delete_block method)
* - returns the difference between the 2 intersections: intA - intB
*/

@@ -49,7 +49,7 @@ namespace cv
namespace xphoto
{
-// Following class contains interface of the tranform domain functions.
+// Following class contains interface of the transform domain functions.
template <typename T, typename TT>
class Transform
{
