Merge pull request #10846 from luzpaz:misc-modules-typos-cont

pull/10859/head
Alexander Alekhin 7 years ago
commit 252e871a8b
35 changed files (changed-line counts in parentheses):
  1. doc/tutorials/imgproc/imgtrans/distance_transformation/distance_transform.markdown (6)
  2. modules/calib3d/src/calibinit.cpp (4)
  3. modules/calib3d/src/circlesgrid.cpp (2)
  4. modules/calib3d/src/five-point.cpp (2)
  5. modules/calib3d/src/opencl/stereobm.cl (2)
  6. modules/calib3d/src/p3p.cpp (2)
  7. modules/calib3d/src/polynom_solver.cpp (2)
  8. modules/calib3d/src/rho.cpp (8)
  9. modules/calib3d/test/test_cameracalibration_tilt.cpp (8)
  10. modules/calib3d/test/test_posit.cpp (2)
  11. modules/calib3d/test/test_stereomatching.cpp (2)
  12. modules/core/include/opencv2/core/cuda.hpp (2)
  13. modules/core/include/opencv2/core/cuda/block.hpp (4)
  14. modules/core/include/opencv2/core/types_c.h (2)
  15. modules/core/misc/java/gen_dict.json (4)
  16. modules/flann/include/opencv2/flann/dist.h (2)
  17. modules/flann/include/opencv2/flann/kmeans_index.h (2)
  18. modules/js/test/test_imgproc.js (2)
  19. modules/ml/doc/ml_intro.markdown (6)
  20. modules/ml/include/opencv2/ml.hpp (10)
  21. modules/ml/src/gbt.cpp (2)
  22. modules/ml/src/svm.cpp (4)
  23. modules/ml/test/test_emknearestkmeans.cpp (2)
  24. modules/ml/test/test_lr.cpp (6)
  25. modules/objdetect/include/opencv2/objdetect.hpp (16)
  26. modules/objdetect/src/hog.cpp (6)
  27. modules/photo/include/opencv2/photo.hpp (4)
  28. modules/shape/include/opencv2/shape/shape_transformer.hpp (2)
  29. modules/shape/src/emdL1.cpp (16)
  30. modules/shape/src/emdL1_def.hpp (6)
  31. modules/shape/src/tps_trans.cpp (2)
  32. modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp (2)
  33. modules/stitching/src/matchers.cpp (2)
  34. modules/stitching/src/motion_estimators.cpp (2)
  35. modules/superres/include/opencv2/superres.hpp (2)

@@ -27,16 +27,16 @@ Explanation / Result
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp load_image
![](images/source.jpeg)
-# Then if we have an image with a white background, it is good to transform it to black. This will help us to descriminate the foreground objects easier when we will apply the Distance Transform:
-# Then if we have an image with a white background, it is good to transform it to black. This will help us to discriminate the foreground objects more easily when we apply the Distance Transform:
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp black_bg
![](images/black_bg.jpeg)
-# Afterwards we will sharp our image in order to acute the edges of the foreground objects. We will apply a laplacian filter with a quite strong filter (an approximation of second derivative):
-# Afterwards we will sharpen our image in order to accentuate the edges of the foreground objects. We will apply a Laplacian filter with a quite strong kernel (an approximation of the second derivative):
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp sharp
![](images/laplace.jpeg)
![](images/sharp.jpeg)
-# Now we transfrom our new sharped source image to a grayscale and a binary one, respectively:
-# Now we transform our new sharpened source image to a grayscale and a binary one, respectively:
@snippet samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp bin
![](images/bin.jpeg)
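A hedged sketch of the sharpening and binarization steps described above, assuming a CV_8UC3 `src` (the kernel follows the tutorial's second-derivative approximation; variable names are illustrative):
@code{.cpp}
// Laplacian-based sharpening, then grayscale and binary conversion.
cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
        1,  1, 1,
        1, -8, 1,
        1,  1, 1);
cv::Mat lap, src32;
cv::filter2D(src, lap, CV_32F, kernel);
src.convertTo(src32, CV_32F);
cv::Mat imgResult = src32 - lap;          // sharpened image
imgResult.convertTo(imgResult, CV_8UC3);  // back to 8-bit
cv::Mat gray, bin;
cv::cvtColor(imgResult, gray, cv::COLOR_BGR2GRAY);
cv::threshold(gray, bin, 40, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
@endcode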

@@ -1391,7 +1391,7 @@ icvCheckQuadGroup( CvCBQuad **quad_group, int quad_count,
}
}
// start with a corner that belongs to a quad with a signle neighbor.
// start with a corner that belongs to a quad with a single neighbor.
// if we do not have such, start with a corner of a quad with two neighbors.
if( !first )
first = first2;
@@ -2173,7 +2173,7 @@ bool cv::findCirclesGrid2( InputArray _image, Size patternSize,
boxFinder.getAsymmetricHoles(centers);
break;
default:
CV_Error(CV_StsBadArg, "Unkown pattern type");
CV_Error(CV_StsBadArg, "Unknown pattern type");
}
if (i != 0)

@@ -619,7 +619,7 @@ bool CirclesGridFinder::findHoles()
}
default:
CV_Error(Error::StsBadArg, "Unkown pattern type");
CV_Error(Error::StsBadArg, "Unknown pattern type");
}
return (isDetectionCorrect());
//CV_Error( 0, "Detection is not correct" );

@@ -506,7 +506,7 @@ int cv::recoverPose( InputArray E, InputArray _points1, InputArray _points2,
// Do the cheirality check.
// Notice here a threshold dist is used to filter
// out far away points (i.e. infinite points) since
// there depth may vary between postive and negtive.
// their depth may vary between positive and negative.
std::vector<Mat> allTriangulations(4);
Mat Q;
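A hedged usage sketch of the recoverPose overload this comment belongs to (E, points1, points2 and the camera matrix K are assumed to exist; the threshold value is illustrative):
@code{.cpp}
// Triangulated points beyond distanceThresh are treated as near-infinite
// and excluded from the cheirality vote, since their depth sign is unreliable.
cv::Mat R, t, mask, triangulated;
int nInliers = cv::recoverPose(E, points1, points2, K, R, t,
                               50.0 /* distanceThresh */,
                               mask, triangulated);
@endcode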

@@ -141,7 +141,7 @@ __kernel void stereoBM(__global const uchar * leftptr,
__global const uchar * rightptr,
__global uchar * dispptr, int disp_step, int disp_offset,
int rows, int cols, // rows, cols of left and right images, not disp
int textureTreshold, int uniquenessRatio)
int textureThreshold, int uniquenessRatio)
{
int lz = get_local_id(0);
int gx = get_global_id(1) * BLOCK_SIZE_X;

@@ -192,7 +192,7 @@ int p3p::solve(double R[4][3][3], double t[4][3],
}
/// Given 3D distances between three points and cosines of 3 angles at the apex, calculates
/// the lentghs of the line segments connecting projection center (P) and the three 3D points (A, B, C).
/// the lengths of the line segments connecting projection center (P) and the three 3D points (A, B, C).
/// Returned distances are for |PA|, |PB|, |PC| respectively.
/// Only the solution to the main branch.
/// Reference : X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
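The underlying constraints are the three law-of-cosines relations between the unknown lengths and the known inter-point distances and apex cosines, e.g. for the pair (B, C):
\f[ |BC|^2 = |PB|^2 + |PC|^2 - 2\,|PB|\,|PC|\,\cos\angle BPC \f]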

@@ -32,7 +32,7 @@ int solve_deg3(double a, double b, double c, double d,
double & x0, double & x1, double & x2)
{
if (a == 0) {
// Solve second order sytem
// Solve second order system
if (b == 0) {
// Solve first order system
if (c == 0)
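For reference, once a == 0 the second-order fallback reduces to the standard quadratic formula in the remaining coefficients:
\f[ b\,x^2 + c\,x + d = 0 \quad\Rightarrow\quad x_{0,1} = \frac{-c \pm \sqrt{c^2 - 4bd}}{2b} \f]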

@@ -443,7 +443,7 @@ static inline void sacSub8x1 (float* Hout,
/**
* External access to context constructor.
*
* @return A pointer to the context if successful; NULL if an error occured.
* @return A pointer to the context if successful; NULL if an error occurred.
*/
Ptr<RHO_HEST> rhoInit(void){
@@ -1205,7 +1205,7 @@ inline void RHO_HEST_REFC::PROSACGoToNextPhase(void){
/**
* Get a sample according to PROSAC rules. Namely:
* - If we're past the phase end interation, select randomly 4 out of the first
* - If we're past the phase end iteration, select randomly 4 out of the first
* phNum matches.
* - Otherwise, select match phNum-1 and select randomly the 3 others out of
* the first phNum-1 matches.
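A hedged sketch of that sampling rule (names are illustrative, not the rho.cpp internals; duplicate-index rejection is omitted):
@code{.cpp}
// PROSAC-style selection of a 4-point sample at iteration i.
void prosacSample(unsigned i, unsigned phNum, unsigned phEndI,
                  unsigned smpl[4], cv::RNG& rng)
{
    if (i > phEndI) {
        // Past the phase-end iteration: all 4 from the first phNum matches.
        for (int k = 0; k < 4; k++)
            smpl[k] = rng.uniform(0, (int)phNum);
    } else {
        // Otherwise: match phNum-1, plus 3 from the first phNum-1 matches.
        smpl[0] = phNum - 1;
        for (int k = 1; k < 4; k++)
            smpl[k] = rng.uniform(0, (int)phNum - 1);
    }
}
@endcode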
@@ -1742,7 +1742,7 @@ inline void RHO_HEST_REFC::updateBounds(void){
}
/**
* Ouput the best model so far to the output argument.
* Output the best model so far to the output argument.
*
* Reads (direct): arg.finalH, best.H, arg.inl, best.inl, arg.N
* Reads (callees): arg.finalH, arg.inl, arg.N
@@ -1762,7 +1762,7 @@ inline void RHO_HEST_REFC::outputModel(void){
}
/**
* Ouput a zeroed H to the output argument.
* Output a zeroed H to the output argument.
*
* Reads (direct): arg.finalH, arg.inl, arg.N
* Reads (callees): None.

@@ -80,7 +80,7 @@ protected:
static const double m_pointTargetDist;
static const int m_pointTargetNum;
/** image distance coresponding to working distance */
/** image distance corresponding to working distance */
double m_imageDistance;
/** image tilt angle corresponding to the tilt of the object plane */
double m_imageTiltDegree;
@@ -202,7 +202,7 @@ void cameraCalibrationTiltTest::SetUp()
double nearFarFactorImage[2] = {
aperture/(aperture - circleConfusion),
aperture/(aperture + circleConfusion)};
// on the object side - points that determin the field of
// on the object side - points that determine the field of
// view
std::vector<cv::Vec3d> fovBottomTop(6);
std::vector<cv::Vec3d>::iterator itFov = fovBottomTop.begin();
@@ -552,7 +552,7 @@ void showVec(const std::string& name, const INPUT& in, const cv::Mat& est)
For given camera matrix and distortion coefficients
- project point target in different positions onto the sensor
- add pixel noise
- estimate camera modell with noisy measurements
- estimate camera model with noisy measurements
- compare result with initial model parameter
Parameters are affected differently by the noise
@@ -623,7 +623,7 @@ TEST_F(cameraCalibrationTiltTest, calibrateCamera)
cv::TermCriteria::COUNT+cv::TermCriteria::EPS,
50000,
1e-14);
// modell coice
// model choice
int flag =
cv::CALIB_FIX_ASPECT_RATIO |
// cv::CALIB_RATIONAL_MODEL |

@@ -94,7 +94,7 @@ void CV_POSITTest::run( int start_from )
const float flFocalLength = 760.f;
const float flEpsilon = 0.5f;
/* Initilization */
/* Initialization */
criteria.type = CV_TERMCRIT_EPS|CV_TERMCRIT_ITER;
criteria.epsilon = flEpsilon;
criteria.max_iter = 10000;

@@ -41,7 +41,7 @@
/*
This is a regression test for stereo matching algorithms. This test gets some quality metrics
discribed in "A Taxonomy and Evaluation of Dense Two-Frame Stereo Correspondence Algorithms".
described in "A Taxonomy and Evaluation of Dense Two-Frame Stereo Correspondence Algorithms".
Daniel Scharstein, Richard Szeliski
*/

@@ -382,7 +382,7 @@ the DefaultAllocator since the stack for pool1 is full.
@endcode
If a third stream is declared in the above example, allocating with #getBuffer
within that stream will also be carried out by the DefaultAllocator becuase we've run out of
within that stream will also be carried out by the DefaultAllocator because we've run out of
stacks.
@code

@@ -106,7 +106,7 @@ namespace cv { namespace cuda { namespace device
}
template<typename InIt, typename OutIt, class UnOp>
static __device__ __forceinline__ void transfrom(InIt beg, InIt end, OutIt out, UnOp op)
static __device__ __forceinline__ void transform(InIt beg, InIt end, OutIt out, UnOp op)
{
int STRIDE = stride();
InIt t = beg + flattenedThreadId();
@@ -117,7 +117,7 @@ namespace cv { namespace cuda { namespace device
}
template<typename InIt1, typename InIt2, typename OutIt, class BinOp>
static __device__ __forceinline__ void transfrom(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
static __device__ __forceinline__ void transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
{
int STRIDE = stride();
InIt1 t1 = beg1 + flattenedThreadId();

@@ -1361,7 +1361,7 @@ CvGraph;
/** @} */
/*********************************** Chain/Countour *************************************/
/*********************************** Chain/Contour *************************************/
typedef struct CvChain
{

@@ -108,13 +108,13 @@
" return result;",
"\n",
" } catch(const cv::Exception& e) {",
" LOGD(\"Core::n_1minMaxLoc() catched cv::Exception: %s\", e.what());",
" LOGD(\"Core::n_1minMaxLoc() caught cv::Exception: %s\", e.what());",
" jclass je = env->FindClass(\"org/opencv/core/CvException\");",
" if(!je) je = env->FindClass(\"java/lang/Exception\");",
" env->ThrowNew(je, e.what());",
" return NULL;",
" } catch (...) {",
" LOGD(\"Core::n_1minMaxLoc() catched unknown exception (...)\");",
" LOGD(\"Core::n_1minMaxLoc() caught unknown exception (...)\");",
" jclass je = env->FindClass(\"java/lang/Exception\");",
" env->ThrowNew(je, \"Unknown exception in JNI code {core::minMaxLoc()}\");",
" return NULL;",

@@ -843,7 +843,7 @@ typename Distance::ResultType ensureSquareDistance( typename Distance::ResultTyp
/*
* ...and a template to ensure the user that he will process the normal distance,
* and not squared distance, without loosing processing time calling sqrt(ensureSquareDistance)
* and not squared distance, without losing processing time calling sqrt(ensureSquareDistance)
* that will result in doing actually sqrt(dist*dist) for L1 distance for instance.
*/
template <typename Distance, typename ElementType>

@@ -1053,7 +1053,7 @@ private:
/**
* Helper function the descends in the hierarchical k-means tree by spliting those clusters that minimize
* Helper function that descends in the hierarchical k-means tree by splitting those clusters that minimize
* the overall variance of the clustering.
* Params:
* root = root node

@@ -555,7 +555,7 @@ QUnit.test('test_filter', function(assert) {
cv._free(dataPtr2);
}
// Arithmatic operations
// Arithmetic operations
{
let data1 = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7, 8]);
let data2 = new Uint8Array([0, 2, 4, 6, 8, 10, 12, 14, 16]);

@@ -441,8 +441,8 @@ Batch Gradient Descent and Mini-Batch Gradient Descent algorithms are used (see
discriminative classifier (see <http://www.cs.cmu.edu/~tom/NewChapters.html> for more details).
Logistic Regression is implemented as a C++ class in LogisticRegression.
In Logistic Regression, we try to optimize the training paramater \f$\theta\f$ such that the
hypothesis \f$0 \leq h_\theta(x) \leq 1\f$ is acheived. We have \f$h_\theta(x) = g(h_\theta(x))\f$
In Logistic Regression, we try to optimize the training parameter \f$\theta\f$ such that the
hypothesis \f$0 \leq h_\theta(x) \leq 1\f$ is achieved. We have \f$h_\theta(x) = g(h_\theta(x))\f$
and \f$g(z) = \frac{1}{1+e^{-z}}\f$ as the logistic or sigmoid function. The term "Logistic" in
Logistic Regression refers to this function. For given data of a binary classification problem of
classes 0 and 1, one can determine that the given data instance belongs to class 1 if \f$h_\theta(x)
@@ -472,7 +472,7 @@ training error and ensuring high training accuracy:
cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or @ref
cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH". If training method is
set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini batch has
to be to a postive integer set with @ref cv::ml::LogisticRegression::setMiniBatchSize
to be a positive integer set with @ref cv::ml::LogisticRegression::setMiniBatchSize
"setMiniBatchSize".
A sample set of training parameters for the Logistic Regression classifier can be initialized as follows:
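That snippet is not part of this diff; a hedged sketch of such an initialization, using only documented LogisticRegression setters, could look like:
@code{.cpp}
// Illustrative only, not the snippet referenced by ml_intro.markdown.
cv::Ptr<cv::ml::LogisticRegression> lr = cv::ml::LogisticRegression::create();
lr->setLearningRate(0.001);
lr->setIterations(10);
lr->setRegularization(cv::ml::LogisticRegression::REG_L2);
lr->setTrainMethod(cv::ml::LogisticRegression::MINI_BATCH);
lr->setMiniBatchSize(1); // must be a positive integer for MINI_BATCH
@endcode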

@@ -1269,7 +1269,7 @@ public:
results for each of the sample cases. If the model is a classifier, it will return
a Mat with samples + 1 rows, where the first row gives the class number and the
following rows return the votes each class had for each sample.
@param samples Array containg the samples for which votes will be calculated.
@param samples Array containing the samples for which votes will be calculated.
@param results Array where the result of the calculation will be written.
@param flags Flags for defining the type of RTrees.
*/
@@ -1658,9 +1658,9 @@ public:
*/
CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
/** @brief This function returns the trained paramters arranged across rows.
/** @brief This function returns the trained parameters arranged across rows.
For a two class classifcation problem, it returns a row matrix. It returns learnt paramters of
For a two-class classification problem, it returns a row matrix. It returns the learnt parameters of
the Logistic Regression as a matrix of type CV_32F.
*/
CV_WRAP virtual Mat get_learnt_thetas() const = 0;
@@ -1854,7 +1854,7 @@ public:
/****************************************************************************************\
* Auxilary functions declarations *
* Auxiliary functions declarations *
\****************************************************************************************/
/** @brief Generates _sample_ from multivariate normal distribution
@@ -1921,7 +1921,7 @@ struct SimulatedAnnealingSolverSystem
{
/** Give energy value for a state of system.*/
double energy() const;
/** Function which change the state of system (random pertubation).*/
/** Function which changes the state of the system (random perturbation).*/
void changeState();
/** Function to reverse to the previous state. Can be called once only after changeState(). */
void reverseState();
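A hedged sketch of a system satisfying this interface (a toy 1-D state; everything here is illustrative):
@code{.cpp}
// Minimizes x^2; changeState() perturbs randomly, reverseState() undoes it.
struct ToySystem
{
    double x = 0.0, prev = 0.0;
    cv::RNG rng;
    double energy() const { return x * x; }
    void changeState() { prev = x; x += rng.uniform(-1.0, 1.0); }
    void reverseState() { x = prev; }
};
@endcode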

@@ -253,7 +253,7 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
}
}
// inside gbt learning proccess only regression decision trees are built
// inside gbt learning process only regression decision trees are built
data->is_classifier = false;
// preprocessing sample indices

@@ -52,7 +52,7 @@
The code has been derived from libsvm library (version 2.6)
(http://www.csie.ntu.edu.tw/~cjlin/libsvm).
Here is the orignal copyright:
Here is the original copyright:
------------------------------------------------------------------------------------------
Copyright (c) 2000-2003 Chih-Chung Chang and Chih-Jen Lin
All rights reserved.
@@ -287,7 +287,7 @@ public:
double d = sample[k]-another[k];
double devisor = sample[k]+another[k];
/// if devisor == 0, the Chi2 distance would be zero,
// but calculation would rise an error because of deviding by zero
// but calculation would raise an error because of dividing by zero
if (devisor != 0)
{
chi2 += d*d/devisor;

@@ -590,7 +590,7 @@ protected:
if( errCaseCount > 0 )
{
ts->printf( cvtest::TS::LOG, "Different prediction results before writeing and after reading (errCaseCount=%d).\n", errCaseCount );
ts->printf( cvtest::TS::LOG, "Different prediction results before writing and after reading (errCaseCount=%d).\n", errCaseCount );
code = cvtest::TS::FAIL_BAD_ACCURACY;
}

@@ -91,7 +91,7 @@ protected:
void CV_LRTest::run( int /*start_from*/ )
{
CV_TRACE_FUNCTION();
// initialize varibles from the popular Iris Dataset
// initialize variables from the popular Iris Dataset
string dataFileName = ts->get_data_path() + "iris.data";
Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
@@ -153,7 +153,7 @@ void CV_LRTest_SaveLoad::run( int /*start_from*/ )
CV_TRACE_FUNCTION();
int code = cvtest::TS::OK;
// initialize varibles from the popular Iris Dataset
// initialize variables from the popular Iris Dataset
string dataFileName = ts->get_data_path() + "iris.data";
Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
@@ -205,7 +205,7 @@ void CV_LRTest_SaveLoad::run( int /*start_from*/ )
comp_learnt_mats = comp_learnt_mats/255;
// compare difference in prediction outputs and stored inputs
// check if there is any difference between computed learnt mat and retreived mat
// check if there is any difference between computed learnt mat and retrieved mat
float errorCount = 0.0;
errorCount += 1 - (float)countNonZero(responses1 == responses2)/responses1.rows;

@@ -346,7 +346,7 @@ struct DetectionROI
{
//! scale(size) of the bounding box
double scale;
//! set of requrested locations to be evaluated
//! set of requested locations to be evaluated
std::vector<cv::Point> locations;
//! vector that will contain confidence values for each location
std::vector<double> confidences;
@@ -497,11 +497,11 @@ public:
@param foundLocations Vector of point where each point contains left-top corner point of detected object boundaries.
@param weights Vector that will contain confidence values for each detected object.
@param hitThreshold Threshold for the distance between features and SVM classifying plane.
Usually it is 0 and should be specfied in the detector coefficients (as the last free coefficient).
Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
But if the free coefficient is omitted (which is allowed), you can specify it manually here.
@param winStride Window stride. It must be a multiple of block stride.
@param padding Padding
@param searchLocations Vector of Point includes set of requrested locations to be evaluated.
@param searchLocations Vector of Point includes set of requested locations to be evaluated.
*/
CV_WRAP virtual void detect(const Mat& img, CV_OUT std::vector<Point>& foundLocations,
CV_OUT std::vector<double>& weights,
@@ -513,7 +513,7 @@ public:
@param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
@param foundLocations Vector of point where each point contains left-top corner point of detected object boundaries.
@param hitThreshold Threshold for the distance between features and SVM classifying plane.
Usually it is 0 and should be specfied in the detector coefficients (as the last free coefficient).
Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
But if the free coefficient is omitted (which is allowed), you can specify it manually here.
@param winStride Window stride. It must be a multiple of block stride.
@param padding Padding
@@ -530,7 +530,7 @@ public:
@param foundLocations Vector of rectangles where each rectangle contains the detected object.
@param foundWeights Vector that will contain confidence values for each detected object.
@param hitThreshold Threshold for the distance between features and SVM classifying plane.
Usually it is 0 and should be specfied in the detector coefficients (as the last free coefficient).
Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
But if the free coefficient is omitted (which is allowed), you can specify it manually here.
@param winStride Window stride. It must be a multiple of block stride.
@param padding Padding
@@ -548,7 +548,7 @@ public:
@param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
@param foundLocations Vector of rectangles where each rectangle contains the detected object.
@param hitThreshold Threshold for the distance between features and SVM classifying plane.
Usually it is 0 and should be specfied in the detector coefficients (as the last free coefficient).
Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
But if the free coefficient is omitted (which is allowed), you can specify it manually here.
@param winStride Window stride. It must be a multiple of block stride.
@param padding Padding
@@ -632,7 +632,7 @@ public:
@param foundLocations Vector of Point where each Point is detected object's top-left point.
@param confidences confidences
@param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually
it is 0 and should be specfied in the detector coefficients (as the last free coefficient). But if
it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if
the free coefficient is omitted (which is allowed), you can specify it manually here
@param winStride winStride
@param padding padding
@@ -646,7 +646,7 @@ public:
@param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
@param foundLocations Vector of rectangles where each rectangle contains the detected object.
@param locations Vector of DetectionROI
@param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually it is 0 and should be specfied
@param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually it is 0 and should be specified
in the detector coefficients (as the last free coefficient). But if the free coefficient is omitted (which is allowed), you can specify it manually here.
@param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.
*/
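A hedged usage sketch for the detect* family documented in these hunks, using the built-in people detector (parameter values are illustrative):
@code{.cpp}
cv::HOGDescriptor hog;
hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
std::vector<cv::Rect> found;
std::vector<double> weights;
// hitThreshold stays 0: the free coefficient is already in the detector.
hog.detectMultiScale(img, found, weights,
                     0 /* hitThreshold */,
                     cv::Size(8, 8) /* winStride */,
                     cv::Size() /* padding */);
@endcode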

@@ -3608,7 +3608,7 @@ void HOGDescriptor::detectROI(const cv::Mat& img, const std::vector<cv::Point> &
const HOGCache::BlockData& bj = blockData[j];
Point pt = pt0 + bj.imgOffset;
// need to devide this into 4 parts!
// need to divide this into 4 parts!
const float* vec = cache.getBlock(pt, &blockHist[0]);
#if CV_SSE2
__m128 _vec = _mm_loadu_ps(vec);
@@ -3699,7 +3699,7 @@ void HOGDescriptor::readALTModel(String modelfile)
CV_THROW (Exception(Error::StsError, eerr, efile, efunc, __LINE__));
}
if(strcmp(version_buffer,"V6.01")) {
String eerr("version doesnot match");
String eerr("version does not match");
String efile(__FILE__);
String efunc(__FUNCTION__);
fclose(modelfl);
@@ -3715,7 +3715,7 @@ void HOGDescriptor::readALTModel(String modelfile)
}
if (version < 200)
{
String eerr("version doesnot match");
String eerr("version does not match");
String efile(__FILE__);
String efunc(__FUNCTION__);
fclose(modelfl);

@@ -189,7 +189,7 @@ CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,
float h = 3, float hColor = 3,
int templateWindowSize = 7, int searchWindowSize = 21);
/** @brief Modification of fastNlMeansDenoising function for images sequence where consequtive images have been
/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been
captured in small period of time. For example video. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>
@@ -216,7 +216,7 @@ CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputA
int imgToDenoiseIndex, int temporalWindowSize,
float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);
/** @brief Modification of fastNlMeansDenoising function for images sequence where consequtive images have been
/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been
captured in small period of time. For example video. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>
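A hedged usage sketch matching the declaration above (frame counts and window sizes are illustrative):
@code{.cpp}
// Denoise the frame at imgToDenoiseIndex using a temporal window of
// 3 consecutive frames centered on it.
std::vector<cv::Mat> frames;  // assumed: >= 4 consecutive CV_8UC1 frames
cv::Mat denoised;
cv::fastNlMeansDenoisingMulti(frames, denoised,
                              2 /* imgToDenoiseIndex */,
                              3 /* temporalWindowSize (odd) */,
                              3 /* h */, 7, 21);
@endcode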

@@ -92,7 +92,7 @@ public:
/** @brief Definition of the transformation
ocupied in the paper "Principal Warps: Thin-Plate Splines and Decomposition of Deformations", by
occupied in the paper "Principal Warps: Thin-Plate Splines and Decomposition of Deformations", by
F.L. Bookstein (PAMI 1989). :
*/
class CV_EXPORTS_W ThinPlateSplineShapeTransformer : public ShapeTransformer

@@ -294,8 +294,8 @@ bool EmdL1::greedySolution2()
// move to up
pBV = &(m_EdgesUp[r][c]);
m_NBVEdges[nNBV++] = &(m_EdgesRight[r][c]);
D[r+1][c] += dFlow; // auxilary matrix maintanence
d1s[r+1] += dFlow; // auxilary matrix maintanence
D[r+1][c] += dFlow; // auxiliary matrix maintenance
d1s[r+1] += dFlow; // auxiliary matrix maintenance
}
else
{
@@ -304,8 +304,8 @@ bool EmdL1::greedySolution2()
if(r<binsDim1-1)
m_NBVEdges[nNBV++] = &(m_EdgesUp[r][c]);
D[r][c+1] += dFlow; // auxilary matrix maintanence
d2s[c+1] += dFlow; // auxilary matrix maintanence
D[r][c+1] += dFlow; // auxiliary matrix maintenance
d2s[c+1] += dFlow; // auxiliary matrix maintenance
}
pBV->pParent->pChild = pBV;
pBV->flow = fabs(dFlow);
@@ -318,7 +318,7 @@ bool EmdL1::greedySolution2()
{
dFlow = D[r][c];
pBV = &(m_EdgesUp[r][c]);
D[r+1][c] += dFlow; // auxilary matrix maintanence
D[r+1][c] += dFlow; // auxiliary matrix maintenance
pBV->pParent->pChild= pBV;
pBV->flow = fabs(dFlow);
pBV->iDir = dFlow>0; // 1:outward, 0:inward
@@ -402,7 +402,7 @@ bool EmdL1::greedySolution3()
pBV = &(m_3dEdgesUp[i1][i2][i3]); // up
if(i2<binsDim2-1) m_NBVEdges[nNBV++] = &(m_3dEdgesRight[i1][i2][i3]); // right
if(i3<binsDim3-1) m_NBVEdges[nNBV++] = &(m_3dEdgesDeep[i1][i2][i3]); // deep
D[i1+1][i2][i3] += dFlow; // maintain auxilary matrix
D[i1+1][i2][i3] += dFlow; // maintain auxiliary matrix
d1s[i1+1] += dFlow;
}
else if(f2<f3)
@@ -410,7 +410,7 @@ bool EmdL1::greedySolution3()
pBV = &(m_3dEdgesRight[i1][i2][i3]); // right
if(i1<binsDim1-1) m_NBVEdges[nNBV++] = &(m_3dEdgesUp[i1][i2][i3]); // up
if(i3<binsDim3-1) m_NBVEdges[nNBV++] = &(m_3dEdgesDeep[i1][i2][i3]); // deep
D[i1][i2+1][i3] += dFlow; // maintain auxilary matrix
D[i1][i2+1][i3] += dFlow; // maintain auxiliary matrix
d2s[i2+1] += dFlow;
}
else
@@ -418,7 +418,7 @@ bool EmdL1::greedySolution3()
pBV = &(m_3dEdgesDeep[i1][i2][i3]); // deep
if(i2<binsDim2-1) m_NBVEdges[nNBV++] = &(m_3dEdgesRight[i1][i2][i3]); // right
if(i1<binsDim1-1) m_NBVEdges[nNBV++] = &(m_3dEdgesUp[i1][i2][i3]); // up
D[i1][i2][i3+1] += dFlow; // maintain auxilary matrix
D[i1][i2][i3+1] += dFlow; // maintain auxiliary matrix
d3s[i3+1] += dFlow;
}

@@ -54,7 +54,7 @@ struct cvEMDNode
int pos[3]; // grid position
float d; // initial value
int u;
// tree maintainance
// tree maintenance
int iLevel; // level in the tree, 0 means root
cvPEmdNode pParent; // pointer to its parent
cvPEmdEdge pChild;
@@ -64,7 +64,7 @@ struct cvEMDEdge
{
float flow; // initial value
int iDir; // 1:outward, 0:inward
// tree maintainance
// tree maintenance
cvPEmdNode pParent; // point to its parent
cvPEmdNode pChild; // the child node
cvPEmdEdge pNxt; // next child/edge
@@ -123,7 +123,7 @@ private:
private:
int dimension;
int binsDim1, binsDim2, binsDim3; // the hitogram contains m_n1 rows and m_n2 columns
int binsDim1, binsDim2, binsDim3; // the histogram contains m_n1 rows and m_n2 columns
int nNBV; // number of Non-Basic Variables (NBV)
int nMaxIt;
cvEMDNodeArray2D m_Nodes; // all nodes

@@ -235,7 +235,7 @@ void ThinPlateSplineShapeTransformerImpl::estimateTransformation(InputArray _pts
// Building the matrices for solving the L*(w|a)=(v|0) problem with L={[K|P];[P'|0]}
//Building K and P (Neede to buil L)
//Building K and P (Needed to build L)
Mat matK((int)matches.size(),(int)matches.size(),CV_32F);
Mat matP((int)matches.size(),3,CV_32F);
for (int i=0, end=(int)matches.size(); i<end; i++)
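In display form, the system assembled here is:
\f[ \begin{bmatrix} K & P \\ P^\top & 0 \end{bmatrix} \begin{bmatrix} w \\ a \end{bmatrix} = \begin{bmatrix} v \\ 0 \end{bmatrix} \f]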

@@ -111,7 +111,7 @@ private:
/** @brief Affine transformation based estimator.
This estimator uses pairwise tranformations estimated by matcher to estimate
This estimator uses pairwise transformations estimated by matcher to estimate
final transformation for each camera.
@sa cv::detail::HomographyBasedEstimator

@@ -859,7 +859,7 @@ void AffineBestOf2NearestMatcher::match(const ImageFeatures &features1, const Im
/* should we remove matches between too close images? */
// matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;
// extend H to represent linear tranformation in homogeneous coordinates
// extend H to represent linear transformation in homogeneous coordinates
matches_info.H.push_back(Mat::zeros(1, 3, CV_64F));
matches_info.H.at<double>(2, 2) = 1;
}

@@ -89,7 +89,7 @@ struct CalcRotation
/**
* @brief Functor calculating final tranformation by chaining linear transformations
* @brief Functor calculating final transformation by chaining linear transformations
*/
struct CalcAffineTransform
{

@@ -50,7 +50,7 @@
@defgroup superres Super Resolution
The Super Resolution module contains a set of functions and classes that can be used to solve the
problem of resolution enhancement. There are a few methods implemented, most of them are descibed in
problem of resolution enhancement. There are a few methods implemented, most of them are described in
the papers @cite Farsiu03 and @cite Mitzel09 .
*/
