Merge pull request #25252 from gursimarsingh:cpp_samples_cleanup

Move API focused C++ samples to snippets #25252

Clean Samples #25006
This PR removes 39 outdated C++ samples from the project, as part of an effort to keep the codebase clean and focused on current best practices.
pull/25915/head
Gursimar Singh 10 months ago committed by GitHub
parent 1d9ca7160b
commit 9aa5f3f1db
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 4
      doc/tutorials/others/stitcher.markdown
  2. 3
      modules/3d/include/opencv2/3d.hpp
  3. 50
      modules/core/include/opencv2/core.hpp
  4. 3
      modules/highgui/include/opencv2/highgui.hpp
  5. 28
      modules/imgproc/include/opencv2/imgproc.hpp
  6. 3
      modules/photo/include/opencv2/photo.hpp
  7. 2
      modules/stitching/include/opencv2/stitching.hpp
  8. 8
      modules/video/include/opencv2/video/tracking.hpp
  9. 5
      modules/videoio/include/opencv2/videoio.hpp
  10. 550
      samples/cpp/cloning_gui.cpp
  11. 86
      samples/cpp/dft.cpp
  12. 67
      samples/cpp/ela.cpp
  13. 0
      samples/cpp/floodfill.cpp
  14. 80
      samples/cpp/imagelist_reader.cpp
  15. 82
      samples/cpp/imgcodecs_jpeg.cpp
  16. 91
      samples/cpp/inpaint.cpp
  17. 186
      samples/cpp/matchmethod_orb_akaze_brisk.cpp
  18. 0
      samples/cpp/snippets/camshiftdemo.cpp
  19. 0
      samples/cpp/snippets/cloning_demo.cpp
  20. 0
      samples/cpp/snippets/create_mask.cpp
  21. 0
      samples/cpp/snippets/detect_blob.cpp
  22. 109
      samples/cpp/snippets/dft.cpp
  23. 0
      samples/cpp/snippets/dis_opticalflow.cpp
  24. 0
      samples/cpp/snippets/distrans.cpp
  25. 0
      samples/cpp/snippets/epipolar_lines.cpp
  26. 0
      samples/cpp/snippets/falsecolor.cpp
  27. 0
      samples/cpp/snippets/intersectExample.cpp
  28. 0
      samples/cpp/snippets/kalman.cpp
  29. 0
      samples/cpp/snippets/kmeans.cpp
  30. 0
      samples/cpp/snippets/laplace.cpp
  31. 0
      samples/cpp/snippets/lsd_lines.cpp
  32. 0
      samples/cpp/snippets/mask_tmpl.cpp
  33. 0
      samples/cpp/snippets/phase_corr.cpp
  34. 0
      samples/cpp/snippets/polar_transforms.cpp
  35. 0
      samples/cpp/snippets/segment_objects.cpp
  36. 0
      samples/cpp/snippets/squares.cpp
  37. 0
      samples/cpp/snippets/stitching.cpp
  38. 0
      samples/cpp/snippets/warpPerspective_demo.cpp
  39. 0
      samples/cpp/snippets/watershed.cpp
  40. 74
      samples/cpp/text_skewness_correction.cpp
  41. 2
      samples/cpp/videowriter_basic.cpp

@ -27,14 +27,14 @@ Code
This tutorial code is shown in the lines below. You can also download it from
[here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/stitching.cpp).
@include samples/cpp/stitching.cpp
@include samples/cpp/snippets/stitching.cpp
Explanation
-----------
The most important code part is:
@snippet cpp/stitching.cpp stitching
@snippet cpp/snippets/stitching.cpp stitching
A new instance of stitcher is created and the @ref cv::Stitcher::stitch will
do all the hard work.

@ -1333,6 +1333,9 @@ The function converts 2D or 3D points from/to homogeneous coordinates by calling
*/
CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
/** @example samples/cpp/snippets/epipolar_lines.cpp
An example using the findFundamentalMat function
*/
/** @brief Calculates a fundamental matrix from the corresponding points in two images.
@param points1 Array of N points from the first image. The point coordinates should be

@ -2212,47 +2212,9 @@ current implementation). Such an efficient DFT size can be calculated using the
method.
The sample below illustrates how to calculate a DFT-based convolution of two 2D real arrays:
@code
void convolveDFT(InputArray A, InputArray B, OutputArray C)
{
// reallocate the output array if needed
C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());
Size dftSize;
// calculate the size of DFT transform
dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);
dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);
// allocate temporary buffers and initialize them with 0's
Mat tempA(dftSize, A.type(), Scalar::all(0));
Mat tempB(dftSize, B.type(), Scalar::all(0));
// copy A and B to the top-left corners of tempA and tempB, respectively
Mat roiA(tempA, Rect(0,0,A.cols,A.rows));
A.copyTo(roiA);
Mat roiB(tempB, Rect(0,0,B.cols,B.rows));
B.copyTo(roiB);
// now transform the padded A & B in-place;
// use "nonzeroRows" hint for faster processing
dft(tempA, tempA, 0, A.rows);
dft(tempB, tempB, 0, B.rows);
// multiply the spectrums;
// the function handles packed spectrum representations well
mulSpectrums(tempA, tempB, tempA);
// transform the product back from the frequency domain.
// Even though all the result rows will be non-zero,
// you need only the first C.rows of them, and thus you
// pass nonzeroRows == C.rows
dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);
// now copy the result back to C.
tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);
// all the temporary buffers will be deallocated automatically
}
@endcode
@include samples/cpp/snippets/dft.cpp
An example on DFT-based convolution
To optimize this sample, consider the following approaches:
- Since nonzeroRows != 0 is passed to the forward transform calls and since A and B are copied to
the top-left corners of tempA and tempB, respectively, it is not necessary to clear the whole
@ -3092,7 +3054,7 @@ private:
//! @addtogroup core_cluster
//! @{
/** @example samples/cpp/kmeans.cpp
/** @example samples/cpp/snippets/kmeans.cpp
An example on K-means clustering
*/
@ -3207,6 +3169,10 @@ etc.).
Here is example of SimpleBlobDetector use in your application via Algorithm interface:
@snippet snippets/core_various.cpp Algorithm
@example samples/cpp/snippets/detect_blob.cpp
An example using the BLOB to detect and filter region.
*/
class CV_EXPORTS_W Algorithm
{

@ -447,9 +447,10 @@ The function getWindowImageRect returns the client screen coordinates, width and
*/
CV_EXPORTS_W Rect getWindowImageRect(const String& winname);
/** @example samples/cpp/create_mask.cpp
/** @example samples/cpp/snippets/create_mask.cpp
This program demonstrates using mouse events and how to make and use a mask image (black and white) .
*/
/** @brief Sets mouse handler for the specified window
@param winname Name of the window.

@ -1341,7 +1341,7 @@ protected:
//! @addtogroup imgproc_feature
//! @{
/** @example samples/cpp/lsd_lines.cpp
/** @example samples/cpp/snippets/lsd_lines.cpp
An example using the LineSegmentDetector
\image html building_lsd.png "Sample output image" width=434 height=300
*/
@ -1844,7 +1844,7 @@ CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth,
int dx, int dy, double scale = 1, double delta = 0,
int borderType = BORDER_DEFAULT );
/** @example samples/cpp/laplace.cpp
/** @example samples/cpp/snippets/laplace.cpp
An example using Laplace transformations for edge detection
*/
@ -2468,7 +2468,7 @@ CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst,
int borderMode = BORDER_CONSTANT,
const Scalar& borderValue = Scalar());
/** @example samples/cpp/warpPerspective_demo.cpp
/** @example samples/cpp/snippets/warpPerspective_demo.cpp
An example program shows using cv::getPerspectiveTransform and cv::warpPerspective for image warping
*/
@ -2684,7 +2684,7 @@ source image. The center must be inside the image.
CV_EXPORTS_W void getRectSubPix( InputArray image, Size patchSize,
Point2f center, OutputArray patch, int patchType = -1 );
/** @example samples/cpp/polar_transforms.cpp
/** @example samples/cpp/snippets/polar_transforms.cpp
An example using the cv::linearPolar and cv::logPolar operations
*/
@ -2840,7 +2840,7 @@ the destination image will have the given size therefore the area of the boundin
You can get reverse mapping adding #WARP_INVERSE_MAP to `flags`
\snippet polar_transforms.cpp InverseMap
In addiction, to calculate the original coordinate from a polar mapped coordinate \f$(rho, phi)->(x, y)\f$:
In addition, to calculate the original coordinate from a polar mapped coordinate \f$(rho, phi)->(x, y)\f$:
\snippet polar_transforms.cpp InverseCoordinate
@param src Source image.
@ -2997,6 +2997,9 @@ floating-point.
CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,
double alpha, InputArray mask = noArray() );
/** @example samples/cpp/snippets/phase_corr.cpp
An example using the phaseCorrelate function
*/
/** @brief The function is used to detect translational shifts that occur between two images.
The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
@ -3415,7 +3418,7 @@ CV_EXPORTS_AS(EMD) float wrapperEMD( InputArray signature1, InputArray signature
//! @addtogroup imgproc_segmentation
//! @{
/** @example samples/cpp/watershed.cpp
/** @example samples/cpp/snippets/watershed.cpp
An example using the watershed algorithm
*/
@ -3527,7 +3530,7 @@ CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
//! @addtogroup imgproc_misc
//! @{
/** @example samples/cpp/distrans.cpp
/** @example samples/cpp/snippets/distrans.cpp
An example on using the distance transform
*/
@ -3679,7 +3682,7 @@ CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask,
Scalar loDiff = Scalar(), Scalar upDiff = Scalar(),
int flags = 4 );
/** @example samples/cpp/ffilldemo.cpp
/** @example samples/cpp/floodfill.cpp
An example using the FloodFill technique
*/
@ -3897,6 +3900,9 @@ enum TemplateMatchModes {
/** @example samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp
An example using Template Matching algorithm
*/
/** @example samples/cpp/snippets/mask_tmpl.cpp
An example using Template Matching algorithm with mask
*/
/** @brief Compares a template against overlapped image regions.
@ -4256,7 +4262,7 @@ without self-intersections. Otherwise, the function output is undefined.
*/
CV_EXPORTS_W bool isContourConvex( InputArray contour );
/** @example samples/cpp/intersectExample.cpp
/** @example samples/cpp/snippets/intersectExample.cpp
Examples of how intersectConvexConvex works
*/
@ -4483,7 +4489,7 @@ enum ColormapTypes
COLORMAP_DEEPGREEN = 21 //!< ![deepgreen](pics/colormaps/colorscale_deepgreen.jpg)
};
/** @example samples/cpp/falsecolor.cpp
/** @example samples/cpp/snippets/falsecolor.cpp
An example using applyColorMap function
*/
@ -4733,7 +4739,7 @@ CV_EXPORTS void polylines(InputOutputArray img, const Point* const* pts, const i
int thickness = 1, int lineType = LINE_8, int shift = 0 );
/** @example samples/cpp/segment_objects.cpp
/** @example samples/cpp/snippets/segment_objects.cpp
An example using drawContours to clean up a background segmentation result
*/

@ -723,6 +723,9 @@ enum
/** @example samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp
An example using seamlessClone function
*/
/** @example samples/cpp/snippets/cloning_demo.cpp
An example using illuminationChange, colorChange, seamlessClone, textureFlattening functions
*/
/** @brief Image editing tasks concern either global changes (color/intensity corrections, filters,
deformations) or local changes concerned to a selection. Here we are interested in achieving local
changes, ones that are restricted to a region manually selected (ROI), in a seamless and effortless

@ -109,7 +109,7 @@ namespace cv {
//! @addtogroup stitching
//! @{
/** @example samples/cpp/stitching.cpp
/** @example samples/cpp/snippets/stitching.cpp
A basic example on image stitching
*/

@ -78,7 +78,8 @@ See the OpenCV sample camshiftdemo.c that tracks colored objects.
*/
CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window,
TermCriteria criteria );
/** @example samples/cpp/camshiftdemo.cpp
/** @example samples/cpp/snippets/camshiftdemo.cpp
An example using the mean-shift tracking algorithm
*/
@ -345,7 +346,7 @@ double findTransformECC(InputArray templateImage, InputArray inputImage,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001),
InputArray inputMask = noArray());
/** @example samples/cpp/kalman.cpp
/** @example samples/cpp/snippets/kalman.cpp
An example using the standard Kalman filter
*/
@ -430,6 +431,9 @@ CV_EXPORTS_W Mat readOpticalFlow( const String& path );
*/
CV_EXPORTS_W bool writeOpticalFlow( const String& path, InputArray flow );
/** @example samples/cpp/snippets/dis_opticalflow.cpp
An example using the dense optical flow and DIS optical flow algorithms
*/
/**
Base class for dense optical flow algorithms
*/

@ -707,9 +707,6 @@ namespace internal { class VideoCapturePrivateAccessor; }
The class provides C++ API for capturing video from cameras or for reading video files and image sequences.
Here is how the class can be used:
@include samples/cpp/videocapture_basic.cpp
@note In @ref videoio_c "C API" the black-box structure `CvCapture` is used instead of %VideoCapture.
@note
- (C++) A basic sample on using the %VideoCapture interface can be found at
@ -992,7 +989,7 @@ Check @ref tutorial_video_write "the corresponding tutorial" for more details
*/
/** @example samples/cpp/videowriter_basic.cpp
An example using VideoCapture and VideoWriter class
An example using VideoWriter class
*/
/** @brief Video writer class.

@ -1,550 +0,0 @@
/*
* cloning.cpp
*
* Author:
* Siddharth Kherada <siddharthkherada27[at]gmail[dot]com>
*
* This tutorial demonstrates how to use OpenCV seamless cloning
* module.
*
* 1- Normal Cloning
* 2- Mixed Cloning
* 3- Monochrome Transfer
* 4- Color Change
* 5- Illumination change
* 6- Texture Flattening
* The program takes as input a source and a destination image (for 1-3 methods)
* and outputs the cloned image.
* Step 1:
* -> In the source image, select the region of interest by left click mouse button. A Polygon ROI will be created by left clicking mouse button.
* -> To set the Polygon ROI, click the right mouse button or 'd' key.
* -> To reset the region selected, click the middle mouse button or 'r' key.
* Step 2:
* -> In the destination image, select the point where you want to place the ROI in the image by left clicking mouse button.
* -> To get the cloned result, click the right mouse button or 'c' key.
* -> To quit the program, use 'q' key.
*
* Result: The cloned image will be displayed.
*/
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
using std::cin;
using std::cout;
using std::endl;
using std::string;
using namespace cv;
Mat img0, img1, img2, res, res1, final, final1, blend;
Point point;
int drag = 0;
int destx, desty;
int numpts = 100;
Point* pts = new Point[100];
Point* pts2 = new Point[100];
Point* pts_diff = new Point[100];
int var = 0;
int flag = 0, flag1 = 0, flag4 = 0;
int minx, miny, maxx, maxy, lenx, leny;
int minxd, minyd, maxxd, maxyd, lenxd, lenyd;
int channel, num, kernel_size;
float alpha,beta;
float red, green, blue;
float low_t, high_t;
void source(int, int, int, int, void*);
void destination(int, int, int, int, void*);
void checkfile(char*);
/* Mouse callback for the "Source" window.
 * Left click/drag: add polygon ROI vertices. Right button press: close the
 * polygon and compute its bounding box plus centre-relative vertex offsets.
 * Right button release: rasterize the ROI mask and, for the single-image
 * modes (4 = color change, 5 = illumination change, 6 = texture flattening),
 * run the selected operation immediately. Middle click: reset the selection.
 * Communicates with the rest of the program only through the globals above
 * (pts, var, flag1, minx/maxx/miny/maxy, num, res1, final, blend, ...).
 */
void source(int event, int x, int y, int, void*)
{
    // Add one vertex per left click while the polygon is still open (flag1==0).
    if (event == EVENT_LBUTTONDOWN && !drag)
    {
        if(flag1 == 0)
        {
            if(var==0)
                img1 = img0.clone();
            point = Point(x, y);
            circle(img1,point,2,Scalar(0, 0, 255),-1, 8, 0);
            pts[var] = point;
            var++;
            drag = 1;
            if(var>1)
                line(img1,pts[var-2], point, Scalar(0, 0, 255), 2, 8, 0);
            imshow("Source", img1);
        }
    }
    if (event == EVENT_LBUTTONUP && drag)
    {
        imshow("Source", img1);
        drag = 0;
    }
    // Close the polygon and cache its geometry.
    if (event == EVENT_RBUTTONDOWN)
    {
        flag1 = 1;
        img1 = img0.clone();
        // Pad the unused slots with the last clicked point so the fixed-size
        // arrays always describe a valid (degenerate-tail) polygon.
        for(int i = var; i < numpts ; i++)
            pts[i] = point;
        if(var!=0)
        {
            const Point* pts3[1] = {&pts[0]};
            polylines( img1, pts3, &numpts,1, 1, Scalar(0,0,0), 2, 8, 0);
        }
        // Bounding box of the clicked vertices.
        for(int i=0;i<var;i++)
        {
            minx = min(minx,pts[i].x);
            maxx = max(maxx,pts[i].x);
            miny = min(miny,pts[i].y);
            maxy = max(maxy,pts[i].y);
        }
        lenx = maxx - minx;
        leny = maxy - miny;
        int mid_pointx = minx + lenx/2;
        int mid_pointy = miny + leny/2;
        // Offsets of each vertex from the ROI centre; destination() uses these
        // to re-anchor the polygon at the clicked destination point.
        for(int i=0;i<var;i++)
        {
            pts_diff[i].x = pts[i].x - mid_pointx;
            pts_diff[i].y = pts[i].y - mid_pointy;
        }
        imshow("Source", img1);
    }
    // Build the binary mask; for modes 4-6 apply the operation right away.
    if (event == EVENT_RBUTTONUP)
    {
        flag = var;
        final = Mat::zeros(img0.size(),CV_8UC3);
        res1 = Mat::zeros(img0.size(),CV_8UC1);
        const Point* pts4[1] = {&pts[0]};
        fillPoly(res1, pts4,&numpts, 1, Scalar(255, 255, 255), 8, 0);
        bitwise_and(img0, img0, final,res1);
        imshow("Source", img1);
        if(num == 4)
        {
            colorChange(img0,res1,blend,red,green,blue);
            imshow("Color Change Image", blend);
            waitKey(0);
        }
        else if(num == 5)
        {
            illuminationChange(img0,res1,blend,alpha,beta);
            imshow("Illum Change Image", blend);
            waitKey(0);
        }
        else if(num == 6)
        {
            textureFlattening(img0,res1,blend,low_t,high_t,kernel_size);
            imshow("Texture Flattened", blend);
            waitKey(0);
        }
    }
    // Middle click: discard the current selection and start over.
    if (event == EVENT_MBUTTONDOWN)
    {
        for(int i = 0; i < numpts ; i++)
        {
            pts[i].x=0;
            pts[i].y=0;
        }
        var = 0;
        flag1 = 0;
        minx = INT_MAX; miny = INT_MAX; maxx = INT_MIN; maxy = INT_MIN;
        imshow("Source", img0);
        if(num == 1 || num == 2 || num == 3)
            imshow("Destination",img2);
        drag = 0;
    }
}
/* Mouse callback for the "Destination" window (cloning modes 1-3 only).
 * Left click: preview the source polygon anchored at the clicked point.
 * Right button release: bounds-check the placement, copy the masked source
 * pixels into final1, then run seamlessClone and save the result.
 */
void destination(int event, int x, int y, int, void*)
{
    Mat im1;
    minxd = INT_MAX; minyd = INT_MAX; maxxd = INT_MIN; maxyd = INT_MIN;
    im1 = img2.clone();
    if (event == EVENT_LBUTTONDOWN)
    {
        flag4 = 1;
        // Only react once the source polygon has been closed (flag1 == 1).
        if(flag1 == 1)
        {
            point = Point(x, y);
            // Re-anchor every source vertex around the clicked point using the
            // centre-relative offsets computed in source().
            for(int i=0;i<var;i++)
            {
                pts2[i].x = point.x + pts_diff[i].x;
                pts2[i].y = point.y + pts_diff[i].y;
            }
            // Pad unused slots so the fixed-size polygon stays valid.
            for(int i=var;i<numpts;i++)
            {
                pts2[i].x = point.x + pts_diff[0].x;
                pts2[i].y = point.y + pts_diff[0].y;
            }
            const Point* pts5[1] = {&pts2[0]};
            polylines( im1, pts5, &numpts,1, 1, Scalar(0,0,255), 2, 8, 0);
            destx = x;
            desty = y;
            imshow("Destination", im1);
        }
    }
    if (event == EVENT_RBUTTONUP)
    {
        // Bounding box of the repositioned polygon, used for the range check.
        for(int i=0;i<flag;i++)
        {
            minxd = min(minxd,pts2[i].x);
            maxxd = max(maxxd,pts2[i].x);
            minyd = min(minyd,pts2[i].y);
            maxyd = max(maxyd,pts2[i].y);
        }
        if(maxxd > im1.size().width || maxyd > im1.size().height || minxd < 0 || minyd < 0)
        {
            cout << "Index out of range" << endl;
            exit(1);
        }
        final1 = Mat::zeros(img2.size(),CV_8UC3);
        res = Mat::zeros(img2.size(),CV_8UC1);
        // Copy the masked source patch channel-by-channel to the destination
        // offset (manual interleaved-pixel addressing).
        for(int i=miny, k=minyd;i<(miny+leny);i++,k++)
            for(int j=minx,l=minxd ;j<(minx+lenx);j++,l++)
            {
                for(int c=0;c<channel;c++)
                {
                    final1.at<uchar>(k,l*channel+c) = final.at<uchar>(i,j*channel+c);
                }
            }
        const Point* pts6[1] = {&pts2[0]};
        fillPoly(res, pts6, &numpts, 1, Scalar(255, 255, 255), 8, 0);
        if(num == 1 || num == 2 || num == 3)
        {
            seamlessClone(img0,img2,res1,point,blend,num);
            imshow("Cloned Image", blend);
            imwrite("cloned.png",blend);
            waitKey(0);
        }
        // Reset the destination polygon for the next placement.
        for(int i = 0; i < flag ; i++)
        {
            pts2[i].x=0;
            pts2[i].y=0;
        }
        minxd = INT_MAX; minyd = INT_MAX; maxxd = INT_MIN; maxyd = INT_MIN;
    }
    im1.release();
}
/* Interactive entry point: prints usage, asks for a technique (1-6) and the
 * input image(s), wires up the mouse callbacks, then runs the hot-key loop
 * ('d' = close polygon, 'r' = reset, 'c' = apply, 'q' = quit).
 * Returns 0 on 'q'; exits with 1 on a bad menu choice, 2 on a missing image.
 */
int main()
{
    cout << endl;
    cout << "Cloning Module" << endl;
    cout << "---------------" << endl;
    cout << "Step 1:" << endl;
    cout << " -> In the source image, select the region of interest by left click mouse button. A Polygon ROI will be created by left clicking mouse button." << endl;
    cout << " -> To set the Polygon ROI, click the right mouse button or use 'd' key" << endl;
    cout << " -> To reset the region selected, click the middle mouse button or use 'r' key." << endl;
    cout << "Step 2:" << endl;
    cout << " -> In the destination image, select the point where you want to place the ROI in the image by left clicking mouse button." << endl;
    cout << " -> To get the cloned result, click the right mouse button or use 'c' key." << endl;
    cout << " -> To quit the program, use 'q' key." << endl;
    cout << endl;
    cout << "Options: " << endl;
    cout << endl;
    cout << "1) Normal Cloning " << endl;
    cout << "2) Mixed Cloning " << endl;
    cout << "3) Monochrome Transfer " << endl;
    cout << "4) Local Color Change " << endl;
    cout << "5) Local Illumination Change " << endl;
    cout << "6) Texture Flattening " << endl;
    cout << endl;
    cout << "Press number 1-6 to choose from above techniques: ";
    cin >> num;
    cout << endl;
    // Start with empty (inverted) bounding boxes; updated as points arrive.
    minx = INT_MAX; miny = INT_MAX; maxx = INT_MIN; maxy = INT_MIN;
    minxd = INT_MAX; minyd = INT_MAX; maxxd = INT_MIN; maxyd = INT_MIN;
    int flag3 = 0;
    // Modes 1-3 (cloning) need a source AND a destination image.
    if(num == 1 || num == 2 || num == 3)
    {
        string src,dest;
        cout << "Enter Source Image: ";
        cin >> src;
        cout << "Enter Destination Image: ";
        cin >> dest;
        img0 = imread(samples::findFile(src));
        img2 = imread(samples::findFile(dest));
        if(img0.empty())
        {
            cout << "Source Image does not exist" << endl;
            exit(2);
        }
        if(img2.empty())
        {
            cout << "Destination Image does not exist" << endl;
            exit(2);
        }
        channel = img0.channels();
        res = Mat::zeros(img2.size(),CV_8UC1);
        res1 = Mat::zeros(img0.size(),CV_8UC1);
        final = Mat::zeros(img0.size(),CV_8UC3);
        final1 = Mat::zeros(img2.size(),CV_8UC3);
        //////////// source image ///////////////////
        namedWindow("Source", 1);
        setMouseCallback("Source", source, NULL);
        imshow("Source", img0);
        /////////// destination image ///////////////
        namedWindow("Destination", 1);
        setMouseCallback("Destination", destination, NULL);
        imshow("Destination",img2);
    }
    // Mode 4: local color change on a single image; asks for RGB multipliers.
    else if(num == 4)
    {
        string src;
        cout << "Enter Source Image: ";
        cin >> src;
        cout << "Enter RGB values: " << endl;
        cout << "Red: ";
        cin >> red;
        cout << "Green: ";
        cin >> green;
        cout << "Blue: ";
        cin >> blue;
        img0 = imread(samples::findFile(src));
        if(img0.empty())
        {
            cout << "Source Image does not exist" << endl;
            exit(2);
        }
        res1 = Mat::zeros(img0.size(),CV_8UC1);
        final = Mat::zeros(img0.size(),CV_8UC3);
        //////////// source image ///////////////////
        namedWindow("Source", 1);
        setMouseCallback("Source", source, NULL);
        imshow("Source", img0);
    }
    // Mode 5: local illumination change; asks for alpha/beta parameters.
    else if(num == 5)
    {
        string src;
        cout << "Enter Source Image: ";
        cin >> src;
        cout << "alpha: ";
        cin >> alpha;
        cout << "beta: ";
        cin >> beta;
        img0 = imread(samples::findFile(src));
        if(img0.empty())
        {
            cout << "Source Image does not exist" << endl;
            exit(2);
        }
        res1 = Mat::zeros(img0.size(),CV_8UC1);
        final = Mat::zeros(img0.size(),CV_8UC3);
        //////////// source image ///////////////////
        namedWindow("Source", 1);
        setMouseCallback("Source", source, NULL);
        imshow("Source", img0);
    }
    // Mode 6: texture flattening; asks for Canny thresholds and kernel size.
    else if(num == 6)
    {
        string src;
        cout << "Enter Source Image: ";
        cin >> src;
        cout << "low_threshold: ";
        cin >> low_t;
        cout << "high_threshold: ";
        cin >> high_t;
        cout << "kernel_size: ";
        cin >> kernel_size;
        img0 = imread(samples::findFile(src));
        if(img0.empty())
        {
            cout << "Source Image does not exist" << endl;
            exit(2);
        }
        res1 = Mat::zeros(img0.size(),CV_8UC1);
        final = Mat::zeros(img0.size(),CV_8UC3);
        //////////// source image ///////////////////
        namedWindow("Source", 1);
        setMouseCallback("Source", source, NULL);
        imshow("Source", img0);
    }
    else
    {
        cout << "Wrong Option Chosen" << endl;
        exit(1);
    }
    // Hot-key loop: duplicates the mouse gestures with keyboard shortcuts.
    for(;;)
    {
        char key = (char)waitKey(0);
        // 'd': close the polygon (same bookkeeping as the right-button press
        // and release handlers in source(), combined).
        if(key == 'd' && flag3 == 0)
        {
            flag1 = 1;
            flag3 = 1;
            img1 = img0.clone();
            for(int i = var; i < numpts ; i++)
                pts[i] = point;
            if(var!=0)
            {
                const Point* pts3[1] = {&pts[0]};
                polylines( img1, pts3, &numpts,1, 1, Scalar(0,0,0), 2, 8, 0);
            }
            for(int i=0;i<var;i++)
            {
                minx = min(minx,pts[i].x);
                maxx = max(maxx,pts[i].x);
                miny = min(miny,pts[i].y);
                maxy = max(maxy,pts[i].y);
            }
            lenx = maxx - minx;
            leny = maxy - miny;
            int mid_pointx = minx + lenx/2;
            int mid_pointy = miny + leny/2;
            for(int i=0;i<var;i++)
            {
                pts_diff[i].x = pts[i].x - mid_pointx;
                pts_diff[i].y = pts[i].y - mid_pointy;
            }
            flag = var;
            final = Mat::zeros(img0.size(),CV_8UC3);
            res1 = Mat::zeros(img0.size(),CV_8UC1);
            const Point* pts4[1] = {&pts[0]};
            fillPoly(res1, pts4,&numpts, 1, Scalar(255, 255, 255), 8, 0);
            bitwise_and(img0, img0, final,res1);
            imshow("Source", img1);
        }
        // 'r': reset the selection and all related state.
        else if(key == 'r')
        {
            for(int i = 0; i < numpts ; i++)
            {
                pts[i].x=0;
                pts[i].y=0;
            }
            var = 0;
            flag1 = 0;
            flag3 = 0;
            flag4 = 0;
            minx = INT_MAX; miny = INT_MAX; maxx = INT_MIN; maxy = INT_MIN;
            imshow("Source", img0);
            if(num == 1 || num == 2 || num == 3)
                imshow("Destination",img2);
            drag = 0;
        }
        // 'c': apply the chosen operation (cloning also needs a destination
        // click first, hence the flag4 check).
        else if ((num == 1 || num == 2 || num == 3) && key == 'c' && flag1 == 1 && flag4 == 1)
        {
            seamlessClone(img0,img2,res1,point,blend,num);
            imshow("Cloned Image", blend);
            imwrite("cloned.png",blend);
        }
        else if (num == 4 && key == 'c' && flag1 == 1)
        {
            colorChange(img0,res1,blend,red,green,blue);
            imshow("Color Change Image", blend);
            imwrite("cloned.png",blend);
        }
        else if (num == 5 && key == 'c' && flag1 == 1)
        {
            illuminationChange(img0,res1,blend,alpha,beta);
            imshow("Illum Change Image", blend);
            imwrite("cloned.png",blend);
        }
        else if (num == 6 && key == 'c' && flag1 == 1)
        {
            textureFlattening(img0,res1,blend,low_t,high_t,kernel_size);
            imshow("Texture Flattened", blend);
            imwrite("cloned.png",blend);
        }
        else if(key == 'q')
            break;
    }
    return 0;
}

@ -1,86 +0,0 @@
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>
using namespace cv;
using namespace std;
// Print a short usage banner for this sample to stdout.
// Fixes in the user-facing text: "demonstrated" -> "demonstrates",
// "it's" -> "its".
static void help(const char ** argv)
{
    printf("\nThis program demonstrates the use of the discrete Fourier transform (dft)\n"
           "The dft of an image is taken and its power spectrum is displayed.\n"
           "Usage:\n %s [image_name -- default lena.jpg]\n", argv[0]);
}
// Command-line spec: optional help flag and one positional image path
// defaulting to lena.jpg.
const char* keys =
{
    "{help h||}{@image|lena.jpg|input image file}"
};
// Loads a grayscale image, computes its DFT on an optimally padded buffer,
// and displays the log-scaled, centre-shifted magnitude spectrum.
// Returns 0 on success, -1 if the image cannot be read.
int main(int argc, const char ** argv)
{
    help(argv);
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        help(argv);
        return 0;
    }
    string filename = parser.get<string>(0);
    Mat img = imread(samples::findFile(filename), IMREAD_GRAYSCALE);
    if( img.empty() )
    {
        help(argv);
        printf("Cannot read image file: %s\n", filename.c_str());
        return -1;
    }
    // Pad to sizes the DFT processes fastest, zero-filling the border.
    int M = getOptimalDFTSize( img.rows );
    int N = getOptimalDFTSize( img.cols );
    Mat padded;
    copyMakeBorder(img, padded, 0, M - img.rows, 0, N - img.cols, BORDER_CONSTANT, Scalar::all(0));
    // Pack the real image plus a zero imaginary plane into one 2-channel Mat.
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    Mat complexImg;
    merge(planes, 2, complexImg);
    dft(complexImg, complexImg);
    // compute log(1 + sqrt(Re(DFT(img))**2 + Im(DFT(img))**2))
    split(complexImg, planes);
    magnitude(planes[0], planes[1], planes[0]);
    Mat mag = planes[0];
    mag += Scalar::all(1);
    log(mag, mag);
    // crop the spectrum, if it has an odd number of rows or columns
    mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));
    int cx = mag.cols/2;
    int cy = mag.rows/2;
    // rearrange the quadrants of Fourier image
    // so that the origin is at the image center
    Mat tmp;
    Mat q0(mag, Rect(0, 0, cx, cy));
    Mat q1(mag, Rect(cx, 0, cx, cy));
    Mat q2(mag, Rect(0, cy, cx, cy));
    Mat q3(mag, Rect(cx, cy, cx, cy));
    // Swap top-left with bottom-right, and top-right with bottom-left.
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);
    // Normalize to [0,1] so the float image displays as visible grayscale.
    normalize(mag, mag, 0, 1, NORM_MINMAX);
    imshow("spectrum magnitude", mag);
    waitKey();
    return 0;
}

@ -1,67 +0,0 @@
/**
@file ela.cpp
@author Alessandro de Oliveira Faria (A.K.A. CABELO)
@brief Error Level Analysis (ELA) permits identifying areas within an image that are at different compression levels. With JPEG images, the entire picture should be at roughly the same level. If a section of the image is at a significantly different error level, then it likely indicates a digital modification. This example allows you to see visually the changes made in a JPG image based on its compression error analysis. Questions and suggestions email to: Alessandro de Oliveira Faria cabelo[at]opensuse[dot]org or OpenCV Team.
@date Jun 24, 2018
*/
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace cv;
int scale_value = 7;
int quality = 95;
Mat image;
Mat compressed_img;
const char* decodedwin = "the recompressed image";
const char* diffwin = "scaled difference between the original and recompressed images";
static void processImage(int , void*)
{
Mat Ela;
// Compression jpeg
std::vector<int> compressing_factor;
std::vector<uchar> buf;
compressing_factor.push_back(IMWRITE_JPEG_QUALITY);
compressing_factor.push_back(quality);
imencode(".jpg", image, buf, compressing_factor);
compressed_img = imdecode(buf, 1);
Mat output;
absdiff(image,compressed_img,output);
output.convertTo(Ela, CV_8UC3, scale_value);
// Shows processed image
imshow(decodedwin, compressed_img);
imshow(diffwin, Ela);
}
// Entry point: load the input image (default ela_modified.jpg), run the ELA
// visualization once, then expose Scale/Quality trackbars that re-run it.
// Returns 0 always; prints an error message if the image cannot be loaded.
int main (int argc, char* argv[])
{
    CommandLineParser parser(argc, argv, "{ input i | ela_modified.jpg | Input image to calculate ELA algorithm. }");
    parser.about("\nJpeg Recompression Example:\n");
    parser.printMessage();
    // Read the new image
    image = imread(samples::findFile(parser.get<String>("input")));
    // Check image
    if (!image.empty())
    {
        processImage(0, 0);
        // Both trackbars share the same callback; moving either one re-runs
        // the ELA computation with the updated global value.
        createTrackbar("Scale", diffwin, &scale_value, 100, processImage);
        createTrackbar("Quality", diffwin, &quality, 100, processImage);
        waitKey(0);
    }
    else
    {
        std::cout << "> Error in load image\n";
    }
    return 0;
}

@ -1,80 +0,0 @@
/*
* Created on: Nov 23, 2010
* Author: Ethan Rublee
*
* A starter sample for using opencv, load up an imagelist
* that was generated with imagelist_creator.cpp
* easy as CV_PI right?
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
// Print usage for this sample to stdout.
// Fixes in the user-facing text: "an list" -> "a list", and removal of the
// stray printf-style "%s" that was printed literally (the version was already
// streamed separately via operator<<, so "%s" was never substituted).
static void help(char** av)
{
    cout << "\nThis program gets you started being able to read images from a list in a file\n"
            "Usage:\n./" << av[0] << " image_list.yaml\n"
         << "\tThis is a starter sample, to get you up and going in a copy pasta fashion.\n"
         << "\tThe program reads in a list of images from a yaml or xml file and displays\n"
         << "one at a time\n"
         << "\tTry running imagelist_creator to generate a list of images.\n"
            "Using OpenCV version " << CV_VERSION << "\n" << endl;
}
// Read a flat string sequence from the first top-level node of a YAML/XML
// file into `l`. Returns false when the file cannot be opened or the node is
// not a sequence; `l` is emptied in every case before filling.
static bool readStringList(const string& filename, vector<string>& l)
{
    l.resize(0);
    FileStorage fs(filename, FileStorage::READ);
    if (!fs.isOpened())
        return false;
    FileNode n = fs.getFirstTopLevelNode();
    if (n.type() != FileNode::SEQ)
        return false;
    for (FileNodeIterator it = n.begin(); it != n.end(); ++it)
    {
        l.push_back((string)*it);
    }
    return true;
}
// Display each image in `images` one at a time, advancing on any key press.
// Fix: images that fail to load are reported and skipped instead of being
// passed to imshow, which raises an error on an empty Mat.
// Returns 0 on completion.
static int process(const vector<string>& images)
{
    namedWindow("image", WINDOW_KEEPRATIO); //resizable window;
    for (size_t i = 0; i < images.size(); i++)
    {
        Mat image = imread(images[i], IMREAD_GRAYSCALE); // do grayscale processing?
        if (image.empty())
        {
            cout << "Could not read image: " << images[i] << endl;
            continue;
        }
        imshow("image",image);
        cout << "Press a key to see the next image in the list." << endl;
        waitKey(); // wait infinitely for a key to be pressed
    }
    return 0;
}
// Entry point: parse the command line, read the image list from the given
// YAML/XML file, and hand it to process(). Prints usage and returns 1 on a
// missing argument or an unreadable list; returns process()'s result otherwise.
int main(int ac, char** av)
{
    cv::CommandLineParser parser(ac, av, "{help h||}{@input||}");
    if (parser.has("help"))
    {
        help(av);
        return 0;
    }
    const std::string listfile = parser.get<std::string>("@input");
    if (listfile.empty())
    {
        help(av);
        return 1;
    }
    vector<string> imagelist;
    if (readStringList(listfile, imagelist))
        return process(imagelist);
    cerr << "Failed to read image list\n" << endl;
    help(av);
    return 1;
}

@ -1,82 +0,0 @@
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
#include <vector>
using namespace std;
using namespace cv;
int main(int /*argc*/, const char** /* argv */ )
{
Mat framebuffer( 160 * 2, 160 * 5, CV_8UC3, cv::Scalar::all(255) );
Mat img( 160, 160, CV_8UC3, cv::Scalar::all(255) );
// Create test image.
{
const Point center( img.rows / 2 , img.cols /2 );
for( int radius = 5; radius < img.rows ; radius += 3 )
{
cv::circle( img, center, radius, Scalar(255,0,255) );
}
cv::rectangle( img, Point(0,0), Point(img.rows-1, img.cols-1), Scalar::all(0), 2 );
}
// Draw original image(s).
int top = 0; // Upper images
{
for( int left = 0 ; left < img.rows * 5 ; left += img.rows ){
Mat roi = framebuffer( Rect( left, top, img.rows, img.cols ) );
img.copyTo(roi);
cv::putText( roi, "original", Point(5,15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar::all(0), 2, 4, false );
}
}
// Draw lossy images
top += img.cols; // Lower images
{
struct test_config{
string comment;
uint32_t sampling_factor;
} config [] = {
{ "411", IMWRITE_JPEG_SAMPLING_FACTOR_411 },
{ "420", IMWRITE_JPEG_SAMPLING_FACTOR_420 },
{ "422", IMWRITE_JPEG_SAMPLING_FACTOR_422 },
{ "440", IMWRITE_JPEG_SAMPLING_FACTOR_440 },
{ "444", IMWRITE_JPEG_SAMPLING_FACTOR_444 },
};
const int config_num = 5;
int left = 0;
for ( int i = 0 ; i < config_num; i++ )
{
// Compress images with sampling factor parameter.
vector<int> param;
param.push_back( IMWRITE_JPEG_SAMPLING_FACTOR );
param.push_back( config[i].sampling_factor );
vector<uint8_t> jpeg;
(void) imencode(".jpg", img, jpeg, param );
// Decompress it.
Mat jpegMat(jpeg);
Mat lossy_img = imdecode(jpegMat, -1);
// Copy into framebuffer and comment
Mat roi = framebuffer( Rect( left, top, lossy_img.rows, lossy_img.cols ) );
lossy_img.copyTo(roi);
cv::putText( roi, config[i].comment, Point(5,155), FONT_HERSHEY_SIMPLEX, 0.5, Scalar::all(0), 2, 4, false );
left += lossy_img.rows;
}
}
// Output framebuffer(as lossless).
imwrite( "imgcodecs_jpeg_samplingfactor_result.png", framebuffer );
return 0;
}

@ -1,91 +0,0 @@
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/photo.hpp"
#include <iostream>
using namespace cv;
using namespace std;
// Print the usage banner and the interactive hot-key reference for the
// inpainting demo.
static void help( char** argv )
{
    // Fixed: the original left a printf-style "%s" in the stream output, so
    // "Using OpenCV version %s" was printed literally; also "inpainging" typo.
    cout << "\nCool inpainting demo. Inpainting repairs damage to images by floodfilling the damage \n"
            << "with surrounding image areas.\n"
            "Using OpenCV version " << CV_VERSION << "\n"
            "Usage:\n" << argv[0] <<" [image_name -- Default fruits.jpg]\n" << endl;
    cout << "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tr - restore the original image\n"
        "\ti or SPACE - run inpainting algorithm\n"
        "\t\t(before running it, paint something on the image)\n" << endl;
}
// Working image shown on screen (the user scribbles white strokes on it) and
// the single-channel mask marking the pixels to be repaired by inpaint().
Mat img, inpaintMask;
// Previous mouse position while the left button is held down;
// (-1,-1) means "no stroke in progress".
Point prevPt(-1,-1);
// Mouse callback: freehand-draw white strokes onto both the visible image and
// the inpainting mask while the left button is held down.
static void onMouse( int event, int x, int y, int flags, void* )
{
    const bool buttonHeld = (flags & EVENT_FLAG_LBUTTON) != 0;
    if( event == EVENT_LBUTTONUP || !buttonHeld )
    {
        prevPt = Point(-1,-1);   // stroke finished (or button not down)
    }
    else if( event == EVENT_LBUTTONDOWN )
    {
        prevPt = Point(x,y);     // stroke starts here
    }
    else if( event == EVENT_MOUSEMOVE && buttonHeld )
    {
        Point pt(x,y);
        if( prevPt.x < 0 )
            prevPt = pt;         // first move event of a stroke
        // Draw the segment on the mask (input to inpaint) and on the image
        // (visual feedback), then refresh the window.
        line( inpaintMask, prevPt, pt, Scalar::all(255), 5, 8, 0 );
        line( img, prevPt, pt, Scalar::all(255), 5, 8, 0 );
        prevPt = pt;
        imshow("image", img);
    }
}
int main( int argc, char** argv )
{
    // One positional argument: the image to repair (fruits.jpg by default).
    cv::CommandLineParser parser(argc, argv, "{@image|fruits.jpg|}");
    help(argv);

    string filename = samples::findFile(parser.get<string>("@image"));
    Mat img0 = imread(filename, IMREAD_COLOR);
    if(img0.empty())
    {
        cout << "Couldn't open the image " << filename << ". Usage: inpaint <image_name>\n" << endl;
        return 0;
    }

    namedWindow("image", WINDOW_AUTOSIZE);

    // Work on a copy so 'r' can restore the pristine original at any time.
    img = img0.clone();
    inpaintMask = Mat::zeros(img.size(), CV_8U);

    imshow("image", img);
    setMouseCallback( "image", onMouse, NULL);

    // Event loop: dispatch on the pressed key until ESC.
    bool running = true;
    while( running )
    {
        char c = (char)waitKey();
        switch( c )
        {
        case 27:        // ESC: quit
            running = false;
            break;
        case 'r':       // restore the untouched image and clear the mask
            inpaintMask = Scalar::all(0);
            img0.copyTo(img);
            imshow("image", img);
            break;
        case 'i':
        case ' ':       // run inpainting on the strokes painted so far
        {
            Mat inpainted;
            inpaint(img, inpaintMask, inpainted, 3, INPAINT_TELEA);
            imshow("inpainted image", inpainted);
            break;
        }
        default:
            break;
        }
    }
    return 0;
}

@ -1,186 +0,0 @@
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
// Print a short description of the descriptor/matcher comparison demo and its
// command-line usage.
static void help(char* argv[])
{
    cout << "\n This program demonstrates how to detect compute and match ORB BRISK and AKAZE descriptors \n";
    cout << "Usage: \n " << argv[0]
         << " --image1=<image1(basketball1.png as default)> --image2=<image2(basketball2.png as default)>\n";
    cout << "Press a key when image window is active to change algorithm or descriptor";
}
int main(int argc, char *argv[])
{
    vector<String> typeDesc;       // descriptor/detector names to evaluate
    vector<String> typeAlgoMatch;  // matcher names to evaluate
    vector<String> fileName;       // the two input image paths
    // These descriptors are going to be detected and computed
    typeDesc.push_back("AKAZE-DESCRIPTOR_KAZE_UPRIGHT"); // see https://docs.opencv.org/5.x/d8/d30/classcv_1_1AKAZE.html
    typeDesc.push_back("AKAZE"); // see http://docs.opencv.org/5.x/d8/d30/classcv_1_1AKAZE.html
    typeDesc.push_back("ORB"); // see http://docs.opencv.org/5.x/db/d95/classcv_1_1ORB.html
    typeDesc.push_back("BRISK"); // see http://docs.opencv.org/5.x/de/dbf/classcv_1_1BRISK.html
    // This algorithm would be used to match descriptors see http://docs.opencv.org/5.x/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257
    typeAlgoMatch.push_back("BruteForce");
    typeAlgoMatch.push_back("BruteForce-L1");
    typeAlgoMatch.push_back("BruteForce-Hamming");
    typeAlgoMatch.push_back("BruteForce-Hamming(2)");
    cv::CommandLineParser parser(argc, argv,
        "{ @image1 | basketball1.png | }"
        "{ @image2 | basketball2.png | }"
        "{help h ||}");
    if (parser.has("help"))
    {
        help(argv);
        return 0;
    }
    fileName.push_back(samples::findFile(parser.get<string>(0)));
    fileName.push_back(samples::findFile(parser.get<string>(1)));
    Mat img1 = imread(fileName[0], IMREAD_GRAYSCALE);
    Mat img2 = imread(fileName[1], IMREAD_GRAYSCALE);
    if (img1.empty())
    {
        cerr << "Image " << fileName[0] << " is empty or cannot be found" << endl;
        return 1;
    }
    if (img2.empty())
    {
        cerr << "Image " << fileName[1] << " is empty or cannot be found" << endl;
        return 1;
    }
    // Cumulative squared keypoint displacement per (descriptor, matcher) pair;
    // -1 marks a combination whose matching failed.
    vector<double> desMethCmp;
    Ptr<Feature2D> b;
    // Descriptor loop
    vector<String>::iterator itDesc;
    for (itDesc = typeDesc.begin(); itDesc != typeDesc.end(); ++itDesc)
    {
        Ptr<DescriptorMatcher> descriptorMatcher;
        // Match between img1 and img2
        vector<DMatch> matches;
        // keypoints for img1 and img2
        vector<KeyPoint> keyImg1, keyImg2;
        // Descriptors for img1 and img2
        Mat descImg1, descImg2;
        vector<String>::iterator itMatcher = typeAlgoMatch.end();
        // Select the feature algorithm (exclusive else-if chain: exactly one applies).
        if (*itDesc == "AKAZE-DESCRIPTOR_KAZE_UPRIGHT"){
            b = AKAZE::create(AKAZE::DESCRIPTOR_KAZE_UPRIGHT);
        }
        else if (*itDesc == "AKAZE"){
            b = AKAZE::create();
        }
        else if (*itDesc == "ORB"){
            b = ORB::create();
        }
        else if (*itDesc == "BRISK"){
            b = BRISK::create();
        }
        try
        {
            // We can detect keypoints with the detect method
            b->detect(img1, keyImg1, Mat());
            // and compute their descriptors with the compute method
            b->compute(img1, keyImg1, descImg1);
            // or detect and compute descriptors in one step
            b->detectAndCompute(img2, Mat(),keyImg2, descImg2,false);
            // Match method loop
            for (itMatcher = typeAlgoMatch.begin(); itMatcher != typeAlgoMatch.end(); ++itMatcher){
                descriptorMatcher = DescriptorMatcher::create(*itMatcher);
                // Warn about norm/descriptor-type mismatches (demo still runs them).
                if ((*itMatcher == "BruteForce-Hamming" || *itMatcher == "BruteForce-Hamming(2)") && (b->descriptorType() == CV_32F || b->defaultNorm() <= NORM_L2SQR))
                {
                    cout << "**************************************************************************\n";
                    cout << "It's strange. You should use Hamming distance only for a binary descriptor\n";
                    cout << "**************************************************************************\n";
                }
                if ((*itMatcher == "BruteForce" || *itMatcher == "BruteForce-L1") && (b->defaultNorm() >= NORM_HAMMING))
                {
                    cout << "**************************************************************************\n";
                    cout << "It's strange. You shouldn't use L1 or L2 distance for a binary descriptor\n";
                    cout << "**************************************************************************\n";
                }
                try
                {
                    descriptorMatcher->match(descImg1, descImg2, matches, Mat());
                    // Keep best matches only to have a nice drawing.
                    // We sort distance between descriptor matches
                    Mat index;
                    int nbMatch=int(matches.size());
                    Mat tab(nbMatch, 1, CV_32F);
                    for (int i = 0; i<nbMatch; i++)
                    {
                        tab.at<float>(i, 0) = matches[i].distance;
                    }
                    sortIdx(tab, index, SORT_EVERY_COLUMN + SORT_ASCENDING);
                    // Fixed: cap at the number of available matches; the
                    // original indexed matches[0..29] unconditionally and read
                    // out of bounds when fewer than 30 matches were found.
                    int nbBest = min(30, nbMatch);
                    vector<DMatch> bestMatches;
                    for (int i = 0; i<nbBest; i++)
                    {
                        bestMatches.push_back(matches[index.at<int>(i, 0)]);
                    }
                    Mat result;
                    drawMatches(img1, keyImg1, img2, keyImg2, bestMatches, result);
                    namedWindow(*itDesc+": "+*itMatcher, WINDOW_AUTOSIZE);
                    imshow(*itDesc + ": " + *itMatcher, result);
                    // Saved result could be wrong due to bug 4308
                    FileStorage fs(*itDesc + "_" + *itMatcher + ".yml", FileStorage::WRITE);
                    fs<<"Matches"<<matches;
                    vector<DMatch>::iterator it;
                    cout<<"**********Match results**********\n";
                    cout << "Index \tIndex \tdistance\n";
                    cout << "in img1\tin img2\n";
                    // Used to compute distance between keypoint matches and to evaluate match algorithm
                    double cumSumDist2=0;
                    for (it = bestMatches.begin(); it != bestMatches.end(); ++it)
                    {
                        cout << it->queryIdx << "\t" << it->trainIdx << "\t" << it->distance << "\n";
                        Point2d p=keyImg1[it->queryIdx].pt-keyImg2[it->trainIdx].pt;
                        // Fixed: accumulate (+=); the original overwrote the
                        // sum each iteration, so only the last pair counted.
                        cumSumDist2 += p.x*p.x+p.y*p.y;
                    }
                    desMethCmp.push_back(cumSumDist2);
                    waitKey();
                }
                catch (const Exception& e)
                {
                    cout << e.msg << endl;
                    cout << "Cumulative distance cannot be computed." << endl;
                    desMethCmp.push_back(-1);
                }
            }
        }
        catch (const Exception& e)
        {
            cerr << "Exception: " << e.what() << endl;
            cout << "Feature : " << *itDesc << "\n";
            if (itMatcher != typeAlgoMatch.end())
            {
                cout << "Matcher : " << *itMatcher << "\n";
            }
        }
    }
    // Print the comparison table: one row per descriptor, one column per matcher.
    int i=0;
    cout << "Cumulative distance between keypoint match for different algorithm and feature detector \n\t";
    cout << "We cannot say which is the best but we can say results are different! \n\t";
    for (vector<String>::iterator itMatcher = typeAlgoMatch.begin(); itMatcher != typeAlgoMatch.end(); ++itMatcher)
    {
        cout<<*itMatcher<<"\t";
    }
    cout << "\n";
    for (itDesc = typeDesc.begin(); itDesc != typeDesc.end(); ++itDesc)
    {
        cout << *itDesc << "\t";
        for (vector<String>::iterator itMatcher = typeAlgoMatch.begin(); itMatcher != typeAlgoMatch.end(); ++itMatcher, ++i)
        {
            cout << desMethCmp[i]<<"\t";
        }
        cout<<"\n";
    }
    return 0;
}

@ -0,0 +1,109 @@
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
// Convolve A with kernel B via the frequency domain (full convolution,
// output size rows(A)+rows(B)-1 x cols(A)+cols(B)-1).
// A and B must have the same floating-point type; C is (re)allocated here.
// Statement order matters: pad -> forward DFT -> spectrum product -> inverse DFT -> crop.
static void convolveDFT(InputArray A, InputArray B, OutputArray C) {
    // Calculate the size of the output array (full convolution support)
    int outputRows = A.rows() + B.rows() - 1;
    int outputCols = A.cols() + B.cols() - 1;
    // Reallocate the output array if needed
    C.create(outputRows, outputCols, A.type());
    Size dftSize;
    // Calculate the size of DFT transform
    // (getOptimalDFTSize rounds up to a fast FFT length)
    dftSize.width = getOptimalDFTSize(A.cols() + B.cols() - 1);
    dftSize.height = getOptimalDFTSize(A.rows() + B.rows() - 1);
    // Allocate temporary buffers and initialize them with 0's
    // (the zero padding is what turns circular convolution into linear)
    Mat tempA(dftSize, A.type(), Scalar::all(0));
    Mat tempB(dftSize, B.type(), Scalar::all(0));
    // Copy A and B to the top-left corners of tempA and tempB, respectively
    Mat roiA(tempA, Rect(0, 0, A.cols(), A.rows()));
    A.copyTo(roiA);
    Mat roiB(tempB, Rect(0, 0, B.cols(), B.rows()));
    B.copyTo(roiB);
    // Now transform the padded A & B in-place;
    // use "nonzeroRows" hint for faster processing
    dft(tempA, tempA, 0, A.rows());
    dft(tempB, tempB, 0, B.rows());
    // Multiply the spectrums;
    // the function handles packed (CCS) spectrum representations well
    mulSpectrums(tempA, tempB, tempA, 0);
    // Transform the product back from the frequency domain.
    // Even though all the result rows will be non-zero,
    // you need only the first C.rows of them, and thus you
    // pass nonzeroRows == C.rows
    dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows());
    // Now copy the result back to C.
    tempA(Rect(0, 0, C.cols(), C.rows())).copyTo(C);
    // All the temporary buffers will be deallocated automatically
}
// Print a short description of the DFT-convolution sample and its usage line.
static void help(const char ** argv)
{
    // One printf per output line keeps the banner easy to scan in source.
    printf("\nThis program demonstrates the use of convolution using discrete Fourier transform (DFT)\n");
    printf("An image is convolved with kernel filter using DFT.\n");
    printf("Usage:\n %s [input -- default lena.jpg]\n", argv[0]);
}
// CommandLineParser specification: an optional help flag and one positional
// input image that defaults to lena.jpg.
const char* keys =
{
    "{help h||}{@input|lena.jpg|input image file}"
};
int main(int argc, const char** argv) {
    // Fixed: the original called help(argv) unconditionally before parsing,
    // so "--help" printed the banner twice; now it is printed exactly once.
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        help(argv);
        return 0;
    }
    // Load the image in grayscale
    string filename = parser.get<string>(0);
    Mat img = imread(samples::findFile(filename), IMREAD_GRAYSCALE);
    // Check if the image is loaded successfully
    if (img.empty()) {
        std::cerr << "Error: Image not loaded!" << std::endl;
        return -1;
    }
    // Convert the image to CV_32F (convolveDFT works on floating-point data)
    Mat img_32f;
    img.convertTo(img_32f, CV_32F);
    float kernelData[9] = { 1.0f/9, 1.0f/9, 1.0f/9, 1.0f/9, 1.0f/9, 1.0f/9, 1.0f/9, 1.0f/9, 1.0f/9 }; // example of blur filter, can be changed to other filter as well.
    Mat kernel(3, 3, CV_32F, kernelData);
    // Perform convolution of the image with the 3x3 box-blur kernel
    // (the original comment said "sharpening", which contradicted the kernel).
    Mat result;
    convolveDFT(img_32f, kernel, result);
    // Normalize the result for better visualization
    normalize(result, result, 0, 255, NORM_MINMAX);
    // Convert result back to 8-bit for display
    Mat result_8u;
    result.convertTo(result_8u, CV_8U);
    // Display the images
    imshow("Original Image", img);
    imshow("Output Image", result_8u);
    waitKey(0); // Wait for a key press to close the windows
    return 0;
}

@ -1,74 +0,0 @@
/*
This tutorial demonstrates how to correct the skewness in a text.
The program takes as input a skewed source image and shows non skewed text.
*/
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <iomanip>
#include <string>
using namespace cv;
using namespace std;
int main( int argc, char** argv )
{
CommandLineParser parser(argc, argv, "{@input | imageTextR.png | input image}");
// Load image from the disk
Mat image = imread( samples::findFile( parser.get<String>("@input") ), IMREAD_COLOR);
if (image.empty())
{
cout << "Cannot load the image " + parser.get<String>("@input") << endl;
return -1;
}
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
//Threshold the image, setting all foreground pixels to 255 and all background pixels to 0
Mat thresh;
threshold(gray, thresh, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
// Applying erode filter to remove random noise
int erosion_size = 1;
Mat element = getStructuringElement( MORPH_RECT, Size(2*erosion_size+1, 2*erosion_size+1), Point(erosion_size, erosion_size) );
erode(thresh, thresh, element);
cv::Mat coords;
findNonZero(thresh, coords);
RotatedRect box = minAreaRect(coords);
float angle = box.angle;
// The cv::minAreaRect function returns values in the range [-90, 0)
// if the angle is less than -45 we need to add 90 to it
if (angle < -45.0f)
{
angle = (90.0f + angle);
}
//Obtaining the rotation matrix
Point2f center((image.cols) / 2.0f, (image.rows) / 2.0f);
Mat M = getRotationMatrix2D(center, angle, 1.0f);
Mat rotated;
// Rotating the image by required angle
stringstream angle_to_str;
angle_to_str << fixed << setprecision(2) << angle;
warpAffine(image, rotated, M, image.size(), INTER_CUBIC, BORDER_REPLICATE);
putText(rotated, "Angle " + angle_to_str.str() + " degrees", Point(10, 30), FONT_HERSHEY_SIMPLEX, 0.7, Scalar(0, 0, 255), 2);
cout << "[INFO] angle: " << angle_to_str.str() << endl;
//Show the image
imshow("Input", image);
imshow("Rotated", rotated);
waitKey(0);
return 0;
}

@ -64,4 +64,4 @@ int main(int, char**)
}
// the videofile will be closed and released automatically in VideoWriter destructor
return 0;
}
}
Loading…
Cancel
Save