samples: use findFile() in "cpp"

Branch: pull/12354/head
Author: Alexander Alekhin (6 years ago), committed by Alexander Alekhin
Parent: 2fa9bd221d
Commit: c4c31f5bba
52 changed files (lines changed per file):
    2  samples/cpp/application_trace.cpp
    3  samples/cpp/bgfg_segm.cpp
   25  samples/cpp/calibration.cpp
   54  samples/cpp/cloning_demo.cpp
   12  samples/cpp/cloning_gui.cpp
    4  samples/cpp/connected_components.cpp
    4  samples/cpp/create_mask.cpp
    2  samples/cpp/dbt_face_detection.cpp
    6  samples/cpp/demhist.cpp
   26  samples/cpp/detect_blob.cpp
    2  samples/cpp/detect_mser.cpp
    6  samples/cpp/dft.cpp
    6  samples/cpp/distrans.cpp
    6  samples/cpp/edge.cpp
   32  samples/cpp/facedetect.cpp
   22  samples/cpp/facial_features.cpp
    2  samples/cpp/falsecolor.cpp
    6  samples/cpp/ffilldemo.cpp
    4  samples/cpp/filestorage.cpp
    6  samples/cpp/fitellipse.cpp
    4  samples/cpp/grabcut.cpp
    1  samples/cpp/gstreamer_pipeline.cpp
    6  samples/cpp/image.cpp
   12  samples/cpp/image_alignment.cpp
   12  samples/cpp/inpaint.cpp
   35  samples/cpp/laplace.cpp
    6  samples/cpp/letter_recog.cpp
  104  samples/cpp/live_detect_qrcode.cpp
    2  samples/cpp/logistic_regression.cpp
    4  samples/cpp/lsd_lines.cpp
   24  samples/cpp/mask_tmpl.cpp
   24  samples/cpp/matchmethod_orb_akaze_brisk.cpp
    2  samples/cpp/minarea.cpp
   19  samples/cpp/morphology2.cpp
   14  samples/cpp/npr_demo.cpp
    5  samples/cpp/peopledetect.cpp
    5  samples/cpp/points_classifier.cpp
    2  samples/cpp/polar_transforms.cpp
    2  samples/cpp/segment_objects.cpp
    2  samples/cpp/select3dobj.cpp
   11  samples/cpp/smiledetect.cpp
    9  samples/cpp/squares.cpp
    9  samples/cpp/stereo_calib.cpp
    4  samples/cpp/stereo_match.cpp
    2  samples/cpp/stitching.cpp
    4  samples/cpp/stitching_detailed.cpp
    2  samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp
    4  samples/cpp/tvl1_optical_flow.cpp
    6  samples/cpp/warpPerspective_demo.cpp
    8  samples/cpp/watershed.cpp
   15  samples/data/calibration.yml
    6  samples/python/facedetect.py
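The diffs below replace hard-coded "../data/..." paths in the C++ samples with lookups through cv::samples::findFile() / cv::samples::findFileOrKeep(). As a quick orientation, this is a minimal sketch (not part of the commit) of how those helpers from opencv2/core/utility.hpp are typically used; addSamplesDataSearchPath(), the "/path/to/..." string and "my_video.avi" are illustrative assumptions, not values taken from the diff:

    // Sketch only: resolve sample data files instead of relying on "../data/".
    #include <iostream>
    #include <string>
    #include <opencv2/core/utility.hpp>
    #include <opencv2/imgcodecs.hpp>

    int main()
    {
        // Optional: extend the search path at runtime; the same effect can be
        // achieved via OPENCV_SAMPLES_DATA_PATH or OPENCV_SAMPLES_DATA_PATH_HINT.
        cv::samples::addSamplesDataSearchPath("/path/to/opencv/samples/data");

        // findFile() searches the sample data locations; with required=true
        // (the default) it raises an error when the file cannot be found.
        std::string imgPath = cv::samples::findFile("lena.jpg");
        cv::Mat img = cv::imread(imgPath, cv::IMREAD_COLOR);

        // findFileOrKeep() returns the argument unchanged when lookup fails,
        // so camera indices and GStreamer pipelines keep working as inputs.
        std::string videoInput = cv::samples::findFileOrKeep("my_video.avi");

        std::cout << imgPath << " / " << videoInput << " / " << img.size() << std::endl;
        return 0;
    }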

@ -41,7 +41,7 @@ int main(int argc, char** argv)
if (video.size() == 1 && isdigit(video[0]))
capture.open(parser.get<int>("@video"));
else
capture.open(video);
capture.open(samples::findFileOrKeep(video)); // keep GStreamer pipelines
int nframes = 0;
if (capture.isOpened())
{

@ -38,7 +38,10 @@ int main(int argc, const char** argv)
if (file.empty())
cap.open(camera);
else
{
file = samples::findFileOrKeep(file); // ignore gstreamer pipelines
cap.open(file.c_str());
}
if (!cap.isOpened())
{
cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;

@ -254,12 +254,31 @@ static bool readStringList( const string& filename, vector<string>& l )
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
size_t dir_pos = filename.rfind('/');
if (dir_pos == string::npos)
dir_pos = filename.rfind('\\');
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
{
string fname = (string)*it;
if (dir_pos != string::npos)
{
string fpath = samples::findFile(filename.substr(0, dir_pos + 1) + fname, false);
if (fpath.empty())
{
fpath = samples::findFile(fname);
}
fname = fpath;
}
else
{
fname = samples::findFile(fname);
}
l.push_back(fname);
}
return true;
}
@ -383,10 +402,10 @@ int main( int argc, char** argv )
if( !inputFilename.empty() )
{
if( !videofile && readStringList(inputFilename, imageList) )
if( !videofile && readStringList(samples::findFile(inputFilename), imageList) )
mode = CAPTURING;
else
capture.open(inputFilename);
capture.open(samples::findFileOrKeep(inputFilename));
}
else
capture.open(cameraId);

@ -17,8 +17,7 @@
* The program takes as input a source and a destination image (for 1-3 methods)
* and outputs the cloned image.
*
* Download test images from opencv_extra folder @github.
*
* Download test images from opencv_extra repository.
*/
#include "opencv2/photo.hpp"
@ -27,7 +26,6 @@
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
#include <stdlib.h>
using namespace std;
using namespace cv;
@ -35,6 +33,7 @@ using namespace cv;
int main()
{
cout << endl;
cout << "Note: specify OPENCV_SAMPLES_DATA_PATH_HINT=<opencv_extra>/testdata/cv" << endl << endl;
cout << "Cloning Module" << endl;
cout << "---------------" << endl;
cout << "Options: " << endl;
@ -54,9 +53,9 @@ int main()
if(num == 1)
{
string folder = "cloning/Normal_Cloning/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "destination1.png";
string original_path3 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "destination1.png");
string original_path3 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat destination = imread(original_path2, IMREAD_COLOR);
@ -86,14 +85,14 @@ int main()
seamlessClone(source, destination, mask, p, result, 1);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 2)
{
string folder = "cloning/Mixed_Cloning/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "destination1.png";
string original_path3 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "destination1.png");
string original_path3 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat destination = imread(original_path2, IMREAD_COLOR);
@ -123,14 +122,14 @@ int main()
seamlessClone(source, destination, mask, p, result, 2);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 3)
{
string folder = "cloning/Monochrome_Transfer/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "destination1.png";
string original_path3 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "destination1.png");
string original_path3 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat destination = imread(original_path2, IMREAD_COLOR);
@ -160,13 +159,13 @@ int main()
seamlessClone(source, destination, mask, p, result, 3);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 4)
{
string folder = "cloning/Color_Change/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "mask.png";
string folder = "cloning/color_change/";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat mask = imread(original_path2, IMREAD_COLOR);
@ -187,13 +186,13 @@ int main()
colorChange(source, mask, result, 1.5, .5, .5);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 5)
{
string folder = "cloning/Illumination_Change/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat mask = imread(original_path2, IMREAD_COLOR);
@ -214,13 +213,13 @@ int main()
illuminationChange(source, mask, result, 0.2f, 0.4f);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else if(num == 6)
{
string folder = "cloning/Texture_Flattening/";
string original_path1 = folder + "source1.png";
string original_path2 = folder + "mask.png";
string original_path1 = samples::findFile(folder + "source1.png");
string original_path2 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat mask = imread(original_path2, IMREAD_COLOR);
@ -241,7 +240,12 @@ int main()
textureFlattening(source, mask, result, 30, 45, 3);
imshow("Output",result);
imwrite(folder + "cloned.png", result);
imwrite("cloned.png", result);
}
else
{
cerr << "Invalid selection: " << num << endl;
exit(1);
}
waitKey(0);
}

@ -30,14 +30,12 @@
* Result: The cloned image will be displayed.
*/
#include <signal.h>
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
#include <stdlib.h>
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
using std::cin;
@ -320,9 +318,9 @@ int main()
cout << "Enter Destination Image: ";
cin >> dest;
img0 = imread(src);
img0 = imread(samples::findFile(src));
img2 = imread(dest);
img2 = imread(samples::findFile(dest));
if(img0.empty())
{
@ -370,7 +368,7 @@ int main()
cout << "Blue: ";
cin >> blue;
img0 = imread(src);
img0 = imread(samples::findFile(src));
if(img0.empty())
{
@ -400,7 +398,7 @@ int main()
cout << "beta: ";
cin >> beta;
img0 = imread(src);
img0 = imread(samples::findFile(src));
if(img0.empty())
{
@ -433,7 +431,7 @@ int main()
cout << "kernel_size: ";
cin >> kernel_size;
img0 = imread(src);
img0 = imread(samples::findFile(src));
if(img0.empty())
{

@ -35,14 +35,14 @@ static void on_trackbar(int, void*)
int main( int argc, const char** argv )
{
CommandLineParser parser(argc, argv, "{@image|../data/stuff.jpg|image for converting to a grayscale}");
CommandLineParser parser(argc, argv, "{@image|stuff.jpg|image for converting to a grayscale}");
parser.about("\nThis program demonstrates connected components and use of the trackbar\n");
parser.printMessage();
cout << "\nThe image is converted to grayscale and displayed, another image has a trackbar\n"
"that controls thresholding and thereby the extracted contours which are drawn in color\n";
String inputImage = parser.get<string>(0);
img = imread(inputImage, IMREAD_GRAYSCALE);
img = imread(samples::findFile(inputImage), IMREAD_GRAYSCALE);
if(img.empty())
{

@ -95,7 +95,7 @@ void mouseHandler(int event, int x, int y, int, void*)
int main(int argc, char **argv)
{
CommandLineParser parser(argc, argv, "{@input | ../data/lena.jpg | input image}");
CommandLineParser parser(argc, argv, "{@input | lena.jpg | input image}");
parser.about("This program demonstrates using mouse events\n");
parser.printMessage();
cout << "\n\tleft mouse button - set a point to create mask shape\n"
@ -103,7 +103,7 @@ int main(int argc, char **argv)
"\tmiddle mouse button - reset\n";
String input_image = parser.get<String>("@input");
src = imread(input_image);
src = imread(samples::findFile(input_image));
if (src.empty())
{

@ -49,7 +49,7 @@ int main(int , char** )
return 1;
}
std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
std::string cascadeFrontalfilename = samples::findFile("data/lbpcascades/lbpcascade_frontalface.xml");
cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);
if ( cascade->empty() )

@ -59,12 +59,12 @@ static void updateBrightnessContrast( int /*arg*/, void* )
static void help()
{
std::cout << "\nThis program demonstrates the use of calcHist() -- histogram creation.\n"
<< "Usage: \n" << "demhist [image_name -- Defaults to ../data/baboon.jpg]" << std::endl;
<< "Usage: \n" << "demhist [image_name -- Defaults to baboon.jpg]" << std::endl;
}
const char* keys =
{
"{help h||}{@image|../data/baboon.jpg|input image file}"
"{help h||}{@image|baboon.jpg|input image file}"
};
int main( int argc, const char** argv )
@ -78,7 +78,7 @@ int main( int argc, const char** argv )
string inputImage = parser.get<string>(0);
// Load the source image. HighGUI use.
image = imread( inputImage, 0 );
image = imread(samples::findFile(inputImage), IMREAD_GRAYSCALE);
if(image.empty())
{
std::cerr << "Cannot read image file: " << inputImage << std::endl;

@ -14,7 +14,7 @@ static void help()
{
cout << "\n This program demonstrates how to use BLOB to detect and filter region \n"
"Usage: \n"
" ./detect_blob <image1(../data/detect_blob.png as default)>\n"
" ./detect_blob <image1(detect_blob.png as default)>\n"
"Press a key when image window is active to change descriptor";
}
@ -70,20 +70,19 @@ static String Legende(SimpleBlobDetector::Params &pAct)
int main(int argc, char *argv[])
{
vector<String> fileName;
Mat img(600, 800, CV_8UC1);
cv::CommandLineParser parser(argc, argv, "{@input |../data/detect_blob.png| }{h help | | }");
String fileName;
cv::CommandLineParser parser(argc, argv, "{@input |detect_blob.png| }{h help | | }");
if (parser.has("h"))
{
help();
return 0;
}
fileName.push_back(parser.get<string>("@input"));
img = imread(fileName[0], IMREAD_COLOR);
if (img.rows*img.cols <= 0)
fileName = parser.get<string>("@input");
Mat img = imread(samples::findFile(fileName), IMREAD_COLOR);
if (img.empty())
{
cout << "Image " << fileName[0] << " is empty or cannot be found\n";
return(0);
cout << "Image " << fileName << " is empty or cannot be found\n";
return 1;
}
SimpleBlobDetector::Params pDefaultBLOB;
@ -116,14 +115,17 @@ int main(int argc, char *argv[])
vector< Vec3b > palette;
for (int i = 0; i<65536; i++)
{
palette.push_back(Vec3b((uchar)rand(), (uchar)rand(), (uchar)rand()));
uchar c1 = (uchar)rand();
uchar c2 = (uchar)rand();
uchar c3 = (uchar)rand();
palette.push_back(Vec3b(c1, c2, c3));
}
help();
// These descriptors are going to be detecting and computing BLOBS with 6 different params
// Param for first BLOB detector we want all
typeDesc.push_back("BLOB"); // see http://docs.opencv.org/trunk/d0/d7a/classcv_1_1SimpleBlobDetector.html
typeDesc.push_back("BLOB"); // see http://docs.opencv.org/3.4/d0/d7a/classcv_1_1SimpleBlobDetector.html
pBLOB.push_back(pDefaultBLOB);
pBLOB.back().filterByArea = true;
pBLOB.back().minArea = 1;
@ -150,7 +152,7 @@ int main(int argc, char *argv[])
pBLOB.back().filterByConvexity = true;
pBLOB.back().minConvexity = 0.;
pBLOB.back().maxConvexity = (float)0.9;
// Param for six BLOB detector we want blob with gravity center color equal to 0 bug #4321 must be fixed
// Param for six BLOB detector we want blob with gravity center color equal to 0
typeDesc.push_back("BLOB");
pBLOB.push_back(pDefaultBLOB);
pBLOB.back().filterByColor = true;

@ -412,7 +412,7 @@ int main(int argc, char *argv[])
string input = parser.get<string>("@input");
if (!input.empty())
{
imgOrig = imread(input, IMREAD_GRAYSCALE);
imgOrig = imread(samples::findFile(input), IMREAD_GRAYSCALE);
blur(imgOrig, img, blurSize);
}
else

@ -14,12 +14,12 @@ static void help()
printf("\nThis program demonstrated the use of the discrete Fourier transform (dft)\n"
"The dft of an image is taken and it's power spectrum is displayed.\n"
"Usage:\n"
"./dft [image_name -- default ../data/lena.jpg]\n");
"./dft [image_name -- default lena.jpg]\n");
}
const char* keys =
{
"{help h||}{@image|../data/lena.jpg|input image file}"
"{help h||}{@image|lena.jpg|input image file}"
};
int main(int argc, const char ** argv)
@ -32,7 +32,7 @@ int main(int argc, const char ** argv)
return 0;
}
string filename = parser.get<string>(0);
Mat img = imread(filename, IMREAD_GRAYSCALE);
Mat img = imread(samples::findFile(filename), IMREAD_GRAYSCALE);
if( img.empty() )
{
help();

@ -91,7 +91,7 @@ static void help()
{
printf("\nProgram to demonstrate the use of the distance transform function between edge images.\n"
"Usage:\n"
"./distrans [image_name -- default image is ../data/stuff.jpg]\n"
"./distrans [image_name -- default image is stuff.jpg]\n"
"\nHot keys: \n"
"\tESC - quit the program\n"
"\tC - use C/Inf metric\n"
@ -107,7 +107,7 @@ static void help()
const char* keys =
{
"{help h||}{@image |../data/stuff.jpg|input image file}"
"{help h||}{@image |stuff.jpg|input image file}"
};
int main( int argc, const char** argv )
@ -117,7 +117,7 @@ int main( int argc, const char** argv )
if (parser.has("help"))
return 0;
string filename = parser.get<string>(0);
gray = imread(filename, 0);
gray = imread(samples::findFile(filename), 0);
if(gray.empty())
{
printf("Cannot read image file: %s\n", filename.c_str());

@ -43,12 +43,12 @@ static void help()
{
printf("\nThis sample demonstrates Canny edge detection\n"
"Call:\n"
" /.edge [image_name -- Default is ../data/fruits.jpg]\n\n");
" /.edge [image_name -- Default is fruits.jpg]\n\n");
}
const char* keys =
{
"{help h||}{@image |../data/fruits.jpg|input image name}"
"{help h||}{@image |fruits.jpg|input image name}"
};
int main( int argc, const char** argv )
@ -57,7 +57,7 @@ int main( int argc, const char** argv )
CommandLineParser parser(argc, argv, keys);
string filename = parser.get<string>(0);
image = imread(filename, IMREAD_COLOR);
image = imread(samples::findFile(filename), IMREAD_COLOR);
if(image.empty())
{
printf("Cannot read image file: %s\n", filename.c_str());

@ -18,7 +18,7 @@ static void help()
" [--try-flip]\n"
" [filename|camera_index]\n\n"
"see facedetect.cmd for one call:\n"
"./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n"
"./facedetect --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n"
"During execution:\n\tHit any key to quit.\n"
"\tUsing OpenCV version " << CV_VERSION << "\n" << endl;
}
@ -41,8 +41,8 @@ int main( int argc, const char** argv )
cv::CommandLineParser parser(argc, argv,
"{help h||}"
"{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
"{nested-cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}"
"{cascade|data/haarcascades/haarcascade_frontalface_alt.xml|}"
"{nested-cascade|data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}"
"{scale|1|}{try-flip||}{@filename||}"
);
if (parser.has("help"))
@ -62,9 +62,9 @@ int main( int argc, const char** argv )
parser.printErrors();
return 0;
}
if ( !nestedCascade.load( nestedCascadeName ) )
if (!nestedCascade.load(samples::findFileOrKeep(nestedCascadeName)))
cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
if( !cascade.load( cascadeName ) )
if (!cascade.load(samples::findFile(cascadeName)))
{
cerr << "ERROR: Could not load classifier cascade" << endl;
help();
@ -74,21 +74,31 @@ int main( int argc, const char** argv )
{
int camera = inputName.empty() ? 0 : inputName[0] - '0';
if(!capture.open(camera))
{
cout << "Capture from camera #" << camera << " didn't work" << endl;
return 1;
}
}
else if( inputName.size() )
else if (!inputName.empty())
{
image = imread(samples::findFileOrKeep(inputName), IMREAD_COLOR);
if (image.empty())
{
image = imread( inputName, 1 );
if( image.empty() )
if (!capture.open(samples::findFileOrKeep(inputName)))
{
if(!capture.open( inputName ))
cout << "Could not read " << inputName << endl;
return 1;
}
}
}
else
{
image = imread( "../data/lena.jpg", 1 );
if(image.empty()) cout << "Couldn't read ../data/lena.jpg" << endl;
image = imread(samples::findFile("lena.jpg"), IMREAD_COLOR);
if (image.empty())
{
cout << "Couldn't read lena.jpg" << endl;
return 1;
}
}
if( capture.isOpened() )

@ -32,14 +32,14 @@ string face_cascade_path, eye_cascade_path, nose_cascade_path, mouth_cascade_pat
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv,
"{eyes||}{nose||}{mouth||}{help h||}");
"{eyes||}{nose||}{mouth||}{help h||}{@image||}{@facexml||}");
if (parser.has("help"))
{
help();
return 0;
}
input_image_path = parser.get<string>(0);
face_cascade_path = parser.get<string>(1);
input_image_path = parser.get<string>("@image");
face_cascade_path = parser.get<string>("@facexml");
eye_cascade_path = parser.has("eyes") ? parser.get<string>("eyes") : "";
nose_cascade_path = parser.has("nose") ? parser.get<string>("nose") : "";
mouth_cascade_path = parser.has("mouth") ? parser.get<string>("mouth") : "";
@ -50,7 +50,7 @@ int main(int argc, char** argv)
}
// Load image and cascade classifier files
Mat image;
image = imread(input_image_path);
image = imread(samples::findFile(input_image_path));
// Detect faces and facial features
vector<Rect_<int> > faces;
@ -92,14 +92,15 @@ static void help()
" \nhttps://github.com/opencv/opencv/tree/3.4/data/haarcascades";
cout << "\n\nThe classifiers for nose and mouth can be downloaded from : "
" \nhttps://github.com/opencv/opencv_contrib/tree/master/modules/face/data/cascades\n";
" \nhttps://github.com/opencv/opencv_contrib/tree/3.4/modules/face/data/cascades\n";
}
static void detectFaces(Mat& img, vector<Rect_<int> >& faces, string cascade_path)
{
CascadeClassifier face_cascade;
face_cascade.load(cascade_path);
face_cascade.load(samples::findFile(cascade_path));
if (!face_cascade.empty())
face_cascade.detectMultiScale(img, faces, 1.15, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
@ -186,8 +187,9 @@ static void detectFacialFeaures(Mat& img, const vector<Rect_<int> > faces, strin
static void detectEyes(Mat& img, vector<Rect_<int> >& eyes, string cascade_path)
{
CascadeClassifier eyes_cascade;
eyes_cascade.load(cascade_path);
eyes_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));
if (!eyes_cascade.empty())
eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
@ -195,8 +197,9 @@ static void detectEyes(Mat& img, vector<Rect_<int> >& eyes, string cascade_path)
static void detectNose(Mat& img, vector<Rect_<int> >& nose, string cascade_path)
{
CascadeClassifier nose_cascade;
nose_cascade.load(cascade_path);
nose_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));
if (!nose_cascade.empty())
nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
@ -204,8 +207,9 @@ static void detectNose(Mat& img, vector<Rect_<int> >& nose, string cascade_path)
static void detectMouth(Mat& img, vector<Rect_<int> >& mouth, string cascade_path)
{
CascadeClassifier mouth_cascade;
mouth_cascade.load(cascade_path);
mouth_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));
if (!mouth_cascade.empty())
mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}

@ -87,7 +87,7 @@ int main(int argc, char** argv)
Mat img;
if (argc > 1)
img = imread(argv[1], IMREAD_GRAYSCALE);
img = imread(samples::findFile(argv[1]), IMREAD_GRAYSCALE);
else
img = DrawMyImage(2,256);

@ -12,7 +12,7 @@ static void help()
{
cout << "\nThis program demonstrated the floodFill() function\n"
"Call:\n"
"./ffilldemo [image_name -- Default: ../data/fruits.jpg]\n" << endl;
"./ffilldemo [image_name -- Default: fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
"\tESC - quit the program\n"
@ -74,7 +74,7 @@ static void onMouse( int event, int x, int y, int, void* )
int main( int argc, char** argv )
{
cv::CommandLineParser parser (argc, argv,
"{help h | | show help message}{@image|../data/fruits.jpg| input image}"
"{help h | | show help message}{@image|fruits.jpg| input image}"
);
if (parser.has("help"))
{
@ -82,7 +82,7 @@ int main( int argc, char** argv )
return 0;
}
string filename = parser.get<string>("@image");
image0 = imread(filename, 1);
image0 = imread(samples::findFile(filename), 1);
if( image0.empty() )
{

@ -92,8 +92,8 @@ int main(int ac, char** av)
cout << "writing images\n";
fs << "images" << "[";
fs << "image1.jpg" << "myfi.png" << "../data/baboon.jpg";
cout << "image1.jpg" << " myfi.png" << " ../data/baboon.jpg" << endl;
fs << "image1.jpg" << "myfi.png" << "baboon.jpg";
cout << "image1.jpg" << " myfi.png" << " baboon.jpg" << endl;
fs << "]";

@ -171,7 +171,7 @@ static void help()
"contours and approximate it by ellipses. Three methods are used to find the \n"
"elliptical fits: fitEllipse, fitEllipseAMS and fitEllipseDirect.\n"
"Call:\n"
"./fitellipse [image_name -- Default ../data/stuff.jpg]\n" << endl;
"./fitellipse [image_name -- Default ellipses.jpg]\n" << endl;
}
int sliderPos = 70;
@ -192,14 +192,14 @@ int main( int argc, char** argv )
fitEllipseAMSQ = true;
fitEllipseDirectQ = true;
cv::CommandLineParser parser(argc, argv,"{help h||}{@image|../data/ellipses.jpg|}");
cv::CommandLineParser parser(argc, argv,"{help h||}{@image|ellipses.jpg|}");
if (parser.has("help"))
{
help();
return 0;
}
string filename = parser.get<string>("@image");
image = imread(filename, 0);
image = imread(samples::findFile(filename), 0);
if( image.empty() )
{
cout << "Couldn't open image " << filename << "\n";

@ -276,7 +276,7 @@ static void on_mouse( int event, int x, int y, int flags, void* param )
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{@input| ../data/messi5.jpg |}");
cv::CommandLineParser parser(argc, argv, "{@input| messi5.jpg |}");
help();
string filename = parser.get<string>("@input");
@ -285,7 +285,7 @@ int main( int argc, char** argv )
cout << "\nDurn, empty filename" << endl;
return 1;
}
Mat image = imread( filename, 1 );
Mat image = imread(samples::findFile(filename), IMREAD_COLOR);
if( image.empty() )
{
cout << "\n Durn, couldn't read image filename " << filename << endl;

@ -266,6 +266,7 @@ int main(int argc, char *argv[])
cout << "Unsupported mode: " << mode << endl;
return -1;
}
file_name = samples::findFile(file_name);
cout << "Mode: " << mode << ", Backend: " << backend << ", File: " << file_name << ", Codec: " << codec << endl;
TickMeter total;

@ -14,7 +14,7 @@ static void help()
"It shows reading of images, converting to planes and merging back, color conversion\n"
"and also iterating through pixels.\n"
"Call:\n"
"./image [image-name Default: ../data/lena.jpg]\n" << endl;
"./image [image-name Default: lena.jpg]\n" << endl;
}
// enable/disable use of mixed API in the code below.
@ -27,7 +27,7 @@ static void help()
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{help h | |}{@image|../data/lena.jpg|}");
cv::CommandLineParser parser(argc, argv, "{help h | |}{@image|lena.jpg|}");
if (parser.has("help"))
{
help();
@ -47,7 +47,7 @@ int main( int argc, char** argv )
// is converted, while the data is shared)
//! [iplimage]
#else
Mat img = imread(imagename); // the newer cvLoadImage alternative, MATLAB-style function
Mat img = imread(samples::findFile(imagename)); // the newer cvLoadImage alternative, MATLAB-style function
if(img.empty())
{
fprintf(stderr, "Can not load image %s\n", imagename.c_str());

@ -3,7 +3,7 @@
* findTransformECC that implements the image alignment ECC algorithm
*
*
* The demo loads an image (defaults to ../data/fruits.jpg) and it artificially creates
* The demo loads an image (defaults to fruits.jpg) and it artificially creates
* a template image based on the given motion type. When two images are given,
* the first image is the input image and the second one defines the template image.
* In the latter case, you can also parse the warp's initialization.
@ -44,7 +44,7 @@ static void draw_warped_roi(Mat& image, const int width, const int height, Mat&
const std::string keys =
"{@inputImage | ../data/fruits.jpg | input image filename }"
"{@inputImage | fruits.jpg | input image filename }"
"{@templateImage | | template image filename (optional)}"
"{@inputWarp | | input warp (matrix) filename (optional)}"
"{n numOfIter | 50 | ECC's iterations }"
@ -65,10 +65,10 @@ static void help(void)
" are given, the initialization of the warp by command line parsing is possible. "
"If inputWarp is missing, the identity transformation initializes the algorithm. \n" << endl;
cout << "\nUsage example (one image): \n./ecc ../data/fruits.jpg -o=outWarp.ecc "
cout << "\nUsage example (one image): \n./image_alignment fruits.jpg -o=outWarp.ecc "
"-m=euclidean -e=1e-6 -N=70 -v=1 \n" << endl;
cout << "\nUsage example (two images with initialization): \n./ecc yourInput.png yourTemplate.png "
cout << "\nUsage example (two images with initialization): \n./image_alignment yourInput.png yourTemplate.png "
"yourInitialWarp.ecc -o=outWarp.ecc -m=homography -e=1e-6 -N=70 -v=1 -w=yourFinalImage.png \n" << endl;
}
@ -212,7 +212,7 @@ int main (const int argc, const char * argv[])
else
mode_temp = MOTION_HOMOGRAPHY;
Mat inputImage = imread(imgFile,0);
Mat inputImage = imread(samples::findFile(imgFile), IMREAD_GRAYSCALE);
if (inputImage.empty())
{
cerr << "Unable to load the inputImage" << endl;
@ -224,7 +224,7 @@ int main (const int argc, const char * argv[])
if (tempImgFile!="") {
inputImage.copyTo(target_image);
template_image = imread(tempImgFile,0);
template_image = imread(samples::findFile(tempImgFile), IMREAD_GRAYSCALE);
if (template_image.empty()){
cerr << "Unable to load the template image" << endl;
return -1;

@ -14,7 +14,7 @@ static void help()
<< "with surrounding image areas.\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
"Usage:\n"
"./inpaint [image_name -- Default ../data/fruits.jpg]\n" << endl;
"./inpaint [image_name -- Default fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
"\tESC - quit the program\n"
@ -47,24 +47,24 @@ static void onMouse( int event, int x, int y, int flags, void* )
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{@image|../data/fruits.jpg|}");
cv::CommandLineParser parser(argc, argv, "{@image|fruits.jpg|}");
help();
string filename = parser.get<string>("@image");
Mat img0 = imread(filename, -1);
string filename = samples::findFile(parser.get<string>("@image"));
Mat img0 = imread(filename, IMREAD_COLOR);
if(img0.empty())
{
cout << "Couldn't open the image " << filename << ". Usage: inpaint <image_name>\n" << endl;
return 0;
}
namedWindow( "image", 1 );
namedWindow("image", WINDOW_AUTOSIZE);
img = img0.clone();
inpaintMask = Mat::zeros(img.size(), CV_8U);
imshow("image", img);
setMouseCallback( "image", onMouse, 0 );
setMouseCallback( "image", onMouse, NULL);
for(;;)
{

@ -25,39 +25,46 @@ int smoothType = GAUSSIAN;
int main( int argc, char** argv )
{
VideoCapture cap;
cv::CommandLineParser parser(argc, argv, "{ c | 0 | }{ p | | }");
help();
if( parser.get<string>("c").size() == 1 && isdigit(parser.get<string>("c")[0]) )
VideoCapture cap;
string camera = parser.get<string>("c");
if (camera.size() == 1 && isdigit(camera[0]))
cap.open(parser.get<int>("c"));
else
cap.open(parser.get<string>("c"));
if( cap.isOpened() )
cap.open(samples::findFileOrKeep(camera));
if (!cap.isOpened())
{
cerr << "Can't open camera/video stream: " << camera << endl;
return 1;
}
cout << "Video " << parser.get<string>("c") <<
": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;
if( parser.has("p") )
int pos = 0;
if (parser.has("p"))
{
int pos = parser.get<int>("p");
pos = parser.get<int>("p");
}
if (!parser.check())
{
parser.printErrors();
return -1;
}
cout << "seeking to frame #" << pos << endl;
cap.set(CAP_PROP_POS_FRAMES, pos);
}
if( !cap.isOpened() )
if (pos != 0)
{
cout << "Could not initialize capturing...\n";
return -1;
cout << "seeking to frame #" << pos << endl;
if (!cap.set(CAP_PROP_POS_FRAMES, pos))
{
cerr << "ERROR: seekeing is not supported" << endl;
}
}
namedWindow( "Laplacian", 0 );
createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );
namedWindow("Laplacian", WINDOW_AUTOSIZE);
createTrackbar("Sigma", "Laplacian", &sigma, 15, 0);
Mat smoothed, laplace, result;

@ -520,13 +520,13 @@ int main( int argc, char *argv[] )
string data_filename;
int method = 0;
cv::CommandLineParser parser(argc, argv, "{data|../data/letter-recognition.data|}{save||}{load||}{boost||}"
cv::CommandLineParser parser(argc, argv, "{data|letter-recognition.data|}{save||}{load||}{boost||}"
"{mlp||}{knn knearest||}{nbayes||}{svm||}");
data_filename = parser.get<string>("data");
data_filename = samples::findFile(parser.get<string>("data"));
if (parser.has("save"))
filename_to_save = parser.get<string>("save");
if (parser.has("load"))
filename_to_load = parser.get<string>("load");
filename_to_load = samples::findFile(parser.get<string>("load"));
if (parser.has("boost"))
method = 1;
else if (parser.has("mlp"))

@ -7,10 +7,10 @@
using namespace std;
using namespace cv;
void getMatWithQRCodeContour(Mat &color_image, vector<Point> transform);
void getMatWithFPS(Mat &color_image, double fps);
int liveQRCodeDetect();
int showImageQRCodeDetect(string in, string out);
static void drawQRCodeContour(Mat &color_image, vector<Point> transform);
static void drawFPS(Mat &color_image, double fps);
static int liveQRCodeDetect(const string& out_file);
static int imageQRCodeDetect(const string& in_file, const string& out_file);
int main(int argc, char *argv[])
{
@ -28,7 +28,9 @@ int main(int argc, char *argv[])
}
string in_file_name = cmd_parser.get<string>("in"); // input path to image
string out_file_name = cmd_parser.get<string>("out"); // output path to image
string out_file_name;
if (cmd_parser.has("out"))
out_file_name = cmd_parser.get<string>("out"); // output path to image
if (!cmd_parser.check())
{
@ -39,16 +41,16 @@ int main(int argc, char *argv[])
int return_code = 0;
if (in_file_name.empty())
{
return_code = liveQRCodeDetect();
return_code = liveQRCodeDetect(out_file_name);
}
else
{
return_code = showImageQRCodeDetect(in_file_name, out_file_name);
return_code = imageQRCodeDetect(samples::findFile(in_file_name), out_file_name);
}
return return_code;
}
void getMatWithQRCodeContour(Mat &color_image, vector<Point> transform)
void drawQRCodeContour(Mat &color_image, vector<Point> transform)
{
if (!transform.empty())
{
@ -70,19 +72,19 @@ void getMatWithQRCodeContour(Mat &color_image, vector<Point> transform)
}
}
void getMatWithFPS(Mat &color_image, double fps)
void drawFPS(Mat &color_image, double fps)
{
ostringstream convert;
convert << cvRound(fps) << " FPS.";
convert << cvRound(fps) << " FPS (QR detection)";
putText(color_image, convert.str(), Point(25, 25), FONT_HERSHEY_DUPLEX, 1, Scalar(0, 0, 255), 2);
}
int liveQRCodeDetect()
int liveQRCodeDetect(const string& out_file)
{
VideoCapture cap(0);
if(!cap.isOpened())
{
cout << "Cannot open a camera" << '\n';
cout << "Cannot open a camera" << endl;
return -4;
}
@ -94,7 +96,11 @@ int liveQRCodeDetect()
string decode_info;
vector<Point> transform;
cap >> frame;
if(frame.empty()) { break; }
if (frame.empty())
{
cout << "End of video stream" << endl;
break;
}
cvtColor(frame, src, COLOR_BGR2GRAY);
total.start();
@ -102,24 +108,30 @@ int liveQRCodeDetect()
if (result_detection)
{
decode_info = qrcode.decode(src, transform, straight_barcode);
if (!decode_info.empty()) { cout << decode_info << '\n'; }
if (!decode_info.empty()) { cout << decode_info << endl; }
}
total.stop();
double fps = 1 / total.getTimeSec();
total.reset();
if (result_detection) { getMatWithQRCodeContour(frame, transform); }
getMatWithFPS(frame, fps);
if (result_detection) { drawQRCodeContour(frame, transform); }
drawFPS(frame, fps);
imshow("Live QR code detector", frame);
if( waitKey(30) > 0 ) { break; }
char c = (char)waitKey(30);
if (c == 27)
break;
if (c == ' ' && !out_file.empty())
imwrite(out_file, frame); // TODO write original frame too
}
return 0;
}
int showImageQRCodeDetect(string in, string out)
int imageQRCodeDetect(const string& in_file, const string& out_file)
{
Mat src = imread(in, IMREAD_GRAYSCALE), straight_barcode;
Mat color_src = imread(in_file, IMREAD_COLOR), src;
cvtColor(color_src, src, COLOR_BGR2GRAY);
Mat straight_barcode;
string decoded_info;
vector<Point> transform;
const int count_experiments = 10;
@ -135,54 +147,40 @@ int showImageQRCodeDetect(string in, string out)
total.stop();
transform_time += total.getTimeSec();
total.reset();
if (!result_detection) { break; }
if (!result_detection)
continue;
total.start();
decoded_info = qrcode.decode(src, transform, straight_barcode);
total.stop();
transform_time += total.getTimeSec();
total.reset();
if (decoded_info.empty()) { break; }
}
double fps = count_experiments / transform_time;
if (!result_detection) { cout << "QR code not found\n"; return -2; }
if (decoded_info.empty()) { cout << "QR code cannot be decoded\n"; return -3; }
if (!result_detection)
cout << "QR code not found" << endl;
if (decoded_info.empty())
cout << "QR code cannot be decoded" << endl;
Mat color_src = imread(in);
getMatWithQRCodeContour(color_src, transform);
getMatWithFPS(color_src, fps);
drawQRCodeContour(color_src, transform);
drawFPS(color_src, fps);
for(;;)
{
imshow("Detect QR code on image", color_src);
if( waitKey(30) > 0 ) { break; }
}
cout << "Input image file path: " << in_file << endl;
cout << "Output image file path: " << out_file << endl;
cout << "Size: " << color_src.size() << endl;
cout << "FPS: " << fps << endl;
cout << "Decoded info: " << decoded_info << endl;
if (!out.empty())
{
getMatWithQRCodeContour(color_src, transform);
getMatWithFPS(color_src, fps);
cout << "Input image file path: " << in << '\n';
cout << "Output image file path: " << out << '\n';
cout << "Size: " << color_src.size() << '\n';
cout << "FPS: " << fps << '\n';
cout << "Decoded info: " << decoded_info << '\n';
vector<int> compression_params;
compression_params.push_back(IMWRITE_PNG_COMPRESSION);
compression_params.push_back(9);
try
if (!out_file.empty())
{
imwrite(out, color_src, compression_params);
imwrite(out_file, color_src);
}
catch (const cv::Exception& ex)
for(;;)
{
cout << "Exception converting image to PNG format: ";
cout << ex.what() << '\n';
return -3;
}
imshow("Detect QR code on image", color_src);
if (waitKey(0) == 27)
break;
}
return 0;
}

@ -83,7 +83,7 @@ static float calculateAccuracyPercent(const Mat &original, const Mat &predicted)
int main()
{
const String filename = "../data/data01.xml";
const String filename = samples::findFile("data01.xml");
cout << "**********************************************************************" << endl;
cout << filename
<< " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl;

@ -9,7 +9,7 @@ using namespace cv;
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv,
"{input i|../data/building.jpg|input image}"
"{input i|building.jpg|input image}"
"{refine r|false|if true use LSD_REFINE_STD method, if false use LSD_REFINE_NONE method}"
"{canny c|false|use Canny edge detector}"
"{overlay o|false|show result on input image}"
@ -23,7 +23,7 @@ int main(int argc, char** argv)
parser.printMessage();
String filename = parser.get<String>("input");
String filename = samples::findFile(parser.get<String>("input"));
bool useRefine = parser.get<bool>("refine");
bool useCanny = parser.get<bool>("canny");
bool overlay = parser.get<bool>("overlay");

@ -8,17 +8,27 @@ using namespace cv;
int main( int argc, const char** argv )
{
CommandLineParser parser(argc, argv,
"{ i | ../data/lena_tmpl.jpg |image name }"
"{ t | ../data/tmpl.png |template name }"
"{ m | ../data/mask.png |mask name }"
"{ i | lena_tmpl.jpg |image name }"
"{ t | tmpl.png |template name }"
"{ m | mask.png |mask name }"
"{ cm| 3 |comparison method }");
cout << "This program demonstrates the use of template matching with mask.\n\n";
cout << "This program demonstrates the use of template matching with mask." << endl
<< endl
<< "Available methods: https://docs.opencv.org/3.4/df/dfb/group__imgproc__object.html#ga3a7850640f1fe1f58fe91a2d7583695d" << endl
<< " TM_SQDIFF = " << (int)TM_SQDIFF << endl
<< " TM_SQDIFF_NORMED = " << (int)TM_SQDIFF_NORMED << endl
<< " TM_CCORR = " << (int)TM_CCORR << endl
<< " TM_CCORR_NORMED = " << (int)TM_CCORR_NORMED << endl
<< " TM_CCOEFF = " << (int)TM_CCOEFF << endl
<< " TM_CCOEFF_NORMED = " << (int)TM_CCOEFF_NORMED << endl
<< endl;
parser.printMessage();
string filename = parser.get<string>("i");
string tmplname = parser.get<string>("t");
string maskname = parser.get<string>("m");
string filename = samples::findFile(parser.get<string>("i"));
string tmplname = samples::findFile(parser.get<string>("t"));
string maskname = samples::findFile(parser.get<string>("m"));
Mat img = imread(filename);
Mat tmpl = imread(tmplname);
Mat mask = imread(maskname);

@ -12,7 +12,7 @@ static void help()
{
cout << "\n This program demonstrates how to detect compute and match ORB BRISK and AKAZE descriptors \n"
"Usage: \n"
" ./matchmethod_orb_akaze_brisk --image1=<image1(../data/basketball1.png as default)> --image2=<image2(../data/basketball2.png as default)>\n"
" ./matchmethod_orb_akaze_brisk --image1=<image1(basketball1.png as default)> --image2=<image2(basketball2.png as default)>\n"
"Press a key when image window is active to change algorithm or descriptor";
}
@ -34,27 +34,27 @@ int main(int argc, char *argv[])
typeAlgoMatch.push_back("BruteForce-Hamming");
typeAlgoMatch.push_back("BruteForce-Hamming(2)");
cv::CommandLineParser parser(argc, argv,
"{ @image1 | ../data/basketball1.png | }"
"{ @image2 | ../data/basketball2.png | }"
"{ @image1 | basketball1.png | }"
"{ @image2 | basketball2.png | }"
"{help h ||}");
if (parser.has("help"))
{
help();
return 0;
}
fileName.push_back(parser.get<string>(0));
fileName.push_back(parser.get<string>(1));
fileName.push_back(samples::findFile(parser.get<string>(0)));
fileName.push_back(samples::findFile(parser.get<string>(1)));
Mat img1 = imread(fileName[0], IMREAD_GRAYSCALE);
Mat img2 = imread(fileName[1], IMREAD_GRAYSCALE);
if (img1.rows*img1.cols <= 0)
if (img1.empty())
{
cout << "Image " << fileName[0] << " is empty or cannot be found\n";
return(0);
cerr << "Image " << fileName[0] << " is empty or cannot be found" << endl;
return 1;
}
if (img2.rows*img2.cols <= 0)
if (img2.empty())
{
cout << "Image " << fileName[1] << " is empty or cannot be found\n";
return(0);
cerr << "Image " << fileName[1] << " is empty or cannot be found" << endl;
return 1;
}
vector<double> desMethCmp;
@ -157,12 +157,12 @@ int main(int argc, char *argv[])
}
catch (const Exception& e)
{
cerr << "Exception: " << e.what() << endl;
cout << "Feature : " << *itDesc << "\n";
if (itMatcher != typeAlgoMatch.end())
{
cout << "Matcher : " << *itMatcher << "\n";
}
cout << e.msg << endl;
}
}
int i=0;

@ -18,7 +18,7 @@ int main( int /*argc*/, char** /*argv*/ )
{
help();
Mat img(500, 500, CV_8UC3);
Mat img(500, 500, CV_8UC3, Scalar::all(0));
RNG& rng = theRNG();
for(;;)

@ -33,8 +33,8 @@ int erode_dilate_pos = 0;
// callback function for open/close trackbar
static void OpenClose(int, void*)
{
int n = open_close_pos - max_iters;
int an = n > 0 ? n : -n;
int n = open_close_pos;
int an = abs(n);
Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );
if( n < 0 )
morphologyEx(src, dst, MORPH_OPEN, element);
@ -46,8 +46,8 @@ static void OpenClose(int, void*)
// callback function for erode/dilate trackbar
static void ErodeDilate(int, void*)
{
int n = erode_dilate_pos - max_iters;
int an = n > 0 ? n : -n;
int n = erode_dilate_pos;
int an = abs(n);
Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );
if( n < 0 )
erode(src, dst, element);
@ -59,13 +59,13 @@ static void ErodeDilate(int, void*)
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{help h||}{ @image | ../data/baboon.jpg | }");
cv::CommandLineParser parser(argc, argv, "{help h||}{ @image | baboon.jpg | }");
if (parser.has("help"))
{
help();
return 0;
}
std::string filename = parser.get<std::string>("@image");
std::string filename = samples::findFile(parser.get<std::string>("@image"));
if( (src = imread(filename,IMREAD_COLOR)).empty() )
{
help();
@ -78,7 +78,14 @@ int main( int argc, char** argv )
open_close_pos = erode_dilate_pos = max_iters;
createTrackbar("iterations", "Open/Close",&open_close_pos,max_iters*2+1,OpenClose);
setTrackbarMin("iterations", "Open/Close", -max_iters);
setTrackbarMax("iterations", "Open/Close", max_iters);
setTrackbarPos("iterations", "Open/Close", 0);
createTrackbar("iterations", "Erode/Dilate",&erode_dilate_pos,max_iters*2+1,ErodeDilate);
setTrackbarMin("iterations", "Erode/Dilate", -max_iters);
setTrackbarMax("iterations", "Erode/Dilate", max_iters);
setTrackbarPos("iterations", "Erode/Dilate", 0);
for(;;)
{

@ -28,26 +28,22 @@ using namespace cv;
int main(int argc, char* argv[])
{
cv::CommandLineParser parser(argc, argv, "{help h||show help message}{@image|../data/lena.jpg|input image}");
cv::CommandLineParser parser(argc, argv, "{help h||show help message}{@image|lena.jpg|input image}");
if (parser.has("help"))
{
parser.printMessage();
exit(0);
}
if (parser.get<string>("@image").empty())
{
parser.printMessage();
exit(0);
return 0;
}
string filename = samples::findFile(parser.get<string>("@image"));
Mat I = imread(parser.get<string>("@image"));
Mat I = imread(filename);
int num,type;
if(I.empty())
{
cout << "Image not found" << endl;
exit(0);
return 1;
}
cout << endl;

@ -72,7 +72,10 @@ int main(int argc, char** argv)
if (file.empty())
cap.open(camera);
else
cap.open(file.c_str());
{
file = samples::findFileOrKeep(file);
cap.open(file);
}
if (!cap.isOpened())
{
cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;

@ -2,11 +2,6 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/ml.hpp"
#include "opencv2/highgui.hpp"
#ifdef HAVE_OPENCV_OCL
#define _OCL_KNN_ 1 // select whether using ocl::KNN method or not, default is using
#define _OCL_SVM_ 1 // select whether using ocl::svm method or not, default is using
#include "opencv2/ocl/ocl.hpp"
#endif
#include <stdio.h>

@ -24,7 +24,7 @@ int main( int argc, char** argv )
if( arg.size() == 1 && isdigit(arg[0]) )
capture.open( arg[0] - '0' );
else
capture.open( arg.c_str() );
capture.open(samples::findFileOrKeep(arg));
if( !capture.isOpened() )
{

@ -73,7 +73,7 @@ int main(int argc, char** argv)
if (input.empty())
cap.open(0);
else
cap.open(input);
cap.open(samples::findFileOrKeep(input));
if( !cap.isOpened() )
{

@ -416,7 +416,7 @@ int main(int argc, char** argv)
if ( parser.get<string>("@input").size() == 1 && isdigit(parser.get<string>("@input")[0]) )
cameraId = parser.get<int>("@input");
else
inputName = parser.get<string>("@input");
inputName = samples::findFileOrKeep(parser.get<string>("@input"));
if (!parser.check())
{
puts(help);

@ -16,7 +16,7 @@ static void help()
" [--try-flip]\n"
" [video_filename|camera_index]\n\n"
"Example:\n"
"./smiledetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --smile-cascade=\"../../data/haarcascades/haarcascade_smile.xml\" --scale=2.0\n\n"
"./smiledetect --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --smile-cascade=\"data/haarcascades/haarcascade_smile.xml\" --scale=2.0\n\n"
"During execution:\n\tHit any key to quit.\n"
"\tUsing OpenCV version " << CV_VERSION << "\n" << endl;
}
@ -41,16 +41,16 @@ int main( int argc, const char** argv )
double scale;
cv::CommandLineParser parser(argc, argv,
"{help h||}{scale|1|}"
"{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
"{smile-cascade|../../data/haarcascades/haarcascade_smile.xml|}"
"{cascade|data/haarcascades/haarcascade_frontalface_alt.xml|}"
"{smile-cascade|data/haarcascades/haarcascade_smile.xml|}"
"{try-flip||}{@input||}");
if (parser.has("help"))
{
help();
return 0;
}
cascadeName = parser.get<string>("cascade");
nestedCascadeName = parser.get<string>("smile-cascade");
cascadeName = samples::findFile(parser.get<string>("cascade"));
nestedCascadeName = samples::findFile(parser.get<string>("smile-cascade"));
tryflip = parser.has("try-flip");
inputName = parser.get<string>("@input");
scale = parser.get<int>("scale");
@ -81,6 +81,7 @@ int main( int argc, const char** argv )
}
else if( inputName.size() )
{
inputName = samples::findFileOrKeep(inputName);
if(!capture.open( inputName ))
cout << "Could not read " << inputName << endl;
}

@ -138,8 +138,8 @@ static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
int main(int argc, char** argv)
{
static const char* names[] = { "../data/pic1.png", "../data/pic2.png", "../data/pic3.png",
"../data/pic4.png", "../data/pic5.png", "../data/pic6.png", 0 };
static const char* names[] = { "data/pic1.png", "data/pic2.png", "data/pic3.png",
"data/pic4.png", "data/pic5.png", "data/pic6.png", 0 };
help(argv[0]);
if( argc > 1)
@ -152,10 +152,11 @@ int main(int argc, char** argv)
for( int i = 0; names[i] != 0; i++ )
{
Mat image = imread(names[i], IMREAD_COLOR);
string filename = samples::findFile(names[i]);
Mat image = imread(filename, IMREAD_COLOR);
if( image.empty() )
{
cout << "Couldn't load " << names[i] << endl;
cout << "Couldn't load " << filename << endl;
continue;
}

@ -18,7 +18,6 @@
Homepage: http://opencv.org
Online docs: http://docs.opencv.org
Q&A forum: http://answers.opencv.org
Issue tracker: http://code.opencv.org
GitHub: https://github.com/opencv/opencv/
************************************************** */
@ -46,11 +45,11 @@ static int print_help()
" on the chessboards, and a flag: useCalibrated for \n"
" calibrated (0) or\n"
" uncalibrated \n"
" (1: use cvStereoCalibrate(), 2: compute fundamental\n"
" (1: use stereoCalibrate(), 2: compute fundamental\n"
" matrix separately) stereo. \n"
" Calibrate the cameras and display the\n"
" rectified results along with the computed disparity images. \n" << endl;
cout << "Usage:\n ./stereo_calib -w=<board_width default=9> -h=<board_height default=6> -s=<square_size default=1.0> <image list XML/YML file default=../data/stereo_calib.xml>\n" << endl;
cout << "Usage:\n ./stereo_calib -w=<board_width default=9> -h=<board_height default=6> -s=<square_size default=1.0> <image list XML/YML file default=stereo_calib.xml>\n" << endl;
return 0;
}
@ -347,11 +346,11 @@ int main(int argc, char** argv)
Size boardSize;
string imagelistfn;
bool showRectified;
cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|../data/stereo_calib.xml|}");
cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|stereo_calib.xml|}");
if (parser.has("help"))
return print_help();
showRectified = !parser.has("nr");
imagelistfn = parser.get<string>("@input");
imagelistfn = samples::findFile(parser.get<string>("@input"));
boardSize.width = parser.get<int>("w");
boardSize.height = parser.get<int>("h");
float squareSize = parser.get<float>("s");

@ -65,8 +65,8 @@ int main(int argc, char** argv)
print_help();
return 0;
}
img1_filename = parser.get<std::string>(0);
img2_filename = parser.get<std::string>(1);
img1_filename = samples::findFile(parser.get<std::string>(0));
img2_filename = samples::findFile(parser.get<std::string>(1));
if (parser.has("algorithm"))
{
std::string _alg = parser.get<std::string>("algorithm");

@ -112,7 +112,7 @@ int parseCmdArgs(int argc, char** argv)
}
else
{
Mat img = imread(argv[i]);
Mat img = imread(samples::findFile(argv[i]));
if (img.empty())
{
cout << "Can't read image '" << argv[i] << "'\n";

@ -405,7 +405,7 @@ int main(int argc, char* argv[])
for (int i = 0; i < num_images; ++i)
{
full_img = imread(img_names[i]);
full_img = imread(samples::findFile(img_names[i]));
full_img_sizes[i] = full_img.size();
if (full_img.empty())
@ -727,7 +727,7 @@ int main(int argc, char* argv[])
LOGLN("Compositing image #" << indices[img_idx]+1);
// Read image and resize it if necessary
full_img = imread(img_names[img_idx]);
full_img = imread(samples::findFile(img_names[img_idx]));
if (!is_compose_scale_set)
{
if (compose_megapix > 0)

@ -2,7 +2,7 @@
#include <iostream>
#include <time.h>
// OpenCV
#include <opencv2//core.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

@ -164,8 +164,8 @@ int main(int argc, const char* argv[])
return -1;
}
Mat frame0 = imread(frame0_name, IMREAD_GRAYSCALE);
Mat frame1 = imread(frame1_name, IMREAD_GRAYSCALE);
Mat frame0 = imread(samples::findFile(frame0_name), IMREAD_GRAYSCALE);
Mat frame1 = imread(samples::findFile(frame1_name), IMREAD_GRAYSCALE);
if (frame0.empty())
{

@ -20,7 +20,7 @@ static void help(char** argv)
cout << "\nThis is a demo program shows how perspective transformation applied on an image, \n"
"Using OpenCV version " << CV_VERSION << endl;
cout << "\nUsage:\n" << argv[0] << " [image_name -- Default ../data/right.jpg]\n" << endl;
cout << "\nUsage:\n" << argv[0] << " [image_name -- Default data/right.jpg]\n" << endl;
cout << "\nHot keys: \n"
"\tESC, q - quit the program\n"
@ -45,9 +45,9 @@ bool validation_needed = true;
int main(int argc, char** argv)
{
help(argv);
CommandLineParser parser(argc, argv, "{@input| ../data/right.jpg |}");
CommandLineParser parser(argc, argv, "{@input| data/right.jpg |}");
string filename = parser.get<string>("@input");
string filename = samples::findFile(parser.get<string>("@input"));
Mat original_image = imread( filename );
Mat image;

@ -13,7 +13,7 @@ static void help()
{
cout << "\nThis program demonstrates the famous watershed segmentation algorithm in OpenCV: watershed()\n"
"Usage:\n"
"./watershed [image_name -- default is ../data/fruits.jpg]\n" << endl;
"./watershed [image_name -- default is fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
@ -48,18 +48,18 @@ static void onMouse( int event, int x, int y, int flags, void* )
int main( int argc, char** argv )
{
cv::CommandLineParser parser(argc, argv, "{help h | | }{ @input | ../data/fruits.jpg | }");
cv::CommandLineParser parser(argc, argv, "{help h | | }{ @input | fruits.jpg | }");
if (parser.has("help"))
{
help();
return 0;
}
string filename = parser.get<string>("@input");
string filename = samples::findFile(parser.get<string>("@input"));
Mat img0 = imread(filename, 1), imgGray;
if( img0.empty() )
{
cout << "Couldn'g open image " << filename << ". Usage: watershed <image_name>\n";
cout << "Couldn't open image " << filename << ". Usage: watershed <image_name>\n";
return 0;
}
help();

@ -0,0 +1,15 @@
%YAML:1.0
images:
- left01.jpg
- left02.jpg
- left03.jpg
- left04.jpg
- left05.jpg
- left06.jpg
- left07.jpg
- left08.jpg
- left09.jpg
- left11.jpg
- left12.jpg
- left13.jpg
- left14.jpg
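The samples/data/calibration.yml added above is an image list in the format consumed by calibration.cpp's readStringList(); per the hunk earlier in this commit, each entry is resolved first relative to the directory of the list file and then via cv::samples::findFile(). A small illustrative sketch (not part of the commit) of reading that list the same way:

    // Sketch only: load the image list shipped as samples/data/calibration.yml.
    #include <iostream>
    #include <string>
    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/core/utility.hpp>

    int main()
    {
        std::string listFile = cv::samples::findFile("calibration.yml");
        cv::FileStorage fs(listFile, cv::FileStorage::READ);
        if (!fs.isOpened())
            return 1;
        cv::FileNode n = fs.getFirstTopLevelNode();  // the "images" sequence
        std::vector<std::string> images;
        for (cv::FileNodeIterator it = n.begin(); it != n.end(); ++it)
        {
            // Each entry is a bare name such as "left01.jpg"; resolve it against
            // the samples data search path (calibration.cpp additionally tries
            // the directory of the list file first). required=false returns an
            // empty string instead of throwing when the file is missing.
            images.push_back(cv::samples::findFile((std::string)*it, false));
        }
        std::cout << "found " << images.size() << " calibration images" << std::endl;
        return 0;
    }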

@ -43,10 +43,10 @@ if __name__ == '__main__':
cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")
cascade = cv.CascadeClassifier(cascade_fn)
nested = cv.CascadeClassifier(nested_fn)
cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))
cam = create_capture(video_src, fallback='synth:bg=../data/lena.jpg:noise=0.05')
cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))
while True:
ret, img = cam.read()
