Merge pull request #1722 from StevenPuttemans:feature_1631_second

commit 7703b63cae (pull/1828/head^2)
Author: Roman Donchenko, committed by OpenCV Buildbot, 11 years ago
  1. samples/cpp/3calibration.cpp (2 changed lines)
  2. samples/cpp/OpenEXRimages_HDR_Retina_toneMapping.cpp (6 changed lines)
  3. samples/cpp/OpenEXRimages_HDR_Retina_toneMapping_video.cpp (4 changed lines)
  4. samples/cpp/Qt_sample/main.cpp (55 changed lines)
  5. samples/cpp/bagofwords_classification.cpp (4 changed lines)
  6. samples/cpp/bgfg_segm.cpp (8 changed lines)
  7. samples/cpp/build3dmodel.cpp (2 changed lines)
  8. samples/cpp/calibration.cpp (2 changed lines)
  9. samples/cpp/camshiftdemo.cpp (4 changed lines)
  10. samples/cpp/chamfer.cpp (2 changed lines)
  11. samples/cpp/descriptor_extractor_matcher.cpp (4 changed lines)
  12. samples/cpp/detection_based_tracker_sample.cpp (2 changed lines)
  13. samples/cpp/distrans.cpp (2 changed lines)
  14. samples/cpp/drawing.cpp (4 changed lines)
  15. samples/cpp/edge.cpp (2 changed lines)
  16. samples/cpp/em.cpp (2 changed lines)
  17. samples/cpp/fback.cpp (6 changed lines)
  18. samples/cpp/ffilldemo.cpp (6 changed lines)
  19. samples/cpp/generic_descriptor_match.cpp (2 changed lines)
  20. samples/cpp/grabcut.cpp (8 changed lines)
  21. samples/cpp/houghcircles.cpp (2 changed lines)
  22. samples/cpp/houghlines.cpp (2 changed lines)
  23. samples/cpp/image.cpp (2 changed lines)
  24. samples/cpp/lkdemo.cpp (2 changed lines)
  25. samples/cpp/meanshift_segmentation.cpp (2 changed lines)
  26. samples/cpp/minarea.cpp (2 changed lines)
  27. samples/cpp/morphology2.cpp (2 changed lines)
  28. samples/cpp/pca.cpp (2 changed lines)
  29. samples/cpp/phase_corr.cpp (2 changed lines)
  30. samples/cpp/points_classifier.cpp (14 changed lines)
  31. samples/cpp/rgbdodometry.cpp (4 changed lines)
  32. samples/cpp/segment_objects.cpp (2 changed lines)
  33. samples/cpp/stereo_calib.cpp (4 changed lines)
  34. samples/cpp/tutorial_code/HighGUI/BasicLinearTransformsTrackbar.cpp (17 changed lines)
  35. samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp (6 changed lines)
  36. samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp (4 changed lines)
  37. samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp (4 changed lines)
  38. samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp (4 changed lines)
  39. samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp (2 changed lines)
  40. samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp (46 changed lines)
  41. samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp (6 changed lines)
  42. samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp (10 changed lines)
  43. samples/cpp/tutorial_code/ImgProc/Pyramids.cpp (2 changed lines)
  44. samples/cpp/tutorial_code/ImgProc/Smoothing.cpp (4 changed lines)
  45. samples/cpp/tutorial_code/ImgProc/Threshold.cpp (12 changed lines)
  46. samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp (4 changed lines)
  47. samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp (6 changed lines)
  48. samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp (4 changed lines)
  49. samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp (6 changed lines)
  50. samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp (4 changed lines)
  51. samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp (2 changed lines)
  52. samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp (4 changed lines)
  53. samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp (2 changed lines)
  54. samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp (2 changed lines)
  55. samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp (6 changed lines)
  56. samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp (6 changed lines)
  57. samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp (6 changed lines)
  58. samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp (8 changed lines)
  59. samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp (6 changed lines)
  60. samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp (4 changed lines)
  61. samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp (6 changed lines)
  62. samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp (6 changed lines)
  63. samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp (6 changed lines)
  64. samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp (6 changed lines)
  65. samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp (2 changed lines)
  66. samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp (2 changed lines)
  67. samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp (4 changed lines)
  68. samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp (4 changed lines)
  69. samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp (4 changed lines)
  70. samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp (8 changed lines)
  71. samples/cpp/tutorial_code/features2D/SURF_Homography.cpp (4 changed lines)
  72. samples/cpp/tutorial_code/introduction/display_image/display_image.cpp (2 changed lines)
  73. samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/Test.cpp (10 changed lines)
  74. samples/cpp/tutorial_code/objectDetection/objectDetection.cpp (10 changed lines)
  75. samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp (10 changed lines)
  76. samples/cpp/video_dmtx.cpp (2 changed lines)
  77. samples/cpp/video_homography.cpp (2 changed lines)
  78. samples/cpp/watershed.cpp (4 changed lines)
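
Most hunks below apply the same mechanical substitution: obsolete C-API constants (CV_BGR2GRAY, CV_WINDOW_AUTOSIZE, CV_FONT_HERSHEY_*, CV_RGB(...)) become their C++-style counterparts (COLOR_BGR2GRAY, WINDOW_AUTOSIZE, FONT_HERSHEY_*, Scalar(...)), and leftover C highgui calls (cvNamedWindow, cvWaitKey, cvMoveWindow) become namedWindow, waitKey, moveWindow. A minimal before/after sketch of that pattern, assuming OpenCV 2.4-era headers; the file and image names are placeholders, not taken from any one sample:

    // migration_sketch.cpp -- hypothetical illustration of the substitution
    // applied throughout this patch, not verbatim from any single sample.
    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    int main(int argc, char** argv)
    {
        Mat img = imread(argc > 1 ? argv[1] : "lena.png", 1);
        if (img.empty()) return -1;

        Mat gray;
        // old: cvtColor(img, gray, CV_BGR2GRAY);
        cvtColor(img, gray, COLOR_BGR2GRAY);          // new-style colour-conversion enum

        // old: namedWindow("gray", CV_WINDOW_AUTOSIZE);
        namedWindow("gray", WINDOW_AUTOSIZE);         // new-style window flag

        // old: putText(img, "hi", Point(10,25), CV_FONT_HERSHEY_SIMPLEX, 0.8, CV_RGB(255,0,0), 2);
        putText(img, "hi", Point(10,25), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0,0,255), 2);

        imshow("gray", gray);
        waitKey(0);                                   // old: cvWaitKey(0)
        return 0;
    }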

@@ -271,7 +271,7 @@ int main( int argc, char** argv )
         {
             vector<Point2f> ptvec;
             imageSize = view.size();
-            cvtColor(view, viewGray, CV_BGR2GRAY);
+            cvtColor(view, viewGray, COLOR_BGR2GRAY);
             bool found = findChessboardCorners( view, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH );
             drawChessboardCorners( view, boardSize, Mat(ptvec), found );

@ -1,10 +1,10 @@
//============================================================================ //============================================================================
// Name : HighDynamicRange_RetinaCompression.cpp // Name : OpenEXRimages_HDR_Retina_toneMapping.cpp
// Author : Alexandre Benoit (benoit.alexandre.vision@gmail.com) // Author : Alexandre Benoit (benoit.alexandre.vision@gmail.com)
// Version : 0.1 // Version : 0.1
// Copyright : Alexandre Benoit, LISTIC Lab, july 2011 // Copyright : Alexandre Benoit, LISTIC Lab, july 2011
// Description : HighDynamicRange compression (tone mapping) with the help of the Gipsa/Listic's retina in C++, Ansi-style // Description : HighDynamicRange retina tone mapping with the help of the Gipsa/Listic's retina in C++, Ansi-style
//============================================================================ //============================================================================
#include <iostream> #include <iostream>
@ -69,7 +69,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
{ {
cv::Mat rgbIntImg; cv::Mat rgbIntImg;
outputMat.convertTo(rgbIntImg, CV_8UC3); outputMat.convertTo(rgbIntImg, CV_8UC3);
cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY); cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
} }
// get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation // get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation

@@ -4,7 +4,7 @@
 // Author      : Alexandre Benoit (benoit.alexandre.vision@gmail.com)
 // Version     : 0.2
 // Copyright   : Alexandre Benoit, LISTIC Lab, december 2011
-// Description : HighDynamicRange compression (tone mapping) for image sequences with the help of the Gipsa/Listic's retina in C++, Ansi-style
+// Description : HighDynamicRange retina tone mapping for image sequences with the help of the Gipsa/Listic's retina in C++, Ansi-style
 // Known issues: the input OpenEXR sequences can have bad computed pixels that should be removed
 //               => a simple method consists of cutting histogram edges (a slider for this on the UI is provided)
 //               => however, in image sequences, this histogramm cut must be done in an elegant way from frame to frame... still not done...
@@ -92,7 +92,7 @@ static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, con
     {
         cv::Mat rgbIntImg;
         normalisedImage.convertTo(rgbIntImg, CV_8UC3);
-        cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
+        cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
     }
     // get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation

@@ -4,7 +4,7 @@
 #include <iostream>
 #include <vector>
-#include <opencv/highgui.h>
+#include <opencv2/highgui/highgui.hpp>
 #if defined WIN32 || defined _WIN32 || defined WINCE
     #include <windows.h>
@@ -20,8 +20,7 @@
     #include <GL/gl.h>
 #endif
-#include <opencv/cxcore.h>
-#include <opencv/cv.h>
+#include <opencv2/core/core.hpp>
 using namespace std;
 using namespace cv;
@@ -111,12 +110,12 @@ static void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)
     modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
 }
-static void foundCorners(vector<CvPoint2D32f> *srcImagePoints,IplImage* source, IplImage* grayImage)
+static void foundCorners(vector<CvPoint2D32f> *srcImagePoints, const Mat& source, Mat& grayImage)
 {
-    cvCvtColor(source,grayImage,CV_RGB2GRAY);
-    cvSmooth( grayImage, grayImage,CV_GAUSSIAN,11);
-    cvNormalize(grayImage, grayImage, 0, 255, CV_MINMAX);
-    cvThreshold( grayImage, grayImage, 26, 255, CV_THRESH_BINARY_INV);//25
+    cvtColor(source, grayImage, COLOR_RGB2GRAY);
+    GaussianBlur(grayImage, grayImage, Size(11,11), 0, 0);
+    normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
+    threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25
     Mat MgrayImage = grayImage;
     //For debug
@@ -189,8 +188,8 @@ static void foundCorners(vector<CvPoint2D32f> *srcImagePoints,IplImage* source,
     for(size_t i = 0 ; i<srcImagePoints_temp.size(); i++ )
     {
         ss<<i;
-        circle(Msource,srcImagePoints->at(i),5,CV_RGB(255,0,0));
-        putText( Msource, ss.str(), srcImagePoints->at(i),CV_FONT_HERSHEY_SIMPLEX,1,CV_RGB(255,0,0));
+        circle(Msource,srcImagePoints->at(i),5,Scalar(0,0,255));
+        putText(Msource,ss.str(),srcImagePoints->at(i),FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,255));
         ss.str("");
         //new coordinate system in the middle of the frame and reversed (camera coordinate system)
@@ -224,19 +223,19 @@ static void createOpenGLMatrixFrom(float *posePOSIT,const CvMatr32f &rotationMat
 int main(void)
 {
     help();
-    CvCapture* video = cvCaptureFromFile("cube4.avi");
-    CV_Assert(video);
-    IplImage* source = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,3);
-    IplImage* grayImage = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,1);
-    cvNamedWindow("original",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
-    cvNamedWindow("POSIT",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
+    VideoCapture video("cube4.avi");
+    CV_Assert(video.isOpened());
+    Mat source, grayImage;
+    video >> source;
+    namedWindow("original", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
+    namedWindow("POSIT", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
     displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
-    //For debug
-    //cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE);
     float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-    cvSetOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
+    setOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
     vector<CvPoint3D32f> modelPoints;
     initPOSIT(&modelPoints);
@@ -251,26 +250,22 @@ int main(void)
     vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));
-    while(cvWaitKey(33) != 27)
+    while(waitKey(33) != 27)
     {
-        source=cvQueryFrame(video);
-        cvShowImage("original",source);
+        video >> source;
+        imshow("original",source);
         foundCorners(&srcImagePoints,source,grayImage);
         cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
         createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);
-        cvShowImage("POSIT",source);
-        //For debug
-        //cvShowImage("tempGray",grayImage);
-        if (cvGetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO)>0.99)
-            cvSetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO,0);
+        imshow("POSIT",source);
+        if (VideoCapture::get(video,CV_CAP_PROP_POS_AVI_RATIO)>0.99)
+            VideoCapture::get(video,CV_CAP_PROP_POS_AVI_RATIO,0);
     }
-    cvDestroyAllWindows();
-    cvReleaseImage(&grayImage);
-    cvReleaseCapture(&video);
+    destroyAllWindows();
     cvReleasePOSITObject(&positObject);
     return 0;
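
The Qt_sample hunks above go further than constant renaming: the C-API capture and image types (CvCapture, IplImage) are replaced by VideoCapture and Mat, which also removes the manual cvReleaseImage/cvReleaseCapture cleanup. A condensed sketch of just that capture/display loop, assuming the sample's own cube4.avi input and omitting the POSIT-specific code:

    // capture_loop_sketch.cpp -- hedged illustration of the CvCapture/IplImage
    // to VideoCapture/Mat pattern shown above; not the full Qt_sample program.
    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    int main()
    {
        VideoCapture video("cube4.avi");              // old: cvCaptureFromFile(...)
        CV_Assert(video.isOpened());

        namedWindow("original", WINDOW_AUTOSIZE);
        Mat source, gray;
        while (waitKey(33) != 27)                     // old: cvWaitKey(33)
        {
            video >> source;                          // old: cvQueryFrame(video)
            if (source.empty()) break;
            cvtColor(source, gray, COLOR_RGB2GRAY);   // old: cvCvtColor(..., CV_RGB2GRAY)
            imshow("original", source);               // old: cvShowImage(...)
        }
        destroyAllWindows();                          // no cvReleaseImage/cvReleaseCapture needed
        return 0;
    }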

@@ -54,10 +54,6 @@ static void help(char** argv)
         << "\n";
 }
 static void makeDir( const string& dir )
 {
 #if defined WIN32 || defined _WIN32

@@ -45,10 +45,10 @@ int main(int argc, const char** argv)
         return -1;
     }
-    namedWindow("image", CV_WINDOW_NORMAL);
-    namedWindow("foreground mask", CV_WINDOW_NORMAL);
-    namedWindow("foreground image", CV_WINDOW_NORMAL);
-    namedWindow("mean background image", CV_WINDOW_NORMAL);
+    namedWindow("image", WINDOW_NORMAL);
+    namedWindow("foreground mask", WINDOW_NORMAL);
+    namedWindow("foreground image", WINDOW_NORMAL);
+    namedWindow("mean background image", WINDOW_NORMAL);
     BackgroundSubtractorMOG2 bg_model;//(100, 3, 0.3, 5);

@@ -418,7 +418,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
     for( size_t i = 0; i < nimages; i++ )
     {
         Mat img = imread(imageList[i], 1), gray;
-        cvtColor(img, gray, CV_BGR2GRAY);
+        cvtColor(img, gray, COLOR_BGR2GRAY);
         vector<KeyPoint> keypoints;
         detector->detect(gray, keypoints);

@@ -461,7 +461,7 @@ int main( int argc, char** argv )
             flip( view, view, 0 );
         vector<Point2f> pointbuf;
-        cvtColor(view, viewGray, CV_BGR2GRAY);
+        cvtColor(view, viewGray, COLOR_BGR2GRAY);
         bool found;
         switch( pattern )

@@ -113,7 +113,7 @@ int main( int argc, const char** argv )
         if( !paused )
         {
-            cvtColor(image, hsv, CV_BGR2HSV);
+            cvtColor(image, hsv, COLOR_BGR2HSV);
             if( trackObject )
             {
@@ -163,7 +163,7 @@ int main( int argc, const char** argv )
             }
             if( backprojMode )
-                cvtColor( backproj, image, CV_GRAY2BGR );
+                cvtColor( backproj, image, COLOR_GRAY2BGR );
             ellipse( image, trackBox, Scalar(0,0,255), 3, CV_AA );
         }
     }

@@ -40,7 +40,7 @@ int main( int argc, const char** argv )
         return -1;
     }
     Mat cimg;
-    cvtColor(img, cimg, CV_GRAY2BGR);
+    cvtColor(img, cimg, COLOR_GRAY2BGR);
     // if the image and the template are not edge maps but normal grayscale images,
     // you might want to uncomment the lines below to produce the maps. You can also

@@ -208,7 +208,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
             matchesMask[i1] = 1;
         }
         // draw inliers
-        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
+        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 255, 0), Scalar(255, 0, 0), matchesMask
 #if DRAW_RICH_KEYPOINTS_MODE
                      , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
 #endif
@@ -218,7 +218,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
         // draw outliers
         for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
             matchesMask[i1] = !matchesMask[i1];
-        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask,
+        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(255, 0, 0), Scalar(0, 0, 255), matchesMask,
                      DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
 #endif
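
Note the colour-order flip in the drawMatches arguments above: CV_RGB(r,g,b) builds a Scalar in OpenCV's BGR channel order, so CV_RGB(0, 0, 255) and a hand-written Scalar(255, 0, 0) name the same blue. A small sketch of the equivalence, assuming the 2.4-era headers these samples build against (the Mat is a placeholder):

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    using namespace cv;

    int main()
    {
        Mat img(100, 100, CV_8UC3, Scalar::all(0));
        // CV_RGB(r,g,b) expands to a Scalar holding (b,g,r), so both calls draw the same red circle.
        circle(img, Point(50, 50), 5, CV_RGB(255, 0, 0));   // old macro, arguments given as R,G,B
        circle(img, Point(50, 50), 5, Scalar(0, 0, 255));   // plain Scalar, arguments given as B,G,R
        return 0;
    }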

@@ -116,7 +116,7 @@ static int test_FaceDetector(int argc, char *argv[])
         LOGD("\n\nSTEP n=%d from prev step %f ms\n\n", n, t_ms);
         m=images[n-1];
         CV_Assert(! m.empty());
-        cvtColor(m, gray, CV_BGR2GRAY);
+        cvtColor(m, gray, COLOR_BGR2GRAY);
         fd.process(gray);

@@ -128,7 +128,7 @@ int main( int argc, const char** argv )
         // Call to update the view
         onTrackbar(0, 0);
-        int c = cvWaitKey(0) & 255;
+        int c = waitKey(0) & 255;
         if( c == 27 )
             break;

@@ -157,14 +157,14 @@ int main()
             return 0;
     }
-    Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
+    Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
     Point org((width - textsize.width)/2, (height - textsize.height)/2);
     Mat image2;
     for( i = 0; i < 255; i += 2 )
     {
         image2 = image - Scalar::all(i);
-        putText(image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
+        putText(image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3,
                 Scalar(i, i, 255), 5, lineType);
         imshow(wndname, image2);

@@ -49,7 +49,7 @@ int main( int argc, const char** argv )
         return -1;
     }
     cedge.create(image.size(), image.type());
-    cvtColor(image, gray, CV_BGR2GRAY);
+    cvtColor(image, gray, COLOR_BGR2GRAY);
     // Create a window
     namedWindow("Edge map", 1);

@@ -59,7 +59,7 @@ int main( int /*argc*/, char** /*argv*/ )
     params.cov_mat_type = CvEM::COV_MAT_DIAGONAL;
     params.start_step = CvEM::START_E_STEP;
     params.means = em_model.get_means();
-    params.covs = (const CvMat**)em_model.get_covs();
+    params.covs = em_model.get_covs();
     params.weights = em_model.get_weights();
     em_model2.train( samples, Mat(), params, &labels );

@@ -42,13 +42,13 @@ int main(int, char**)
     for(;;)
     {
         cap >> frame;
-        cvtColor(frame, gray, CV_BGR2GRAY);
+        cvtColor(frame, gray, COLOR_BGR2GRAY);
         if( prevgray.data )
         {
             calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
-            cvtColor(prevgray, cflow, CV_GRAY2BGR);
-            drawOptFlowMap(flow, cflow, 16, 1.5, CV_RGB(0, 255, 0));
+            cvtColor(prevgray, cflow, COLOR_GRAY2BGR);
+            drawOptFlowMap(flow, cflow, 16, 1.5, Scalar(0, 255, 0));
             imshow("flow", cflow);
         }
         if(waitKey(30)>=0)

@@ -81,7 +81,7 @@ int main( int argc, char** argv )
     }
     help();
     image0.copyTo(image);
-    cvtColor(image0, gray, CV_BGR2GRAY);
+    cvtColor(image0, gray, COLOR_BGR2GRAY);
     mask.create(image0.rows+2, image0.cols+2, CV_8UC1);
     namedWindow( "image", 0 );
@@ -106,7 +106,7 @@ int main( int argc, char** argv )
         if( isColor )
         {
             cout << "Grayscale mode is set\n";
-            cvtColor(image0, gray, CV_BGR2GRAY);
+            cvtColor(image0, gray, COLOR_BGR2GRAY);
             mask = Scalar::all(0);
             isColor = false;
         }
@@ -135,7 +135,7 @@ int main( int argc, char** argv )
         case 'r':
             cout << "Original image is restored\n";
             image0.copyTo(image);
-            cvtColor(image, gray, CV_BGR2GRAY);
+            cvtColor(image, gray, COLOR_BGR2GRAY);
             mask = Scalar::all(0);
             break;
         case 's':

@@ -79,7 +79,7 @@ Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, cons
     for (size_t i = 0; i < features1.size(); i++)
     {
-        circle(img_corr, features1[i].pt, 3, CV_RGB(255, 0, 0));
+        circle(img_corr, features1[i].pt, 3, Scalar(0, 0, 255));
     }
     for (size_t i = 0; i < features2.size(); i++)

@@ -296,15 +296,15 @@ int main( int argc, char** argv )
     help();
     const string winName = "image";
-    cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE );
-    cvSetMouseCallback( winName.c_str(), on_mouse, 0 );
+    namedWindow( winName, WINDOW_AUTOSIZE );
+    setMouseCallback( winName, on_mouse, 0 );
     gcapp.setImageAndWinName( image, winName );
     gcapp.showImage();
     for(;;)
     {
-        int c = cvWaitKey(0);
+        int c = waitKey(0);
         switch( (char) c )
         {
         case '\x1b':
@@ -331,6 +331,6 @@ int main( int argc, char** argv )
     }
 exit_main:
-    cvDestroyWindow( winName.c_str() );
+    destroyWindow( winName );
     return 0;
 }
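
The grabcut hunk above is the recurring highgui migration: the C window, mouse-callback and key-wait calls taking const char* names become the C++ wrappers taking std::string window names. A minimal event-loop sketch, assuming a placeholder window name and a no-op mouse callback (not the real grabcut handler):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <string>
    using namespace cv;

    static void on_mouse(int /*event*/, int /*x*/, int /*y*/, int /*flags*/, void* /*param*/)
    {
        // placeholder; the real sample edits the GrabCut mask here
    }

    int main()
    {
        const std::string winName = "image";
        Mat image(400, 400, CV_8UC3, Scalar::all(128));

        namedWindow(winName, WINDOW_AUTOSIZE);     // old: cvNamedWindow(winName.c_str(), CV_WINDOW_AUTOSIZE)
        setMouseCallback(winName, on_mouse, 0);    // old: cvSetMouseCallback(winName.c_str(), ...)
        imshow(winName, image);

        for (;;)
        {
            int c = waitKey(0);                    // old: cvWaitKey(0)
            if ((char)c == '\x1b')                 // ESC quits
                break;
        }
        destroyWindow(winName);                    // old: cvDestroyWindow(winName.c_str())
        return 0;
    }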

@@ -27,7 +27,7 @@ int main(int argc, char** argv)
     Mat cimg;
     medianBlur(img, img, 5);
-    cvtColor(img, cimg, CV_GRAY2BGR);
+    cvtColor(img, cimg, COLOR_GRAY2BGR);
     vector<Vec3f> circles;
     HoughCircles(img, circles, CV_HOUGH_GRADIENT, 1, 10,

@@ -27,7 +27,7 @@ int main(int argc, char** argv)
     Mat dst, cdst;
     Canny(src, dst, 50, 200, 3);
-    cvtColor(dst, cdst, CV_GRAY2BGR);
+    cvtColor(dst, cdst, COLOR_GRAY2BGR);
 #if 0
     vector<Vec2f> lines;

@@ -109,7 +109,7 @@ int main( int argc, char** argv )
     cvtColor(img_yuv, img, CV_YCrCb2BGR);
     // this is counterpart for cvNamedWindow
-    namedWindow("image with grain", CV_WINDOW_AUTOSIZE);
+    namedWindow("image with grain", WINDOW_AUTOSIZE);
 #if DEMO_MIXED_API_USE
     // this is to demonstrate that img and iplimg really share the data - the result of the above
     // processing is stored in img and thus in iplimg too.

@@ -71,7 +71,7 @@ int main( int argc, char** argv )
             break;
         frame.copyTo(image);
-        cvtColor(image, gray, CV_BGR2GRAY);
+        cvtColor(image, gray, COLOR_BGR2GRAY);
         if( nightMode )
             image = Scalar::all(0);

@@ -65,7 +65,7 @@ int main(int argc, char** argv)
     colorRad = 10;
     maxPyrLevel = 1;
-    namedWindow( winName, CV_WINDOW_AUTOSIZE );
+    namedWindow( winName, WINDOW_AUTOSIZE );
     createTrackbar( "spatialRad", winName, &spatialRad, 80, meanShiftSegmentation );
     createTrackbar( "colorRad", winName, &colorRad, 60, meanShiftSegmentation );

@@ -54,7 +54,7 @@ int main( int /*argc*/, char** /*argv*/ )
         imshow( "rect & circle", img );
-        char key = (char)cvWaitKey();
+        char key = (char)waitKey();
         if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
             break;
     }

@@ -79,7 +79,7 @@ int main( int argc, char** argv )
         OpenClose(open_close_pos, 0);
         ErodeDilate(erode_dilate_pos, 0);
-        c = cvWaitKey(0);
+        c = waitKey(0);
         if( (char)c == 27 )
             break;

@@ -159,7 +159,7 @@ int main(int argc, char** argv)
     // init highgui window
     string winName = "Reconstruction | press 'q' to quit";
-    namedWindow(winName, CV_WINDOW_NORMAL);
+    namedWindow(winName, WINDOW_NORMAL);
     // params struct to pass to the trackbar handler
     params p;

@@ -13,7 +13,7 @@ int main(int, char* [])
     do
     {
         video >> frame;
-        cvtColor(frame, curr, CV_RGB2GRAY);
+        cvtColor(frame, curr, COLOR_RGB2GRAY);
         if(prev.empty())
         {

@@ -7,7 +7,7 @@
 using namespace std;
 using namespace cv;
-const Scalar WHITE_COLOR = CV_RGB(255,255,255);
+const Scalar WHITE_COLOR = Scalar(255,255,255);
 const string winName = "points";
 const int testStep = 5;
@@ -69,15 +69,15 @@ static void on_mouse( int event, int x, int y, int /*flags*/, void* )
         // put the text
         stringstream text;
         text << "current class " << classColors.size()-1;
-        putText( img, text.str(), Point(10,25), CV_FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
+        putText( img, text.str(), Point(10,25), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
         text.str("");
         text << "total classes " << classColors.size();
-        putText( img, text.str(), Point(10,50), CV_FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
+        putText( img, text.str(), Point(10,50), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
         text.str("");
         text << "total points " << trainedPoints.size();
-        putText(img, text.str(), cvPoint(10,75), CV_FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
+        putText(img, text.str(), Point(10,75), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
         // draw points
         for( size_t i = 0; i < trainedPoints.size(); i++ )
@@ -178,7 +178,7 @@ static void find_decision_boundary_SVM( CvSVMParams params )
     for( int i = 0; i < svmClassifier.get_support_vector_count(); i++ )
     {
         const float* supportVector = svmClassifier.get_support_vector(i);
-        circle( imgDst, Point(supportVector[0],supportVector[1]), 5, CV_RGB(255,255,255), -1 );
+        circle( imgDst, Point(supportVector[0],supportVector[1]), 5, Scalar(255,255,255), -1 );
     }
 }
@@ -526,7 +526,7 @@ int main()
     {
 #if _NBC_
         find_decision_boundary_NBC();
-        cvNamedWindow( "NormalBayesClassifier", WINDOW_AUTOSIZE );
+        namedWindow( "NormalBayesClassifier", WINDOW_AUTOSIZE );
         imshow( "NormalBayesClassifier", imgDst );
 #endif
 #if _KNN_
@@ -560,7 +560,7 @@ int main()
         params.C = 10;
         find_decision_boundary_SVM( params );
-        cvNamedWindow( "classificationSVM2", WINDOW_AUTOSIZE );
+        namedWindow( "classificationSVM2", WINDOW_AUTOSIZE );
         imshow( "classificationSVM2", imgDst );
 #endif

@@ -125,8 +125,8 @@ int main(int argc, char** argv)
     }
     Mat grayImage0, grayImage1, depthFlt0, depthFlt1/*in meters*/;
-    cvtColor( colorImage0, grayImage0, CV_BGR2GRAY );
-    cvtColor( colorImage1, grayImage1, CV_BGR2GRAY );
+    cvtColor( colorImage0, grayImage0, COLOR_BGR2GRAY );
+    cvtColor( colorImage1, grayImage1, COLOR_BGR2GRAY );
     depth0.convertTo( depthFlt0, CV_32FC1, 1./1000 );
     depth1.convertTo( depthFlt1, CV_32FC1, 1./1000 );

@@ -95,8 +95,6 @@ int main(int argc, char** argv)
         if( !tmp_frame.data )
             break;
         bgsubtractor(tmp_frame, bgmask, update_bg_model ? -1 : 0);
-        //CvMat _bgmask = bgmask;
-        //cvSegmentFGMask(&_bgmask);
         refineSegments(tmp_frame, bgmask, out_frame);
         imshow("video", tmp_frame);
         imshow("segmented", out_frame);

@@ -118,7 +118,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
         {
             cout << filename << endl;
             Mat cimg, cimg1;
-            cvtColor(img, cimg, CV_GRAY2BGR);
+            cvtColor(img, cimg, COLOR_GRAY2BGR);
             drawChessboardCorners(cimg, boardSize, corners, found);
             double sf = 640./MAX(img.rows, img.cols);
             resize(cimg, cimg1, Size(), sf, sf);
@@ -302,7 +302,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
         {
             Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
             remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
-            cvtColor(rimg, cimg, CV_GRAY2BGR);
+            cvtColor(rimg, cimg, COLOR_GRAY2BGR);
             Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
             resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
             if( useCalibrated )

@@ -24,17 +24,14 @@ Mat image;
  */
 static void on_trackbar( int, void* )
 {
     Mat new_image = Mat::zeros( image.size(), image.type() );
     for( int y = 0; y < image.rows; y++ )
-    { for( int x = 0; x < image.cols; x++ )
-      { for( int c = 0; c < 3; c++ )
-        {
-          new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
-        }
-      }
-    }
-    imshow("New Image", new_image);
+        for( int x = 0; x < image.cols; x++ )
+            for( int c = 0; c < 3; c++ )
+                new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
+    imshow("New Image", new_image);
 }

@@ -31,14 +31,14 @@ int main( int, char** argv )
     }
   /// Convert to grayscale
-  cvtColor( src, src, CV_BGR2GRAY );
+  cvtColor( src, src, COLOR_BGR2GRAY );
   /// Apply Histogram Equalization
   equalizeHist( src, dst );
   /// Display results
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
-  namedWindow( equalized_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
+  namedWindow( equalized_window, WINDOW_AUTOSIZE );
   imshow( source_window, src );
   imshow( equalized_window, dst );

@@ -33,8 +33,8 @@ int main( int, char** argv )
   templ = imread( argv[2], 1 );
   /// Create windows
-  namedWindow( image_window, CV_WINDOW_AUTOSIZE );
-  namedWindow( result_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( image_window, WINDOW_AUTOSIZE );
+  namedWindow( result_window, WINDOW_AUTOSIZE );
   /// Create Trackbar
   const char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";

@@ -28,7 +28,7 @@ int main( int, char** argv )
   /// Read the image
   src = imread( argv[1], 1 );
   /// Transform it to HSV
-  cvtColor( src, hsv, CV_BGR2HSV );
+  cvtColor( src, hsv, COLOR_BGR2HSV );
   /// Use only the Hue value
   hue.create( hsv.size(), hsv.depth() );
@@ -37,7 +37,7 @@ int main( int, char** argv )
   /// Create Trackbar to enter the number of bins
   const char* window_image = "Source image";
-  namedWindow( window_image, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_image, WINDOW_AUTOSIZE );
   createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
   Hist_and_Backproj(0, 0);

@@ -31,10 +31,10 @@ int main( int, char** argv )
   /// Read the image
   src = imread( argv[1], 1 );
   /// Transform it to HSV
-  cvtColor( src, hsv, CV_BGR2HSV );
+  cvtColor( src, hsv, COLOR_BGR2HSV );
   /// Show the image
-  namedWindow( window_image, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_image, WINDOW_AUTOSIZE );
   imshow( window_image, src );
   /// Set Trackbars for floodfill thresholds

@@ -71,7 +71,7 @@ int main( int, char** argv )
   }
   /// Display
-  namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE );
+  namedWindow("calcHist Demo", WINDOW_AUTOSIZE );
   imshow("calcHist Demo", histImage );
   waitKey(0);

@@ -22,24 +22,25 @@ int main( int argc, char** argv )
   Mat src_test2, hsv_test2;
   Mat hsv_half_down;
   /// Load three images with different environment settings
   if( argc < 4 )
-    { printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
-      return -1;
-    }
+  {
+    printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
+    return -1;
+  }
   src_base = imread( argv[1], 1 );
   src_test1 = imread( argv[2], 1 );
   src_test2 = imread( argv[3], 1 );
   /// Convert to HSV
-  cvtColor( src_base, hsv_base, CV_BGR2HSV );
-  cvtColor( src_test1, hsv_test1, CV_BGR2HSV );
-  cvtColor( src_test2, hsv_test2, CV_BGR2HSV );
+  cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
+  cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
+  cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
   hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
   /// Using 30 bins for hue and 32 for saturation
   int h_bins = 50; int s_bins = 60;
   int histSize[] = { h_bins, s_bins };
@@ -74,14 +75,15 @@ int main( int argc, char** argv )
   /// Apply the histogram comparison methods
   for( int i = 0; i < 4; i++ )
-     { int compare_method = i;
-       double base_base = compareHist( hist_base, hist_base, compare_method );
-       double base_half = compareHist( hist_base, hist_half_down, compare_method );
-       double base_test1 = compareHist( hist_base, hist_test1, compare_method );
-       double base_test2 = compareHist( hist_base, hist_test2, compare_method );
-       printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
-     }
+  {
+    int compare_method = i;
+    double base_base = compareHist( hist_base, hist_base, compare_method );
+    double base_half = compareHist( hist_base, hist_half_down, compare_method );
+    double base_test1 = compareHist( hist_base, hist_test1, compare_method );
+    double base_test2 = compareHist( hist_base, hist_test2, compare_method );
+    printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
+  }
   printf( "Done \n" );

@@ -37,9 +37,9 @@ int main( int, char** argv )
     { return -1; }
   /// Create windows
-  namedWindow( "Erosion Demo", CV_WINDOW_AUTOSIZE );
-  namedWindow( "Dilation Demo", CV_WINDOW_AUTOSIZE );
-  cvMoveWindow( "Dilation Demo", src.cols, 0 );
+  namedWindow( "Erosion Demo", WINDOW_AUTOSIZE );
+  namedWindow( "Dilation Demo", WINDOW_AUTOSIZE );
+  moveWindow( "Dilation Demo", src.cols, 0 );
   /// Create Erosion Trackbar
   createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",

@@ -39,20 +39,20 @@ int main( int, char** argv )
     { return -1; }
   /// Create window
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   /// Create Trackbar to select Morphology operation
   createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
   /// Create Trackbar to select kernel type
   createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
                   &morph_elem, max_elem,
                   Morphology_Operations );
   /// Create Trackbar to choose kernel size
   createTrackbar( "Kernel size:\n 2n +1", window_name,
                   &morph_size, max_kernel_size,
                   Morphology_Operations );
   /// Default start
   Morphology_Operations( 0, 0 );

@@ -40,7 +40,7 @@ int main( void )
   dst = tmp;
   /// Create window
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   imshow( window_name, dst );
   /// Loop

@@ -31,7 +31,7 @@ int display_dst( int delay );
  */
 int main( void )
 {
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   /// Load the source image
   src = imread( "../images/lena.png", 1 );
@@ -89,7 +89,7 @@ int display_caption( const char* caption )
   dst = Mat::zeros( src.size(), src.type() );
   putText( dst, caption,
            Point( src.cols/4, src.rows/2),
-           CV_FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
+           FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
   imshow( window_name, dst );
   int c = waitKey( DELAY_CAPTION );

@@ -37,19 +37,19 @@ int main( int, char** argv )
   src = imread( argv[1], 1 );
   /// Convert the image to Gray
-  cvtColor( src, src_gray, CV_RGB2GRAY );
+  cvtColor( src, src_gray, COLOR_RGB2GRAY );
   /// Create a window to display results
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   /// Create Trackbar to choose type of Threshold
   createTrackbar( trackbar_type,
                   window_name, &threshold_type,
                   max_type, Threshold_Demo );
   createTrackbar( trackbar_value,
                   window_name, &threshold_value,
                   max_value, Threshold_Demo );
   /// Call the function to initialize
   Threshold_Demo( 0, 0 );

@@ -58,10 +58,10 @@ int main( int, char** argv )
   dst.create( src.size(), src.type() );
   /// Convert the image to grayscale
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   /// Create a window
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   /// Create a Trackbar for user to enter threshold
   createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );

@@ -65,13 +65,13 @@ int main( int, char** argv )
   /// Show what you got
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   imshow( source_window, src );
-  namedWindow( warp_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( warp_window, WINDOW_AUTOSIZE );
   imshow( warp_window, warp_dst );
-  namedWindow( warp_rotate_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( warp_rotate_window, WINDOW_AUTOSIZE );
   imshow( warp_rotate_window, warp_rotate_dst );
   /// Wait until user exits the program

@@ -25,7 +25,7 @@ int main(int, char** argv)
     { return -1; }
   /// Convert it to gray
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   /// Reduce the noise so we avoid false circle detection
   GaussianBlur( src_gray, src_gray, Size(9, 9), 2, 2 );
@@ -47,7 +47,7 @@ int main(int, char** argv)
    }
   /// Show your results
-  namedWindow( "Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE );
+  namedWindow( "Hough Circle Transform Demo", WINDOW_AUTOSIZE );
   imshow( "Hough Circle Transform Demo", src );
   waitKey(0);

@@ -46,7 +46,7 @@ int main( int, char** argv )
    }
   /// Pass the image to gray
-  cvtColor( src, src_gray, CV_RGB2GRAY );
+  cvtColor( src, src_gray, COLOR_RGB2GRAY );
   /// Apply Canny edge detector
   Canny( src_gray, edges, 50, 200, 3 );
@@ -55,10 +55,10 @@ int main( int, char** argv )
   char thresh_label[50];
   sprintf( thresh_label, "Thres: %d + input", min_threshold );
-  namedWindow( standard_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( standard_name, WINDOW_AUTOSIZE );
   createTrackbar( thresh_label, standard_name, &s_trackbar, max_trackbar, Standard_Hough);
-  namedWindow( probabilistic_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( probabilistic_name, WINDOW_AUTOSIZE );
   createTrackbar( thresh_label, probabilistic_name, &p_trackbar, max_trackbar, Probabilistic_Hough);
   /// Initialize

@@ -34,10 +34,10 @@ int main( int, char** argv )
   GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
   /// Convert the image to grayscale
-  cvtColor( src, src_gray, CV_RGB2GRAY );
+  cvtColor( src, src_gray, COLOR_RGB2GRAY );
   /// Create window
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   /// Apply Laplace function
   Mat abs_dst;

@@ -34,7 +34,7 @@ int main( int, char** argv )
   map_y.create( src.size(), CV_32FC1 );
   /// Create window
-  namedWindow( remap_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( remap_window, WINDOW_AUTOSIZE );
   /// Loop
   for(;;)

@@ -33,10 +33,10 @@ int main( int, char** argv )
   GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
   /// Convert it to gray
-  cvtColor( src, src_gray, CV_RGB2GRAY );
+  cvtColor( src, src_gray, COLOR_RGB2GRAY );
   /// Create window
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   /// Generate grad_x and grad_y
   Mat grad_x, grad_y;

@@ -43,7 +43,7 @@ int main( int, char** argv )
   printf( " ** Press 'ESC' to exit the program \n");
   /// Create window
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   /// Initialize arguments for the filter
   top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows);

@@ -35,7 +35,7 @@ int main ( int, char** argv )
     { return -1; }
   /// Create window
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   /// Initialize arguments for the filter
   anchor = Point( -1, -1 );

@@ -30,12 +30,12 @@ int main( int, char** argv )
   src = imread( argv[1], 1 );
   /// Convert image to gray and blur it
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   blur( src_gray, src_gray, Size(3,3) );
   /// Create Window
   const char* source_window = "Source";
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   imshow( source_window, src );
   createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
@@ -68,6 +68,6 @@ void thresh_callback(int, void* )
      }
   /// Show in a window
-  namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
+  namedWindow( "Contours", WINDOW_AUTOSIZE );
   imshow( "Contours", drawing );
 }

@@ -30,12 +30,12 @@ int main( int, char** argv )
   src = imread( argv[1], 1 );
   /// Convert image to gray and blur it
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   blur( src_gray, src_gray, Size(3,3) );
   /// Create Window
   const char* source_window = "Source";
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   imshow( source_window, src );
   createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
@@ -83,6 +83,6 @@ void thresh_callback(int, void* )
      }
   /// Show in a window
-  namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
+  namedWindow( "Contours", WINDOW_AUTOSIZE );
   imshow( "Contours", drawing );
 }

@@ -30,12 +30,12 @@ int main( int, char** argv )
   src = imread( argv[1], 1 );
   /// Convert image to gray and blur it
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   blur( src_gray, src_gray, Size(3,3) );
   /// Create Window
   const char* source_window = "Source";
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   imshow( source_window, src );
   createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
@@ -85,6 +85,6 @@ void thresh_callback(int, void* )
      }
   /// Show in a window
-  namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
+  namedWindow( "Contours", WINDOW_AUTOSIZE );
   imshow( "Contours", drawing );
 }

@@ -30,12 +30,12 @@ int main( int, char** argv )
   src = imread( argv[1], 1 );
   /// Convert image to gray and blur it
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   blur( src_gray, src_gray, Size(3,3) );
   /// Create Window
   const char* source_window = "Source";
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   imshow( source_window, src );
   createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
@@ -62,7 +62,7 @@ void thresh_callback(int, void* )
   findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
   /// Find the convex hull object for each contour
   vector<vector<Point> >hull( contours.size() );
   for( size_t i = 0; i < contours.size(); i++ )
      { convexHull( Mat(contours[i]), hull[i], false ); }
@@ -76,6 +76,6 @@ void thresh_callback(int, void* )
      }
   /// Show in a window
-  namedWindow( "Hull demo", CV_WINDOW_AUTOSIZE );
+  namedWindow( "Hull demo", WINDOW_AUTOSIZE );
   imshow( "Hull demo", drawing );
 }

@@ -30,12 +30,12 @@ int main( int, char** argv )
   src = imread( argv[1], 1 );
   /// Convert image to gray and blur it
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   blur( src_gray, src_gray, Size(3,3) );
   /// Create Window
   const char* source_window = "Source";
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   imshow( source_window, src );
   createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
@@ -79,7 +79,7 @@ void thresh_callback(int, void* )
      }
   /// Show in a window
-  namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
+  namedWindow( "Contours", WINDOW_AUTOSIZE );
   imshow( "Contours", drawing );
   /// Calculate the area with the moments 00 and compare with the result of the OpenCV function

@@ -71,9 +71,9 @@ int main( void )
   /// Create Window and show your results
   const char* source_window = "Source";
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   imshow( source_window, src );
-  namedWindow( "Distance", CV_WINDOW_AUTOSIZE );
+  namedWindow( "Distance", WINDOW_AUTOSIZE );
   imshow( "Distance", drawing );
   waitKey(0);

@@ -40,7 +40,7 @@ int main( int, char** argv )
 {
   /// Load source image and convert it to gray
   src = imread( argv[1], 1 );
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   /// Set some parameters
   int blockSize = 3; int apertureSize = 3;
@@ -64,7 +64,7 @@ int main( int, char** argv )
   minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, Mat() );
   /* Create Window and Trackbar */
-  namedWindow( myHarris_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( myHarris_window, WINDOW_AUTOSIZE );
   createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, myHarris_function );
   myHarris_function( 0, 0 );
@@ -75,7 +75,7 @@ int main( int, char** argv )
   minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, Mat() );
   /* Create Window and Trackbar */
-  namedWindow( myShiTomasi_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( myShiTomasi_window, WINDOW_AUTOSIZE );
   createTrackbar( " Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function );
   myShiTomasi_function( 0, 0 );

@@ -31,10 +31,10 @@ int main( int, char** argv )
 {
   /// Load source image and convert it to gray
   src = imread( argv[1], 1 );
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   /// Create a window and a trackbar
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
   imshow( source_window, src );
@@ -77,6 +77,6 @@ void cornerHarris_demo( int, void* )
        }
     }
   /// Showing the result
-  namedWindow( corners_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( corners_window, WINDOW_AUTOSIZE );
   imshow( corners_window, dst_norm_scaled );
 }

@@ -32,10 +32,10 @@ int main( int, char** argv )
 {
   /// Load source image and convert it to gray
   src = imread( argv[1], 1 );
-  cvtColor( src, src_gray, CV_BGR2GRAY );
+  cvtColor( src, src_gray, COLOR_BGR2GRAY );
   /// Create Window
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   /// Create Trackbar to set the number of corners
   createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
@@ -87,7 +87,7 @@ void goodFeaturesToTrack_Demo( int, void* )
      { circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); }
   /// Show what you got
-  namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+  namedWindow( source_window, WINDOW_AUTOSIZE );
   imshow( source_window, copy );
   /// Set the neeed parameters to find the refined corners

@@ -32,10 +32,10 @@ int main( int, char** argv )
  {
  /// Load source image and convert it to gray
  src = imread( argv[1], 1 );
- cvtColor( src, src_gray, CV_BGR2GRAY );
+ cvtColor( src, src_gray, COLOR_BGR2GRAY );
  /// Create Window
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
  /// Create Trackbar to set the number of corners
  createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
@@ -87,6 +87,6 @@ void goodFeaturesToTrack_Demo( int, void* )
  { circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); }
  /// Show what you got
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
  imshow( source_window, copy );
  }

@@ -294,7 +294,7 @@ int main(int argc, char* argv[])
  if( s.calibrationPattern == Settings::CHESSBOARD)
  {
  Mat viewGray;
- cvtColor(view, viewGray, CV_BGR2GRAY);
+ cvtColor(view, viewGray, COLOR_BGR2GRAY);
  cornerSubPix( viewGray, pointBuf, Size(11,11),
  Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
  }

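Note that the context line above still uses CV_TERMCRIT_EPS+CV_TERMCRIT_ITER; only the colour-conversion constant is modernised in this hunk. If the termination criteria were updated in the same spirit, the C++-style equivalent would presumably look like the sketch below (not part of this patch; the function name is made up):

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <vector>

    // Corner refinement with the C++-style TermCriteria constants:
    // TermCriteria::EPS + TermCriteria::MAX_ITER replaces CV_TERMCRIT_EPS + CV_TERMCRIT_ITER.
    void refineCorners( const cv::Mat& viewGray, std::vector<cv::Point2f>& pointBuf )
    {
        cv::cornerSubPix( viewGray, pointBuf, cv::Size(11,11), cv::Size(-1,-1),
                          cv::TermCriteria( cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER,
                                            30, 0.1 ) );
    }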
@@ -56,7 +56,7 @@ int main( int argc, char** argv )
  //-- 4. Display it as a CV_8UC1 image
  imgDisparity16S.convertTo( imgDisparity8U, CV_8UC1, 255/(maxVal - minVal));
- namedWindow( windowDisparity, CV_WINDOW_NORMAL );
+ namedWindow( windowDisparity, WINDOW_NORMAL );
  imshow( windowDisparity, imgDisparity8U );
  //-- 5. Save the image

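This hunk only changes the window flag, but the surrounding lines show the usual trick for displaying a CV_16S disparity map. A sketch of that display step in isolation (the window name and the use of minMaxLoc to obtain the range are assumptions; the scaling formula mirrors the sample and assumes maxVal > minVal):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>

    // Rescale a CV_16S disparity map to CV_8U for display,
    // mapping [minVal, maxVal] roughly onto [0, 255].
    void showDisparity( const cv::Mat& imgDisparity16S )
    {
        double minVal, maxVal;
        cv::minMaxLoc( imgDisparity16S, &minVal, &maxVal );

        cv::Mat imgDisparity8U;
        imgDisparity16S.convertTo( imgDisparity8U, CV_8UC1, 255 / (maxVal - minVal) );

        // WINDOW_NORMAL (formerly CV_WINDOW_NORMAL) keeps the window resizable
        cv::namedWindow( "Disparity", cv::WINDOW_NORMAL );
        cv::imshow( "Disparity", imgDisparity8U );
        cv::waitKey(0);
    }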
@@ -64,9 +64,9 @@ int main( void ){
  /// 3. Display your stuff!
  imshow( atom_window, atom_image );
- cvMoveWindow( atom_window, 0, 200 );
+ moveWindow( atom_window, 0, 200 );
  imshow( rook_window, rook_image );
- cvMoveWindow( rook_window, w, 200 );
+ moveWindow( rook_window, w, 200 );
  waitKey( 0 );
  return(0);

@@ -304,7 +304,7 @@ int Displaying_Random_Text( Mat image, char* window_name, RNG rng )
  */
  int Displaying_Big_End( Mat image, char* window_name, RNG )
  {
- Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
+ Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
  Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2);
  int lineType = 8;
@@ -313,7 +313,7 @@ int Displaying_Big_End( Mat image, char* window_name, RNG )
  for( int i = 0; i < 255; i += 2 )
  {
  image2 = image - Scalar::all(i);
- putText( image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
+ putText( image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3,
  Scalar(i, i, 255), 5, lineType );
  imshow( window_name, image2 );

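The font constants follow the same pattern: CV_FONT_HERSHEY_COMPLEX becomes FONT_HERSHEY_COMPLEX. A small standalone sketch of measuring and centring text with the new names (the canvas size and colour are arbitrary, not taken from the sample):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        const int width = 900, height = 600;
        cv::Mat canvas = cv::Mat::zeros( height, width, CV_8UC3 );

        // Measure the string first so it can be centred, then draw it
        cv::Size textsize = cv::getTextSize( "OpenCV forever!", cv::FONT_HERSHEY_COMPLEX, 3, 5, 0 );
        cv::Point org( (width - textsize.width) / 2, (height + textsize.height) / 2 );

        cv::putText( canvas, "OpenCV forever!", org, cv::FONT_HERSHEY_COMPLEX, 3,
                     cv::Scalar(0, 0, 255), 5, 8 );

        cv::imshow( "Text demo", canvas );
        cv::waitKey(0);
        return 0;
    }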
@@ -45,7 +45,7 @@ int main( int argc, char** argv )
  // convert image to YUV color space. The output image will be created automatically.
  Mat I_YUV;
- cvtColor(I, I_YUV, CV_BGR2YCrCb);
+ cvtColor(I, I_YUV, COLOR_BGR2YCrCb);
  vector<Mat> planes; // Use the STL's vector structure to store multiple Mat objects
  split(I_YUV, planes); // split the image into separate color planes (Y U V)
@@ -117,7 +117,7 @@ int main( int argc, char** argv )
  cvtColor(I_YUV, I, CV_YCrCb2BGR); // and produce the output RGB image
- namedWindow("image with grain", CV_WINDOW_AUTOSIZE); // use this to create images
+ namedWindow("image with grain", WINDOW_AUTOSIZE); // use this to create images
  #ifdef DEMO_MIXED_API_USE
  // this is to demonstrate that I and IplI really share the data - the result of the above

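Here the forward conversion is updated to COLOR_BGR2YCrCb while the reverse call in the unchanged context line still uses CV_YCrCb2BGR; its C++-style counterpart would presumably be COLOR_YCrCb2BGR. A sketch of the full round trip (the +16 luma tweak and the function name are invented for illustration):

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <vector>

    // Round-trip a BGR image through YCrCb, touching only the luma plane.
    void brightenLuma( cv::Mat& I )
    {
        cv::Mat I_YUV;
        cv::cvtColor( I, I_YUV, cv::COLOR_BGR2YCrCb );

        std::vector<cv::Mat> planes;
        cv::split( I_YUV, planes );                     // planes[0] is Y (luma)
        planes[0] += cv::Scalar::all(16);               // simple brightness tweak
        cv::merge( planes, I_YUV );

        cv::cvtColor( I_YUV, I, cv::COLOR_YCrCb2BGR );  // C++ counterpart of CV_YCrCb2BGR
    }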
@@ -30,8 +30,8 @@ int main( int argc, char* argv[])
  else
  I = imread( filename, CV_LOAD_IMAGE_COLOR);
- namedWindow("Input", CV_WINDOW_AUTOSIZE);
+ namedWindow("Input", WINDOW_AUTOSIZE);
- namedWindow("Output", CV_WINDOW_AUTOSIZE);
+ namedWindow("Output", WINDOW_AUTOSIZE);
  imshow("Input", I);
  double t = (double)getTickCount();
@@ -42,7 +42,7 @@ int main( int argc, char* argv[])
  cout << "Hand written function times passed in seconds: " << t << endl;
  imshow("Output", J);
- cvWaitKey(0);
+ waitKey(0);
  Mat kern = (Mat_<char>(3,3) << 0, -1, 0,
  -1, 5, -1,
@@ -54,7 +54,7 @@ int main( int argc, char* argv[])
  imshow("Output", K);
- cvWaitKey(0);
+ waitKey(0);
  return 0;
  }
  void Sharpen(const Mat& myImage,Mat& Result)

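Similarly, the unchanged context line above still loads the image with CV_LOAD_IMAGE_COLOR; the C++-style flag is IMREAD_COLOR, and cvWaitKey becomes waitKey as the hunk shows. A minimal sketch combining both (error handling and the window name are illustrative, not the sample's):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>

    int main( int argc, char* argv[] )
    {
        if( argc < 2 )
            return -1;

        // IMREAD_COLOR is the C++-style counterpart of CV_LOAD_IMAGE_COLOR
        cv::Mat I = cv::imread( argv[1], cv::IMREAD_COLOR );
        if( I.empty() )
            return -1;

        cv::namedWindow( "Input", cv::WINDOW_AUTOSIZE );
        cv::imshow( "Input", I );
        cv::waitKey(0);                 // C++ replacement for cvWaitKey(0)
        return 0;
    }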
@@ -95,8 +95,8 @@ int main( int argc, char** argv )
  //-- Get the corners from the image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
- obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
+ obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
- obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
+ obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);
  perspectiveTransform( obj_corners, scene_corners, H);

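cvPoint() is the old C constructor macro; the C++ class cv::Point is used instead and converts implicitly to the Point2f elements of the corner vector. A toy, self-contained version of the corner-warping step (the 640x480 size and the translation homography are invented for the example):

    #include <opencv2/core/core.hpp>
    #include <vector>
    #include <iostream>

    int main()
    {
        // Corners of a hypothetical 640x480 object image, built with cv::Point
        std::vector<cv::Point2f> obj_corners(4);
        obj_corners[0] = cv::Point(0, 0);
        obj_corners[1] = cv::Point(640, 0);
        obj_corners[2] = cv::Point(640, 480);
        obj_corners[3] = cv::Point(0, 480);

        // A toy homography: pure translation by (100, 50)
        cv::Mat H = (cv::Mat_<double>(3,3) << 1, 0, 100,
                                              0, 1,  50,
                                              0, 0,   1);

        std::vector<cv::Point2f> scene_corners(4);
        cv::perspectiveTransform( obj_corners, scene_corners, H );

        for( size_t i = 0; i < scene_corners.size(); i++ )
            std::cout << "(" << scene_corners[i].x << ", " << scene_corners[i].y << ")" << std::endl;
        return 0;
    }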
@@ -22,7 +22,7 @@ int main( int argc, char** argv )
  return -1;
  }
- namedWindow( "Display window", CV_WINDOW_AUTOSIZE );// Create a window for display.
+ namedWindow( "Display window", WINDOW_AUTOSIZE );// Create a window for display.
  imshow( "Display window", image ); // Show our image inside it.
  waitKey(0); // Wait for a keystroke in the window

@@ -74,10 +74,10 @@ int main(int argc, char *argv[])
  const char* WIN_RF = "Reference";
  // Windows
- namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE );
+ namedWindow(WIN_RF, WINDOW_AUTOSIZE );
- namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE );
+ namedWindow(WIN_UT, WINDOW_AUTOSIZE );
- cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
+ moveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
- cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2
+ moveWindow(WIN_UT, refS.width, 0); //1500, 2
  cout << "Frame resolution: Width=" << refS.width << " Height=" << refS.height
  << " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl;
@@ -124,7 +124,7 @@ int main(int argc, char *argv[])
  imshow( WIN_RF, frameReference);
  imshow( WIN_UT, frameUnderTest);
- c = (char)cvWaitKey(delay);
+ c = (char)waitKey(delay);
  if (c == 27) break;
  }

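Here cvMoveWindow and cvWaitKey give way to moveWindow and waitKey, while the property constant CV_CAP_PROP_FRAME_COUNT is left untouched in the unchanged line. A sketch of the same kind of playback loop with the C++ calls (the 30 ms delay, window name, and window placement are arbitrary choices for the example):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <iostream>

    int main( int argc, char* argv[] )
    {
        if( argc < 2 )
            return -1;

        cv::VideoCapture cap( argv[1] );            // open a video file
        if( !cap.isOpened() )
            return -1;

        std::cout << "Frame count: " << cap.get(CV_CAP_PROP_FRAME_COUNT) << std::endl;

        cv::namedWindow( "Playback", cv::WINDOW_AUTOSIZE );
        cv::moveWindow( "Playback", 400, 0 );       // C++ replacement for cvMoveWindow

        const int delay = 30;                       // milliseconds between frames
        cv::Mat frame;
        for(;;)
        {
            cap >> frame;
            if( frame.empty() )
                break;
            cv::imshow( "Playback", frame );
            char c = (char)cv::waitKey(delay);      // C++ replacement for cvWaitKey
            if( c == 27 )                           // ESC quits
                break;
        }
        return 0;
    }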
@@ -30,7 +30,7 @@ RNG rng(12345);
  */
  int main( void )
  {
- CvCapture* capture;
+ VideoCapture capture;
  Mat frame;
  //-- 1. Load the cascades
@@ -38,12 +38,12 @@ int main( void )
  if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
  //-- 2. Read the video stream
- capture = cvCaptureFromCAM( -1 );
+ capture.open( -1 );
- if( capture )
+ if( capture.isOpened() )
  {
  for(;;)
  {
- frame = cvQueryFrame( capture );
+ capture >> frame;
  //-- 3. Apply the classifier to the frame
  if( !frame.empty() )
@@ -67,7 +67,7 @@ void detectAndDisplay( Mat frame )
  std::vector<Rect> faces;
  Mat frame_gray;
- cvtColor( frame, frame_gray, CV_BGR2GRAY );
+ cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
  equalizeHist( frame_gray, frame_gray );
  //-- Detect faces
  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

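This hunk replaces the C capture handle (CvCapture*, cvCaptureFromCAM, cvQueryFrame) with the cv::VideoCapture class, whose destructor releases the device. The grabbing part, stripped of the cascade logic, amounts to the sketch below (the window name and the 10 ms key delay are illustrative):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        // VideoCapture replaces the C-style CvCapture* handle; open(-1)
        // asks for any available camera, as in the patched sample.
        cv::VideoCapture capture;
        capture.open( -1 );
        if( !capture.isOpened() )
            return -1;

        cv::Mat frame;
        for(;;)
        {
            capture >> frame;                       // replaces frame = cvQueryFrame( capture )
            if( frame.empty() )
                break;

            cv::imshow( "Camera", frame );
            if( (char)cv::waitKey(10) == 27 )       // ESC quits
                break;
        }
        return 0;                                   // the destructor releases the camera
    }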
@@ -30,7 +30,7 @@ RNG rng(12345);
  */
  int main( void )
  {
- CvCapture* capture;
+ VideoCapture capture;
  Mat frame;
  //-- 1. Load the cascade
@@ -38,12 +38,12 @@ int main( void )
  if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
  //-- 2. Read the video stream
- capture = cvCaptureFromCAM( -1 );
+ capture.open( -1 );
- if( capture )
+ if( capture.isOpened() )
  {
  for(;;)
  {
- frame = cvQueryFrame( capture );
+ capture >> frame;
  //-- 3. Apply the classifier to the frame
  if( !frame.empty() )
@@ -67,7 +67,7 @@ void detectAndDisplay( Mat frame )
  std::vector<Rect> faces;
  Mat frame_gray;
- cvtColor( frame, frame_gray, CV_BGR2GRAY );
+ cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
  equalizeHist( frame_gray, frame_gray );
  //-- Detect faces

@@ -52,7 +52,7 @@ namespace
  if (frame.empty())
  break;
  cv::Mat gray;
- cv::cvtColor(frame,gray,CV_RGB2GRAY);
+ cv::cvtColor(frame,gray,COLOR_RGB2GRAY);
  vector<string> codes;
  Mat corners;
  findDataMatrix(gray, codes, corners);

@@ -161,7 +161,7 @@ int main(int ac, char ** av)
  if (frame.empty())
  break;
- cvtColor(frame, gray, CV_RGB2GRAY);
+ cvtColor(frame, gray, COLOR_RGB2GRAY);
  detector.detect(gray, query_kpts); //Find interest points

@@ -58,8 +58,8 @@ int main( int argc, char** argv )
  namedWindow( "image", 1 );
  img0.copyTo(img);
- cvtColor(img, markerMask, CV_BGR2GRAY);
+ cvtColor(img, markerMask, COLOR_BGR2GRAY);
- cvtColor(markerMask, imgGray, CV_GRAY2BGR);
+ cvtColor(markerMask, imgGray, COLOR_GRAY2BGR);
  markerMask = Scalar::all(0);
  imshow( "image", img );
  setMouseCallback( "image", onMouse, 0 );

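The watershed sample builds a single-channel marker mask and a grey backdrop from the input image; only the conversion codes change here. The setup, in isolation and with a made-up function name, looks roughly like this:

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>

    // Prepare a zeroed single-channel marker mask plus a grey-looking BGR
    // backdrop, mirroring the watershed sample with the C++ conversion codes.
    void prepareMarkers( const cv::Mat& img, cv::Mat& markerMask, cv::Mat& imgGray )
    {
        cv::cvtColor( img, markerMask, cv::COLOR_BGR2GRAY );     // 3 channels -> 1
        cv::cvtColor( markerMask, imgGray, cv::COLOR_GRAY2BGR ); // 1 channel -> 3
        markerMask = cv::Scalar::all(0);                         // clear: seeds drawn later
    }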