Applied all fixes related to translating C to C++ code

Also fixed some typos and code alignment
Also adapted tutorial CPP samples
Fixed some indentation problems
pull/1722/head
StevenPuttemans 11 years ago
parent 4bc105c40b
commit 2b106db02f
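For reference, the C-to-C++ translation pattern applied throughout these samples is roughly the following (a minimal, hypothetical before/after sketch targeting the 2.4-era C++ API; file and window names are illustrative):

    // Old C API (the kind of code removed in this commit):
    //   IplImage* img  = cvLoadImage("board.jpg", 1);
    //   IplImage* gray = cvCreateImage(cvGetSize(img), 8, 1);
    //   cvCvtColor(img, gray, CV_BGR2GRAY);
    //   cvNamedWindow("view", CV_WINDOW_AUTOSIZE);
    //   cvShowImage("view", gray);
    //   cvWaitKey(0);
    //   cvReleaseImage(&gray); cvReleaseImage(&img);
    //
    // New C++ API (the kind of code added in this commit):
    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    using namespace cv;

    int main()
    {
        Mat img = imread("board.jpg", 1);        // replaces cvLoadImage
        if (img.empty()) return -1;
        Mat gray;
        cvtColor(img, gray, COLOR_BGR2GRAY);     // replaces cvCvtColor + CV_BGR2GRAY
        namedWindow("view", WINDOW_AUTOSIZE);    // replaces cvNamedWindow + CV_WINDOW_AUTOSIZE
        imshow("view", gray);                    // replaces cvShowImage
        waitKey(0);                              // replaces cvWaitKey
        return 0;                                // Mat frees its data automatically, no cvReleaseImage
    }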
  78 changed files (changed lines per file in parentheses):
  1. samples/cpp/3calibration.cpp (2)
  2. samples/cpp/OpenEXRimages_HDR_Retina_toneMapping.cpp (6)
  3. samples/cpp/OpenEXRimages_HDR_Retina_toneMapping_video.cpp (4)
  4. samples/cpp/Qt_sample/main.cpp (55)
  5. samples/cpp/bagofwords_classification.cpp (4)
  6. samples/cpp/bgfg_segm.cpp (8)
  7. samples/cpp/build3dmodel.cpp (2)
  8. samples/cpp/calibration.cpp (2)
  9. samples/cpp/camshiftdemo.cpp (4)
  10. samples/cpp/chamfer.cpp (2)
  11. samples/cpp/descriptor_extractor_matcher.cpp (4)
  12. samples/cpp/detection_based_tracker_sample.cpp (2)
  13. samples/cpp/distrans.cpp (2)
  14. samples/cpp/drawing.cpp (4)
  15. samples/cpp/edge.cpp (2)
  16. samples/cpp/em.cpp (2)
  17. samples/cpp/fback.cpp (6)
  18. samples/cpp/ffilldemo.cpp (6)
  19. samples/cpp/generic_descriptor_match.cpp (2)
  20. samples/cpp/grabcut.cpp (8)
  21. samples/cpp/houghcircles.cpp (2)
  22. samples/cpp/houghlines.cpp (2)
  23. samples/cpp/image.cpp (2)
  24. samples/cpp/lkdemo.cpp (2)
  25. samples/cpp/meanshift_segmentation.cpp (2)
  26. samples/cpp/minarea.cpp (2)
  27. samples/cpp/morphology2.cpp (2)
  28. samples/cpp/pca.cpp (2)
  29. samples/cpp/phase_corr.cpp (2)
  30. samples/cpp/points_classifier.cpp (14)
  31. samples/cpp/rgbdodometry.cpp (4)
  32. samples/cpp/segment_objects.cpp (2)
  33. samples/cpp/stereo_calib.cpp (4)
  34. samples/cpp/tutorial_code/HighGUI/BasicLinearTransformsTrackbar.cpp (17)
  35. samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp (6)
  36. samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp (4)
  37. samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp (4)
  38. samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp (4)
  39. samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp (2)
  40. samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp (46)
  41. samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp (6)
  42. samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp (10)
  43. samples/cpp/tutorial_code/ImgProc/Pyramids.cpp (2)
  44. samples/cpp/tutorial_code/ImgProc/Smoothing.cpp (4)
  45. samples/cpp/tutorial_code/ImgProc/Threshold.cpp (12)
  46. samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp (4)
  47. samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp (6)
  48. samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp (4)
  49. samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp (6)
  50. samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp (4)
  51. samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp (2)
  52. samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp (4)
  53. samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp (2)
  54. samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp (2)
  55. samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp (6)
  56. samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp (6)
  57. samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp (6)
  58. samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp (8)
  59. samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp (6)
  60. samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp (4)
  61. samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp (6)
  62. samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp (6)
  63. samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp (6)
  64. samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp (6)
  65. samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp (2)
  66. samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp (2)
  67. samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp (4)
  68. samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp (4)
  69. samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp (4)
  70. samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp (8)
  71. samples/cpp/tutorial_code/features2D/SURF_Homography.cpp (4)
  72. samples/cpp/tutorial_code/introduction/display_image/display_image.cpp (2)
  73. samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/Test.cpp (10)
  74. samples/cpp/tutorial_code/objectDetection/objectDetection.cpp (10)
  75. samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp (10)
  76. samples/cpp/video_dmtx.cpp (2)
  77. samples/cpp/video_homography.cpp (2)
  78. samples/cpp/watershed.cpp (4)

@ -271,7 +271,7 @@ int main( int argc, char** argv )
{
vector<Point2f> ptvec;
imageSize = view.size();
cvtColor(view, viewGray, CV_BGR2GRAY);
cvtColor(view, viewGray, COLOR_BGR2GRAY);
bool found = findChessboardCorners( view, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH );
drawChessboardCorners( view, boardSize, Mat(ptvec), found );

@ -1,10 +1,10 @@
//============================================================================
// Name : HighDynamicRange_RetinaCompression.cpp
// Name : OpenEXRimages_HDR_Retina_toneMapping.cpp
// Author : Alexandre Benoit (benoit.alexandre.vision@gmail.com)
// Version : 0.1
// Copyright : Alexandre Benoit, LISTIC Lab, july 2011
// Description : HighDynamicRange compression (tone mapping) with the help of the Gipsa/Listic's retina in C++, Ansi-style
// Description : HighDynamicRange retina tone mapping with the help of the Gipsa/Listic's retina in C++, Ansi-style
//============================================================================
#include <iostream>
@ -69,7 +69,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
{
cv::Mat rgbIntImg;
outputMat.convertTo(rgbIntImg, CV_8UC3);
cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
}
// get histogram density probability in order to cut values under/above the edge limits (here 5-95%)... useful for HDR pixel errors cancellation
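The 5-95% histogram cut mentioned in the comment above can be sketched as follows (a hypothetical helper written against the 2.4 C++ API, not the sample's actual implementation):

    // Hypothetical sketch: find the gray levels enclosing the central 5-95% of pixels
    // and clip the image to that range (to discard outlier HDR pixel errors).
    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>

    static void clipHistogramEdges(const cv::Mat& gray8u, cv::Mat& clipped,
                                   double lowFrac = 0.05, double highFrac = 0.95)
    {
        int histSize = 256;                      // one bin per 8-bit gray level
        float range[] = { 0, 256 };
        const float* histRange = { range };
        cv::Mat hist;
        cv::calcHist(&gray8u, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange);

        double total = (double)gray8u.total(), cumulative = 0;
        int low = 0, high = 255;
        for (int i = 0; i < histSize; i++)
        {
            cumulative += hist.at<float>(i);
            if (cumulative / total < lowFrac)  low = i;    // last level below the 5% edge
            if (cumulative / total < highFrac) high = i;   // last level below the 95% edge
        }
        cv::Mat tmp;
        cv::max(gray8u, (double)low,  tmp);                    // clamp values below the lower edge
        cv::min(tmp,    (double)high, tmp);                    // clamp values above the upper edge
        cv::normalize(tmp, clipped, 0, 255, cv::NORM_MINMAX);  // stretch back to [0, 255]
    }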

@ -4,7 +4,7 @@
// Author : Alexandre Benoit (benoit.alexandre.vision@gmail.com)
// Version : 0.2
// Copyright : Alexandre Benoit, LISTIC Lab, december 2011
// Description : HighDynamicRange compression (tone mapping) for image sequences with the help of the Gipsa/Listic's retina in C++, Ansi-style
// Description : HighDynamicRange retina tone mapping for image sequences with the help of the Gipsa/Listic's retina in C++, Ansi-style
// Known issues: the input OpenEXR sequences can have bad computed pixels that should be removed
// => a simple method consists of cutting histogram edges (a slider for this on the UI is provided)
// => however, in image sequences, this histogram cut must be done in an elegant way from frame to frame... still not done...
@ -92,7 +92,7 @@ static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, con
{
cv::Mat rgbIntImg;
normalisedImage.convertTo(rgbIntImg, CV_8UC3);
cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
}
// get histogram density probability in order to cut values under/above the edge limits (here 5-95%)... useful for HDR pixel errors cancellation

@ -4,7 +4,7 @@
#include <iostream>
#include <vector>
#include <opencv/highgui.h>
#include <opencv2/highgui/highgui.hpp>
#if defined WIN32 || defined _WIN32 || defined WINCE
#include <windows.h>
@ -20,8 +20,7 @@
#include <GL/gl.h>
#endif
#include <opencv/cxcore.h>
#include <opencv/cv.h>
#include <opencv2/core/core.hpp>
using namespace std;
using namespace cv;
@ -111,12 +110,12 @@ static void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)
modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
}
static void foundCorners(vector<CvPoint2D32f> *srcImagePoints,IplImage* source, IplImage* grayImage)
static void foundCorners(vector<CvPoint2D32f> *srcImagePoints, const Mat& source, Mat& grayImage)
{
cvCvtColor(source,grayImage,CV_RGB2GRAY);
cvSmooth( grayImage, grayImage,CV_GAUSSIAN,11);
cvNormalize(grayImage, grayImage, 0, 255, CV_MINMAX);
cvThreshold( grayImage, grayImage, 26, 255, CV_THRESH_BINARY_INV);//25
cvtColor(source, grayImage, COLOR_RGB2GRAY);
GaussianBlur(grayImage, grayImage, Size(11,11), 0, 0);
normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25
Mat MgrayImage = grayImage;
//For debug
@ -189,8 +188,8 @@ static void foundCorners(vector<CvPoint2D32f> *srcImagePoints,IplImage* source,
for(size_t i = 0 ; i<srcImagePoints_temp.size(); i++ )
{
ss<<i;
circle(Msource,srcImagePoints->at(i),5,CV_RGB(255,0,0));
putText( Msource, ss.str(), srcImagePoints->at(i),CV_FONT_HERSHEY_SIMPLEX,1,CV_RGB(255,0,0));
circle(Msource,srcImagePoints->at(i),5,Scalar(0,0,255));
putText(Msource,ss.str(),srcImagePoints->at(i),FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,255));
ss.str("");
//new coordinate system in the middle of the frame and reversed (camera coordinate system)
@ -224,19 +223,19 @@ static void createOpenGLMatrixFrom(float *posePOSIT,const CvMatr32f &rotationMat
int main(void)
{
help();
CvCapture* video = cvCaptureFromFile("cube4.avi");
CV_Assert(video);
VideoCapture video("cube4.avi");
CV_Assert(video.isOpened());
IplImage* source = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,3);
IplImage* grayImage = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,1);
Mat source, grayImage;
cvNamedWindow("original",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
cvNamedWindow("POSIT",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
video >> source;
namedWindow("original", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
namedWindow("POSIT", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
//For debug
//cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE);
float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
cvSetOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
setOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
vector<CvPoint3D32f> modelPoints;
initPOSIT(&modelPoints);
@ -251,26 +250,22 @@ int main(void)
vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));
while(cvWaitKey(33) != 27)
while(waitKey(33) != 27)
{
source=cvQueryFrame(video);
cvShowImage("original",source);
video >> source;
imshow("original",source);
foundCorners(&srcImagePoints,source,grayImage);
cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);
cvShowImage("POSIT",source);
//For debug
//cvShowImage("tempGray",grayImage);
imshow("POSIT",source);
if (cvGetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO)>0.99)
cvSetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO,0);
if (video.get(CV_CAP_PROP_POS_AVI_RATIO)>0.99)
video.set(CV_CAP_PROP_POS_AVI_RATIO,0);
}
cvDestroyAllWindows();
cvReleaseImage(&grayImage);
cvReleaseCapture(&video);
destroyAllWindows();
cvReleasePOSITObject(&positObject);
return 0;
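Put together, the rewritten capture loop above amounts to the following (a minimal sketch; CV_CAP_PROP_POS_AVI_RATIO is the 2.4-era property id used by the sample, accessed through the VideoCapture::get/set member calls):

    // Hypothetical sketch of the C++-style loop: grab frames with operator>>,
    // rewind the clip when its relative position reaches the end.
    cv::VideoCapture video("cube4.avi");
    CV_Assert(video.isOpened());
    cv::Mat source;
    while (cv::waitKey(33) != 27)                            // ESC quits
    {
        video >> source;                                     // replaces cvQueryFrame
        if (source.empty())
            break;
        cv::imshow("original", source);
        if (video.get(CV_CAP_PROP_POS_AVI_RATIO) > 0.99)     // near the end of the clip?
            video.set(CV_CAP_PROP_POS_AVI_RATIO, 0);         // rewind to the first frame
    }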

@ -54,10 +54,6 @@ static void help(char** argv)
<< "\n";
}
static void makeDir( const string& dir )
{
#if defined WIN32 || defined _WIN32

@ -45,10 +45,10 @@ int main(int argc, const char** argv)
return -1;
}
namedWindow("image", CV_WINDOW_NORMAL);
namedWindow("foreground mask", CV_WINDOW_NORMAL);
namedWindow("foreground image", CV_WINDOW_NORMAL);
namedWindow("mean background image", CV_WINDOW_NORMAL);
namedWindow("image", WINDOW_NORMAL);
namedWindow("foreground mask", WINDOW_NORMAL);
namedWindow("foreground image", WINDOW_NORMAL);
namedWindow("mean background image", WINDOW_NORMAL);
BackgroundSubtractorMOG2 bg_model;//(100, 3, 0.3, 5);

@ -418,7 +418,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
for( size_t i = 0; i < nimages; i++ )
{
Mat img = imread(imageList[i], 1), gray;
cvtColor(img, gray, CV_BGR2GRAY);
cvtColor(img, gray, COLOR_BGR2GRAY);
vector<KeyPoint> keypoints;
detector->detect(gray, keypoints);

@ -461,7 +461,7 @@ int main( int argc, char** argv )
flip( view, view, 0 );
vector<Point2f> pointbuf;
cvtColor(view, viewGray, CV_BGR2GRAY);
cvtColor(view, viewGray, COLOR_BGR2GRAY);
bool found;
switch( pattern )

@ -113,7 +113,7 @@ int main( int argc, const char** argv )
if( !paused )
{
cvtColor(image, hsv, CV_BGR2HSV);
cvtColor(image, hsv, COLOR_BGR2HSV);
if( trackObject )
{
@ -163,7 +163,7 @@ int main( int argc, const char** argv )
}
if( backprojMode )
cvtColor( backproj, image, CV_GRAY2BGR );
cvtColor( backproj, image, COLOR_GRAY2BGR );
ellipse( image, trackBox, Scalar(0,0,255), 3, CV_AA );
}
}

@ -40,7 +40,7 @@ int main( int argc, const char** argv )
return -1;
}
Mat cimg;
cvtColor(img, cimg, CV_GRAY2BGR);
cvtColor(img, cimg, COLOR_GRAY2BGR);
// if the image and the template are not edge maps but normal grayscale images,
// you might want to uncomment the lines below to produce the maps. You can also
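The commented hint above refers to producing edge maps first; a minimal sketch of that preprocessing (the thresholds and the template name tpl are illustrative, img is assumed to be the grayscale input loaded by the sample):

    // Hypothetical sketch: turn grayscale inputs into edge maps before chamfer matching.
    cv::Mat imgEdges, tplEdges;
    cv::Canny(img, imgEdges, 50, 150, 3);   // edge map of the search image
    cv::Canny(tpl, tplEdges, 50, 150, 3);   // edge map of the template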

@ -208,7 +208,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
matchesMask[i1] = 1;
}
// draw inliers
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 255, 0), Scalar(255, 0, 0), matchesMask
#if DRAW_RICH_KEYPOINTS_MODE
, DrawMatchesFlags::DRAW_RICH_KEYPOINTS
#endif
@ -218,7 +218,7 @@ static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
// draw outliers
for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
matchesMask[i1] = !matchesMask[i1];
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask,
drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(255, 0, 0), Scalar(0, 0, 255), matchesMask,
DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
#endif

@ -116,7 +116,7 @@ static int test_FaceDetector(int argc, char *argv[])
LOGD("\n\nSTEP n=%d from prev step %f ms\n\n", n, t_ms);
m=images[n-1];
CV_Assert(! m.empty());
cvtColor(m, gray, CV_BGR2GRAY);
cvtColor(m, gray, COLOR_BGR2GRAY);
fd.process(gray);

@ -128,7 +128,7 @@ int main( int argc, const char** argv )
// Call to update the view
onTrackbar(0, 0);
int c = cvWaitKey(0) & 255;
int c = waitKey(0) & 255;
if( c == 27 )
break;

@ -157,14 +157,14 @@ int main()
return 0;
}
Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
Point org((width - textsize.width)/2, (height - textsize.height)/2);
Mat image2;
for( i = 0; i < 255; i += 2 )
{
image2 = image - Scalar::all(i);
putText(image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
putText(image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3,
Scalar(i, i, 255), 5, lineType);
imshow(wndname, image2);

@ -49,7 +49,7 @@ int main( int argc, const char** argv )
return -1;
}
cedge.create(image.size(), image.type());
cvtColor(image, gray, CV_BGR2GRAY);
cvtColor(image, gray, COLOR_BGR2GRAY);
// Create a window
namedWindow("Edge map", 1);

@ -59,7 +59,7 @@ int main( int /*argc*/, char** /*argv*/ )
params.cov_mat_type = CvEM::COV_MAT_DIAGONAL;
params.start_step = CvEM::START_E_STEP;
params.means = em_model.get_means();
params.covs = (const CvMat**)em_model.get_covs();
params.covs = em_model.get_covs();
params.weights = em_model.get_weights();
em_model2.train( samples, Mat(), params, &labels );

@ -42,13 +42,13 @@ int main(int, char**)
for(;;)
{
cap >> frame;
cvtColor(frame, gray, CV_BGR2GRAY);
cvtColor(frame, gray, COLOR_BGR2GRAY);
if( prevgray.data )
{
calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
cvtColor(prevgray, cflow, CV_GRAY2BGR);
drawOptFlowMap(flow, cflow, 16, 1.5, CV_RGB(0, 255, 0));
cvtColor(prevgray, cflow, COLOR_GRAY2BGR);
drawOptFlowMap(flow, cflow, 16, 1.5, Scalar(0, 255, 0));
imshow("flow", cflow);
}
if(waitKey(30)>=0)

@ -81,7 +81,7 @@ int main( int argc, char** argv )
}
help();
image0.copyTo(image);
cvtColor(image0, gray, CV_BGR2GRAY);
cvtColor(image0, gray, COLOR_BGR2GRAY);
mask.create(image0.rows+2, image0.cols+2, CV_8UC1);
namedWindow( "image", 0 );
@ -106,7 +106,7 @@ int main( int argc, char** argv )
if( isColor )
{
cout << "Grayscale mode is set\n";
cvtColor(image0, gray, CV_BGR2GRAY);
cvtColor(image0, gray, COLOR_BGR2GRAY);
mask = Scalar::all(0);
isColor = false;
}
@ -135,7 +135,7 @@ int main( int argc, char** argv )
case 'r':
cout << "Original image is restored\n";
image0.copyTo(image);
cvtColor(image, gray, CV_BGR2GRAY);
cvtColor(image, gray, COLOR_BGR2GRAY);
mask = Scalar::all(0);
break;
case 's':

@ -79,7 +79,7 @@ Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1, cons
for (size_t i = 0; i < features1.size(); i++)
{
circle(img_corr, features1[i].pt, 3, CV_RGB(255, 0, 0));
circle(img_corr, features1[i].pt, 3, Scalar(0, 0, 255));
}
for (size_t i = 0; i < features2.size(); i++)

@ -296,15 +296,15 @@ int main( int argc, char** argv )
help();
const string winName = "image";
cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE );
cvSetMouseCallback( winName.c_str(), on_mouse, 0 );
namedWindow( winName, WINDOW_AUTOSIZE );
setMouseCallback( winName, on_mouse, 0 );
gcapp.setImageAndWinName( image, winName );
gcapp.showImage();
for(;;)
{
int c = cvWaitKey(0);
int c = waitKey(0);
switch( (char) c )
{
case '\x1b':
@ -331,6 +331,6 @@ int main( int argc, char** argv )
}
exit_main:
cvDestroyWindow( winName.c_str() );
destroyWindow( winName );
return 0;
}

@ -27,7 +27,7 @@ int main(int argc, char** argv)
Mat cimg;
medianBlur(img, img, 5);
cvtColor(img, cimg, CV_GRAY2BGR);
cvtColor(img, cimg, COLOR_GRAY2BGR);
vector<Vec3f> circles;
HoughCircles(img, circles, CV_HOUGH_GRADIENT, 1, 10,

@ -27,7 +27,7 @@ int main(int argc, char** argv)
Mat dst, cdst;
Canny(src, dst, 50, 200, 3);
cvtColor(dst, cdst, CV_GRAY2BGR);
cvtColor(dst, cdst, COLOR_GRAY2BGR);
#if 0
vector<Vec2f> lines;

@ -109,7 +109,7 @@ int main( int argc, char** argv )
cvtColor(img_yuv, img, CV_YCrCb2BGR);
// this is counterpart for cvNamedWindow
namedWindow("image with grain", CV_WINDOW_AUTOSIZE);
namedWindow("image with grain", WINDOW_AUTOSIZE);
#if DEMO_MIXED_API_USE
// this is to demonstrate that img and iplimg really share the data - the result of the above
// processing is stored in img and thus in iplimg too.
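The "shared data" remark above relies on the zero-copy conversion between IplImage and cv::Mat headers; a minimal, self-contained sketch of that behaviour (2.4-era API, file name illustrative):

    // Hypothetical sketch: a Mat header wrapping an IplImage shares its pixel data,
    // so an edit through the Mat is visible through the IplImage as well.
    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/highgui/highgui_c.h>   // C API declarations, included explicitly for clarity

    int main()
    {
        IplImage* ipl = cvLoadImage("lena.jpg", 1);      // old C API image
        if (!ipl) return -1;
        cv::Mat img(ipl);                                // Mat header over the same pixel data
        cv::GaussianBlur(img, img, cv::Size(5,5), 0);    // edit through the Mat ...
        cvShowImage("shared data", ipl);                 // ... and the IplImage shows the result
        cvWaitKey(0);
        cvReleaseImage(&ipl);                            // the C image still owns the data
        return 0;
    }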

@ -71,7 +71,7 @@ int main( int argc, char** argv )
break;
frame.copyTo(image);
cvtColor(image, gray, CV_BGR2GRAY);
cvtColor(image, gray, COLOR_BGR2GRAY);
if( nightMode )
image = Scalar::all(0);

@ -65,7 +65,7 @@ int main(int argc, char** argv)
colorRad = 10;
maxPyrLevel = 1;
namedWindow( winName, CV_WINDOW_AUTOSIZE );
namedWindow( winName, WINDOW_AUTOSIZE );
createTrackbar( "spatialRad", winName, &spatialRad, 80, meanShiftSegmentation );
createTrackbar( "colorRad", winName, &colorRad, 60, meanShiftSegmentation );

@ -54,7 +54,7 @@ int main( int /*argc*/, char** /*argv*/ )
imshow( "rect & circle", img );
char key = (char)cvWaitKey();
char key = (char)waitKey();
if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
break;
}

@ -79,7 +79,7 @@ int main( int argc, char** argv )
OpenClose(open_close_pos, 0);
ErodeDilate(erode_dilate_pos, 0);
c = cvWaitKey(0);
c = waitKey(0);
if( (char)c == 27 )
break;

@ -159,7 +159,7 @@ int main(int argc, char** argv)
// init highgui window
string winName = "Reconstruction | press 'q' to quit";
namedWindow(winName, CV_WINDOW_NORMAL);
namedWindow(winName, WINDOW_NORMAL);
// params struct to pass to the trackbar handler
params p;

@ -13,7 +13,7 @@ int main(int, char* [])
do
{
video >> frame;
cvtColor(frame, curr, CV_RGB2GRAY);
cvtColor(frame, curr, COLOR_RGB2GRAY);
if(prev.empty())
{

@ -7,7 +7,7 @@
using namespace std;
using namespace cv;
const Scalar WHITE_COLOR = CV_RGB(255,255,255);
const Scalar WHITE_COLOR = Scalar(255,255,255);
const string winName = "points";
const int testStep = 5;
@ -69,15 +69,15 @@ static void on_mouse( int event, int x, int y, int /*flags*/, void* )
// put the text
stringstream text;
text << "current class " << classColors.size()-1;
putText( img, text.str(), Point(10,25), CV_FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
putText( img, text.str(), Point(10,25), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
text.str("");
text << "total classes " << classColors.size();
putText( img, text.str(), Point(10,50), CV_FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
putText( img, text.str(), Point(10,50), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
text.str("");
text << "total points " << trainedPoints.size();
putText(img, text.str(), cvPoint(10,75), CV_FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
putText(img, text.str(), Point(10,75), FONT_HERSHEY_SIMPLEX, 0.8f, WHITE_COLOR, 2 );
// draw points
for( size_t i = 0; i < trainedPoints.size(); i++ )
@ -178,7 +178,7 @@ static void find_decision_boundary_SVM( CvSVMParams params )
for( int i = 0; i < svmClassifier.get_support_vector_count(); i++ )
{
const float* supportVector = svmClassifier.get_support_vector(i);
circle( imgDst, Point(supportVector[0],supportVector[1]), 5, CV_RGB(255,255,255), -1 );
circle( imgDst, Point(supportVector[0],supportVector[1]), 5, Scalar(255,255,255), -1 );
}
}
@ -526,7 +526,7 @@ int main()
{
#if _NBC_
find_decision_boundary_NBC();
cvNamedWindow( "NormalBayesClassifier", WINDOW_AUTOSIZE );
namedWindow( "NormalBayesClassifier", WINDOW_AUTOSIZE );
imshow( "NormalBayesClassifier", imgDst );
#endif
#if _KNN_
@ -560,7 +560,7 @@ int main()
params.C = 10;
find_decision_boundary_SVM( params );
cvNamedWindow( "classificationSVM2", WINDOW_AUTOSIZE );
namedWindow( "classificationSVM2", WINDOW_AUTOSIZE );
imshow( "classificationSVM2", imgDst );
#endif

@ -125,8 +125,8 @@ int main(int argc, char** argv)
}
Mat grayImage0, grayImage1, depthFlt0, depthFlt1/*in meters*/;
cvtColor( colorImage0, grayImage0, CV_BGR2GRAY );
cvtColor( colorImage1, grayImage1, CV_BGR2GRAY );
cvtColor( colorImage0, grayImage0, COLOR_BGR2GRAY );
cvtColor( colorImage1, grayImage1, COLOR_BGR2GRAY );
depth0.convertTo( depthFlt0, CV_32FC1, 1./1000 );
depth1.convertTo( depthFlt1, CV_32FC1, 1./1000 );

@ -95,8 +95,6 @@ int main(int argc, char** argv)
if( !tmp_frame.data )
break;
bgsubtractor(tmp_frame, bgmask, update_bg_model ? -1 : 0);
//CvMat _bgmask = bgmask;
//cvSegmentFGMask(&_bgmask);
refineSegments(tmp_frame, bgmask, out_frame);
imshow("video", tmp_frame);
imshow("segmented", out_frame);

@ -118,7 +118,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
{
cout << filename << endl;
Mat cimg, cimg1;
cvtColor(img, cimg, CV_GRAY2BGR);
cvtColor(img, cimg, COLOR_GRAY2BGR);
drawChessboardCorners(cimg, boardSize, corners, found);
double sf = 640./MAX(img.rows, img.cols);
resize(cimg, cimg1, Size(), sf, sf);
@ -302,7 +302,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
{
Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
cvtColor(rimg, cimg, CV_GRAY2BGR);
cvtColor(rimg, cimg, COLOR_GRAY2BGR);
Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
if( useCalibrated )

@ -24,17 +24,14 @@ Mat image;
*/
static void on_trackbar( int, void* )
{
Mat new_image = Mat::zeros( image.size(), image.type() );
Mat new_image = Mat::zeros( image.size(), image.type() );
for( int y = 0; y < image.rows; y++ )
{ for( int x = 0; x < image.cols; x++ )
{ for( int c = 0; c < 3; c++ )
{
new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
}
}
}
imshow("New Image", new_image);
for( int y = 0; y < image.rows; y++ )
for( int x = 0; x < image.cols; x++ )
for( int c = 0; c < 3; c++ )
new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
imshow("New Image", new_image);
}
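The per-pixel loop above computes new_image(y,x) = alpha*image(y,x) + beta with saturation; the same linear transform could also be written with Mat::convertTo (a hedged equivalent, not what the tutorial itself uses):

    // Hypothetical one-line equivalent of the nested loops:
    // convertTo applies dst = saturate_cast(alpha*src + beta) to every element.
    cv::Mat new_image;
    image.convertTo(new_image, -1, alpha, beta);   // rtype -1 keeps the source type (CV_8UC3)
    cv::imshow("New Image", new_image);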

@ -31,14 +31,14 @@ int main( int, char** argv )
}
/// Convert to grayscale
cvtColor( src, src, CV_BGR2GRAY );
cvtColor( src, src, COLOR_BGR2GRAY );
/// Apply Histogram Equalization
equalizeHist( src, dst );
/// Display results
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( equalized_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
namedWindow( equalized_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
imshow( equalized_window, dst );

@ -33,8 +33,8 @@ int main( int, char** argv )
templ = imread( argv[2], 1 );
/// Create windows
namedWindow( image_window, CV_WINDOW_AUTOSIZE );
namedWindow( result_window, CV_WINDOW_AUTOSIZE );
namedWindow( image_window, WINDOW_AUTOSIZE );
namedWindow( result_window, WINDOW_AUTOSIZE );
/// Create Trackbar
const char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";

@ -28,7 +28,7 @@ int main( int, char** argv )
/// Read the image
src = imread( argv[1], 1 );
/// Transform it to HSV
cvtColor( src, hsv, CV_BGR2HSV );
cvtColor( src, hsv, COLOR_BGR2HSV );
/// Use only the Hue value
hue.create( hsv.size(), hsv.depth() );
@ -37,7 +37,7 @@ int main( int, char** argv )
/// Create Trackbar to enter the number of bins
const char* window_image = "Source image";
namedWindow( window_image, CV_WINDOW_AUTOSIZE );
namedWindow( window_image, WINDOW_AUTOSIZE );
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
Hist_and_Backproj(0, 0);

@ -31,10 +31,10 @@ int main( int, char** argv )
/// Read the image
src = imread( argv[1], 1 );
/// Transform it to HSV
cvtColor( src, hsv, CV_BGR2HSV );
cvtColor( src, hsv, COLOR_BGR2HSV );
/// Show the image
namedWindow( window_image, CV_WINDOW_AUTOSIZE );
namedWindow( window_image, WINDOW_AUTOSIZE );
imshow( window_image, src );
/// Set Trackbars for floodfill thresholds

@ -71,7 +71,7 @@ int main( int, char** argv )
}
/// Display
namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE );
namedWindow("calcHist Demo", WINDOW_AUTOSIZE );
imshow("calcHist Demo", histImage );
waitKey(0);

@ -22,24 +22,25 @@ int main( int argc, char** argv )
Mat src_test2, hsv_test2;
Mat hsv_half_down;
/// Load three images with different environment settings
if( argc < 4 )
{ printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
return -1;
}
/// Load three images with different environment settings
if( argc < 4 )
{
printf("** Error. Usage: ./compareHist_Demo <image_settings0> <image_setting1> <image_settings2>\n");
return -1;
}
src_base = imread( argv[1], 1 );
src_test1 = imread( argv[2], 1 );
src_test2 = imread( argv[3], 1 );
src_base = imread( argv[1], 1 );
src_test1 = imread( argv[2], 1 );
src_test2 = imread( argv[3], 1 );
/// Convert to HSV
cvtColor( src_base, hsv_base, CV_BGR2HSV );
cvtColor( src_test1, hsv_test1, CV_BGR2HSV );
cvtColor( src_test2, hsv_test2, CV_BGR2HSV );
/// Convert to HSV
cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
/// Using 30 bins for hue and 32 for saturation
/// Using 30 bins for hue and 32 for saturation
int h_bins = 50; int s_bins = 60;
int histSize[] = { h_bins, s_bins };
@ -74,14 +75,15 @@ int main( int argc, char** argv )
/// Apply the histogram comparison methods
for( int i = 0; i < 4; i++ )
{ int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
{
int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
printf( "Done \n" );

@ -37,9 +37,9 @@ int main( int, char** argv )
{ return -1; }
/// Create windows
namedWindow( "Erosion Demo", CV_WINDOW_AUTOSIZE );
namedWindow( "Dilation Demo", CV_WINDOW_AUTOSIZE );
cvMoveWindow( "Dilation Demo", src.cols, 0 );
namedWindow( "Erosion Demo", WINDOW_AUTOSIZE );
namedWindow( "Dilation Demo", WINDOW_AUTOSIZE );
moveWindow( "Dilation Demo", src.cols, 0 );
/// Create Erosion Trackbar
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",

@ -39,20 +39,20 @@ int main( int, char** argv )
{ return -1; }
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
namedWindow( window_name, WINDOW_AUTOSIZE );
/// Create Trackbar to select Morphology operation
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
/// Create Trackbar to select kernel type
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
&morph_elem, max_elem,
Morphology_Operations );
&morph_elem, max_elem,
Morphology_Operations );
/// Create Trackbar to choose kernel size
createTrackbar( "Kernel size:\n 2n +1", window_name,
&morph_size, max_kernel_size,
Morphology_Operations );
&morph_size, max_kernel_size,
Morphology_Operations );
/// Default start
Morphology_Operations( 0, 0 );

@ -40,7 +40,7 @@ int main( void )
dst = tmp;
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
namedWindow( window_name, WINDOW_AUTOSIZE );
imshow( window_name, dst );
/// Loop

@ -31,7 +31,7 @@ int display_dst( int delay );
*/
int main( void )
{
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
namedWindow( window_name, WINDOW_AUTOSIZE );
/// Load the source image
src = imread( "../images/lena.png", 1 );
@ -89,7 +89,7 @@ int display_caption( const char* caption )
dst = Mat::zeros( src.size(), src.type() );
putText( dst, caption,
Point( src.cols/4, src.rows/2),
CV_FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
imshow( window_name, dst );
int c = waitKey( DELAY_CAPTION );

@ -37,19 +37,19 @@ int main( int, char** argv )
src = imread( argv[1], 1 );
/// Convert the image to Gray
cvtColor( src, src_gray, CV_RGB2GRAY );
cvtColor( src, src_gray, COLOR_RGB2GRAY );
/// Create a window to display results
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
namedWindow( window_name, WINDOW_AUTOSIZE );
/// Create Trackbar to choose type of Threshold
createTrackbar( trackbar_type,
window_name, &threshold_type,
max_type, Threshold_Demo );
window_name, &threshold_type,
max_type, Threshold_Demo );
createTrackbar( trackbar_value,
window_name, &threshold_value,
max_value, Threshold_Demo );
window_name, &threshold_value,
max_value, Threshold_Demo );
/// Call the function to initialize
Threshold_Demo( 0, 0 );

@ -58,10 +58,10 @@ int main( int, char** argv )
dst.create( src.size(), src.type() );
/// Convert the image to grayscale
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Create a window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
namedWindow( window_name, WINDOW_AUTOSIZE );
/// Create a Trackbar for user to enter threshold
createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );

@ -65,13 +65,13 @@ int main( int, char** argv )
/// Show what you got
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
namedWindow( warp_window, CV_WINDOW_AUTOSIZE );
namedWindow( warp_window, WINDOW_AUTOSIZE );
imshow( warp_window, warp_dst );
namedWindow( warp_rotate_window, CV_WINDOW_AUTOSIZE );
namedWindow( warp_rotate_window, WINDOW_AUTOSIZE );
imshow( warp_rotate_window, warp_rotate_dst );
/// Wait until user exits the program

@ -25,7 +25,7 @@ int main(int, char** argv)
{ return -1; }
/// Convert it to gray
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Reduce the noise so we avoid false circle detection
GaussianBlur( src_gray, src_gray, Size(9, 9), 2, 2 );
@ -47,7 +47,7 @@ int main(int, char** argv)
}
/// Show your results
namedWindow( "Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE );
namedWindow( "Hough Circle Transform Demo", WINDOW_AUTOSIZE );
imshow( "Hough Circle Transform Demo", src );
waitKey(0);

@ -46,7 +46,7 @@ int main( int, char** argv )
}
/// Pass the image to gray
cvtColor( src, src_gray, CV_RGB2GRAY );
cvtColor( src, src_gray, COLOR_RGB2GRAY );
/// Apply Canny edge detector
Canny( src_gray, edges, 50, 200, 3 );
@ -55,10 +55,10 @@ int main( int, char** argv )
char thresh_label[50];
sprintf( thresh_label, "Thres: %d + input", min_threshold );
namedWindow( standard_name, CV_WINDOW_AUTOSIZE );
namedWindow( standard_name, WINDOW_AUTOSIZE );
createTrackbar( thresh_label, standard_name, &s_trackbar, max_trackbar, Standard_Hough);
namedWindow( probabilistic_name, CV_WINDOW_AUTOSIZE );
namedWindow( probabilistic_name, WINDOW_AUTOSIZE );
createTrackbar( thresh_label, probabilistic_name, &p_trackbar, max_trackbar, Probabilistic_Hough);
/// Initialize

@ -34,10 +34,10 @@ int main( int, char** argv )
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
/// Convert the image to grayscale
cvtColor( src, src_gray, CV_RGB2GRAY );
cvtColor( src, src_gray, COLOR_RGB2GRAY );
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
namedWindow( window_name, WINDOW_AUTOSIZE );
/// Apply Laplace function
Mat abs_dst;

@ -34,7 +34,7 @@ int main( int, char** argv )
map_y.create( src.size(), CV_32FC1 );
/// Create window
namedWindow( remap_window, CV_WINDOW_AUTOSIZE );
namedWindow( remap_window, WINDOW_AUTOSIZE );
/// Loop
for(;;)

@ -33,10 +33,10 @@ int main( int, char** argv )
GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT );
/// Convert it to gray
cvtColor( src, src_gray, CV_RGB2GRAY );
cvtColor( src, src_gray, COLOR_RGB2GRAY );
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
namedWindow( window_name, WINDOW_AUTOSIZE );
/// Generate grad_x and grad_y
Mat grad_x, grad_y;

@ -43,7 +43,7 @@ int main( int, char** argv )
printf( " ** Press 'ESC' to exit the program \n");
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
namedWindow( window_name, WINDOW_AUTOSIZE );
/// Initialize arguments for the filter
top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows);

@ -35,7 +35,7 @@ int main ( int, char** argv )
{ return -1; }
/// Create window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
namedWindow( window_name, WINDOW_AUTOSIZE );
/// Initialize arguments for the filter
anchor = Point( -1, -1 );

@ -30,12 +30,12 @@ int main( int, char** argv )
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
@ -68,6 +68,6 @@ void thresh_callback(int, void* )
}
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
}

@ -30,12 +30,12 @@ int main( int, char** argv )
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
@ -83,6 +83,6 @@ void thresh_callback(int, void* )
}
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
}

@ -30,12 +30,12 @@ int main( int, char** argv )
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
@ -85,6 +85,6 @@ void thresh_callback(int, void* )
}
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
}

@ -30,12 +30,12 @@ int main( int, char** argv )
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
@ -62,7 +62,7 @@ void thresh_callback(int, void* )
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the convex hull object for each contour
vector<vector<Point> >hull( contours.size() );
vector<vector<Point> >hull( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{ convexHull( Mat(contours[i]), hull[i], false ); }
@ -76,6 +76,6 @@ void thresh_callback(int, void* )
}
/// Show in a window
namedWindow( "Hull demo", CV_WINDOW_AUTOSIZE );
namedWindow( "Hull demo", WINDOW_AUTOSIZE );
imshow( "Hull demo", drawing );
}

@ -30,12 +30,12 @@ int main( int, char** argv )
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
@ -79,7 +79,7 @@ void thresh_callback(int, void* )
}
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
/// Calculate the area with the moments 00 and compare with the result of the OpenCV function
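The comparison mentioned in the comment above exploits the fact that the zeroth spatial moment m00 of a contour equals its area; a minimal sketch (the contours vector is assumed from the tutorial):

    // Hypothetical sketch: compare the area given by the 00 moment with contourArea().
    for (size_t i = 0; i < contours.size(); i++)
    {
        cv::Moments mu = cv::moments(contours[i], false);
        printf(" * Contour[%d] - m00: %.2f - contourArea: %.2f - arcLength: %.2f\n",
               (int)i, mu.m00, cv::contourArea(contours[i]),
               cv::arcLength(contours[i], true));
    }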

@ -71,9 +71,9 @@ int main( void )
/// Create Window and show your results
const char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
namedWindow( "Distance", CV_WINDOW_AUTOSIZE );
namedWindow( "Distance", WINDOW_AUTOSIZE );
imshow( "Distance", drawing );
waitKey(0);

@ -40,7 +40,7 @@ int main( int, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Set some parameters
int blockSize = 3; int apertureSize = 3;
@ -64,7 +64,7 @@ int main( int, char** argv )
minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, Mat() );
/* Create Window and Trackbar */
namedWindow( myHarris_window, CV_WINDOW_AUTOSIZE );
namedWindow( myHarris_window, WINDOW_AUTOSIZE );
createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, myHarris_function );
myHarris_function( 0, 0 );
@ -75,7 +75,7 @@ int main( int, char** argv )
minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, Mat() );
/* Create Window and Trackbar */
namedWindow( myShiTomasi_window, CV_WINDOW_AUTOSIZE );
namedWindow( myShiTomasi_window, WINDOW_AUTOSIZE );
createTrackbar( " Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function );
myShiTomasi_function( 0, 0 );

@ -31,10 +31,10 @@ int main( int, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Create a window and a trackbar
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
imshow( source_window, src );
@ -77,6 +77,6 @@ void cornerHarris_demo( int, void* )
}
}
/// Showing the result
namedWindow( corners_window, CV_WINDOW_AUTOSIZE );
namedWindow( corners_window, WINDOW_AUTOSIZE );
imshow( corners_window, dst_norm_scaled );
}

@ -32,10 +32,10 @@ int main( int, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Create Window
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
/// Create Trackbar to set the number of corners
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
@ -87,7 +87,7 @@ void goodFeaturesToTrack_Demo( int, void* )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); }
/// Show what you got
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, copy );
/// Set the needed parameters to find the refined corners

@ -32,10 +32,10 @@ int main( int, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Create Window
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
/// Create Trackbar to set the number of corners
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
@ -87,6 +87,6 @@ void goodFeaturesToTrack_Demo( int, void* )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); }
/// Show what you got
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, copy );
}

@ -294,7 +294,7 @@ int main(int argc, char* argv[])
if( s.calibrationPattern == Settings::CHESSBOARD)
{
Mat viewGray;
cvtColor(view, viewGray, CV_BGR2GRAY);
cvtColor(view, viewGray, COLOR_BGR2GRAY);
cornerSubPix( viewGray, pointBuf, Size(11,11),
Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
}

@ -56,7 +56,7 @@ int main( int argc, char** argv )
//-- 4. Display it as a CV_8UC1 image
imgDisparity16S.convertTo( imgDisparity8U, CV_8UC1, 255/(maxVal - minVal));
namedWindow( windowDisparity, CV_WINDOW_NORMAL );
namedWindow( windowDisparity, WINDOW_NORMAL );
imshow( windowDisparity, imgDisparity8U );
//-- 5. Save the image
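For context, convertTo applies dst = src*alpha + beta elementwise, so the 16-bit disparity can be mapped into the displayable 8-bit range as follows (a hedged sketch; minVal/maxVal are assumed to come from minMaxLoc as in the tutorial):

    // Hypothetical sketch: map disparities from [minVal, maxVal] to [0, 255].
    double minVal, maxVal;
    cv::minMaxLoc(imgDisparity16S, &minVal, &maxVal);
    double alpha = 255.0 / (maxVal - minVal);
    cv::Mat imgDisparity8U;
    imgDisparity16S.convertTo(imgDisparity8U, CV_8UC1, alpha, -minVal * alpha);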

@ -64,9 +64,9 @@ int main( void ){
/// 3. Display your stuff!
imshow( atom_window, atom_image );
cvMoveWindow( atom_window, 0, 200 );
moveWindow( atom_window, 0, 200 );
imshow( rook_window, rook_image );
cvMoveWindow( rook_window, w, 200 );
moveWindow( rook_window, w, 200 );
waitKey( 0 );
return(0);

@ -304,7 +304,7 @@ int Displaying_Random_Text( Mat image, char* window_name, RNG rng )
*/
int Displaying_Big_End( Mat image, char* window_name, RNG )
{
Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2);
int lineType = 8;
@ -313,7 +313,7 @@ int Displaying_Big_End( Mat image, char* window_name, RNG )
for( int i = 0; i < 255; i += 2 )
{
image2 = image - Scalar::all(i);
putText( image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
putText( image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3,
Scalar(i, i, 255), 5, lineType );
imshow( window_name, image2 );

@ -45,7 +45,7 @@ int main( int argc, char** argv )
// convert image to YUV color space. The output image will be created automatically.
Mat I_YUV;
cvtColor(I, I_YUV, CV_BGR2YCrCb);
cvtColor(I, I_YUV, COLOR_BGR2YCrCb);
vector<Mat> planes; // Use the STL's vector structure to store multiple Mat objects
split(I_YUV, planes); // split the image into separate color planes (Y U V)
@ -117,7 +117,7 @@ int main( int argc, char** argv )
cvtColor(I_YUV, I, CV_YCrCb2BGR); // and produce the output RGB image
namedWindow("image with grain", CV_WINDOW_AUTOSIZE); // use this to create images
namedWindow("image with grain", WINDOW_AUTOSIZE); // use this to create images
#ifdef DEMO_MIXED_API_USE
// this is to demonstrate that I and IplI really share the data - the result of the above

@ -30,8 +30,8 @@ int main( int argc, char* argv[])
else
I = imread( filename, CV_LOAD_IMAGE_COLOR);
namedWindow("Input", CV_WINDOW_AUTOSIZE);
namedWindow("Output", CV_WINDOW_AUTOSIZE);
namedWindow("Input", WINDOW_AUTOSIZE);
namedWindow("Output", WINDOW_AUTOSIZE);
imshow("Input", I);
double t = (double)getTickCount();
@ -42,7 +42,7 @@ int main( int argc, char* argv[])
cout << "Hand written function times passed in seconds: " << t << endl;
imshow("Output", J);
cvWaitKey(0);
waitKey(0);
Mat kern = (Mat_<char>(3,3) << 0, -1, 0,
-1, 5, -1,
@ -54,7 +54,7 @@ int main( int argc, char* argv[])
imshow("Output", K);
cvWaitKey(0);
waitKey(0);
return 0;
}
void Sharpen(const Mat& myImage,Mat& Result)

@ -95,8 +95,8 @@ int main( int argc, char** argv )
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
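After perspectiveTransform, the projected corners are typically drawn as a quadrilateral on the matches image; a minimal sketch (img_matches and img_object names assumed from the tutorial; the offset shifts the outline into the scene half of the side-by-side image):

    // Hypothetical sketch: outline the detected object in the scene part of img_matches.
    cv::Point2f offset((float)img_object.cols, 0.f);
    cv::line(img_matches, scene_corners[0] + offset, scene_corners[1] + offset, cv::Scalar(0, 255, 0), 4);
    cv::line(img_matches, scene_corners[1] + offset, scene_corners[2] + offset, cv::Scalar(0, 255, 0), 4);
    cv::line(img_matches, scene_corners[2] + offset, scene_corners[3] + offset, cv::Scalar(0, 255, 0), 4);
    cv::line(img_matches, scene_corners[3] + offset, scene_corners[0] + offset, cv::Scalar(0, 255, 0), 4);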

@ -22,7 +22,7 @@ int main( int argc, char** argv )
return -1;
}
namedWindow( "Display window", CV_WINDOW_AUTOSIZE );// Create a window for display.
namedWindow( "Display window", WINDOW_AUTOSIZE );// Create a window for display.
imshow( "Display window", image ); // Show our image inside it.
waitKey(0); // Wait for a keystroke in the window

@ -74,10 +74,10 @@ int main(int argc, char *argv[])
const char* WIN_RF = "Reference";
// Windows
namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE );
namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE );
cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2
namedWindow(WIN_RF, WINDOW_AUTOSIZE );
namedWindow(WIN_UT, WINDOW_AUTOSIZE );
moveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
moveWindow(WIN_UT, refS.width, 0); //1500, 2
cout << "Frame resolution: Width=" << refS.width << " Height=" << refS.height
<< " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl;
@ -124,7 +124,7 @@ int main(int argc, char *argv[])
imshow( WIN_RF, frameReference);
imshow( WIN_UT, frameUnderTest);
c = (char)cvWaitKey(delay);
c = (char)waitKey(delay);
if (c == 27) break;
}

@ -30,7 +30,7 @@ RNG rng(12345);
*/
int main( void )
{
CvCapture* capture;
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
@ -38,12 +38,12 @@ int main( void )
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
capture.open( -1 );
if( capture.isOpened() )
{
for(;;)
{
frame = cvQueryFrame( capture );
capture >> frame;
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
@ -67,7 +67,7 @@ void detectAndDisplay( Mat frame )
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
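After the preprocessing shown above, the cascade detections are usually drawn back on the colour frame; a minimal sketch of that step (an ellipse per face, names assumed from the tutorial):

    // Hypothetical sketch: mark each detected face on the original frame.
    for (size_t i = 0; i < faces.size(); i++)
    {
        cv::Point center(faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2);
        cv::ellipse(frame, center, cv::Size(faces[i].width/2, faces[i].height/2),
                    0, 0, 360, cv::Scalar(255, 0, 255), 4, 8, 0);
    }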

@ -30,7 +30,7 @@ RNG rng(12345);
*/
int main( void )
{
CvCapture* capture;
VideoCapture capture;
Mat frame;
//-- 1. Load the cascade
@ -38,12 +38,12 @@ int main( void )
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
capture.open( -1 );
if( capture.isOpened() )
{
for(;;)
{
frame = cvQueryFrame( capture );
capture >> frame;
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
@ -67,7 +67,7 @@ void detectAndDisplay( Mat frame )
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces

@ -52,7 +52,7 @@ namespace
if (frame.empty())
break;
cv::Mat gray;
cv::cvtColor(frame,gray,CV_RGB2GRAY);
cv::cvtColor(frame,gray,COLOR_RGB2GRAY);
vector<string> codes;
Mat corners;
findDataMatrix(gray, codes, corners);

@ -161,7 +161,7 @@ int main(int ac, char ** av)
if (frame.empty())
break;
cvtColor(frame, gray, CV_RGB2GRAY);
cvtColor(frame, gray, COLOR_RGB2GRAY);
detector.detect(gray, query_kpts); //Find interest points

@ -58,8 +58,8 @@ int main( int argc, char** argv )
namedWindow( "image", 1 );
img0.copyTo(img);
cvtColor(img, markerMask, CV_BGR2GRAY);
cvtColor(markerMask, imgGray, CV_GRAY2BGR);
cvtColor(img, markerMask, COLOR_BGR2GRAY);
cvtColor(markerMask, imgGray, COLOR_GRAY2BGR);
markerMask = Scalar::all(0);
imshow( "image", img );
setMouseCallback( "image", onMouse, 0 );
