added new Structured Light algorithm

Added a new algorithm for structured light based on a single camera and a single projector
pull/2828/head
Vladyslav Selotkin 4 years ago
parent ae14c7cee5
commit b44fba392b
  1. 1
      modules/structured_light/CMakeLists.txt
  2. 55
      modules/structured_light/images/calibration_result.xml
  3. BIN
      modules/structured_light/images/hf_phase0.png
  4. BIN
      modules/structured_light/images/hf_phase1.png
  5. BIN
      modules/structured_light/images/hf_phase2.png
  6. BIN
      modules/structured_light/images/hf_phase3.png
  7. BIN
      modules/structured_light/images/hf_ref0.png
  8. BIN
      modules/structured_light/images/hf_ref1.png
  9. BIN
      modules/structured_light/images/hf_ref2.png
  10. BIN
      modules/structured_light/images/hf_ref3.png
  11. BIN
      modules/structured_light/images/lf_phase0.png
  12. BIN
      modules/structured_light/images/lf_phase1.png
  13. BIN
      modules/structured_light/images/lf_phase2.png
  14. BIN
      modules/structured_light/images/lf_phase3.png
  15. BIN
      modules/structured_light/images/lf_ref0.png
  16. BIN
      modules/structured_light/images/lf_ref1.png
  17. BIN
      modules/structured_light/images/lf_ref2.png
  18. BIN
      modules/structured_light/images/lf_ref3.png
  19. BIN
      modules/structured_light/images/o_1000000.bmp
  20. BIN
      modules/structured_light/images/o_1000001.bmp
  21. BIN
      modules/structured_light/images/o_1000002.bmp
  22. BIN
      modules/structured_light/images/o_1000003.bmp
  23. BIN
      modules/structured_light/images/o_1000005.bmp
  24. BIN
      modules/structured_light/images/o_1000006.bmp
  25. BIN
      modules/structured_light/images/o_1000007.bmp
  26. BIN
      modules/structured_light/images/o_1000008.bmp
  27. 3
      modules/structured_light/include/opencv2/structured_light.hpp
  28. 66
      modules/structured_light/include/opencv2/structured_light/slmono.hpp
  29. 59
      modules/structured_light/include/opencv2/structured_light/slmono_calibration.hpp
  30. 41
      modules/structured_light/include/opencv2/structured_light/slmono_utils.hpp
  31. 209
      modules/structured_light/samples/calibrate_example.cpp
  32. 149
      modules/structured_light/samples/sl_example.cpp
  33. 2
      modules/structured_light/src/precomp.hpp
  34. 172
      modules/structured_light/src/slmono.cpp
  35. 333
      modules/structured_light/src/slmono_calibration.cpp
  36. 252
      modules/structured_light/src/slmono_utils.cpp

@ -1,2 +1,3 @@
set(the_description "Structured Light API")
ocv_define_module(structured_light opencv_core opencv_imgproc opencv_calib3d opencv_phase_unwrapping OPTIONAL opencv_viz WRAP python java objc)

@ -0,0 +1,55 @@
<?xml version="1.0"?>
<opencv_storage>
<img_shape type_id="opencv-matrix">
<rows>2</rows>
<cols>1</cols>
<dt>d</dt>
<data>
720. 1280.</data></img_shape>
<rms>8.9865323037520906e+00</rms>
<cam_int type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
2.2902176176240550e+03 0. 4.7760365772945374e+02 0.
2.0789910149720636e+03 5.9663906901935786e+02 0. 0. 1.</data></cam_int>
<cam_dist type_id="opencv-matrix">
<rows>1</rows>
<cols>5</cols>
<dt>d</dt>
<data>
2.2272502988720682e+00 6.2041480268790341e+00 5.1365359803581978e-01
-1.1065215615966861e-01 -4.8897144273472620e+01</data></cam_dist>
<proj_int type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
1.4173607712558548e+03 0. 5.1752307732265035e+02 0.
1.3963061950617591e+03 7.1503556186206947e+02 0. 0. 1.</data></proj_int>
<proj_dist type_id="opencv-matrix">
<rows>1</rows>
<cols>5</cols>
<dt>d</dt>
<data>
1.4490791728765842e-01 -7.4856581249561327e-01
-9.0651830613910390e-03 -3.4131371258920178e-02
1.3733497765243312e+00</data></proj_dist>
<roration type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
9.9215888102252536e-01 -9.4771920438436966e-03
1.2462318259093863e-01 1.0446695919745458e-02 9.9992002393776458e-01
-7.1282727683994072e-03 -1.2454565970956474e-01
8.3742796265969446e-03 9.9217853740556439e-01</data></roration>
<translation type_id="opencv-matrix">
<rows>3</rows>
<cols>1</cols>
<dt>d</dt>
<data>
-3.5484301147652934e+02 2.2059644511693236e+02
-1.3115183374509279e+03</data></translation>
</opencv_storage>

Binary file not shown.

After

Width:  |  Height:  |  Size: 205 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 205 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 205 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 205 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 117 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 104 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 79 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 77 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 768 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 768 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 768 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 768 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 768 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 768 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 768 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 768 KiB

@ -46,6 +46,9 @@
#include "opencv2/structured_light/structured_light.hpp"
#include "opencv2/structured_light/graycodepattern.hpp"
#include "opencv2/structured_light/sinusoidalpattern.hpp"
#include "opencv2/structured_light/slmono_calibration.hpp"
#include "opencv2/structured_light/slmono_utils.hpp"
#include "opencv2/structured_light/slmono.hpp"
/** @defgroup structured_light Structured Light API

@ -0,0 +1,66 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Guard is unique to this file: the previous generic OPENCV_structured_light_HPP
// could collide with another header's guard and silently skip this content.
#ifndef OPENCV_STRUCTURED_LIGHT_SLMONO_HPP
#define OPENCV_STRUCTURED_LIGHT_SLMONO_HPP

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <utility>

namespace cv{
namespace structured_light{

/** @brief Structured light pipeline for a single camera and a single projector.

 Generates phase-shifted sinusoidal fringe patterns and unwraps captured phase
 maps with either the PCG (DCT-based) or the TPU (temporal) algorithm.
*/
class CV_EXPORTS StructuredLightMono : public virtual Algorithm
{
public:
    /** @param img_size size of the projected pattern (projector resolution)
        @param patterns number of phase-shifted patterns, starting from 3
        @param stripes_number number of stripes in the projected pattern
        @param algs_type unwrapping algorithm selector: "PCG" or "TPU"
    */
    StructuredLightMono(Size img_size, int patterns, int stripes_number, std::string algs_type) :
        projector_size(img_size),
        pattern_num(patterns),
        stripes_num(stripes_number),
        alg_type(std::move(algs_type))
    {}

    //generate patterns for projecting
    void generatePatterns(OutputArrayOfArrays patterns, float stripes_angle);

    //main phase unwrapping algorithm; dispatches on the algorithm chosen at construction
    void unwrapPhase(InputOutputArrayOfArrays refs, InputOutputArrayOfArrays imgs, OutputArray out);

private:
    //size of the image for the whole algorithm (projector resolution)
    Size projector_size;
    //number of patterns used in the SL algorithm, starting from 3
    int pattern_num;
    //number of stripes in the image pattern
    int stripes_num;
    //"PCG" or "TPU"
    std::string alg_type;

    //remove shadows from images
    void removeShadows(InputOutputArrayOfArrays refs, InputOutputArrayOfArrays imgs);
    //phase unwrapping with PCG algorithm based on DCT
    void computePhasePCG(InputOutputArrayOfArrays refs, InputOutputArrayOfArrays imgs, OutputArray out);
    //standard temporal phase unwrapping algorithm
    void computePhaseTPU(InputOutputArrayOfArrays refs, InputOutputArrayOfArrays imgs, OutputArray out);
};

}} // cv::structured_light::
#endif

@ -0,0 +1,59 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_structured_light_mono_calibration_HPP
#define OPENCV_structured_light_mono_calibration_HPP
#include <opencv2/core.hpp>
//#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include "opencv2/core/utility.hpp"
#include <opencv2/calib3d.hpp>
//NOTE(review): `using namespace std;` in a public header leaks std:: into every
//includer; the .cpp files currently rely on it, so it cannot be removed in isolation.
using namespace std;
namespace cv{
namespace structured_light{
//Supported calibration board layouts.
enum calibrationPattern{CHESSBOARD, CIRCLES_GRID, ASYMETRIC_CIRCLES_GRID};
//Calibration settings loaded from a file by loadSettings().
struct Settings
{
Settings(); //sets the defaults (see slmono_calibration.cpp)
int patternType; //one of calibrationPattern
Size patternSize; //board size as (inner corners per row, per column)
Size subpixelSize; //search window for cornerSubPix refinement
Size imageSize; //camera image size; may be filled in from the first frame
float squareSize; //board square size, in user-chosen units
int nbrOfFrames; //number of valid frames required for calibration
};
//Populate sttngs from the settings file at path (FileStorage format).
void loadSettings(String path, Settings &sttngs);
//Fill patternCorners with 3D board-frame coordinates (z = 0) of the corners.
void createObjectPoints( InputArrayOfArrays patternCorners, Size patternSize, float squareSize, int patternType );
//Fill patternCorners with 2D projector-plane coordinates of the corners.
void createProjectorObjectPoints(InputArrayOfArrays patternCorners, Size patternSize, float squareSize, int patternType );
//Wrapper around cv::calibrateCamera; returns the RMS reprojection error.
double calibrate(InputArrayOfArrays objPoints, InputArrayOfArrays imgPoints, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays r, OutputArrayOfArrays t, Size imgSize );
//Back-project image points onto the z = 0 board plane per view.
void fromCamToWorld(InputArray cameraMatrix, InputArrayOfArrays rV, InputArrayOfArrays tV, InputArrayOfArrays imgPoints, OutputArrayOfArrays worldPoints );
//Write camera/projector intrinsics, distortions and the fundamental matrix to <path>.yml.
void saveCalibrationResults( String path, InputArray camK, InputArray camDistCoeffs, InputArray projK, InputArray projDistCoeffs, InputArray fundamental);
//Dump per-frame normalization transforms and projector point sets to <path>.yml.
void saveCalibrationData( String path, InputArrayOfArrays T1, InputArrayOfArrays T2,InputArrayOfArrays ptsProjCam, InputArrayOfArrays ptsProjProj, InputArrayOfArrays ptsProjCamN, InputArrayOfArrays ptsProjProjN);
//Normalize point coordinates; T receives the applied normalization transform.
void normalize(InputArray pts, const int& dim, InputOutputArray normpts, OutputArray T);
//Conversions between vector-of-points and Mat representations.
void fromVectorToMat(InputArrayOfArrays v, OutputArray pts);
void fromMatToVector(InputArray pts, OutputArrayOfArrays v);
//Read intrinsics/distortions/extrinsics from an XML/YAML calibration file.
void loadCalibrationData(string filename, OutputArray cameraIntrinsic, OutputArray projectorIntrinsic, OutputArray cameraDistortion, OutputArray projectorDistortion, OutputArray rotation, OutputArray translation);
//Apply the given camera model's distortion to an image.
void distortImage(InputArray input, InputArray camMat, InputArray dist, OutputArray output);
}
}
#endif

@ -0,0 +1,41 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_structured_light_mono_utils_HPP
#define OPENCV_structured_light_mono_utils_HPP
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
//NOTE(review): `using namespace std;` in a public header leaks std:: into every
//includer; kept because other translation units currently depend on it.
using namespace std;
namespace cv{
namespace structured_light{
//compute atan2 for object and reference images
void computeAtanDiff(InputOutputArrayOfArrays src, OutputArray dst);
/**
Phase unwrapping algorithm based on PCG.
**/
void unwrapPCG(InputArray img, OutputArray out, Size imgSize);
/**
Phase unwrapping algorithm based on TPU.
**/
void unwrapTPU(InputArray phase1, InputArray phase2, OutputArray out, int scale);
//Frequency-domain low-pass filter; filterSize controls the cutoff radius.
void lowPassFilter(InputArray img, OutputArray out, int filterSize = 30);
//Frequency-domain high-pass filter; filterSize controls the cutoff radius.
void highPassFilter(InputArray img, OutputArray out, int filterSize = 30);
//NOTE(review): the three declarations below have no definition in this chunk — confirm they are implemented.
void calibrateCameraProjector();
void distortPatterns();
void undistortPatterns();
void savePointCloud(InputArray phase, string filename); //filter image from outliers and save as txt
}
}
#endif

@ -0,0 +1,209 @@
#include <iostream>
#include <opencv2/structured_light.hpp>
#include <opencv2/highgui.hpp>
using namespace std;
using namespace cv;
//command-line keys for the calibration sample
static const char* keys =
{
    "{@camSettingsPath | | Path of camera calibration file}"
    "{@projSettingsPath | | Path of projector settings}"
    "{@patternPath | | Path to checkerboard pattern}"
    "{@outputName | | Base name for the calibration data}"
};

//board types supported by the sample (mirrors structured_light::calibrationPattern)
enum calibrationPattern{CHESSBOARD, CIRCLES_GRID, ASYMETRIC_CIRCLES_GRID};

//Interactively calibrate a camera + projector pair.
//Grabs frames of a projected chessboard, detects both boards in each frame,
//calibrates the camera, then the projector, finally stereo-calibrates the pair
//and saves all results under outputName.
//Keys in the capture loop: ENTER keeps a frame, SPACE discards it, ESC quits.
//Returns 0 on success, -1 on bad arguments / camera failure / user abort.
int calibrate( int argc, char **argv )
{
    VideoCapture cap(CAP_PVAPI);
    Mat frame;
    int nbrOfValidFrames = 0;

    vector<vector<Point2f>> imagePointsCam, imagePointsProj, PointsInProj, imagePointsProjN, pointsInProjN;
    vector<vector<Point3f>> objectPointsCam, worldPointsProj;
    vector<Point3f> tempCam;
    vector<Point2f> tempProj;
    vector<Mat> T1, T2;
    vector<Mat> projInProj, projInCam;
    vector<Mat> projInProjN, projInCamN;

    vector<Mat> rVecs, tVecs, projectorRVecs, projectorTVecs;
    Mat cameraMatrix, distCoeffs, projectorMatrix, projectorDistCoeffs;
    Mat pattern;
    vector<Mat> images;

    structured_light::Settings camSettings, projSettings;

    CommandLineParser parser(argc, argv, keys);
    String camSettingsPath = parser.get<String>(0);
    String projSettingsPath = parser.get<String>(1);
    String patternPath = parser.get<String>(2);
    String outputName = parser.get<String>(3);

    if( camSettingsPath.empty() || projSettingsPath.empty() || patternPath.empty() || outputName.empty() ){
        //all four positional arguments are required
        return -1;
    }

    pattern = imread(patternPath);
    loadSettings(camSettingsPath, camSettings);
    loadSettings(projSettingsPath, projSettings);

    //cv::Size is (width, height): width comes from cols, height from rows
    //(fixes the original swapped Size(rows, cols))
    projSettings.imageSize = Size(pattern.cols, pattern.rows);

    structured_light::createObjectPoints(tempCam, camSettings.patternSize,
                                         camSettings.squareSize, camSettings.patternType);
    structured_light::createProjectorObjectPoints(tempProj, projSettings.patternSize,
                                                  projSettings.squareSize, projSettings.patternType);

    if(!cap.isOpened())
    {
        std::cout << "Camera could not be opened" << std::endl;
        return -1;
    }
    cap.set(CAP_PROP_PVAPI_PIXELFORMAT, CAP_PVAPI_PIXELFORMAT_BAYER8);

    namedWindow("pattern", WINDOW_NORMAL);
    setWindowProperty("pattern", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN);
    namedWindow("camera view", WINDOW_NORMAL);

    imshow("pattern", pattern);
    std::cout << "Press any key when ready" << std::endl;
    waitKey(0);

    //capture loop: collect nbrOfFrames frames in which both boards are found
    while( nbrOfValidFrames < camSettings.nbrOfFrames )
    {
        cap >> frame;
        if( frame.data )
        {
            Mat color;
            cvtColor(frame, color, COLOR_BayerBG2BGR);
            if( camSettings.imageSize.height == 0 || camSettings.imageSize.width == 0 )
            {
                //same fix as above: Size(width = cols, height = rows)
                camSettings.imageSize = Size(frame.cols, frame.rows);
            }

            bool foundProj, foundCam;
            vector<Point2f> projPointBuf;
            vector<Point2f> camPointBuf;
            imshow("camera view", color);

            if( camSettings.patternType == CHESSBOARD && projSettings.patternType == CHESSBOARD )
            {
                int calibFlags = CALIB_CB_ADAPTIVE_THRESH;
                foundCam = findChessboardCorners(color, camSettings.patternSize,
                                                 camPointBuf, calibFlags);
                foundProj = findChessboardCorners(color, projSettings.patternSize,
                                                  projPointBuf, calibFlags);
                if( foundCam && foundProj )
                {
                    Mat gray;
                    cvtColor(color, gray, COLOR_BGR2GRAY);
                    cout << "found pattern" << endl;
                    Mat projCorners, camCorners;
                    //refine both corner sets to sub-pixel precision
                    cornerSubPix(gray, camPointBuf, camSettings.subpixelSize, Size(-1, -1),
                                 TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, 0.1));
                    cornerSubPix(gray, projPointBuf, projSettings.subpixelSize, Size(-1, -1),
                                 TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, 0.1));
                    drawChessboardCorners(gray, camSettings.patternSize, camPointBuf, foundCam);
                    drawChessboardCorners(gray, projSettings.patternSize, projPointBuf, foundProj);
                    imshow("camera view", gray);
                    char c = (char)waitKey(0);
                    if( c == 10 ) //ENTER: keep this frame
                    {
                        std::cout << "saving pattern #" << nbrOfValidFrames << " for calibration" << std::endl;
                        nbrOfValidFrames += 1;

                        imagePointsCam.push_back(camPointBuf);
                        imagePointsProj.push_back(projPointBuf);
                        objectPointsCam.push_back(tempCam);
                        PointsInProj.push_back(tempProj);
                        images.push_back(frame);

                        Mat ptsProjProj, ptsProjCam;
                        Mat ptsProjProjN, ptsProjCamN;
                        Mat TProjProj, TProjCam;
                        vector<Point2f> ptsProjProjVec;
                        vector<Point2f> ptsProjCamVec;

                        //normalized projector board points (projector frame)
                        structured_light::fromVectorToMat(tempProj, ptsProjProj);
                        structured_light::normalize(ptsProjProj, 2, ptsProjProjN, TProjProj);
                        structured_light::fromMatToVector(ptsProjProjN, ptsProjProjVec);
                        pointsInProjN.push_back(ptsProjProjVec);
                        T2.push_back(TProjProj);
                        projInProj.push_back(ptsProjProj);
                        projInProjN.push_back(ptsProjProjN);

                        //normalized projector board points as seen by the camera
                        structured_light::fromVectorToMat(projPointBuf, ptsProjCam);
                        structured_light::normalize(ptsProjCam, 2, ptsProjCamN, TProjCam);
                        structured_light::fromMatToVector(ptsProjCamN, ptsProjCamVec);
                        imagePointsProjN.push_back(ptsProjCamVec);
                        T1.push_back(TProjCam);
                        projInCam.push_back(ptsProjCam);
                        projInCamN.push_back(ptsProjCamN);
                    }
                    else if( c == 32 ) //SPACE: discard this frame
                    {
                        std::cout << "capture discarded" << std::endl;
                    }
                    else if( c == 27 ) //ESC: abort
                    {
                        std::cout << "closing program" << std::endl;
                        return -1;
                    }
                }
                else
                {
                    cout << "no pattern found, move board and press any key" << endl;
                    imshow("camera view", frame);
                    waitKey(0);
                }
            }
        }
    }

    structured_light::saveCalibrationData(outputName + "_points.yml", T1, T2, projInCam, projInProj, projInCamN, projInProjN);

    //camera calibration from the detected camera-board points
    double rms = structured_light::calibrate(objectPointsCam, imagePointsCam, cameraMatrix, distCoeffs,
                                             rVecs, tVecs, camSettings.imageSize);
    cout << "rms = " << rms << endl;
    cout << "camera matrix = \n" << cameraMatrix << endl;
    cout << "dist coeffs = \n" << distCoeffs << endl;

    //lift the projector board points to 3D, then calibrate the projector
    structured_light::fromCamToWorld(cameraMatrix, rVecs, tVecs, imagePointsProj, worldPointsProj);
    rms = structured_light::calibrate(worldPointsProj, PointsInProj, projectorMatrix, projectorDistCoeffs,
                                      projectorRVecs, projectorTVecs, projSettings.imageSize);
    cout << "rms = " << rms << endl;
    cout << "projector matrix = \n" << projectorMatrix << endl;
    cout << "projector dist coeffs = \n" << distCoeffs << endl;

    //joint camera/projector stereo calibration
    Mat stereoR, stereoT, essential, fundamental;
    Mat RCam, RProj, PCam, PProj, Q;
    rms = stereoCalibrate(worldPointsProj, imagePointsProj, PointsInProj, cameraMatrix, distCoeffs,
                          projectorMatrix, projectorDistCoeffs, camSettings.imageSize, stereoR, stereoT,
                          essential, fundamental);
    cout << "stereo calibrate: \n" << fundamental << endl;

    structured_light::saveCalibrationResults(outputName, cameraMatrix, distCoeffs, projectorMatrix, projectorDistCoeffs, fundamental );
    return 0;
}

@ -0,0 +1,149 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/structured_light.hpp>
using namespace std;
using namespace cv;
// 1. Calibrate camera=projector
// 2. Save calibration params
// 3. Generate projected pattern
// 4. Distort projected pattern
// 5. Capture patterns reference pattern - optional
// 6. Capture patterns with object
// 7. Image preprocessing
// 7. Load refs and images to unwrap algorithm
// 8. Save points to file
//Load paired reference and object images as float32 grayscale in [0,1].
//refs_files[i] is paired with imgs_files[i]; both lists must be the same length.
void readImages(vector<string> refs_files, vector<string> imgs_files, OutputArrayOfArrays refs, OutputArrayOfArrays imgs)
{
    vector<Mat>& refs_ = *(vector<Mat>*) refs.getObj();
    vector<Mat>& imgs_ = *(vector<Mat>*) imgs.getObj();

    for (size_t i = 0; i < refs_files.size(); i++)
    {
        //imread(IMREAD_COLOR) yields a 3-channel BGR image, so convert with
        //COLOR_BGR2GRAY: the original COLOR_RGBA2GRAY expects 4 channels and
        //fails at runtime on a 3-channel input
        Mat img = imread(refs_files[i], IMREAD_COLOR);
        cvtColor(img, img, COLOR_BGR2GRAY);
        img.convertTo(img, CV_32FC1, 1.f/255);
        refs_.push_back(img);

        img = imread(imgs_files[i], IMREAD_COLOR);
        cvtColor(img, img, COLOR_BGR2GRAY);
        img.convertTo(img, CV_32FC1, 1.f/255);
        imgs_.push_back(img);
    }
}
void captureImages(InputArrayOfArrays patterns, OutputArrayOfArrays refs, OutputArrayOfArrays imgs, bool isCaptureRefs)
{
vector<Mat>& patterns_ = *(vector<Mat>*)patterns.getObj();
vector<Mat>& refs_ = *(vector<Mat>*)refs.getObj();
vector<Mat>& imgs_ = *(vector<Mat>*)imgs.getObj();
VideoCapture cap;
if(cap.open(0))
{
Mat pause(projector_size, CV_64FC3, Scalar(0));
putText(pause, "Place the object", Point(projector_size.width/4, projector_size.height/4), FONT_HERSHEY_COMPLEX_SMALL, projector_size.width/400, Scalar(255,255,255), 2);
putText(pause, "Press any key when ready", Point(projector_size.width/4, projector_size.height/4+projector_size.height/15), FONT_HERSHEY_COMPLEX_SMALL, projector_size.width/400, Scalar(255,255,255), 2);
namedWindow("Display pattern", WINDOW_NORMAL);// Create a window for display.
setWindowProperty("Display pattern", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN);
imshow("Display pattern", pause);
waitKey();
if (isCaptureRefs)
{
for(auto i = 0; i < patterns_.size(); i++)
{
Mat frame;
cap >> frame;
if(frame.empty()) break; // end of video stream
namedWindow("Display pattern", WINDOW_NORMAL);// Create a window for display.
setWindowProperty("Display pattern", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN);
imshow("Display pattern", patterns_[i]);
waitKey();
Mat grayFrame;
cv::cvtColor(frame, grayFrame, COLOR_RGB2GRAY);
grayFrame.convertTo(grayFrame, CV_32FC1, 1.f/255);
refs_.push_back(grayFrame); //ADD ADDITIONAL SWITCH TO SELECT WHERE to SAVE
}
}
pause = Mat(projector_size, CV_64FC3, Scalar(0));
putText(pause, "Place the object", Point(projector_size.width/4, projector_size.height/4), FONT_HERSHEY_COMPLEX_SMALL, projector_size.width/400, Scalar(255,255,255), 2);
putText(pause, "Press any key when ready", Point(projector_size.width/4, projector_size.height/4+projector_size.height/15), FONT_HERSHEY_COMPLEX_SMALL, projector_size.width/400, Scalar(255,255,255), 2);
namedWindow("Display pattern", WINDOW_NORMAL);// Create a window for display.
setWindowProperty("Display pattern", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN);
imshow( "Display pattern", pause);
waitKey();
for(auto i = 0; i < patterns_.size(); i++)
{
Mat frame;
cap >> frame;
if( frame.empty() ) break; // end of video stream
namedWindow("Display pattern", WINDOW_NORMAL);// Create a window for display.
setWindowProperty("Display pattern", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN);
imshow( "Display pattern", patterns_[i]);
waitKey();
Mat grayFrame;
cv::cvtColor(frame, grayFrame, COLOR_RGB2GRAY);
grayFrame.convertTo(grayFrame, CV_32FC1, 1.f/255);
imgs_.push_back(grayFrame); //ADD ADDITIONAL SWITCH TO SELECT WHERE to SAVE
}
cap.release();
}
}
//Demo pipeline: generate fringe patterns, capture reference/object images from
//the default camera, undistort and smooth them, unwrap the phase and display
//the normalized phase map.
int main()
{
int imgNum = 4; //number of phase-shifted patterns per frequency set
cv::Size projector_size = cv::Size(512, 512);
string alg_type = "PCG"; //unwrapping algorithm: "PCG" or "TPU"
vector<cv::Mat> patterns, refs, imgs;
//37 = number of stripes in the projected pattern
structured_light::StructuredLightMono sl(projector_size, imgNum, 37, alg_type);
sl.generatePatterns(patterns, 0.3); //0.3 mixes the stripe orientation
captureImages(patterns, refs, imgs);
string filename = "../images/calibration_result.xml";
Mat cameraMatrix, projectorMatrix, cameraDistortion, projectorDistortion, rotation, translation;
structured_light::loadCalibrationData(filename, cameraMatrix, projectorMatrix, cameraDistortion, projectorDistortion, rotation, translation);
//undistort and denoise both image stacks before phase unwrapping
for (unsigned i = 0; i < refs.size(); i++)
{
Mat undistored;
undistort(refs[i], undistored, cameraMatrix, cameraDistortion);
GaussianBlur(undistored, refs[i], cv::Size(5, 5), 0);
undistort(imgs[i], undistored, cameraMatrix, cameraDistortion);
GaussianBlur(undistored, imgs[i], cv::Size(5, 5), 0);
}
Mat phase;
sl.unwrapPhase(refs, imgs, phase);
//normalize the phase map to [0,1] for display
//NOTE(review): divides by (max-min) — degenerate if the phase map is constant
double min, max;
minMaxLoc(phase, &min, &max);
phase -= min;
phase.convertTo(phase, CV_32FC1, 1.f/(max-min));
namedWindow( "Display window", cv::WINDOW_AUTOSIZE );// Create a window for display.
imshow( "Display window", phase ); // Show our image inside it.
cv::waitKey();
return 0;
}

@ -46,4 +46,4 @@
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#endif
#endif

@ -0,0 +1,172 @@
#include <opencv2/structured_light/slmono.hpp>
#include "opencv2/structured_light/slmono_utils.hpp"
namespace cv {
namespace structured_light {
//read reference images and object images from specified files
//void StructuredLightMono::readImages(vector<string> refs_files, vector<string> imgs_files, OutputArrayOfArrays refs, OutputArrayOfArrays imgs)
//{
// vector<Mat>& refs_ = *(vector<Mat>*) refs.getObj();
// vector<Mat>& imgs_ = *(vector<Mat>*) imgs.getObj();
//
// for(auto i = 0; i < refs_files.size(); i++)
// {
// auto img = imread(refs_files[i], IMREAD_COLOR);
// cvtColor(img, img, COLOR_RGBA2GRAY);
// img.convertTo(img, CV_32FC1, 1.f/255);
// refs_.push_back(img);
//
// img = imread(imgs_files[i], IMREAD_COLOR);
// cvtColor(img, img, COLOR_RGBA2GRAY);
// img.convertTo(img, CV_32FC1, 1.f/255);
// imgs_.push_back(img);
// }
//}
//main phase unwrapping function
//Dispatch phase unwrapping to the algorithm selected at construction time:
//"PCG" uses the DCT-based solver, "TPU" the temporal one; any other value
//leaves `out` untouched.
void StructuredLightMono::unwrapPhase(InputOutputArrayOfArrays refs, InputOutputArrayOfArrays imgs, OutputArray out)
{
    const bool usePCG = (alg_type == "PCG");
    const bool useTPU = (alg_type == "TPU");
    if (usePCG)
        computePhasePCG(refs, imgs, out);
    else if (useTPU)
        computePhaseTPU(refs, imgs, out);
}
//Mask out shadowed pixels: any pixel whose mean intensity across all object
//images is below 0.05 is zeroed in both the reference and the object stacks.
void StructuredLightMono::removeShadows(InputOutputArrayOfArrays refs, InputOutputArrayOfArrays imgs)
{
    vector<Mat>& refs_ = *(vector<Mat>*)refs.getObj();
    vector<Mat>& imgs_ = *(vector<Mat>*)imgs.getObj();
    Size size = refs_[0].size();

    //mean of the object images, accumulated with Mat arithmetic instead of the
    //original per-pixel triple loop (same result, far fewer element accesses)
    Mat mean = Mat::zeros(size, CV_32FC1);
    for (size_t k = 0; k < imgs_.size(); k++)
    {
        add(mean, imgs_[k], mean);
    }
    mean /= (float)imgs_.size();

    //binary mask: 1 where lit, 0 where shadowed (type 0 == THRESH_BINARY)
    Mat shadowMask;
    threshold(mean, shadowMask, 0.05, 1, 0);

    //apply the mask to every reference and object image in place
    for (size_t k = 0; k < imgs_.size(); k++)
    {
        multiply(shadowMask, refs_[k], refs_[k]);
        multiply(shadowMask, imgs_[k], imgs_[k]);
    }
}
//Generate phase-shifted sinusoidal fringe patterns for projection.
//stripes_angle in [0,1] mixes the stripe orientation between the two axes.
//The TPU algorithm additionally needs a low frequency set (a single period
//across the projector width), appended after the high frequency set.
void StructuredLightMono::generatePatterns(OutputArrayOfArrays patterns, float stripes_angle)
{
    vector<Mat>& patterns_ = *(vector<Mat>*) patterns.getObj();

    //phase shift between consecutive patterns in a set
    float shift = 2*(float)CV_PI/pattern_num;

    //append pattern_num phase-shifted cosine fringes with period phi (in pixels);
    //shared by the high and low frequency sets instead of duplicating the loops
    auto addPatternSet = [&](float phi)
    {
        float delta = 2*(float)CV_PI/phi;
        Mat pattern(projector_size, CV_32FC1, Scalar(0));
        for (auto k = 0; k < pattern_num; k++)
        {
            for (int i = 0; i < projector_size.height; ++i)
            {
                for (int j = 0; j < projector_size.width; ++j)
                {
                    //cosine fringe shifted by k*shift, normalized to [0,1]
                    pattern.at<float>(i, j) = (cos((stripes_angle*i+(1-stripes_angle)*j)*delta+k*shift) + 1)/2;
                }
            }
            patterns_.push_back(pattern.clone());
        }
    };

    //high frequency set: stripes_num periods across the projector width
    addPatternSet((float)projector_size.width/(float)stripes_num);

    if (alg_type == "TPU")
    {
        //low frequency set: a single period across the projector width
        addPatternSet((float)projector_size.width);
    }
}
//Compute the wrapped phase difference between the object and reference image
//stacks, then unwrap it with the PCG (DCT-based) algorithm into `out`.
void StructuredLightMono::computePhasePCG(InputOutputArrayOfArrays refs, InputOutputArrayOfArrays imgs, OutputArray out){
    vector<Mat>& refs_ = *(vector<Mat>* ) refs.getObj();
    const Size imgSize = refs_[0].size();

    Mat objPhase = Mat(imgSize, CV_32FC1);
    Mat refPhase = Mat(imgSize, CV_32FC1);

    //zero out shadowed pixels in both stacks before computing phases
    removeShadows(refs, imgs);

    //wrapped phase of the reference plane and of the object
    computeAtanDiff(refs, refPhase);
    computeAtanDiff(imgs, objPhase);

    //the phase difference carries the object's height information
    subtract(objPhase, refPhase, objPhase);
    unwrapPCG(objPhase, out, imgSize);
}
//Temporal phase unwrapping: the first half of refs/imgs holds the high
//frequency pattern set, the second half the low frequency set.
void StructuredLightMono::computePhaseTPU(InputOutputArrayOfArrays refs, InputOutputArrayOfArrays imgs, OutputArray out)
{
    vector<Mat>& refs_ = *(vector<Mat>* ) refs.getObj();
    vector<Mat>& imgs_ = *(vector<Mat>* ) imgs.getObj();
    const Size imgSize = refs_[0].size();

    removeShadows(refs, imgs);

    //split both stacks into their high and low frequency halves
    const int half = (int)(refs_.size()/2);
    vector<Mat> refsHigh(refs_.begin(), refs_.begin()+half);
    vector<Mat> refsLow(refs_.begin()+half, refs_.end());
    vector<Mat> objHigh(imgs_.begin(), imgs_.begin()+half);
    vector<Mat> objLow(imgs_.begin()+half, imgs_.end());

    //wrapped phase of every set
    Mat refPhaseLow = Mat(imgSize, CV_32FC1);
    Mat refPhaseHigh = Mat(imgSize, CV_32FC1);
    Mat objPhaseLow = Mat(imgSize, CV_32FC1);
    Mat objPhaseHigh = Mat(imgSize, CV_32FC1);
    computeAtanDiff(refsLow, refPhaseLow);
    computeAtanDiff(refsHigh, refPhaseHigh);
    computeAtanDiff(objLow, objPhaseLow);
    computeAtanDiff(objHigh, objPhaseHigh);

    //object phase relative to the reference plane, per frequency
    subtract(objPhaseLow, refPhaseLow, objPhaseLow);
    subtract(objPhaseHigh, refPhaseHigh, objPhaseHigh);

    unwrapTPU(objPhaseLow, objPhaseHigh, out, stripes_num);
}
}
}

@ -0,0 +1,333 @@
#include "precomp.hpp"
#include <iostream>
#include <opencv2/structured_light/slmono_calibration.hpp>
namespace cv{
namespace structured_light{
//Deafault setting for calibration
Settings::Settings(){
patternType = CHESSBOARD;
patternSize = Size(13, 9);
subpixelSize = Size(11, 11);
squareSize = 50;
nbrOfFrames = 25;
}
//Populate sttngs from the FileStorage settings file at path; each known key
//is read into the matching field.
void loadSettings( String path, Settings &sttngs )
{
    FileStorage fsInput(path, FileStorage::READ);
    fsInput["PatternType"] >> sttngs.patternType;
    fsInput["PatternWidth"] >> sttngs.patternSize.width;
    fsInput["PatternHeight"] >> sttngs.patternSize.height;
    fsInput["SubPixelWidth"] >> sttngs.subpixelSize.width;
    fsInput["SubPixelHeight"] >> sttngs.subpixelSize.height;
    fsInput["SquareSize"] >> sttngs.squareSize;
    fsInput["NbrOfFrames"] >> sttngs.nbrOfFrames;
    fsInput.release();
}
//Thin wrapper around cv::calibrateCamera with no extra flags;
//returns the RMS reprojection error.
double calibrate(InputArrayOfArrays objPoints, InputArrayOfArrays imgPoints,
InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays r, OutputArrayOfArrays t, Size imgSize )
{
    const int calibFlags = 0;
    return calibrateCamera(objPoints, imgPoints, imgSize, cameraMatrix, distCoeffs, r, t, calibFlags);
}
//Fill patternCorners with the 3D board-frame coordinates (z = 0) of every
//corner. Only CHESSBOARD and CIRCLES_GRID layouts are generated; asymmetric
//circle grids (and unknown types) produce no points, as before.
void createObjectPoints(InputArrayOfArrays patternCorners, Size patternSize, float squareSize, int patternType)
{
    std::vector<Point3f>& patternCorners_ = *( std::vector<Point3f>* ) patternCorners.getObj();
    if( patternType != CHESSBOARD && patternType != CIRCLES_GRID )
        return;
    //regular grid: rows along the first coordinate, columns along the second
    for( int row = 0; row < patternSize.height; ++row )
    {
        for( int col = 0; col < patternSize.width; ++col )
        {
            patternCorners_.push_back(Point3f(float(row*squareSize), float(col*squareSize), 0));
        }
    }
}
//Fill patternCorners with the 2D projector-plane coordinates of every corner.
//Indices are 1-based and ordered (x = column, y = row). Only CHESSBOARD and
//CIRCLES_GRID layouts are generated; other types produce no points, as before.
void createProjectorObjectPoints(InputArrayOfArrays patternCorners, Size patternSize, float squareSize,
int patternType )
{
    std::vector<Point2f>& patternCorners_ = *( std::vector<Point2f>* ) patternCorners.getObj();
    if( patternType != CHESSBOARD && patternType != CIRCLES_GRID )
        return;
    for( int row = 1; row <= patternSize.height; ++row )
    {
        for( int col = 1; col <= patternSize.width; ++col )
        {
            patternCorners_.push_back(Point2f(float(col*squareSize), float(row*squareSize)));
        }
    }
}
//Back-project 2D image points onto the calibration-board plane (z = 0 in the
//board frame) and collect the resulting 3D points, one vector per view.
//cameraMatrix: 3x3 intrinsics; rV/tV: per-view rvec/tvec from calibrateCamera;
//imgPoints: per-view pixel coordinates; worldPoints: output 3D points (z = 0).
void fromCamToWorld(InputArray cameraMatrix, InputArrayOfArrays rV, InputArrayOfArrays tV,
InputArrayOfArrays imgPoints, OutputArrayOfArrays worldPoints)
{
std::vector<std::vector<Point2f>>& imgPoints_ = *( std::vector< std::vector<Point2f> >* ) imgPoints.getObj();
std::vector<std::vector<Point3f>>& worldPoints_ = *( std::vector< std::vector<Point3f> >* ) worldPoints.getObj();
std::vector<Mat>& rV_ = *( std::vector<Mat>* ) rV.getObj();
std::vector<Mat>& tV_ = *( std::vector<Mat>* ) tV.getObj();
int s = (int) rV_.size();
//inverse intrinsics map pixel coordinates to normalized camera rays
Mat invK64, invK;
//invK64 = cameraMatrix.inv();
invert(cameraMatrix, invK64);
invK64.convertTo(invK, CV_32F);
for(int i = 0; i < s; ++i)
{
Mat r, t, rMat;
rV_[i].convertTo(r, CV_32F);
tV_[i].convertTo(t, CV_32F);
Rodrigues(r, rMat); //rotation vector -> rotation matrix
//translation expressed in the board frame (rMat^-1 * t)
Mat transPlaneToCam = rMat.inv()*t;
vector<Point3f> wpTemp;
int s2 = (int) imgPoints_[i].size();
for(int j = 0; j < s2; ++j){
//homogeneous pixel coordinate (u, v, 1)
Mat coords(3, 1, CV_32F);
coords.at<float>(0, 0) = imgPoints_[i][j].x;
coords.at<float>(1, 0) = imgPoints_[i][j].y;
coords.at<float>(2, 0) = 1.0f;
//ray direction in the camera frame, then rotated into the board frame
Mat worldPtCam = invK*coords;
Mat worldPtPlane = rMat.inv()*worldPtCam;
//scale the ray so that it intersects the z = 0 board plane
float scale = transPlaneToCam.at<float>(2)/worldPtPlane.at<float>(2);
Mat worldPtPlaneReproject = scale*worldPtPlane - transPlaneToCam;
Point3f pt;
pt.x = worldPtPlaneReproject.at<float>(0);
pt.y = worldPtPlaneReproject.at<float>(1);
pt.z = 0; //points lie on the board plane by construction
wpTemp.push_back(pt);
}
worldPoints_.push_back(wpTemp);
}
}
//Write camera/projector intrinsics, distortion coefficients and the
//fundamental matrix to <path>.yml.
void saveCalibrationResults( String path, InputArray camK, InputArray camDistCoeffs, InputArray projK, InputArray projDistCoeffs,
InputArray fundamental )
{
    FileStorage fs(path + ".yml", FileStorage::WRITE);
    fs << "camIntrinsics" << *(Mat*) camK.getObj();
    fs << "camDistCoeffs" << *(Mat*) camDistCoeffs.getObj();
    fs << "projIntrinsics" << *(Mat*) projK.getObj();
    fs << "projDistCoeffs" << *(Mat*) projDistCoeffs.getObj();
    fs << "fundamental" << *(Mat*) fundamental.getObj();
    fs.release();
}
// Serializes per-view transforms and projector/camera point correspondences
// (raw and normalized) to "<path>.yml", with each entry suffixed by its view index.
void saveCalibrationData(String path, InputArrayOfArrays T1, InputArrayOfArrays T2, InputArrayOfArrays ptsProjCam,
    InputArrayOfArrays ptsProjProj, InputArrayOfArrays ptsProjCamN, InputArrayOfArrays ptsProjProjN)
{
    std::vector<Mat>& transformsCam = *( std::vector<Mat>* ) T1.getObj();
    std::vector<Mat>& transformsProj = *( std::vector<Mat>* ) T2.getObj();
    std::vector<Mat>& pointsCam = *( std::vector<Mat>* ) ptsProjCam.getObj();
    std::vector<Mat>& pointsProj = *( std::vector<Mat>* ) ptsProjProj.getObj();
    std::vector<Mat>& pointsCamNorm = *( std::vector<Mat>* ) ptsProjCamN.getObj();
    std::vector<Mat>& pointsProjNorm = *( std::vector<Mat>* ) ptsProjProjN.getObj();
    FileStorage fs(path + ".yml", FileStorage::WRITE);
    int size = (int) transformsCam.size();
    fs << "size" << size;
    for( int view = 0; view < size; ++view )
    {
        ostringstream nbr;
        nbr << view;
        // Per-view key suffix.
        fs << "TprojCam" + nbr.str() << transformsCam[view];
        fs << "TProjProj" + nbr.str() << transformsProj[view];
        fs << "ptsProjCam" + nbr.str() << pointsCam[view];
        fs << "ptsProjProj" + nbr.str() << pointsProj[view];
        fs << "ptsProjCamN" + nbr.str() << pointsCamNorm[view];
        fs << "ptsProjProjN" + nbr.str() << pointsProjNorm[view];
    }
    fs.release();
}
// Loads camera/projector calibration parameters from a FileStorage file.
// Outputs are left untouched when the file cannot be opened.
void loadCalibrationData(string filename, OutputArray cameraIntrinsic, OutputArray projectorIntrinsic,
OutputArray cameraDistortion, OutputArray projectorDistortion, OutputArray rotation, OutputArray translation)
{
    Mat& cameraIntrinsic_ = *(Mat*) cameraIntrinsic.getObj();
    Mat& projectorIntrinsic_ = *(Mat*) projectorIntrinsic.getObj();
    Mat& cameraDistortion_ = *(Mat*) cameraDistortion.getObj();
    Mat& projectorDistortion_ = *(Mat*) projectorDistortion.getObj();
    Mat& rotation_ = *(Mat*) rotation.getObj();
    Mat& translation_ = *(Mat*) translation.getObj();
    FileStorage fs;
    fs.open(filename, FileStorage::READ);
    if (!fs.isOpened())
    {
        cerr << "Failed to open " << filename << endl;
        // Bail out instead of reading from an unopened FileStorage
        // (the original fell through and read anyway).
        return;
    }
    fs["cam_int"] >> cameraIntrinsic_;
    fs["cam_dist"] >> cameraDistortion_;
    fs["proj_int"] >> projectorIntrinsic_;
    fs["proj_dist"] >> projectorDistortion_;
    // NOTE(review): "roration" looks like a typo for "rotation" — confirm the
    // key actually written by the calibration-saving side before renaming it.
    fs["roration"] >> rotation_;
    fs["translation"] >> translation_;
    fs.release();
}
// Hartley-style point normalization: translates the `dim`-dimensional points
// (one point per column of `pts`) so their centroid is at the origin, then
// scales them so the average distance from the origin is sqrt(dim).
// `T` receives the matching (dim+1)x(dim+1) homogeneous transform.
void normalize( InputArray pts, const int& dim, InputOutputArray normpts, OutputArray T)
{
    Mat& srcPts = *(Mat*) pts.getObj();
    Mat& dstPts = *(Mat*) normpts.getObj();
    Mat& transform = *(Mat*) T.getObj();
    Mat centroid(dim, 1, CV_32F);
    if( dstPts.empty() )
    {
        dstPts = Mat(srcPts.rows, srcPts.cols, CV_32F);
    }
    // Center the points: subtract the per-coordinate mean.
    for( int d = 0 ; d < dim ; ++d )
    {
        Scalar rowMean = mean(srcPts.row(d));
        centroid.at<float>(d, 0) = (float)rowMean[0];
        subtract(srcPts.row(d), centroid.at<float>(d, 0), dstPts.row(d));
    }
    // Average Euclidean distance of the centered points from the origin.
    float distSum = 0;
    for( int c = 0 ; c < dstPts.cols; ++c )
    {
        distSum += (float)norm(dstPts.col(c));
    }
    float avgDist = distSum / dstPts.cols;
    float scale = (float)(sqrt(static_cast<float>(dim)) / avgDist);
    dstPts = dstPts * scale;
    // Homogeneous transform that performs the same normalization: x_n = T * x.
    transform = cv::Mat::eye(dim+1, dim+1, CV_32F);
    for( int d = 0; d < dim; ++d )
    {
        transform.at<float>(d, d) = scale;
        transform.at<float>(d, dim) = -scale * centroid.at<float>(d, 0);
    }
}
// Packs a vector of 2D points into a 2xN float matrix (row 0 = x, row 1 = y).
void fromVectorToMat(InputArrayOfArrays v, OutputArray pts)
{
    Mat& dst = *(Mat*) pts.getObj();
    std::vector<Point2f>& points = *( std::vector<Point2f>* ) v.getObj();
    int count = (int) points.size();
    if( dst.empty() )
        dst.create(2, count, CV_32F);
    for( int idx = 0; idx < count; ++idx )
    {
        dst.at<float>(0, idx) = points[idx].x;
        dst.at<float>(1, idx) = points[idx].y;
    }
}
// Unpacks a 2xN float matrix (row 0 = x, row 1 = y) into Point2f entries,
// appending to whatever `v` already contains.
void fromMatToVector(InputArray pts, OutputArrayOfArrays v)
{
    Mat& src = *(Mat*) pts.getObj();
    std::vector<Point2f>& dst = *(std::vector<Point2f>* ) v.getObj();
    const int count = src.cols;
    for( int idx = 0; idx < count; ++idx )
    {
        dst.push_back(Point2f(src.at<float>(0, idx), src.at<float>(1, idx)));
    }
}
// Maps a normalized image coordinate back to pixel coordinates using the
// focal lengths (fx, fy) and principal point (ux, uy).
Point2f back(Point2f point, double fx, double fy, double ux, double uy)
{
    const double px = fx * point.x + ux;
    const double py = fy * point.y + uy;
    return Point2f((float)px, (float)py);
}
// Applies lens distortion to `input` by building per-pixel remap() lookup
// tables from undistortPoints() and the camera intrinsics.
void distortImage(InputArray input, InputArray camMat, InputArray distCoef, OutputArray output)
{
    Mat& camMat_ = *(Mat*) camMat.getObj();
    Mat& input_ = *(Mat*) input.getObj();
    // Intrinsics: focal lengths and principal point (camera matrix assumed CV_64F).
    double fx = camMat_.at<double>(0,0);
    double fy = camMat_.at<double>(1,1);
    double ux = camMat_.at<double>(0,2);
    double uy = camMat_.at<double>(1,2);
    vector<Point2f> undistortedPoints, distortedPoints;
    // NOTE(review): the ROW index i goes into x and the COLUMN index j into y,
    // so the whole function works in transposed coordinates (see the
    // Size(rows, cols) maps below) — confirm the output orientation is intended.
    for (int i = 0; i < input_.rows; i++)
    {
        for (int j = 0; j < input_.cols; j++)
        {
            undistortedPoints.push_back(Point2f((float)i, (float)j));
        }
    }
    // undistortPoints() produces normalized coordinates; back() re-applies
    // the intrinsics to return to pixel coordinates.
    undistortPoints(undistortedPoints, distortedPoints, camMat, distCoef, Mat(), Mat());
    // NOTE(review): Size takes (width, height); Size(rows, cols) is transposed
    // relative to the input image — verify against remap()'s expectations.
    Mat mapx(cv::Size(input_.rows, input_.cols), CV_32FC1);
    Mat mapy(cv::Size(input_.rows, input_.cols), CV_32FC1);
    for (int i = 0; i < input_.rows; i++)
    {
        for (int j = 0; j < input_.cols; j++)
        {
            distortedPoints[i*input_.cols+j] = back(distortedPoints[i*input_.cols+j], fx, fy, ux, uy);
            mapx.at<float>(j, i) = distortedPoints[i*input_.cols+j].x;
            mapy.at<float>(j, i) = distortedPoints[i*input_.cols+j].y;
        }
    }
    remap(input, output, mapx, mapy, INTER_CUBIC);
}
}
}

@ -0,0 +1,252 @@
#include "opencv2/structured_light/slmono_utils.hpp"
namespace cv{
namespace structured_light{
// Quadrant swapping for FFT spectra. isFftshift == true performs an fftshift
// (zero-frequency component moved to the center, shift by floor(dim/2));
// false performs the inverse ifftshift (shift by ceil(dim/2)).
// The two original branches were identical except for the shift amounts,
// so they are merged; `in` is now taken by const reference to avoid a copy.
void circshift(Mat &out, const Mat& in, int xdim, int ydim, bool isFftshift = true) {
    const int xshift = isFftshift ? (xdim / 2) : ((xdim + 1) / 2);
    const int yshift = isFftshift ? (ydim / 2) : ((ydim + 1) / 2);
    for (int i = 0; i < xdim; i++) {
        const int ii = (i + xshift) % xdim;
        for (int j = 0; j < ydim; j++) {
            const int jj = (j + yshift) % ydim;
            // NOTE(review): linear at<float>() indexing assumes a continuous
            // single-channel CV_32F matrix; some callers pass the 2-channel
            // output of fft2()/dft() — confirm.
            out.at<float>(ii * ydim + jj) = in.at<float>(i * ydim + j);
        }
    }
}
// Builds the DCT frequency grid used by the Laplacian operator:
// output(i, j) = (j+1)^2 + (i+1)^2.
void createGrid(OutputArray output, Size size) {
    Mat colIdx(size, CV_32FC1);
    Mat rowIdx(size, CV_32FC1);
    for (int r = 0; r < size.height; r++) {
        for (int c = 0; c < size.width; c++) {
            colIdx.at<float>(r, c) = float(c + 1);
            rowIdx.at<float>(r, c) = float(r + 1);
        }
    }
    // Square each coordinate grid in place, then sum them.
    multiply(colIdx, colIdx, colIdx);
    multiply(rowIdx, rowIdx, rowIdx);
    add(colIdx, rowIdx, output);
}
// Folds every phase value of `img` into [-pi/2, pi/2) by repeated
// triangle-wave reflection; (v > 0) - (v < 0) is a branchless signum.
void wrapSin(InputArray img, OutputArray out) {
    Mat src = img.getMat();
    Mat& dst = *(Mat*) out.getObj();
    dst = Mat(src.rows, src.cols, CV_32FC1);
    for (int r = 0; r < src.rows; r++) {
        for (int c = 0; c < src.cols; c++) {
            float v = src.at<float>(r, c);
            while (abs(v) >= M_PI_2) {
                v += ((v > 0) - (v < 0)) * (float(M_PI) - 2 * abs(v));
            }
            dst.at<float>(r, c) = v;
        }
    }
}
// Cosine counterpart of wrapSin: shifts the phase by -pi/2, folds it into
// [-pi/2, pi/2] by triangle-wave reflection, and negates the result.
// Note the strict '>' here vs '>=' in wrapSin (kept from the original).
void wrapCos(InputArray img, OutputArray out) {
    Mat src = img.getMat();
    Mat& dst = *(Mat*) out.getObj();
    dst = Mat(src.rows, src.cols, CV_32FC1);
    for (int r = 0; r < src.rows; r++) {
        for (int c = 0; c < src.cols; c++) {
            float v = src.at<float>(r, c) - (float)M_PI_2;
            while (abs(v) > M_PI_2) {
                v += ((v > 0) - (v < 0)) * ((float)M_PI - 2 * abs(v));
            }
            dst.at<float>(r, c) = -v;
        }
    }
}
// Four-step phase shifting: per-pixel wrapped phase
//   atan2(I3 - I1, I0 - I2)
// from the four phase-shifted fringe images in `src`.
void computeAtanDiff(InputOutputArrayOfArrays src, OutputArray dst) {
    std::vector<Mat>& src_ = *( std::vector<Mat>* ) src.getObj();
    Mat& dst_ = *(Mat*) dst.getObj();
    CV_Assert(src_.size() >= 4);
    // Allocate the output if the caller did not: the original wrote through
    // at<float>() into a possibly-empty Mat, which is undefined behaviour.
    // create() is a no-op when the size/type already match.
    dst_.create(src_[0].rows, src_[0].cols, CV_32FC1);
    for (int i = 0; i < src_[0].rows; i++) {
        for (int j = 0; j < src_[0].cols; j++) {
            float x = src_[3].at<float>(i, j) - src_[1].at<float>(i, j);
            float y = src_[0].at<float>(i, j) - src_[2].at<float>(i, j);
            dst_.at<float>(i, j) = std::atan2(x, y);
        }
    }
}
// DCT-domain Laplacian operator using the precomputed frequency `grid`.
// flag == 0 applies the forward Laplacian, flag == 1 its inverse;
// any other flag leaves `out` untouched.
void Laplacian(InputArray img, InputArray grid, OutputArray out, int flag = 0) {
    Mat& src = *(Mat*) img.getObj();
    Mat& dst = *(Mat*) out.getObj();
    if (flag != 0 && flag != 1) {
        return;
    }
    // Forward DCT, multiply (forward) or divide (inverse) by the grid,
    // inverse DCT, then apply the corresponding normalization factor.
    dct(img, dst);
    if (flag == 0) {
        multiply(dst, grid, dst);
    } else {
        divide(dst, grid, dst);
    }
    dct(dst, dst, DCT_INVERSE);
    if (flag == 0) {
        dst = dst * (-4 * M_PI * M_PI / (src.rows * src.cols));
    } else {
        dst = dst * (-src.rows * src.cols) / (4 * M_PI * M_PI);
    }
}
// Computes the phase-unwrapping driving term
//   wrapCos(img) * L(wrapSin(img)) - wrapSin(img) * L(wrapCos(img))
// where L is the DCT-based Laplacian with the given frequency grid.
void computeDelta(InputArray img, InputArray grid, OutputArray out) {
    Mat sinPart, cosPart;
    wrapSin(img, sinPart);
    wrapCos(img, cosPart);
    Mat lapSin, lapCos;
    Laplacian(sinPart, grid, lapSin);
    Laplacian(cosPart, grid, lapCos);
    Mat termA, termB;
    multiply(cosPart, lapSin, termA);
    multiply(sinPart, lapCos, termB);
    subtract(termA, termB, out);
}
// Unwraps a wrapped phase map with a DCT/Laplacian-based least-squares solve;
// `imgSize` is the size of `img`.
void unwrapPCG(InputArray img, OutputArray out, Size imgSize) {
    Mat g_laplacian;
    Mat phase1;
    Mat error, k1, k2, phase2;
    Mat phiError;
    // DCT frequency grid shared by all Laplacian calls below.
    createGrid(g_laplacian, imgSize);
    // Least-squares phase estimate: inverse Laplacian of the driving term.
    computeDelta(img, g_laplacian, phase1);
    Laplacian(phase1, g_laplacian, phase1, 1);
    // Fringe-order estimate k1 = (phase1 - img) / (2*pi).
    subtract(phase1, img, k1);
    k1 *= 0.5 / M_PI;
    // NOTE(review): abs(k1); discards its result (cv::abs returns an
    // expression) — this statement is a no-op. Presumably k1 = abs(k1)
    // (or rounding to the nearest integer order) was intended; confirm
    // against the reference algorithm.
    abs(k1);
    k1 *= 2 * M_PI;
    add(img, k1, out);
    // NOTE(review): dead code — the condition `i < 0` means this refinement
    // loop never runs; it also reads `phase2`, which is never initialized.
    // Confirm the intended iteration count before enabling it.
    for (auto i = 0; i < 0; i++) {
        subtract(phase2, phase1, error);
        computeDelta(error, g_laplacian, phiError);
        Laplacian(phiError, g_laplacian, phiError, 1);
        add(phase1, phiError, phase1);
        subtract(phase1, img, k2);
        k2 *= 0.5 / M_PI;
        abs(k2);
        k2 *= 2 * M_PI;
        add(img, k2, out);
        k2.copyTo(k1);
    }
}
// Temporal phase unwrapping: combines a scaled low-frequency phase map
// (phase1) with a high-frequency one (phase2) to produce the unwrapped result.
// NOTE(review): phase1 is modified in place despite being an InputArray —
// callers must not rely on its contents afterwards; confirm this is intended.
void unwrapTPU(InputArray phase1, InputArray phase2, OutputArray out, int scale) {
    Mat& phase1_ = *(Mat*) phase1.getObj();
    Mat& phase2_ = *(Mat*) phase2.getObj();
    phase1_.convertTo(phase1_, phase1_.type(), scale);
    subtract(phase1_, phase2_, phase1_);
    // Fringe-order estimate: (scale*phase1 - phase2) / (2*pi).
    phase1_.convertTo(phase1_, phase1_.type(), 0.5f / CV_PI);
    // NOTE(review): abs(phase1_); discards its result — a no-op; presumably
    // phase1_ = abs(phase1_) (or rounding) was intended. Confirm.
    abs(phase1_);
    phase1_.convertTo(phase1_, phase1_.type(), 2 * CV_PI);
    add(phase1_, phase2_, out);
}
// Computes the 2-D DFT of `in` as a 2-channel (real, imaginary) matrix,
// zero-padding up to the optimal DFT sizes first.
void fft2(const cv::Mat in, cv::Mat &complexI) {
    const int optRows = getOptimalDFTSize(in.rows);
    const int optCols = getOptimalDFTSize(in.cols);
    Mat padded;
    copyMakeBorder(in, padded, 0, optRows - in.rows, 0, optCols - in.cols,
                   BORDER_CONSTANT, Scalar::all(0));
    // Real part = padded input, imaginary part = zeros.
    Mat parts[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    merge(parts, 2, complexI);
    dft(complexI, complexI);
}
// Low-pass filters `image`: grayscale conversion, 2-D FFT, keep only a
// centered window of the shifted spectrum, inverse transform.
void lowPassFilter(InputArray image, OutputArray out, int filterSize) {
    Mat& image_ = *(Mat*) image.getObj();
    int rows = image_.rows;
    int cols = image_.cols;
    Mat greyMat;
    cvtColor(image, greyMat, COLOR_BGR2GRAY);
    Mat result;
    fft2(greyMat, result);
    // NOTE(review): Size(rows, cols) means width = rows — transposed relative
    // to the image; and these Mats are CV_64FC1 while fft2 produces a
    // 2-channel CV_32F matrix, so circshift's at<float>() access and the
    // mul() below should be verified for type compatibility.
    Mat matrix_(Size(rows, cols), CV_64FC1);
    circshift(matrix_, result, result.rows, result.cols, true);
    Mat lowPass(Size(rows, cols), CV_64FC1, Scalar(0));
    // NOTE(review): Rect_ takes (x, y, WIDTH, HEIGHT); passing
    // 0.5*rows + filterSize as the width makes the kept window much larger
    // than a 2*filterSize square around the spectrum center and can run past
    // the matrix bounds — likely a bug; confirm the intended window.
    lowPass(Rect_<int>((int)(0.5*rows-filterSize), (int)(0.5 * cols - filterSize),
    (int)(0.5*rows+filterSize), (int)(0.5 * cols + filterSize))) = 1;
    // Apply the mask in the shifted frequency domain, undo the shift, invert.
    Mat pass = matrix_.mul(lowPass);
    Mat J1(Size(rows, cols), CV_64FC1);
    circshift(J1, pass, rows, cols, false);
    idft(J1, out);
}
// High-pass filters `image`: grayscale conversion, 2-D FFT, zero out a
// centered window of the shifted spectrum, inverse transform.
void highPassFilter(InputArray image, OutputArray out, int filterSize) {
    Mat& image_ = *(Mat*) image.getObj();
    int rows = image_.rows;
    int cols = image_.cols;
    Mat greyMat;
    cvtColor(image, greyMat, COLOR_BGR2GRAY);
    Mat result;
    fft2(greyMat, result);
    // NOTE(review): same concerns as lowPassFilter — Size(rows, cols) is
    // transposed, and the CV_64FC1 mask is combined with fft2's 2-channel
    // CV_32F output; verify circshift/mul type handling.
    Mat matrix_(Size(rows, cols), CV_64FC1);
    circshift(matrix_, result, result.rows, result.cols, true);
    Mat highPass(Size(rows, cols), CV_64FC1, Scalar(1));
    // NOTE(review): Rect_ takes (x, y, WIDTH, HEIGHT); passing
    // 0.5*rows + filterSize as the width zeroes a region far larger than a
    // 2*filterSize square around the spectrum center and can run past the
    // matrix bounds — likely a bug; confirm the intended window.
    highPass(Rect_<int>((int)(0.5*rows-filterSize), (int)(0.5 * cols - filterSize),
    (int)(0.5*rows+filterSize), (int)(0.5 * cols + filterSize))) = 0;
    // Apply the mask in the shifted frequency domain, undo the shift, invert.
    Mat pass = matrix_.mul(highPass);
    Mat filter(Size(rows, cols), CV_64FC1);
    circshift(filter, pass, rows, cols, false);
    idft(filter, out);
}
}
}
Loading…
Cancel
Save