Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/1897/head
Alexander Alekhin 7 years ago
commit a113dc61e1
  1. 9
      modules/aruco/src/aruco.cpp
  2. 12
      modules/aruco/src/charuco.cpp
  3. 2
      modules/bioinspired/samples/cpp/OpenEXRimages_HDR_Retina_toneMapping.cpp
  4. 2
      modules/bioinspired/samples/cpp/OpenEXRimages_HDR_Retina_toneMapping_video.cpp
  5. 2
      modules/bioinspired/samples/cpp/retinaDemo.cpp
  6. 2
      modules/bioinspired/samples/cpp/tutorial_code/bioinspired/retina_tutorial.cpp
  7. 2
      modules/bioinspired/samples/ocl/retina_ocl.cpp
  8. 4
      modules/bioinspired/src/retina.cpp
  9. 4
      modules/bioinspired/src/retina_ocl.cpp
  10. 4
      modules/bioinspired/src/transientareassegmentationmodule.cpp
  11. 18
      modules/cnn_3dobj/src/cnn_feature.cpp
  12. 2
      modules/cvv/samples/cvv_demo.cpp
  13. 4
      modules/cvv/src/qtutil/filter/diffFilterWidget.cpp
  14. 2
      modules/face/samples/facerec_demo.cpp
  15. 2
      modules/face/samples/facerec_eigenfaces.cpp
  16. 2
      modules/face/samples/facerec_fisherfaces.cpp
  17. 2
      modules/face/samples/facerec_lbph.cpp
  18. 2
      modules/face/samples/facerec_save_load.cpp
  19. 2
      modules/face/samples/facerec_video.cpp
  20. 2
      modules/face/tutorials/face_landmark/face_landmark_detection.markdown
  21. 4
      modules/face/tutorials/face_landmark/face_landmark_trainer.markdown
  22. 4
      modules/face/tutorials/face_landmark/face_landmark_video.markdown
  23. 4
      modules/face/tutorials/face_landmark/sample_face_swapping.markdown
  24. 12
      modules/img_hash/src/average_hash.cpp
  25. 12
      modules/img_hash/src/block_mean_hash.cpp
  26. 11
      modules/img_hash/src/color_moment_hash.cpp
  27. 13
      modules/img_hash/src/marr_hildreth_hash.cpp
  28. 12
      modules/img_hash/src/phash.cpp
  29. 4
      modules/img_hash/src/radial_variance_hash.cpp
  30. 3
      modules/line_descriptor/src/binary_descriptor.cpp
  31. 4
      modules/matlab/generator/templates/functional.cpp
  32. 2
      modules/matlab/test/cv_exception.cpp
  33. 2
      modules/matlab/test/std_exception.cpp
  34. 4
      modules/ovis/src/ovis.cpp
  35. 16
      modules/reg/samples/map_test.cpp
  36. 2
      modules/tracking/samples/tutorial_customizing_cn_tracker.cpp
  37. 2
      modules/tracking/src/TrackingFunctionPF.hpp
  38. 4
      modules/tracking/src/trackerBoosting.cpp
  39. 22
      modules/tracking/src/trackerCSRT.cpp
  40. 2
      modules/tracking/src/trackerCSRTUtils.cpp
  41. 2
      modules/tracking/src/trackerKCF.cpp
  42. 9
      modules/ximgproc/doc/ximgproc.bib
  43. 20
      modules/ximgproc/include/opencv2/ximgproc/deriche_filter.hpp
  44. 3
      modules/ximgproc/include/opencv2/ximgproc/edge_filter.hpp
  45. 60
      modules/ximgproc/samples/dericheSample.py
  46. 1
      modules/ximgproc/samples/disparity_filtering.cpp
  47. 194
      modules/ximgproc/src/deriche_filter.cpp
  48. 41
      modules/ximgproc/test/test_deriche_filter.cpp
  49. 7
      modules/ximgproc/test/test_fbs_filter.cpp
  50. 15
      modules/xphoto/samples/inpainting.cpp

@ -103,13 +103,12 @@ Ptr<DetectorParameters> DetectorParameters::create() {
*/
static void _convertToGrey(InputArray _in, OutputArray _out) {
CV_Assert(_in.getMat().channels() == 1 || _in.getMat().channels() == 3);
CV_Assert(_in.type() == CV_8UC1 || _in.type() == CV_8UC3);
_out.create(_in.getMat().size(), CV_8UC1);
if(_in.getMat().type() == CV_8UC3)
cvtColor(_in.getMat(), _out.getMat(), COLOR_BGR2GRAY);
if(_in.type() == CV_8UC3)
cvtColor(_in, _out, COLOR_BGR2GRAY);
else
_in.getMat().copyTo(_out);
_in.copyTo(_out);
}

@ -345,10 +345,10 @@ static int _selectAndRefineChessboardCorners(InputArray _allCorners, InputArray
// corner refinement, first convert input image to grey
Mat grey;
if(_image.getMat().type() == CV_8UC3)
cvtColor(_image.getMat(), grey, COLOR_BGR2GRAY);
if(_image.type() == CV_8UC3)
cvtColor(_image, grey, COLOR_BGR2GRAY);
else
_image.getMat().copyTo(grey);
_image.copyTo(grey);
const Ptr<DetectorParameters> params = DetectorParameters::create(); // use default params for corner refinement
@ -754,10 +754,10 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners,
// convert input image to grey
Mat grey;
if(_image.getMat().type() == CV_8UC3)
cvtColor(_image.getMat(), grey, COLOR_BGR2GRAY);
if(_image.type() == CV_8UC3)
cvtColor(_image, grey, COLOR_BGR2GRAY);
else
_image.getMat().copyTo(grey);
_image.copyTo(grey);
// for each of the detected markers, try to find a diamond
for(unsigned int i = 0; i < _markerIds.total(); i++) {

@ -292,7 +292,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
*/
cv::waitKey(10);
}
}catch(cv::Exception e)
}catch(const cv::Exception& e)
{
std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
}

@ -353,7 +353,7 @@ static void loadNewFrame(const std::string filenamePrototype, const int currentF
// jump to next frame
++currentFrameIndex;
}
}catch(cv::Exception e)
}catch(const cv::Exception& e)
{
std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
}

@ -146,7 +146,7 @@ int main(int argc, char* argv[]) {
cv::waitKey(5);
}
}catch(cv::Exception e)
}catch(const cv::Exception& e)
{
std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
}

@ -137,7 +137,7 @@ int main(int argc, char* argv[]) {
cv::imshow("Retina Magno", retinaOutput_magno);
cv::waitKey(10);
}
}catch(cv::Exception e)
}catch(const cv::Exception& e)
{
std::cerr<<"Error using Retina or end of video sequence reached : "<<e.what()<<std::endl;
}

@ -109,7 +109,7 @@ int main(int argc, char* argv[])
}
printf("Average: %.4fms\n", (double)total_time / total_loop_count / cv::getTickFrequency() * 1000.0);
}
catch(cv::Exception e)
catch(const cv::Exception& e)
{
std::cerr << "Error using Retina : " << e.what() << std::endl;
}

@ -368,7 +368,7 @@ void RetinaImpl::setup(String retinaParameterFile, const bool applyDefaultSetupO
cv::FileStorage fs(retinaParameterFile, cv::FileStorage::READ);
setup(fs, applyDefaultSetupOnFailure);
}
catch(Exception &e)
catch(const Exception &e)
{
printf("Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>%s\n", e.what());
if (applyDefaultSetupOnFailure)
@ -422,7 +422,7 @@ void RetinaImpl::setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailur
setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency,_retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k);
}
catch(Exception &e)
catch(const Exception &e)
{
printf("RetinaImpl::setup: resetting retina with default parameters\n");
if (applyDefaultSetupOnFailure)

@ -127,7 +127,7 @@ void RetinaOCLImpl::setup(String retinaParameterFile, const bool applyDefaultSet
cv::FileStorage fs(retinaParameterFile, cv::FileStorage::READ);
setup(fs, applyDefaultSetupOnFailure);
}
catch(Exception &e)
catch(const Exception &e)
{
std::cout << "RetinaOCLImpl::setup: wrong/inappropriate xml parameter file : error report :`n=>" << e.what() << std::endl;
if (applyDefaultSetupOnFailure)
@ -181,7 +181,7 @@ void RetinaOCLImpl::setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFai
setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency, _retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k);
}
catch(Exception &e)
catch(const Exception &e)
{
std::cout << "RetinaOCLImpl::setup: resetting retina with default parameters" << std::endl;
if (applyDefaultSetupOnFailure)

@ -298,7 +298,7 @@ void TransientAreasSegmentationModuleImpl::setup(String segmentationParameterFil
// opening retinaParameterFile in read mode
cv::FileStorage fs(segmentationParameterFile, cv::FileStorage::READ);
setup(fs, applyDefaultSetupOnFailure);
}catch(cv::Exception &e)
}catch(const cv::Exception &e)
{
printf("Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>%s\n", e.what());
if (applyDefaultSetupOnFailure)
@ -338,7 +338,7 @@ void TransientAreasSegmentationModuleImpl::setup(cv::FileStorage &fs, const bool
currFn["contextEnergy_spatialConstant"]>>_segmentationParameters.contextEnergy_spatialConstant;
setup(_segmentationParameters);
}catch(cv::Exception &e)
}catch(const cv::Exception &e)
{
std::cout<<"Retina::setup: resetting retina with default parameters"<<std::endl;
if (applyDefaultSetupOnFailure)

@ -222,26 +222,24 @@ namespace cnn_3dobj
{
/* Convert the input image to the input image format of the network. */
cv::Mat sample;
if (img.channels() == 3 && num_channels == 1)
cv::cvtColor(img, sample, CV_BGR2GRAY);
else if (img.channels() == 4 && num_channels == 1)
cv::cvtColor(img, sample, CV_BGRA2GRAY);
if (num_channels == 1)
cv::cvtColor(img, sample, COLOR_BGR2GRAY);
else if (img.channels() == 4 && num_channels == 3)
cv::cvtColor(img, sample, CV_BGRA2BGR);
cv::cvtColor(img, sample, COLOR_BGRA2BGR);
else if (img.channels() == 1 && num_channels == 3)
cv::cvtColor(img, sample, CV_GRAY2BGR);
cv::cvtColor(img, sample, COLOR_GRAY2BGR);
else
sample = img;
cv::Mat sample_resized;
if (sample.size() != input_geometry)
cv::resize(sample, sample_resized, input_geometry);
else
sample_resized = sample;
cv::Mat sample_float;
if (num_channels == 3)
sample_resized.convertTo(sample_float, CV_32FC3);
else
sample_resized.convertTo(sample_float, CV_32FC1);
sample_resized.convertTo(sample_float, CV_32F);
cv::Mat sample_normalized;
if (net_ready == 2)
cv::subtract(sample_float, mean_, sample_normalized);

@ -84,7 +84,7 @@ main(int argc, char** argv)
// convert to grayscale
cv::Mat imgGray;
cv::cvtColor(imgRead, imgGray, CV_BGR2GRAY);
cv::cvtColor(imgRead, imgGray, COLOR_BGR2GRAY);
cvv::debugFilter(imgRead, imgGray, CVVISUAL_LOCATION, "to gray");
// detect ORB features

@ -69,8 +69,8 @@ void DiffFilterFunction::applyFilter(InputArray in, OutputArray out) const
}
cv::Mat originalHSV, filteredHSV;
cv::cvtColor(in.at(0).get(), originalHSV, CV_BGR2HSV);
cv::cvtColor(in.at(1).get(), filteredHSV, CV_BGR2HSV);
cv::cvtColor(in.at(0).get(), originalHSV, COLOR_BGR2HSV);
cv::cvtColor(in.at(1).get(), filteredHSV, COLOR_BGR2HSV);
auto diffHSV = cv::abs(originalHSV - filteredHSV);
std::array<cv::Mat, 3> splitVector;

@ -93,7 +93,7 @@ int main(int argc, const char *argv[]) {
// input filename is given.
try {
read_csv(fn_csv, images, labels, labelsInfo);
} catch (cv::Exception& e) {
} catch (const cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);

@ -85,7 +85,7 @@ int main(int argc, const char *argv[]) {
// input filename is given.
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
} catch (const cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);

@ -85,7 +85,7 @@ int main(int argc, const char *argv[]) {
// input filename is given.
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
} catch (const cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);

@ -62,7 +62,7 @@ int main(int argc, const char *argv[]) {
// input filename is given.
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
} catch (const cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);

@ -85,7 +85,7 @@ int main(int argc, const char *argv[]) {
// input filename is given.
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
} catch (const cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);

@ -68,7 +68,7 @@ int main(int argc, const char *argv[]) {
// Read in the data (fails if no valid input filename is given, but you'll get an error message):
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
} catch (const cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);

@ -46,7 +46,7 @@ resize(img,img,Size(460,460),0,0,INTER_LINEAR_EXACT);
Mat gray;
std::vector<Rect> faces;
if(img.channels()>1){
cvtColor(img.getMat(),gray,CV_BGR2GRAY);
cvtColor(img.getMat(),gray,COLOR_BGR2GRAY);
}
else{
gray = img.getMat().clone();

@ -65,7 +65,7 @@ bool myDetector( InputArray image, OutputArray ROIs ){
Mat gray;
std::vector<Rect> faces;
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
}
else{
gray = image.getMat().clone();
@ -174,4 +174,4 @@ filename specified.As the training starts successfully you will see something li
**The error rate on test images depends on the number of images used for training used as follows :**
![](images/test.png)
![](images/test.png)

@ -26,7 +26,7 @@ bool myDetector( InputArray image, OutputArray ROIs ){
Mat gray;
std::vector<Rect> faces;
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
}
else{
gray = image.getMat().clone();
@ -107,4 +107,4 @@ Sample video:
@htmlonly
<iframe width="560" height="315" src="https://www.youtube.com/embed/ZtaV07T90D8" frameborder="0" allowfullscreen></iframe>
@endhtmlonly
@endhtmlonly

@ -25,7 +25,7 @@ bool myDetector( InputArray image, OutputArray ROIs ){
Mat gray;
std::vector<Rect> faces;
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
}
else{
gray = image.getMat().clone();
@ -144,4 +144,4 @@ Second image
Results after swapping
----------------------
![](images/face_swapped.jpg)
![](images/face_swapped.jpg)

@ -27,18 +27,10 @@ public:
input.type() == CV_8U);
cv::resize(input, resizeImg, cv::Size(8,8), 0, 0, INTER_LINEAR_EXACT);
if(input.type() == CV_8UC3)
{
cv::cvtColor(resizeImg, grayImg, CV_BGR2GRAY);
}
else if(input.type() == CV_8UC4)
{
cv::cvtColor(resizeImg, grayImg, CV_BGRA2GRAY);
}
if(input.channels() > 1)
cv::cvtColor(resizeImg, grayImg, COLOR_BGR2GRAY);
else
{
grayImg = resizeImg;
}
uchar const imgMean = static_cast<uchar>(cvRound(cv::mean(grayImg)[0]));
cv::compare(grayImg, imgMean, bitsImg, CMP_GT);

@ -40,18 +40,10 @@ public:
input.type() == CV_8U);
cv::resize(input, resizeImg_, cv::Size(imgWidth,imgHeight), 0, 0, INTER_LINEAR_EXACT);
if(input.type() == CV_8UC3)
{
cv::cvtColor(resizeImg_, grayImg_, CV_BGR2GRAY);
}
else if(input.type() == CV_8UC4)
{
cv::cvtColor(resizeImg_, grayImg_, CV_BGRA2GRAY);
}
if(input.channels() > 1)
cv::cvtColor(resizeImg_, grayImg_, COLOR_BGR2GRAY);
else
{
grayImg_ = resizeImg_;
}
int pixColStep = blockWidth;
int pixRowStep = blockHeigth;

@ -28,25 +28,24 @@ public:
}
else if(input.type() == CV_8UC4)
{
cv::cvtColor(input, colorImg_, CV_BGRA2BGR);
cv::cvtColor(input, colorImg_, COLOR_BGRA2BGR);
}
else
{
cv::cvtColor(input, colorImg_, CV_GRAY2BGR);
cv::cvtColor(input, colorImg_, COLOR_GRAY2BGR);
}
cv::resize(colorImg_, resizeImg_, cv::Size(512,512), 0, 0,
INTER_CUBIC);
cv::resize(colorImg_, resizeImg_, cv::Size(512,512), 0, 0, INTER_CUBIC);
cv::GaussianBlur(resizeImg_, blurImg_, cv::Size(3,3), 0, 0);
cv::cvtColor(blurImg_, colorSpace_, CV_BGR2HSV);
cv::cvtColor(blurImg_, colorSpace_, COLOR_BGR2HSV);
cv::split(colorSpace_, channels_);
outputArr.create(1, 42, CV_64F);
cv::Mat hash = outputArr.getMat();
hash.setTo(0);
computeMoments(hash.ptr<double>(0));
cv::cvtColor(blurImg_, colorSpace_, CV_BGR2YCrCb);
cv::cvtColor(blurImg_, colorSpace_, COLOR_BGR2YCrCb);
cv::split(colorSpace_, channels_);
computeMoments(hash.ptr<double>(0) + 21);
}

@ -105,18 +105,11 @@ public:
input.type() == CV_8UC3 ||
input.type() == CV_8U);
if(input.type() == CV_8UC3)
{
cv::cvtColor(input, grayImg, CV_BGR2GRAY);
}
else if(input.type() == CV_8UC4)
{
cv::cvtColor(input, grayImg, CV_BGRA2GRAY);
}
if(input.channels() > 1)
cv::cvtColor(input, grayImg, COLOR_BGR2GRAY);
else
{
grayImg = input;
}
//pHash use Canny-deritch filter to blur the image
cv::GaussianBlur(grayImg, blurImg, cv::Size(7, 7), 0);
cv::resize(blurImg, resizeImg, cv::Size(512, 512), 0, 0, INTER_CUBIC);

@ -21,18 +21,10 @@ public:
input.type() == CV_8U);
cv::resize(input, resizeImg, cv::Size(32,32), 0, 0, INTER_LINEAR_EXACT);
if(input.type() == CV_8UC3)
{
cv::cvtColor(resizeImg, grayImg, CV_BGR2GRAY);
}
else if(input.type() == CV_8UC4)
{
cv::cvtColor(resizeImg, grayImg, CV_BGRA2GRAY);
}
if(input.channels() > 1)
cv::cvtColor(resizeImg, grayImg, COLOR_BGR2GRAY);
else
{
grayImg = resizeImg;
}
grayImg.convertTo(grayFImg, CV_32F);
cv::dct(grayFImg, dctImg);

@ -53,11 +53,11 @@ public:
if(input.type() == CV_8UC3)
{
cv::cvtColor(input, grayImg_, CV_BGR2GRAY);
cv::cvtColor(input, grayImg_, COLOR_BGR2GRAY);
}
else if(input.type() == CV_8UC4)
{
cv::cvtColor(input, grayImg_, CV_BGRA2GRAY);
cv::cvtColor(input, grayImg_, COLOR_BGRA2GRAY);
}
else
{

@ -548,7 +548,7 @@ void BinaryDescriptor::computeImpl( const Mat& imageSrc, std::vector<KeyLine>& k
if( imageSrc.channels() != 1 )
cvtColor( imageSrc, image, COLOR_BGR2GRAY );
else
image = imageSrc.clone();
image = imageSrc;
/*check whether image's depth is different from 0 */
if( image.depth() != 0 )
@ -627,7 +627,6 @@ void BinaryDescriptor::computeImpl( const Mat& imageSrc, std::vector<KeyLine>& k
{
for ( size_t j = 0; j < sl[i].size(); )
{
//if( (int) ( sl[i][j] ).octaveCount > params.numOfOctave_ )
if( (int) ( sl[i][j] ).octaveCount > octaveIndex )
( sl[i] ).erase( ( sl[i] ).begin() + j );
else j++;

@ -85,9 +85,9 @@ addVariant("{{ fun.name }}", {{ fun.req|inputs|length }}, {{ fun.opt|inputs|leng
// [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
try {
{{ compose(fun) }}
} catch(cv::Exception& e) {
} catch(const cv::Exception& e) {
error(std::string("cv::exception caught: ").append(e.what()).c_str());
} catch(std::exception& e) {
} catch(const std::exception& e) {
error(std::string("std::exception caught: ").append(e.what()).c_str());
} catch(...) {
error("Uncaught exception occurred in {{fun.name}}");

@ -25,7 +25,7 @@ void mexFunction(int nlhs, mxArray* plhs[],
// [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
try {
throw cv::Exception(-1, "OpenCV exception thrown", __func__, __FILE__, __LINE__);
} catch(cv::Exception& e) {
} catch(const cv::Exception& e) {
mexErrMsgTxt(e.what());
} catch(...) {
mexErrMsgTxt("Incorrect exception caught!");

@ -24,7 +24,7 @@ void mexFunction(int nlhs, mxArray* plhs[],
// [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
try {
throw std::exception();
} catch(std::exception& e) {
} catch(const std::exception& e) {
mexErrMsgTxt(e.what());
} catch(...) {
mexErrMsgTxt("Incorrect exception caught!");

@ -149,7 +149,7 @@ static SceneNode& _getSceneNode(SceneManager* sceneMgr, const String& name)
if(mo)
return *mo->getParentSceneNode()->getParentSceneNode();
}
catch (ItemIdentityException&)
catch (const ItemIdentityException&)
{
// ignore
}
@ -159,7 +159,7 @@ static SceneNode& _getSceneNode(SceneManager* sceneMgr, const String& name)
if (!mo)
mo = sceneMgr->getMovableObject(name, "Light");
}
catch (ItemIdentityException&)
catch (const ItemIdentityException&)
{
// ignore
}

@ -70,9 +70,9 @@ static void showDifference(const Mat& image1, const Mat& image2, const char* tit
image1.convertTo(img1, CV_32FC3);
image2.convertTo(img2, CV_32FC3);
if(img1.channels() != 1)
cvtColor(img1, img1, COLOR_RGB2GRAY);
cvtColor(img1, img1, COLOR_BGR2GRAY);
if(img2.channels() != 1)
cvtColor(img2, img2, COLOR_RGB2GRAY);
cvtColor(img2, img2, COLOR_BGR2GRAY);
Mat imgDiff;
img1.copyTo(imgDiff);
@ -270,11 +270,11 @@ static void calcHomographyFeature(const Mat& image1, const Mat& image2)
Mat gray_image2;
// Convert to Grayscale
if(image1.channels() != 1)
cvtColor(image1, gray_image1, COLOR_RGB2GRAY);
cvtColor(image1, gray_image1, COLOR_BGR2GRAY);
else
image1.copyTo(gray_image1);
if(image2.channels() != 1)
cvtColor(image2, gray_image2, COLOR_RGB2GRAY);
cvtColor(image2, gray_image2, COLOR_BGR2GRAY);
else
image2.copyTo(gray_image2);
@ -332,7 +332,7 @@ static void calcHomographyFeature(const Mat& image1, const Mat& image2)
}
// Find the Homography Matrix
Mat H = findHomography( obj, scene, CV_RANSAC );
Mat H = findHomography( obj, scene, RANSAC );
// Use the Homography Matrix to warp the images
Mat result;
Mat Hinv = H.inv();
@ -390,7 +390,7 @@ static void comparePixelVsFeature(const Mat& img1_8b, const Mat& img2_8b)
int main(void)
{
Mat img1;
img1 = imread("home.png", CV_LOAD_IMAGE_UNCHANGED);
img1 = imread("home.png", IMREAD_UNCHANGED);
if(!img1.data) {
cout << "Could not open or find file" << endl;
return -1;
@ -405,13 +405,13 @@ int main(void)
testProjective(img1);
#ifdef COMPARE_FEATURES
Mat imgcmp1 = imread("LR_05.png", CV_LOAD_IMAGE_UNCHANGED);
Mat imgcmp1 = imread("LR_05.png", IMREAD_UNCHANGED);
if(!imgcmp1.data) {
cout << "Could not open or find file" << endl;
return -1;
}
Mat imgcmp2 = imread("LR_06.png", CV_LOAD_IMAGE_UNCHANGED);
Mat imgcmp2 = imread("LR_06.png", IMREAD_UNCHANGED);
if(!imgcmp2.data) {
cout << "Could not open or find file" << endl;
return -1;

@ -102,7 +102,7 @@ void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
//! [insideimage]
patch=img(region).clone();
cvtColor(patch,patch, CV_BGR2GRAY);
cvtColor(patch,patch, COLOR_BGR2GRAY);
//! [padding]
// add some padding to compensate when the patch is outside image border

@ -31,7 +31,7 @@ namespace cv{
Mat hsv;
img.convertTo(hsv,CV_32F,1.0/255.0);
cvtColor(hsv,hsv,CV_BGR2HSV);
cvtColor(hsv,hsv,COLOR_BGR2HSV);
HShist=Mat_<double>(nh,ns,0.0);
Vhist=Mat_<double>(1,nv,0.0);

@ -126,7 +126,7 @@ bool TrackerBoostingImpl::initImpl( const Mat& image, const Rect2d& boundingBox
Mat_<int> intImage;
Mat_<double> intSqImage;
Mat image_;
cvtColor( image, image_, CV_RGB2GRAY );
cvtColor( image, image_, COLOR_BGR2GRAY );
integral( image_, intImage, intSqImage, CV_32S );
TrackerSamplerCS::Params CSparameters;
CSparameters.overlap = params.samplerOverlap;
@ -208,7 +208,7 @@ bool TrackerBoostingImpl::updateImpl( const Mat& image, Rect2d& boundingBox )
Mat_<int> intImage;
Mat_<double> intSqImage;
Mat image_;
cvtColor( image, image_, CV_RGB2GRAY );
cvtColor( image, image_, COLOR_BGR2GRAY );
integral( image_, intImage, intSqImage, CV_32S );
//get the last location [AAM] X(k-1)
Ptr<TrackerTargetState> lastLocation = model->getLastTargetState();

@ -201,7 +201,7 @@ std::vector<Mat> TrackerCSRTImpl::get_features(const Mat &patch, const Size2i &f
}
if(params.use_gray) {
Mat gray_m;
cvtColor(patch, gray_m, CV_BGR2GRAY);
cvtColor(patch, gray_m, COLOR_BGR2GRAY);
resize(gray_m, gray_m, feature_size, 0, 0, INTER_CUBIC);
gray_m.convertTo(gray_m, CV_32FC1, 1.0/255.0, -0.5);
features.push_back(gray_m);
@ -465,15 +465,11 @@ Point2f TrackerCSRTImpl::estimate_new_position(const Mat &image)
// *********************************************************************
bool TrackerCSRTImpl::updateImpl(const Mat& image_, Rect2d& boundingBox)
{
//treat gray image as color image
Mat image;
if(image_.channels() == 1) {
std::vector<Mat> channels(3);
channels[0] = channels[1] = channels[2] = image_;
merge(channels, image);
} else {
if(image_.channels() == 1) //treat gray image as color image
cvtColor(image_, image, COLOR_GRAY2BGR);
else
image = image_;
}
object_center = estimate_new_position(image);
if (object_center.x < 0 && object_center.y < 0)
@ -512,15 +508,11 @@ bool TrackerCSRTImpl::updateImpl(const Mat& image_, Rect2d& boundingBox)
// *********************************************************************
bool TrackerCSRTImpl::initImpl(const Mat& image_, const Rect2d& boundingBox)
{
//treat gray image as color image
Mat image;
if(image_.channels() == 1) {
std::vector<Mat> channels(3);
channels[0] = channels[1] = channels[2] = image_;
merge(channels, image);
} else {
if(image_.channels() == 1) //treat gray image as color image
cvtColor(image_, image, COLOR_GRAY2BGR);
else
image = image_;
}
current_scale_factor = 1.0;
image_size = image.size();

@ -552,7 +552,7 @@ double get_min(const Mat &m)
Mat bgr2hsv(const Mat &img)
{
Mat hsv_img;
cvtColor(img, hsv_img, CV_BGR2HSV);
cvtColor(img, hsv_img, COLOR_BGR2HSV);
std::vector<Mat> hsv_img_channels;
split(hsv_img, hsv_img_channels);
hsv_img_channels.at(0).convertTo(hsv_img_channels.at(0), CV_8UC1, 255.0 / 180.0);

@ -700,7 +700,7 @@ namespace cv{
break;
default: // GRAY
if(img.channels()>1)
cvtColor(patch,feat, CV_BGR2GRAY);
cvtColor(patch,feat, COLOR_BGR2GRAY);
else
feat=patch;
//feat.convertTo(feat,CV_32F);

@ -319,3 +319,12 @@ year={2016},
publisher={Springer International Publishing},
pages={617--632},
}
@inproceedings{BarronPoole2016,
author = {Jonathan T Barron and Ben Poole},
title={The Fast Bilateral Solver},
booktitle={European Conference on Computer Vision (ECCV)},
year={2016},
publisher={Springer International Publishing},
pages={617--632},
}

@ -51,25 +51,25 @@ namespace ximgproc {
*
* For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf
*
* @param _op Source 8-bit or 16bit image, 1-channel or 3-channel image.
* @param _dst result CV_32FC image with same number of channel than _op.
* @param alphaDerive double see paper
* @param alphaMean double see paper
* @param op Source 8-bit or 16-bit image, 1-channel or 3-channel image.
* @param dst result CV_32F image with the same number of channels as op.
* @param alpha double see paper
* @param omega double see paper
*
*/
CV_EXPORTS void GradientDericheY(InputArray _op, OutputArray _dst, double alphaDerive,double alphaMean);
CV_EXPORTS_W void GradientDericheY(InputArray op, OutputArray dst, double alpha,double omega);
/**
* @brief Applies X Deriche filter to an image.
*
* For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf
*
* @param _op Source 8-bit or 16bit image, 1-channel or 3-channel image.
* @param _dst result CV_32FC image with same number of channel than _op.
* @param alphaDerive double see paper
* @param alphaMean double see paper
* @param op Source 8-bit or 16-bit image, 1-channel or 3-channel image.
* @param dst result CV_32F image with the same number of channels as op.
* @param alpha double see paper
* @param omega double see paper
*
*/
CV_EXPORTS void GradientDericheX(InputArray _op, OutputArray _dst, double alphaDerive,double alphaMean);
CV_EXPORTS_W void GradientDericheX(InputArray op, OutputArray dst, double alpha,double omega);
}
}

@ -437,8 +437,7 @@ guide then use FastBilateralSolverFilter interface to avoid extra computations.
@note Confidence images with CV_8U depth are expected to in [0, 255] and CV_32F in [0, 1] range.
*/
CV_EXPORTS_W void fastBilateralSolverFilter(InputArray guide, InputArray src, InputArray confidence, OutputArray dst, double sigma_spatial = 8, double sigma_luma = 8, double sigma_chroma = 8, int num_iter = 25, double max_tol = 1e-5);
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/** @brief Interface for implementations of Fast Global Smoother filter.

@ -0,0 +1,60 @@
import sys
import numpy as np
import cv2 as cv
def AddSlider(sliderName,windowName,minSlider,maxSlider,valDefault, update=[]):
if update is None:
cv.createTrackbar(sliderName, windowName, valDefault,maxSlider-minSlider+1)
else:
cv.createTrackbar(sliderName, windowName, valDefault,maxSlider-minSlider+1, update)
cv.setTrackbarMin(sliderName, windowName, minSlider)
cv.setTrackbarMax(sliderName, windowName, maxSlider)
cv.setTrackbarPos(sliderName, windowName, valDefault)
class Filtrage:
def __init__(self):
self.s =0
self.alpha = 100
self.omega = 100
self.updateFiltre=True
self.img=[]
self.dximg=[]
self.dyimg=[]
self.module=[]
def DericheFilter(self):
self.dximg = cv.ximgproc.GradientDericheX( self.img, self.alpha/100., self.omega/1000. )
self.dyimg = cv.ximgproc.GradientDericheY( self.img, self.alpha/100., self.omega/1000. )
dx2=self.dximg*self.dximg
dy2=self.dyimg*self.dyimg
self.module = np.sqrt(dx2+dy2)
cv.normalize(src=self.module,dst=self.module,norm_type=cv.NORM_MINMAX)
def SlideBarDeriche(self):
cv.destroyWindow(self.filename)
cv.namedWindow(self.filename)
AddSlider("alpha",self.filename,1,400,self.alpha,self.UpdateAlpha)
AddSlider("omega",self.filename,1,1000,self.omega,self.UpdateOmega)
def UpdateOmega(self,x ):
self.updateFiltre=True
self.omega=x
def UpdateAlpha(self,x ):
self.updateFiltre=True
self.alpha=x
def run(self,argv):
# Load the source image
self.filename = argv[0] if len(argv) > 0 else "../doc/pics/corridor_fld.jpg"
self.img=cv.imread(self.filename,cv.IMREAD_GRAYSCALE)
if self.img is None:
print ('cannot read file')
return
self.SlideBarDeriche()
while True:
cv.imshow(self.filename,self.img)
if self.updateFiltre:
self.DericheFilter()
cv.imshow("module",self.module)
self.updateFiltre =False
code = cv.waitKey(10)
if code==27:
break
if __name__ == '__main__':
Filtrage().run(sys.argv[1:])

@ -305,7 +305,6 @@ int main(int argc, char** argv)
(void)fbs_luma;
(void)fbs_chroma;
#endif
}
else if(filter=="wls_no_conf")
{

@ -46,23 +46,23 @@ Using Canny's criteria to derive a recursively implemented optimal edge detector
namespace cv {
namespace ximgproc {
template<typename T> static void
VerticalIIRFilter(Mat &img,Mat &dst,const Range &r,double alphaDerive)
VerticalIIRFilter(Mat &img,Mat &dst,const Range &r,double alpha,double omega)
{
float *f2;
int tailleSequence = (img.rows>img.cols) ? img.rows : img.cols;
Mat matG1(1, tailleSequence, CV_64FC1), matG2(1, tailleSequence, CV_64FC1);
double *g1 = matG1.ptr<double>(0), *g2 = (double*)matG2.ptr<double>(0);
double kp = pow(1 - exp(-alphaDerive), 2.0) / exp(-alphaDerive);
double a1, a2, a3, a4;
double a2, a3;
double b1, b2;
int rows = img.rows, cols = img.cols;
double c = (1 - 2 * exp(-alpha)*cos(omega) + exp(-2 * alpha)) / (exp(-alpha)*sin(omega));
double a = -c * exp(-alpha)*sin(omega);
a2 = 1;// kp*exp(-alpha);
a3 = 1;//-kp*exp(-alpha);
b1 = -2 * exp(-alpha)*cos(omega);
b2 = exp(-2 * alpha);
kp = pow(1 - exp(-alphaDerive), 2.0) / exp(-alphaDerive);
a1 = 0;
a2 = kp*exp(-alphaDerive), a3 = -kp*exp(-alphaDerive);
a4 = 0;
b1 = 2 * exp(-alphaDerive);
b2 = -exp(-2 * alphaDerive);
for (int j = r.start; j<r.end; j++)
{
// Causal vertical IIR filter
@ -71,78 +71,76 @@ VerticalIIRFilter(Mat &img,Mat &dst,const Range &r,double alphaDerive)
f2 += j;
c1 += j;
int i = 0;
g1[i] = (a1 + a2)* *c1;
g1[i] = a2* *c1;
i++;
c1 += cols;
g1[i] = a1 * *c1 + a2 * c1[-cols] + (b1)* g1[i - 1];
g1[i] =a2 * c1[-cols] - (b1)* g1[i - 1];
i++;
c1 += cols;
for (i = 2; i<rows; i++, c1 += cols)
g1[i] = a1 * *c1 + a2 * c1[-cols] + b1*g1[i - 1] + b2 *g1[i - 2];
g1[i] = a2 * c1[-cols] - b1*g1[i - 1] - b2 *g1[i - 2];
// Anticausal vertical IIR filter
c1 = img.ptr<T>(0);
c1 += (rows - 1)*cols + j;
i = rows - 1;
g2[i] = (a3 + a4)* *c1;
g2[i] = a3 * *c1;
i--;
c1 -= cols;
g2[i] = a3* c1[cols] + a4 * c1[cols] + (b1)*g2[i + 1];
g2[i] = a3* c1[cols] + (b1)*g2[i + 1];
i--;
c1 -= cols;
for (i = rows - 3; i >= 0; i--, c1 -= cols)
g2[i] = a3*c1[cols] + a4* c1[2 * cols] +
b1*g2[i + 1] + b2*g2[i + 2];
g2[i] = a3*c1[cols] -
b1*g2[i + 1] - b2*g2[i + 2];
for (i = 0; i<rows; i++, f2 += cols)
*f2 = (float)(g1[i] + g2[i]);
*f2 = static_cast<float>(a*(g1[i] - g2[i]));
}
}
template<typename T> static void
HorizontalIIRFilter(Mat &img, Mat &dst, const Range &r, double alphaDerive)
HorizontalIIRFilter(Mat &img, Mat &dst, const Range &r, double alpha, double omega)
{
float *f1;
int rows = img.rows, cols = img.cols;
int tailleSequence = (rows>cols) ? rows : cols;
Mat matG1(1, tailleSequence, CV_64FC1), matG2(1, tailleSequence, CV_64FC1);
double *g1 = (double*)matG1.ptr(0), *g2 = (double*)matG2.ptr(0);
double kp;;
double a1, a2, a3, a4;
double a,a2, a3;
double b1, b2;
double c = (1 - 2 * exp(-alpha)*cos(omega) + exp(-2 * alpha)) / (exp(-alpha)*sin(omega));
kp = pow(1 - exp(-alphaDerive), 2.0) / exp(-alphaDerive);
a1 = 0;
a2 = kp*exp(-alphaDerive);
a3 = -kp*exp(-alphaDerive);
a4 = 0;
b1 = 2 * exp(-alphaDerive);
b2 = -exp(-2 * alphaDerive);
a = -c*exp(-alpha)*sin(omega);
a2 = 1;// kp*exp(-alpha);
a3 = 1;//-kp*exp(-alpha);
b1 = -2 * exp(-alpha)*cos(omega);
b2 = exp(-2 * alpha);
for (int i = r.start; i<r.end; i++)
{
f1 = dst.ptr<float>(i);
T *c1 = img.ptr<T>(i);
int j = 0;
g1[j] = (a1 + a2)* *c1;
g1[j] = a2* *c1;
j++;
c1++;
g1[j] = a1 * c1[0] + a2*c1[j - 1] + (b1)* g1[j - 1];
g1[j] = a2*c1[j - 1] - (b1)* g1[j - 1];
j++;
c1++;
for (j = 2; j<cols; j++, c1++)
g1[j] = a1 * c1[0] + a2 * c1[-1] + b1*g1[j - 1] + b2*g1[j - 2];
g1[j] = a2 * c1[-1] - b1*g1[j - 1] - b2*g1[j - 2];
c1 = img.ptr<T>(0);
c1 += i*cols + cols - 1;
j = cols - 1;
g2[j] = (a3 + a4)* *c1;
g2[j] = a3* *c1;
j--;
c1--;
g2[j] = (a3 + a4) * c1[1] + b1 * g2[j + 1];
g2[j] = a3 * c1[1] - b1 * g2[j + 1];
j--;
c1--;
for (j = cols - 3; j >= 0; j--, c1--)
g2[j] = a3*c1[1] + a4*c1[2] + b1*g2[j + 1] + b2*g2[j + 2];
g2[j] = a3*c1[1] - b1*g2[j + 1] - b2*g2[j + 2];
for (j = 0; j<cols; j++, f1++)
*f1 = (float)(g1[j] + g2[j]);
*f1 = static_cast<float>(a*(g1[j] - g2[j]));
}
}
@ -151,15 +149,17 @@ class ParallelGradientDericheYCols : public ParallelLoopBody
private:
Mat &img;
Mat &dst;
double alphaDerive;
double alpha;
double omega;
bool verbose;
public:
ParallelGradientDericheYCols(Mat &imgSrc, Mat &d, double ald) :
ParallelGradientDericheYCols(Mat &imgSrc, Mat &d, double ald,double o) :
img(imgSrc),
dst(d),
alphaDerive(ald),
alpha(ald),
omega(o),
verbose(false)
{
int type = img.depth();
@ -175,19 +175,19 @@ public:
switch (img.depth()) {
case CV_8U:
VerticalIIRFilter<uchar>(img,dst,range, alphaDerive);
VerticalIIRFilter<uchar>(img,dst,range, alpha,omega);
break;
case CV_8S:
VerticalIIRFilter<char>(img, dst, range, alphaDerive);
VerticalIIRFilter<char>(img, dst, range, alpha, omega);
break;
case CV_16U:
VerticalIIRFilter<ushort>(img, dst, range, alphaDerive);
VerticalIIRFilter<ushort>(img, dst, range, alpha, omega);
break;
case CV_16S:
VerticalIIRFilter<short>(img, dst, range, alphaDerive);
VerticalIIRFilter<short>(img, dst, range, alpha, omega);
break;
case CV_32F:
VerticalIIRFilter<float>(img, dst, range, alphaDerive);
VerticalIIRFilter<float>(img, dst, range, alpha, omega);
break;
default:
return;
@ -204,14 +204,16 @@ class ParallelGradientDericheYRows : public ParallelLoopBody
private:
Mat &img;
Mat &dst;
double alphaMoyenne;
double alpha;
double omega;
bool verbose;
public:
ParallelGradientDericheYRows(Mat& imgSrc, Mat &d, double alm) :
ParallelGradientDericheYRows(Mat& imgSrc, Mat &d, double ald,double o) :
img(imgSrc),
dst(d),
alphaMoyenne(alm),
alpha(ald),
omega(o),
verbose(false)
{
int type = img.depth();
@ -228,42 +230,44 @@ public:
int tailleSequence = (img.rows>img.cols) ? img.rows : img.cols;
Mat matG1(1,tailleSequence,CV_64FC1), matG2(1,tailleSequence,CV_64FC1);
double *g1 = matG1.ptr<double>(0), *g2 = matG2.ptr<double>(0);
double k, a5, a6, a7, a8;
double b3, b4;
int cols = img.cols;
k = pow(1 - exp(-alphaMoyenne), 2.0) / (1 + 2 * alphaMoyenne*exp(-alphaMoyenne) - exp(-2 * alphaMoyenne));
a5 = k;
a6 = k*exp(-alphaMoyenne)*(alphaMoyenne - 1);
a7 = k*exp(-alphaMoyenne)*(alphaMoyenne + 1);
a8 = -k*exp(-2 * alphaMoyenne);
b3 = 2 * exp(-alphaMoyenne);
b4 = -exp(-2 * alphaMoyenne);
double a2po2 = (alpha*alpha + omega * omega);
double k = (1 - 2 * exp(-alpha)*cos(omega) + exp(-2 * alpha))*a2po2;
k = k / (2 * alpha*exp(-alpha)*sin(omega) + omega - omega * exp(-2 * alpha));
double c1 = k * alpha / a2po2;
double c2 = k * omega / a2po2;
double a0 = c2;
double a1 = (-c2 * cos(omega) + c1 * sin(omega))*exp(-alpha);
double b1 = -2 * exp(-alpha)*cos(omega);
double b2 = exp(-2 * alpha);
double a2 = a1 - c2 * b1, a3 = -c2 * b2;
for (int i = range.start; i<range.end; i++)
{
f2 = dst.ptr<float>(i);
f1 = img.ptr<float>(i);
int j = 0;
g1[j] = (a5 + a6)* *f1;
g1[j] = (a0 + a1)* *f1;
j++;
f1++;
g1[j] = a5 * f1[0] + a6*f1[j - 1] + (b3)* g1[j - 1];
g1[j] = a0 * f1[0] + a1*f1[j - 1] - b1* g1[j - 1];
j++;
f1++;
for (j = 2; j<cols; j++, f1++)
g1[j] = a5 * f1[0] + a6 * f1[-1] + b3*g1[j - 1] + b4*g1[j - 2];
g1[j] = a0 * f1[0] + a1 * f1[-1] - b1*g1[j - 1] - b2*g1[j - 2];
f1 = ((float*)img.ptr(0));
f1 += i*cols + cols - 1;
j = cols - 1;
g2[j] = (a7 + a8)* *f1;
g2[j] = (a2 + a3)* *f1;
j--;
f1--;
g2[j] = (a7 + a8) * f1[1] + (b3)* g2[j + 1];
g2[j] = (a2 + a3) * f1[1] - b2* g2[j + 1];
j--;
f1--;
for (j = cols - 3; j >= 0; j--, f1--)
g2[j] = a7*f1[1] + a8*f1[2] + b3*g2[j + 1] + b4*g2[j + 2];
g2[j] = a2*f1[1] + a3*f1[2] - b1*g2[j + 1] - b2*g2[j + 2];
for (j = 0; j<cols; j++, f2++)
*f2 = (float)(g1[j] + g2[j]);
}
@ -280,14 +284,16 @@ class ParallelGradientDericheXCols : public ParallelLoopBody
private:
Mat &img;
Mat &dst;
double alphaMoyenne;
double alpha;
double omega;
bool verbose;
public:
ParallelGradientDericheXCols(Mat& imgSrc, Mat &d, double alm) :
ParallelGradientDericheXCols(Mat& imgSrc, Mat &d, double alm,double o) :
img(imgSrc),
dst(d),
alphaMoyenne(alm),
alpha(alm),
omega(o),
verbose(false)
{
int type = img.depth();
@ -306,40 +312,43 @@ public:
int tailleSequence = (rows>cols) ? rows : cols;
Mat matG1(1,tailleSequence,CV_64FC1), matG2(1,tailleSequence,CV_64FC1);
double *g1 = (double*)matG1.ptr(0), *g2 = (double*)matG2.ptr(0);
double k, a5, a6, a7, a8 = 0;
double b3, b4;
double a2po2 = (alpha*alpha + omega * omega);
double k = (1 - 2 * exp(-alpha)*cos(omega) + exp(-2 * alpha))*a2po2;
k = k / (2 * alpha*exp(-alpha)*sin(omega) + omega - omega * exp(-2 * alpha));
double c1 = k * alpha / a2po2;
double c2 = k * omega / a2po2;
double a0 = c2;
double a1 = (-c2 * cos(omega) + c1 * sin(omega))*exp(-alpha);
double b1 = -2 * exp(-alpha)*cos(omega);
double b2=exp(-2*alpha);
double a2=a1-c2*b1, a3=-c2*b2;
k = pow(1 - exp(-alphaMoyenne), 2.0) / (1 + 2 * alphaMoyenne*exp(-alphaMoyenne) - exp(-2 * alphaMoyenne));
a5 = k, a6 = k*exp(-alphaMoyenne)*(alphaMoyenne - 1);
a7 = k*exp(-alphaMoyenne)*(alphaMoyenne + 1), a8 = -k*exp(-2 * alphaMoyenne);
b3 = 2 * exp(-alphaMoyenne);
b4 = -exp(-2 * alphaMoyenne);
for (int j = range.start; j<range.end; j++)
{
f1 = img.ptr<float>(0);
f1 += j;
int i = 0;
g1[i] = (a5 + a6)* *f1;
g1[i] = (a0 + a1)* *f1;
i++;
f1 += cols;
g1[i] = a5 * *f1 + a6 * f1[-cols] + (b3)* g1[i - 1];
g1[i] = a0 * *f1 + a1 * f1[-cols] - (b1)* g1[i - 1];
i++;
f1 += cols;
for (i = 2; i<rows; i++, f1 += cols)
g1[i] = a5 * *f1 + a6 * f1[-cols] + b3*g1[i - 1] + b4 *g1[i - 2];
g1[i] = a0 * *f1 + a1 * f1[-cols] - b1*g1[i - 1] - b2 *g1[i - 2];
f1 = img.ptr<float>(0);
f1 += (rows - 1)*cols + j;
i = rows - 1;
g2[i] = (a7 + a8)* *f1;
g2[i] = (a2 + a3)* *f1;
i--;
f1 -= cols;
g2[i] = (a7 + a8)* f1[cols] + (b3)*g2[i + 1];
g2[i] = (a2 + a3)* f1[cols] - b2*g2[i + 1];
i--;
f1 -= cols;
for (i = rows - 3; i >= 0; i--, f1 -= cols)
g2[i] = a7*f1[cols] + a8* f1[2 * cols] +
b3*g2[i + 1] + b4*g2[i + 2];
g2[i] = a2*f1[cols] + a3* f1[2 * cols] -
b1*g2[i + 1] - b2*g2[i + 2];
for (i = 0; i<rows; i++, f2 += cols)
{
f2 = (dst.ptr<float>(i)) + (j*img.channels());
@ -358,14 +367,16 @@ class ParallelGradientDericheXRows : public ParallelLoopBody
private:
Mat &img;
Mat &dst;
double alphaDerive;
double alpha;
double omega;
bool verbose;
public:
ParallelGradientDericheXRows(Mat& imgSrc, Mat &d, double ald) :
ParallelGradientDericheXRows(Mat& imgSrc, Mat &d, double ald, double o) :
img(imgSrc),
dst(d),
alphaDerive(ald),
alpha(ald),
omega(o),
verbose(false)
{
int type = img.depth();
@ -381,19 +392,19 @@ public:
switch (img.depth()) {
case CV_8U:
HorizontalIIRFilter<uchar>(img,dst,range,alphaDerive);
HorizontalIIRFilter<uchar>(img,dst,range, alpha,omega);
break;
case CV_8S:
HorizontalIIRFilter<char>(img, dst, range, alphaDerive);
HorizontalIIRFilter<char>(img, dst, range, alpha, omega);
break;
case CV_16U:
HorizontalIIRFilter<ushort>(img, dst, range, alphaDerive);
HorizontalIIRFilter<ushort>(img, dst, range, alpha, omega);
break;
case CV_16S:
HorizontalIIRFilter<short>(img, dst, range, alphaDerive);
HorizontalIIRFilter<short>(img, dst, range, alpha, omega);
break;
case CV_32F:
HorizontalIIRFilter<float>(img, dst, range, alphaDerive);
HorizontalIIRFilter<float>(img, dst, range, alpha, omega);
break;
default:
return;
@ -404,10 +415,11 @@ public:
};
};
void GradientDericheY(InputArray _op, OutputArray _dst,double alphaDerive, double alphaMean)
void GradientDericheY(InputArray _op, OutputArray _dst,double alphaDerive, double omega)
{
std::vector<Mat> planSrc;
split(_op, planSrc);
std::vector<Mat> planTmp;
std::vector<Mat> planDst;
for (size_t i = 0; i < planSrc.size(); i++)
@ -415,15 +427,15 @@ void GradientDericheY(InputArray _op, OutputArray _dst,double alphaDerive, doubl
planTmp.push_back(Mat(_op.size(), CV_32FC1));
planDst.push_back(Mat(_op.size(), CV_32FC1));
CV_Assert(planSrc[i].isContinuous() && planTmp[i].isContinuous() && planDst[i].isContinuous());
ParallelGradientDericheYCols x(planSrc[i], planTmp[i], alphaDerive);
ParallelGradientDericheYCols x(planSrc[i], planTmp[i], alphaDerive,omega);
parallel_for_(Range(0, planSrc[i].cols), x, getNumThreads());
ParallelGradientDericheYRows xr(planTmp[i], planDst[i], alphaMean);
ParallelGradientDericheYRows xr(planTmp[i], planDst[i], alphaDerive, omega);
parallel_for_(Range(0, planTmp[i].rows), xr, getNumThreads());
}
merge(planDst, _dst);
}
void GradientDericheX(InputArray _op, OutputArray _dst, double alphaDerive, double alphaMean)
void GradientDericheX(InputArray _op, OutputArray _dst, double alpha, double omega)
{
std::vector<Mat> planSrc;
split(_op, planSrc);
@ -435,9 +447,9 @@ void GradientDericheX(InputArray _op, OutputArray _dst, double alphaDerive, doub
planDst.push_back(Mat(_op.size(), CV_32FC1));
CV_Assert(planSrc[i].isContinuous() && planTmp[i].isContinuous() && planDst[i].isContinuous());
ParallelGradientDericheXRows x(planSrc[i], planTmp[i], alphaDerive);
ParallelGradientDericheXRows x(planSrc[i], planTmp[i], alpha, omega);
parallel_for_(Range(0, planSrc[i].rows), x, getNumThreads());
ParallelGradientDericheXCols xr(planTmp[i], planDst[i], alphaMean);
ParallelGradientDericheXCols xr(planTmp[i], planDst[i], alpha, omega);
parallel_for_(Range(0, planTmp[i].cols), xr, getNumThreads());
}
merge(planDst, _dst);

@ -0,0 +1,41 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(ximgproc_DericheFilter, regression)
{
Mat img = Mat::zeros(64, 64, CV_8UC3);
Mat res = Mat::zeros(64, 64, CV_32FC3);
img.at<Vec3b>(31, 31) = Vec3b(1, 2, 4);
double a = 0.5;
double w = 0.0005;
Mat dst;
ximgproc::GradientDericheX(img, dst, a, w);
double c = pow(1 - exp(-a), 2.0) * exp(a);
double k = pow(a*(1 - exp(-a)), 2.0) / (1 + 2 * a*exp(-a) - exp(-2 * a));
for (int i = 0; i < img.rows; i++)
{
double n = -31 + i;
for (int j = 0; j < img.cols; j++)
{
double m = -31 + j;
double x = -c * exp(-a * fabs(m))*sin(w*m);
x = x * (k*(a*sin(w*fabs(n)) + w * cos(w*fabs(n)))*exp(-a * fabs(n))) / (a*a + w * w);
x = x / (w*w);
float xx=static_cast<float>(x);
res.at<Vec3f>(i, j) = Vec3f(xx, 2 * xx, 4 * xx);
}
}
EXPECT_LE(cv::norm(res, dst, NORM_INF), 1e-5);
Mat dst2;
ximgproc::GradientDericheY(img, dst2, a, w);
cv::transpose(dst2, dst2);
EXPECT_LE(cv::norm(dst2, dst, NORM_INF), 1e-5);
}
}
} // namespace

@ -82,7 +82,7 @@ TEST(FastBilateralSolverTest, SplatSurfaceAccuracy)
// When filtering a constant image we should get the same image:
double normL1 = cvtest::norm(src, res, NORM_L1)/src.total()/src.channels();
EXPECT_LE(normL1, 1.0);
EXPECT_LE(normL1, 1.0/64);
}
}
@ -91,7 +91,8 @@ TEST(FastBilateralSolverTest, ReferenceAccuracy)
string dir = getDataDir() + "cv/edgefilter";
Mat src = imread(dir + "/kodim23.png");
Mat ref = imread(dir + "/fgs/kodim23_lambda=1000_sigma=10.png");
Mat ref = imread(dir + "/fbs/kodim23_spatial=16_luma=16_chroma=16.png");
Mat confidence(src.size(), CV_MAKE_TYPE(CV_8U, 1), 255);
ASSERT_FALSE(src.empty());
@ -103,7 +104,7 @@ TEST(FastBilateralSolverTest, ReferenceAccuracy)
double totalMaxError = 1.0/64.0*src.total()*src.channels();
EXPECT_LE(cvtest::norm(res, ref, NORM_L2), totalMaxError);
EXPECT_LE(cvtest::norm(res, ref, NORM_INF), 100);
EXPECT_LE(cvtest::norm(res, ref, NORM_INF), 1);
}
INSTANTIATE_TEST_CASE_P(FullSet, FastBilateralSolverTest,Combine(Values(szODD, szQVGA), SrcTypes::all(), GuideTypes::all()));

@ -3,9 +3,6 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc/types_c.h"
#include <ctime>
#include <iostream>
@ -41,22 +38,22 @@ int main( int argc, const char** argv )
std::string maskFilename = parser.get<std::string>("m");
std::string outFilename = parser.get<std::string>("o");
cv::Mat src = cv::imread(inFilename, -1);
cv::Mat src = cv::imread(inFilename, cv::IMREAD_UNCHANGED);
if ( src.empty() )
{
printf( "Cannot read image file: %s\n", inFilename.c_str() );
return -1;
}
cv::cvtColor(src, src, CV_RGB2Lab);
cv::cvtColor(src, src, cv::COLOR_BGR2Lab);
cv::Mat mask = cv::imread(maskFilename, 0);
cv::Mat mask = cv::imread(maskFilename, cv::IMREAD_GRAYSCALE);
if ( mask.empty() )
{
printf( "Cannot read image file: %s\n", maskFilename.c_str() );
return -1;
}
cv::threshold(mask, mask, 128, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
cv::threshold(mask, mask, 128, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
cv::Mat res(src.size(), src.type());
@ -65,7 +62,7 @@ int main( int argc, const char** argv )
std::cout << "time = " << (clock() - time)
/ double(CLOCKS_PER_SEC) << std::endl;
cv::cvtColor(res, res, CV_Lab2RGB);
cv::cvtColor(res, res, cv::COLOR_Lab2BGR);
if ( outFilename == "" )
{
@ -78,4 +75,4 @@ int main( int argc, const char** argv )
cv::imwrite(outFilename, res);
return 0;
}
}

Loading…
Cancel
Save