commit e98f42eea5 (pull/1348/head)
Author: sghoshcvc · 8 years ago

    Merge branch 'GSOC_text_detect_DNN_backend' of https://github.com/sghoshcvc/opencv_contrib into GSOC_text_detect_DNN_backend

    merge conflict
55 changed files (changed-line counts in parentheses):

  1. modules/aruco/src/charuco.cpp (4)
  2. modules/bgsegm/include/opencv2/bgsegm.hpp (2)
  3. modules/bioinspired/include/opencv2/bioinspired/retina.hpp (48)
  4. modules/bioinspired/include/opencv2/bioinspired/retinafasttonemapping.hpp (4)
  5. modules/bioinspired/include/opencv2/bioinspired/transientareassegmentationmodule.hpp (11)
  6. modules/bioinspired/perf/opencl/perf_retina.ocl.cpp (2)
  7. modules/bioinspired/samples/OpenEXRimages_HDR_Retina_toneMapping.cpp (4)
  8. modules/bioinspired/samples/basicRetina.cpp (91)
  9. modules/bioinspired/samples/retinaDemo.cpp (159)
  10. modules/bioinspired/src/basicretinafilter.cpp (100)
  11. modules/bioinspired/src/basicretinafilter.hpp (32)
  12. modules/bioinspired/src/imagelogpolprojection.cpp (4)
  13. modules/bioinspired/src/magnoretinafilter.cpp (20)
  14. modules/bioinspired/src/magnoretinafilter.hpp (12)
  15. modules/bioinspired/src/parvoretinafilter.cpp (10)
  16. modules/bioinspired/src/retina.cpp (22)
  17. modules/bioinspired/src/retinacolor.cpp (36)
  18. modules/bioinspired/src/retinacolor.hpp (14)
  19. modules/bioinspired/src/retinafasttonemapping.cpp (2)
  20. modules/bioinspired/src/retinafilter.cpp (20)
  21. modules/bioinspired/src/templatebuffer.hpp (26)
  22. modules/bioinspired/src/transientareassegmentationmodule.cpp (4)
  23. modules/bioinspired/test/test_retina_ocl.cpp (2)
  24. modules/cnn_3dobj/CMakeLists.txt (4)
  25. modules/dnn_modern/CMakeLists.txt (9)
  26. modules/line_descriptor/samples/compute_descriptors.cpp (3)
  27. modules/line_descriptor/samples/knn_matching.cpp (3)
  28. modules/line_descriptor/samples/lines_extraction.cpp (3)
  29. modules/line_descriptor/samples/lsd_lines_extraction.cpp (3)
  30. modules/line_descriptor/samples/matching.cpp (3)
  31. modules/line_descriptor/samples/radius_matching.cpp (4)
  32. modules/optflow/samples/gpc_evaluate.cpp (18)
  33. modules/optflow/src/sparse_matching_gpc.cpp (3)
  34. modules/plot/include/opencv2/plot.hpp (40)
  35. modules/plot/src/plot.cpp (57)
  36. modules/rgbd/include/opencv2/rgbd.hpp (346)
  37. modules/rgbd/src/odometry.cpp (53)
  38. modules/structured_light/include/opencv2/structured_light/sinusoidalpattern.hpp (24)
  39. modules/structured_light/samples/capsinpattern.cpp (5)
  40. modules/structured_light/src/sinusoidalpattern.cpp (6)
  41. modules/structured_light/test/test_faps.cpp (11)
  42. modules/text/doc/text.bib (34)
  43. modules/text/include/opencv2/text.hpp (6)
  44. modules/text/include/opencv2/text/erfilter.hpp (41)
  45. modules/text/include/opencv2/text/ocr.hpp (1)
  46. modules/text/src/precomp.hpp (3)
  47. modules/tracking/samples/benchmark.cpp (2)
  48. modules/tracking/src/trackerKCF.cpp (4)
  49. modules/xfeatures2d/CMakeLists.txt (2)
  50. modules/xfeatures2d/samples/bagofwords_classification.cpp (2)
  51. modules/xfeatures2d/samples/shape_transformation.cpp (3)
  52. modules/xfeatures2d/samples/video_homography.cpp (1)
  53. modules/xfeatures2d/test/test_features2d.cpp (4)
  54. modules/ximgproc/src/selectivesearchsegmentation.cpp (9)
  55. modules/ximgproc/src/structured_edge_detection.cpp (96)

@@ -738,7 +738,7 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners,
 CV_Assert(_markerIds.total() > 0 && _markerIds.total() == _markerCorners.total());
-const float minRepDistanceRate = 0.12f;
+const float minRepDistanceRate = 1.302455f;
 // create Charuco board layout for diamond (3x3 layout)
 Ptr<Dictionary> dict = getPredefinedDictionary(PREDEFINED_DICTIONARY_NAME(0));
@@ -771,7 +771,7 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners,
 perimeterSq += edge.x*edge.x + edge.y*edge.y;
 }
 // maximum reprojection error relative to perimeter
-float minRepDistance = perimeterSq * minRepDistanceRate * minRepDistanceRate;
+float minRepDistance = sqrt(perimeterSq) * minRepDistanceRate;
 int currentId = _markerIds.getMat().at< int >(i);
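
Editor's note: the fix changes the threshold's units. perimeterSq accumulates squared edge lengths, so the removed expression grew quadratically with marker size, while sqrt(perimeterSq) * minRepDistanceRate is a length in pixels. A minimal sketch of the difference, assuming a hypothetical square marker with 100 px edges (only the two constants come from the diff above):

    #include <cmath>
    #include <cstdio>

    int main() {
        float perimeterSq = 4 * 100.f * 100.f;           // sum of squared edge lengths, in px^2
        float oldT = perimeterSq * 0.12f * 0.12f;        // removed formula: 576, units px^2
        float newT = std::sqrt(perimeterSq) * 1.302455f; // added formula: ~260.5, units px
        std::printf("old=%.1f new=%.1f\n", oldT, newT);
        return 0;
    }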

@@ -189,6 +189,8 @@ CV_EXPORTS_W Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int init
 About as fast as MOG2 on a high end system.
+More than twice faster than MOG2 on cheap hardware (benchmarked on Raspberry Pi3).
+%Algorithm by Sagi Zeevi ( https://github.com/sagi-z/BackgroundSubtractorCNT )
 */
 class CV_EXPORTS_W BackgroundSubtractorCNT : public BackgroundSubtractor
 {
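
Editor's note: a minimal usage sketch for the class documented in this hunk (illustrative, not from the diff; it assumes the createBackgroundSubtractorCNT() factory that bgsegm.hpp declares for this class, with its default arguments):

    #include <opencv2/bgsegm.hpp>
    #include <opencv2/imgcodecs.hpp>

    int main() {
        cv::Ptr<cv::bgsegm::BackgroundSubtractorCNT> bs =
            cv::bgsegm::createBackgroundSubtractorCNT();
        cv::Mat frame = cv::imread("frame.png"), fgMask;
        bs->apply(frame, fgMask); // in a real program, feed consecutive frames in a loop
        return 0;
    }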

@@ -422,32 +422,30 @@ public:
 Retina::getParvo methods
 */
 CV_WRAP virtual void activateContoursProcessing(const bool activate)=0;
-};
-//! @relates bioinspired::Retina
-//! @{
-/** @overload */
-CV_EXPORTS_W Ptr<Retina> createRetina(Size inputSize);
-/** @brief Constructors from standardized interfaces : retreive a smart pointer to a Retina instance
-@param inputSize the input frame size
-@param colorMode the chosen processing mode : with or without color processing
-@param colorSamplingMethod specifies which kind of color sampling will be used :
-- cv::bioinspired::RETINA_COLOR_RANDOM: each pixel position is either R, G or B in a random choice
-- cv::bioinspired::RETINA_COLOR_DIAGONAL: color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR...
-- cv::bioinspired::RETINA_COLOR_BAYER: standard bayer sampling
-@param useRetinaLogSampling activate retina log sampling, if true, the 2 following parameters can
-be used
-@param reductionFactor only usefull if param useRetinaLogSampling=true, specifies the reduction
-factor of the output frame (as the center (fovea) is high resolution and corners can be
-underscaled, then a reduction of the output is allowed without precision leak
-@param samplingStrenght only usefull if param useRetinaLogSampling=true, specifies the strenght of
-the log scale that is applied
-*/
-CV_EXPORTS_W Ptr<Retina> createRetina(Size inputSize, const bool colorMode, int colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const float reductionFactor=1.0f, const float samplingStrenght=10.0f);
-//! @}
+/** @overload */
+CV_WRAP static Ptr<Retina> create(Size inputSize);
+/** @brief Constructors from standardized interfaces : retreive a smart pointer to a Retina instance
+@param inputSize the input frame size
+@param colorMode the chosen processing mode : with or without color processing
+@param colorSamplingMethod specifies which kind of color sampling will be used :
+- cv::bioinspired::RETINA_COLOR_RANDOM: each pixel position is either R, G or B in a random choice
+- cv::bioinspired::RETINA_COLOR_DIAGONAL: color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR...
+- cv::bioinspired::RETINA_COLOR_BAYER: standard bayer sampling
+@param useRetinaLogSampling activate retina log sampling, if true, the 2 following parameters can
+be used
+@param reductionFactor only usefull if param useRetinaLogSampling=true, specifies the reduction
+factor of the output frame (as the center (fovea) is high resolution and corners can be
+underscaled, then a reduction of the output is allowed without precision leak
+@param samplingStrenght only usefull if param useRetinaLogSampling=true, specifies the strenght of
+the log scale that is applied
+*/
+CV_WRAP static Ptr<Retina> create(Size inputSize, const bool colorMode,
+int colorSamplingMethod=RETINA_COLOR_BAYER,
+const bool useRetinaLogSampling=false,
+const float reductionFactor=1.0f, const float samplingStrenght=10.0f);
+};
 //! @}
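
Editor's note: the net effect of this hunk is that allocation moves from free factory functions to static members of the class. A minimal before/after caller sketch (illustrative; only the two factory names come from the diff):

    // before this commit:
    //   cv::Ptr<cv::bioinspired::Retina> retina = cv::bioinspired::createRetina(inputSize);
    // after this commit:
    cv::Ptr<cv::bioinspired::Retina> retina = cv::bioinspired::Retina::create(inputSize);
    retina->run(inputImage); // subsequent use is unchanged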

@@ -126,10 +126,10 @@ public:
 (default is 1, see reference paper)
 */
 CV_WRAP virtual void setup(const float photoreceptorsNeighborhoodRadius=3.f, const float ganglioncellsNeighborhoodRadius=1.f, const float meanLuminanceModulatorK=1.f)=0;
+CV_WRAP static Ptr<RetinaFastToneMapping> create(Size inputSize);
 };
-//! @relates bioinspired::RetinaFastToneMapping
-CV_EXPORTS_W Ptr<RetinaFastToneMapping> createRetinaFastToneMapping(Size inputSize);
 //! @}

@@ -187,13 +187,12 @@ public:
 /** @brief cleans all the buffers of the instance
 */
 CV_WRAP virtual void clearAllBuffers()=0;
-};
-/** @brief allocator
-@param inputSize : size of the images input to segment (output will be the same size)
-@relates bioinspired::TransientAreasSegmentationModule
-*/
-CV_EXPORTS_W Ptr<TransientAreasSegmentationModule> createTransientAreasSegmentationModule(Size inputSize);
+/** @brief allocator
+@param inputSize : size of the images input to segment (output will be the same size)
+*/
+CV_WRAP static Ptr<TransientAreasSegmentationModule> create(Size inputSize);
+};
 //! @}

@@ -29,7 +29,7 @@ OCL_PERF_TEST_P(RetinaFixture, Retina,
 UMat ocl_parvo, ocl_magno;
 {
-Ptr<cv::bioinspired::Retina> retina = cv::bioinspired::createRetina(
+Ptr<cv::bioinspired::Retina> retina = cv::bioinspired::Retina::create(
 input.size(), colorMode, colorSamplingMethod, useLogSampling,
 reductionFactor, samplingStrength);

@@ -220,10 +220,10 @@ int main(int argc, char* argv[])
 */
 if (useLogSampling)
 {
-retina = cv::bioinspired::createRetina(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
+retina = cv::bioinspired::Retina::create(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
 }
 else// -> else allocate "classical" retina :
-retina = cv::bioinspired::createRetina(inputImage.size());
+retina = cv::bioinspired::Retina::create(inputImage.size());
 // create a fast retina tone mapper (Meyla&al algorithm)
 std::cout<<"Allocating fast tone mapper..."<<std::endl;

@@ -1,91 +0,0 @@
-//============================================================================
-// Name : retinademo.cpp
-// Author : Alexandre Benoit, benoit.alexandre.vision@gmail.com
-// Version : 0.1
-// Copyright : LISTIC/GIPSA French Labs, May 2015
-// Description : Gipsa/LISTIC Labs quick retina demo in C++, Ansi-style
-//============================================================================
-// include bioinspired module and OpenCV core utilities
-#include "opencv2/bioinspired.hpp"
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/videoio.hpp"
-#include "opencv2/highgui.hpp"
-#include <iostream>
-#include <cstring>
-// main function
-int main(int argc, char* argv[]) {
-// basic input arguments checking
-if (argc>1)
-{
-std::cout<<"****************************************************"<<std::endl;
-std::cout<<"* Retina demonstration : demonstrates the use of is a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
-std::cout<<"* This retina model allows spatio-temporal image processing (applied on a webcam sequences)."<<std::endl;
-std::cout<<"* As a summary, these are the retina model properties:"<<std::endl;
-std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
-std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
-std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
-std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
-std::cout<<"* for more information, reer to the following papers :"<<std::endl;
-std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
-std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
-std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
-std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
-std::cout<<"****************************************************"<<std::endl;
-std::cout<<" NOTE : this program generates the default retina parameters file 'RetinaDefaultParameters.xml'"<<std::endl;
-std::cout<<" => you can use this to fine tune parameters and load them if you save to file 'RetinaSpecificParameters.xml'"<<std::endl;
-if (strcmp(argv[1], "help")==0){
-std::cout<<"No help provided for now, please test the retina Demo for a more complete program"<<std::endl;
-}
-}
-std::string inputMediaType=argv[1];
-// declare the retina input buffer.
-cv::Mat inputFrame;
-// setup webcam reader and grab a first frame to get its size
-cv::VideoCapture videoCapture(0);
-videoCapture>>inputFrame;
-// allocate a retina instance with input size equal to the one of the loaded image
-cv::Ptr<cv::bioinspired::Retina> myRetina = cv::bioinspired::createRetina(inputFrame.size());
-/* retina parameters management methods use sample
--> save current (here default) retina parameters to a xml file (you may use it only one time to get the file and modify it)
-*/
-myRetina->write("RetinaDefaultParameters.xml");
-// -> load parameters if file exists
-myRetina->setup("RetinaSpecificParameters.xml");
-// reset all retina buffers (open your eyes)
-myRetina->clearBuffers();
-// declare retina output buffers
-cv::Mat retinaOutput_parvo;
-cv::Mat retinaOutput_magno;
-//main processing loop
-bool stillProcess=true;
-while(stillProcess){
-// if using video stream, then, grabbing a new frame, else, input remains the same
-if (videoCapture.isOpened())
-videoCapture>>inputFrame;
-else
-stillProcess=false;
-// run retina filter
-myRetina->run(inputFrame);
-// Retrieve and display retina output
-myRetina->getParvo(retinaOutput_parvo);
-myRetina->getMagno(retinaOutput_magno);
-cv::imshow("retina input", inputFrame);
-cv::imshow("Retina Parvo", retinaOutput_parvo);
-cv::imshow("Retina Magno", retinaOutput_magno);
-cv::waitKey(5);
-}
-}

@@ -13,96 +13,66 @@
 #include "opencv2/imgcodecs.hpp"
 #include "opencv2/videoio.hpp"
 #include "opencv2/highgui.hpp"
+#include "opencv2/core/ocl.hpp"
-static void help(std::string errorMessage)
-{
-std::cout<<"Program init error : "<<errorMessage<<std::endl;
-std::cout<<"\nProgram call procedure : retinaDemo [processing mode] [Optional : media target] [Optional LAST parameter: \"log\" to activate retina log sampling]"<<std::endl;
-std::cout<<"\t[processing mode] :"<<std::endl;
-std::cout<<"\t -image : for still image processing"<<std::endl;
-std::cout<<"\t -video : for video stream processing"<<std::endl;
-std::cout<<"\t[Optional : media target] :"<<std::endl;
-std::cout<<"\t if processing an image or video file, then, specify the path and filename of the target to process"<<std::endl;
-std::cout<<"\t leave empty if processing video stream coming from a connected video device"<<std::endl;
-std::cout<<"\t[Optional : activate retina log sampling] : an optional last parameter can be specified for retina spatial log sampling"<<std::endl;
-std::cout<<"\t set \"log\" without quotes to activate this sampling, output frame size will be divided by 4"<<std::endl;
-std::cout<<"\nExamples:"<<std::endl;
-std::cout<<"\t-Image processing : ./retinaDemo -image lena.jpg"<<std::endl;
-std::cout<<"\t-Image processing with log sampling : ./retinaDemo -image lena.jpg log"<<std::endl;
-std::cout<<"\t-Video processing : ./retinaDemo -video myMovie.mp4"<<std::endl;
-std::cout<<"\t-Live video processing : ./retinaDemo -video"<<std::endl;
-std::cout<<"\nPlease start again with new parameters"<<std::endl;
-}
+const std::string keys =
+"{image | | Input from image file }"
+"{video | | Input from video file }"
+"{camera | 0 | Index of input camera. If image or video is not specified, camera 0 will be used }"
+"{log | | Activate retina log sampling }"
+"{ocl | | Use OpenCL acceleration if possible }"
+"{help | | Print help}";
 int main(int argc, char* argv[])
 {
 // welcome message
-std::cout<<"****************************************************"<<std::endl;
-std::cout<<"* Retina demonstration : demonstrates the use of is a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
-std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
-std::cout<<"* As a summary, these are the retina model properties:"<<std::endl;
-std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
-std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
-std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
-std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
-std::cout<<"* for more information, reer to the following papers :"<<std::endl;
-std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
-std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
-std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
-std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
-std::cout<<"****************************************************"<<std::endl;
-std::cout<<" NOTE : this program generates the default retina parameters file 'RetinaDefaultParameters.xml'"<<std::endl;
-std::cout<<" => you can use this to fine tune parameters and load them if you save to file 'RetinaSpecificParameters.xml'"<<std::endl;
-// basic input arguments checking
-if (argc<2)
-{
-help("bad number of parameter");
-return -1;
-}
-bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
-std::string inputMediaType=argv[1];
+std::cout<<"****************************************************"<<std::endl
+<<"* Retina demonstration : demonstrates the use of is a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl
+<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl
+<<"* As a summary, these are the retina model properties:"<<std::endl
+<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl
+<<"* => high frequency spatio-temporal noise reduction"<<std::endl
+<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl
+<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl
+<<"* for more information, reer to the following papers :"<<std::endl
+<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl
+<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl
+<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl
+<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl
+<<"****************************************************"<<std::endl
+<<" NOTE : this program generates the default retina parameters file 'RetinaDefaultParameters.xml'"<<std::endl
+<<" => you can use this to fine tune parameters and load them if you save to file 'RetinaSpecificParameters.xml'"<<std::endl;
+cv::CommandLineParser parser(argc, argv, keys);
+if(!parser.check() || parser.has("help")) {
+parser.printMessage();
+return 0;
+}
+bool useLogSampling = parser.has("log"); // check if user wants retina log sampling processing
+bool useOCL = parser.has("ocl");
+cv::ocl::setUseOpenCL(useOCL);
+if(useOCL && !cv::ocl::useOpenCL())
+std::cout << "Failed to enable OpenCL\n";
 // declare the retina input buffer... that will be fed differently in regard of the input media
 cv::Mat inputFrame;
 cv::VideoCapture videoCapture; // in case a video media is used, its manager is declared here
-//////////////////////////////////////////////////////////////////////////////
-// checking input media type (still image, video file, live video acquisition)
-if (!strcmp(inputMediaType.c_str(), "-image") && argc >= 3)
-{
-std::cout<<"RetinaDemo: processing image "<<argv[2]<<std::endl;
-// image processing case
-inputFrame = cv::imread(std::string(argv[2]), 1); // load image in RGB mode
-}else
-if (!strcmp(inputMediaType.c_str(), "-video"))
-{
-if (argc == 2 || (argc == 3 && useLogSampling)) // attempt to grab images from a video capture device
-{
-videoCapture.open(0);
-}else// attempt to grab images from a video filestream
-{
-std::cout<<"RetinaDemo: processing video stream "<<argv[2]<<std::endl;
-videoCapture.open(argv[2]);
-}
-// grab a first frame to check if everything is ok
-videoCapture>>inputFrame;
-}else
-{
-// bad command parameter
-help("bad command parameter");
-return -1;
-}
+if(parser.has("video"))
+videoCapture.open(parser.get<cv::String>("video"));
+else if(parser.has("image"))
+inputFrame = cv::imread(parser.get<cv::String>("image"));
+else
+videoCapture.open(parser.get<int>("camera"));
-if (inputFrame.empty())
-{
-help("Input media could not be loaded, aborting");
-return -1;
-}
+videoCapture >> inputFrame;
+if(inputFrame.empty())
+{
+std::cout << "Failed to open media source\n";
+return 0;
+}
 //////////////////////////////////////////////////////////////////////////////
 // Program start in a try/catch safety context (Retina may throw errors)
@@ -114,10 +84,11 @@ int main(int argc, char* argv[])
 // if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
 if (useLogSampling)
 {
-myRetina = cv::bioinspired::createRetina(inputFrame.size(), true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
+myRetina = cv::bioinspired::Retina::create(inputFrame.size(),
+true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
 }
 else// -> else allocate "classical" retina :
-myRetina = cv::bioinspired::createRetina(inputFrame.size());
+myRetina = cv::bioinspired::Retina::create(inputFrame.size());
 // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
 myRetina->write("RetinaDefaultParameters.xml");
@@ -127,35 +98,43 @@ int main(int argc, char* argv[])
 myRetina->clearBuffers();
 // declare retina output buffers
-cv::Mat retinaOutput_parvo;
-cv::Mat retinaOutput_magno;
+cv::UMat retinaOutput_parvo;
+cv::UMat retinaOutput_magno;
 // processing loop with stop condition
-bool continueProcessing=true; // FIXME : not yet managed during process...
-while(continueProcessing)
+int64 totalTime = 0;
+int64 totalFrames = 0;
+while(true)
 {
 // if using video stream, then, grabbing a new frame, else, input remains the same
 if (videoCapture.isOpened())
 videoCapture>>inputFrame;
+if(inputFrame.empty())
+break;
 // run retina filter
+int64 frameTime = cv::getTickCount();
 myRetina->run(inputFrame);
 // Retrieve and display retina output
+frameTime = cv::getTickCount() - frameTime;
+totalTime += frameTime;
+totalFrames++;
 myRetina->getParvo(retinaOutput_parvo);
 myRetina->getMagno(retinaOutput_magno);
 cv::imshow("retina input", inputFrame);
 cv::imshow("Retina Parvo", retinaOutput_parvo);
 cv::imshow("Retina Magno", retinaOutput_magno);
-cv::waitKey(5);
+int key = cv::waitKey(5);
+if(key == 'q')
+break;
 }
-}catch(cv::Exception e)
+std::cout << "\nMean frame processing time: " << (totalTime / cv::getTickFrequency()) / totalFrames << " s" << std::endl;
+std::cout << "Retina demo end" << std::endl;
+}
+catch(const cv::Exception& e)
 {
 std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
 }
-// Program end message
-std::cout<<"Retina demo end"<<std::endl;
 return 0;
 }
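
Editor's note: two things are worth flagging in the rewritten sample. First, argument handling now goes through cv::CommandLineParser driven by the keys string above, so invocations look like `./retinaDemo -image=lena.jpg -log` rather than positional arguments. Second, the timing code converts accumulated ticks to seconds by dividing by cv::getTickFrequency() before averaging over the frame count. A minimal, self-contained sketch of the same parser pattern (hypothetical keys, not the sample's exact code):

    #include <opencv2/core.hpp>
    #include <iostream>

    int main(int argc, char* argv[]) {
        const std::string keys =
            "{image  |   | Input from image file }"
            "{camera | 0 | Index of input camera }"
            "{help   |   | Print help }";
        cv::CommandLineParser parser(argc, argv, keys);
        if (!parser.check() || parser.has("help")) { parser.printMessage(); return 0; }
        std::string imagePath;
        if (parser.has("image")) imagePath = parser.get<cv::String>("image");
        int cameraIndex = parser.get<int>("camera"); // defaults to 0
        std::cout << "image=" << imagePath << " camera=" << cameraIndex << std::endl;
        return 0;
    }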

@@ -326,7 +326,7 @@ void BasicRetinaFilter::_localLuminanceAdaptation(float *inputOutputFrame, const
 /* const float *localLuminancePTR=localLuminance;
 float *inputOutputFramePTR=inputOutputFrame;
-for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel, ++inputOutputFramePTR)
+for (unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel, ++inputOutputFramePTR)
 {
 float X0=*(localLuminancePTR++)*_localLuminanceFactor+_localLuminanceAddon;
 *(inputOutputFramePTR) = (_maxInputValue+X0)**inputOutputFramePTR/(*inputOutputFramePTR +X0+0.00000000001);
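
Editor's note: the bulk of the bioinspired changes from here on is mechanical: every `register` storage-class specifier is dropped. `register` was only ever an optimization hint with no semantic effect; it was deprecated in C++11 and removed in C++17, so deleting it is behavior-neutral, and modern compilers do their own register allocation anyway. An illustrative pair (not from the diff):

    // before: register float result = 0;   // rejected by C++17 compilers
    // after:  float result = 0;            // identical semantics, valid C++17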
@@ -353,7 +353,7 @@ void BasicRetinaFilter::_localLuminanceAdaptation(const float *inputFrame, const
 const float *localLuminancePTR=localLuminance;
 const float *inputFramePTR=inputFrame;
 float *outputFramePTR=outputFrame;
-for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel, ++inputFramePTR, ++outputFramePTR)
+for (unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel, ++inputFramePTR, ++outputFramePTR)
 {
 float X0=*(localLuminancePTR++)*_localLuminanceFactor+_localLuminanceAddon;
 // TODO : the following line can lead to a divide by zero ! A small offset is added, take care if the offset is too large in case of High Dynamic Range images which can use very small values...
@@ -370,7 +370,7 @@ void BasicRetinaFilter::_localLuminanceAdaptationPosNegValues(const float *input
 const float *inputFramePTR=inputFrame;
 float *outputFramePTR=outputFrame;
 float factor=_maxInputValue*2.0f/(float)CV_PI;
-for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel, ++inputFramePTR)
+for (unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel, ++inputFramePTR)
 {
 float X0=*(localLuminancePTR++)*_localLuminanceFactor+_localLuminanceAddon;
 *(outputFramePTR++) = factor*atan(*inputFramePTR/X0);//(_maxInputValue+X0)**inputFramePTR/(*inputFramePTR +X0);
@@ -455,8 +455,8 @@ void BasicRetinaFilter::_horizontalCausalFilter(float *outputFrame, unsigned int
 //#pragma omp parallel for
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float* outputPTR=outputFrame+(IDrowStart+IDrow)*_filterOutput.getNBcolumns();
-register float result=0;
+float* outputPTR=outputFrame+(IDrowStart+IDrow)*_filterOutput.getNBcolumns();
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 result = *(outputPTR)+ _a* result;
@@ -472,9 +472,9 @@ void BasicRetinaFilter::_horizontalCausalFilter_addInput(const float *inputFrame
 #else
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float* outputPTR=outputFrame+(IDrowStart+IDrow)*_filterOutput.getNBcolumns();
-register const float* inputPTR=inputFrame+(IDrowStart+IDrow)*_filterOutput.getNBcolumns();
-register float result=0;
+float* outputPTR=outputFrame+(IDrowStart+IDrow)*_filterOutput.getNBcolumns();
+const float* inputPTR=inputFrame+(IDrowStart+IDrow)*_filterOutput.getNBcolumns();
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 result = *(inputPTR++) + _tau**(outputPTR)+ _a* result;
@@ -493,8 +493,8 @@ void BasicRetinaFilter::_horizontalAnticausalFilter(float *outputFrame, unsigned
 #else
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float* outputPTR=outputFrame+(IDrowEnd-IDrow)*(_filterOutput.getNBcolumns())-1;
-register float result=0;
+float* outputPTR=outputFrame+(IDrowEnd-IDrow)*(_filterOutput.getNBcolumns())-1;
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 result = *(outputPTR)+ _a* result;
@@ -511,8 +511,8 @@ void BasicRetinaFilter::_horizontalAnticausalFilter_multGain(float *outputFrame,
 //#pragma omp parallel for
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float* outputPTR=outputFrame+(IDrowEnd-IDrow)*(_filterOutput.getNBcolumns())-1;
-register float result=0;
+float* outputPTR=outputFrame+(IDrowEnd-IDrow)*(_filterOutput.getNBcolumns())-1;
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 result = *(outputPTR)+ _a* result;
@@ -529,8 +529,8 @@ void BasicRetinaFilter::_verticalCausalFilter(float *outputFrame, unsigned int I
 #else
 for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=outputFrame+IDcolumn;
+float result=0;
+float *outputPTR=outputFrame+IDcolumn;
 for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index)
 {
@@ -551,8 +551,8 @@ void BasicRetinaFilter::_verticalAnticausalFilter(float *outputFrame, unsigned i
 //#pragma omp parallel for
 for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=offset+IDcolumn;
+float result=0;
+float *outputPTR=offset+IDcolumn;
 for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index)
 {
@@ -574,8 +574,8 @@ void BasicRetinaFilter::_verticalAnticausalFilter_multGain(float *outputFrame, u
 //#pragma omp parallel for
 for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=offset+IDcolumn;
+float result=0;
+float *outputPTR=offset+IDcolumn;
 for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index)
 {
@@ -594,11 +594,11 @@ void BasicRetinaFilter::_verticalAnticausalFilter_multGain(float *outputFrame, u
 // -> squaring horizontal causal filter
 void BasicRetinaFilter::_squaringHorizontalCausalFilter(const float *inputFrame, float *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd)
 {
-register float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
-register const float* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns();
+float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
+const float* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns();
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float result=0;
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 result = *(inputPTR)**(inputPTR) + _tau**(outputPTR)+ _a* result;
@@ -611,12 +611,12 @@ void BasicRetinaFilter::_squaringHorizontalCausalFilter(const float *inputFrame,
 // vertical anticausal filter that returns the mean value of its result
 float BasicRetinaFilter::_verticalAnticausalFilter_returnMeanValue(float *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd)
 {
-register float meanValue=0;
+float meanValue=0;
 float* offset=outputFrame+_filterOutput.getNBpixels()-_filterOutput.getNBcolumns();
 for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=offset+IDcolumn;
+float result=0;
+float *outputPTR=offset+IDcolumn;
 for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index)
 {
@@ -653,12 +653,12 @@ void BasicRetinaFilter::_localSquaringSpatioTemporalLPfilter(const float *inputF
 // this function take an image in input and squares it befor computing
 void BasicRetinaFilter::_local_squaringHorizontalCausalFilter(const float *inputFrame, float *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd, const unsigned int *integrationAreas)
 {
-register float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
-register const float* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns();
+float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
+const float* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns();
 const unsigned int *integrationAreasPTR=integrationAreas;
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float result=0;
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 if (*(integrationAreasPTR++))
@@ -675,12 +675,12 @@ void BasicRetinaFilter::_local_squaringHorizontalCausalFilter(const float *input
 void BasicRetinaFilter::_local_horizontalAnticausalFilter(float *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd, const unsigned int *integrationAreas)
 {
-register float* outputPTR=outputFrame+IDrowEnd*(_filterOutput.getNBcolumns())-1;
+float* outputPTR=outputFrame+IDrowEnd*(_filterOutput.getNBcolumns())-1;
 const unsigned int *integrationAreasPTR=integrationAreas;
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float result=0;
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 if (*(integrationAreasPTR++))
@@ -699,8 +699,8 @@ void BasicRetinaFilter::_local_verticalCausalFilter(float *outputFrame, unsigned
 for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=outputFrame+IDcolumn;
+float result=0;
+float *outputPTR=outputFrame+IDcolumn;
 for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index)
 {
@@ -722,8 +722,8 @@ void BasicRetinaFilter::_local_verticalAnticausalFilter_multGain(float *outputFr
 for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=offset+IDcolumn;
+float result=0;
+float *outputPTR=offset+IDcolumn;
 for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index)
 {
@@ -786,11 +786,11 @@ void BasicRetinaFilter::_spatiotemporalLPfilter_Irregular(const float *inputFram
 // horizontal causal filter wich runs on its input buffer
 void BasicRetinaFilter::_horizontalCausalFilter_Irregular(float *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd)
 {
-register float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
-register const float* spatialConstantPTR=&_progressiveSpatialConstant[0]+IDrowStart*_filterOutput.getNBcolumns();
+float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
+const float* spatialConstantPTR=&_progressiveSpatialConstant[0]+IDrowStart*_filterOutput.getNBcolumns();
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float result=0;
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 result = *(outputPTR)+ *(spatialConstantPTR++)* result;
@@ -802,12 +802,12 @@ void BasicRetinaFilter::_horizontalCausalFilter_Irregular(float *outputFrame, un
 // horizontal causal filter with add input
 void BasicRetinaFilter::_horizontalCausalFilter_Irregular_addInput(const float *inputFrame, float *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd)
 {
-register float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
-register const float* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns();
-register const float* spatialConstantPTR=&_progressiveSpatialConstant[0]+IDrowStart*_filterOutput.getNBcolumns();
+float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
+const float* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns();
+const float* spatialConstantPTR=&_progressiveSpatialConstant[0]+IDrowStart*_filterOutput.getNBcolumns();
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float result=0;
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 result = *(inputPTR++) + _tau**(outputPTR)+ *(spatialConstantPTR++)* result;
@@ -823,12 +823,12 @@ void BasicRetinaFilter::_horizontalAnticausalFilter_Irregular(float *outputFrame
 #ifdef MAKE_PARALLEL
 cv::parallel_for_(cv::Range(IDrowStart,IDrowEnd), Parallel_horizontalAnticausalFilter_Irregular(outputFrame, spatialConstantBuffer, IDrowEnd, _filterOutput.getNBcolumns()));
 #else
-register float* outputPTR=outputFrame+IDrowEnd*(_filterOutput.getNBcolumns())-1;
-register const float* spatialConstantPTR=spatialConstantBuffer+IDrowEnd*(_filterOutput.getNBcolumns())-1;
+float* outputPTR=outputFrame+IDrowEnd*(_filterOutput.getNBcolumns())-1;
+const float* spatialConstantPTR=spatialConstantBuffer+IDrowEnd*(_filterOutput.getNBcolumns())-1;
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float result=0;
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 result = *(outputPTR)+ *(spatialConstantPTR--)* result;
@@ -847,9 +847,9 @@ void BasicRetinaFilter::_verticalCausalFilter_Irregular(float *outputFrame, unsi
 #else
 for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=outputFrame+IDcolumn;
-register const float *spatialConstantPTR=spatialConstantBuffer+IDcolumn;
+float result=0;
+float *outputPTR=outputFrame+IDcolumn;
+const float *spatialConstantPTR=spatialConstantBuffer+IDcolumn;
 for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index)
 {
 result = *(outputPTR) + *(spatialConstantPTR) * result;
@@ -869,10 +869,10 @@ void BasicRetinaFilter::_verticalAnticausalFilter_Irregular_multGain(float *outp
 const float* gainOffset=&_progressiveGain[0]+_filterOutput.getNBpixels()-_filterOutput.getNBcolumns();
 for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=outputOffset+IDcolumn;
-register const float *spatialConstantPTR=constantOffset+IDcolumn;
-register const float *progressiveGainPTR=gainOffset+IDcolumn;
+float result=0;
+float *outputPTR=outputOffset+IDcolumn;
+const float *spatialConstantPTR=constantOffset+IDcolumn;
+const float *progressiveGainPTR=gainOffset+IDcolumn;
 for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index)
 {
 result = *(outputPTR) + *(spatialConstantPTR) * result;

@@ -498,8 +498,8 @@ namespace bioinspired
 #endif
 for (int IDrow=r.start; IDrow!=r.end; ++IDrow)
 {
-register float* outputPTR=outputFrame+(IDrowEnd-IDrow)*(nbColumns)-1;
-register float result=0;
+float* outputPTR=outputFrame+(IDrowEnd-IDrow)*(nbColumns)-1;
+float result=0;
 for (unsigned int index=0; index<nbColumns; ++index)
 {
 result = *(outputPTR)+ filterParam_a* result;
@@ -523,9 +523,9 @@ namespace bioinspired
 virtual void operator()( const Range& r ) const {
 for (int IDrow=r.start; IDrow!=r.end; ++IDrow)
 {
-register float* outputPTR=outputFrame+(IDrowStart+IDrow)*nbColumns;
-register const float* inputPTR=inputFrame+(IDrowStart+IDrow)*nbColumns;
-register float result=0;
+float* outputPTR=outputFrame+(IDrowStart+IDrow)*nbColumns;
+const float* inputPTR=inputFrame+(IDrowStart+IDrow)*nbColumns;
+float result=0;
 for (unsigned int index=0; index<nbColumns; ++index)
 {
 result = *(inputPTR++) + filterParam_tau**(outputPTR)+ filterParam_a* result;
@@ -548,8 +548,8 @@ namespace bioinspired
 virtual void operator()( const Range& r ) const {
 for (int IDcolumn=r.start; IDcolumn!=r.end; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=outputFrame+IDcolumn;
+float result=0;
+float *outputPTR=outputFrame+IDcolumn;
 for (unsigned int index=0; index<nbRows; ++index)
 {
@@ -576,8 +576,8 @@ namespace bioinspired
 float* offset=outputFrame+nbColumns*nbRows-nbColumns;
 for (int IDcolumn=r.start; IDcolumn!=r.end; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=offset+IDcolumn;
+float result=0;
+float *outputPTR=offset+IDcolumn;
 for (unsigned int index=0; index<nbRows; ++index)
 {
@@ -604,7 +604,7 @@ namespace bioinspired
 const float *localLuminancePTR=localLuminance+r.start;
 const float *inputFramePTR=inputFrame+r.start;
 float *outputFramePTR=outputFrame+r.start;
-for (register int IDpixel=r.start ; IDpixel!=r.end ; ++IDpixel, ++inputFramePTR, ++outputFramePTR)
+for (int IDpixel=r.start ; IDpixel!=r.end ; ++IDpixel, ++inputFramePTR, ++outputFramePTR)
 {
 float X0=*(localLuminancePTR++)*localLuminanceFactor+localLuminanceAddon;
 // TODO : the following line can lead to a divide by zero ! A small offset is added, take care if the offset is too large in case of High Dynamic Range images which can use very small values...
@@ -630,9 +630,9 @@ namespace bioinspired
 for (int IDrow=r.start; IDrow!=r.end; ++IDrow)
 {
-register float* outputPTR=outputFrame+(IDrowEnd-IDrow)*(nbColumns)-1;
-register const float* spatialConstantPTR=spatialConstantBuffer+(IDrowEnd-IDrow)*(nbColumns)-1;
-register float result=0;
+float* outputPTR=outputFrame+(IDrowEnd-IDrow)*(nbColumns)-1;
+const float* spatialConstantPTR=spatialConstantBuffer+(IDrowEnd-IDrow)*(nbColumns)-1;
+float result=0;
 for (unsigned int index=0; index<nbColumns; ++index)
 {
 result = *(outputPTR)+ *(spatialConstantPTR--)* result;
@@ -655,9 +655,9 @@ namespace bioinspired
 virtual void operator()( const Range& r ) const {
 for (int IDcolumn=r.start; IDcolumn!=r.end; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=outputFrame+IDcolumn;
-register const float* spatialConstantPTR=spatialConstantBuffer+IDcolumn;
+float result=0;
+float *outputPTR=outputFrame+IDcolumn;
+const float* spatialConstantPTR=spatialConstantBuffer+IDcolumn;
 for (unsigned int index=0; index<nbRows; ++index)
 {
 result = *(outputPTR) + *(spatialConstantPTR) * result;

@@ -410,7 +410,7 @@ std::valarray<float> &ImageLogPolProjection::runProjection(const std::valarray<f
 _spatiotemporalLPfilter_Irregular(&_irregularLPfilteredFrame[0], &_tempBuffer[0]+_filterOutput.getNBpixels()*2);
 // applying image projection/resampling
-register unsigned int *transformTablePTR=&_transformTable[0];
+unsigned int *transformTablePTR=&_transformTable[0];
 for (unsigned int i=0 ; i<_usefullpixelIndex ; i+=2, transformTablePTR+=2)
 {
 #ifdef IMAGELOGPOLPROJECTION_DEBUG
@@ -430,7 +430,7 @@ std::valarray<float> &ImageLogPolProjection::runProjection(const std::valarray<f
 _spatiotemporalLPfilter_Irregular(get_data(inputFrame), &_irregularLPfilteredFrame[0]);
 _spatiotemporalLPfilter_Irregular(&_irregularLPfilteredFrame[0], &_irregularLPfilteredFrame[0]);
 // applying image projection/resampling
-register unsigned int *transformTablePTR=&_transformTable[0];
+unsigned int *transformTablePTR=&_transformTable[0];
 for (unsigned int i=0 ; i<_usefullpixelIndex ; i+=2, transformTablePTR+=2)
 {
 #ifdef IMAGELOGPOLPROJECTION_DEBUG

@@ -158,12 +158,12 @@ void MagnoRetinaFilter::_amacrineCellsComputing(const float *OPL_ON, const float
 #ifdef MAKE_PARALLEL
 cv::parallel_for_(cv::Range(0,_filterOutput.getNBpixels()), Parallel_amacrineCellsComputing(OPL_ON, OPL_OFF, &_previousInput_ON[0], &_previousInput_OFF[0], &_amacrinCellsTempOutput_ON[0], &_amacrinCellsTempOutput_OFF[0], _temporalCoefficient));
 #else
-register const float *OPL_ON_PTR=OPL_ON;
-register const float *OPL_OFF_PTR=OPL_OFF;
-register float *previousInput_ON_PTR= &_previousInput_ON[0];
-register float *previousInput_OFF_PTR= &_previousInput_OFF[0];
-register float *amacrinCellsTempOutput_ON_PTR= &_amacrinCellsTempOutput_ON[0];
-register float *amacrinCellsTempOutput_OFF_PTR= &_amacrinCellsTempOutput_OFF[0];
+const float *OPL_ON_PTR=OPL_ON;
+const float *OPL_OFF_PTR=OPL_OFF;
+float *previousInput_ON_PTR= &_previousInput_ON[0];
+float *previousInput_OFF_PTR= &_previousInput_OFF[0];
+float *amacrinCellsTempOutput_ON_PTR= &_amacrinCellsTempOutput_ON[0];
+float *amacrinCellsTempOutput_OFF_PTR= &_amacrinCellsTempOutput_OFF[0];
 for (unsigned int IDpixel=0 ; IDpixel<this->getNBpixels(); ++IDpixel)
 {
@@ -200,10 +200,10 @@ const std::valarray<float> &MagnoRetinaFilter::runFilter(const std::valarray<flo
 _localLuminanceAdaptation(&_magnoXOutputOFF[0], &_localProcessBufferOFF[0]);
 /* Compute MagnoY */
-register float *magnoYOutput= &(*_magnoYOutput)[0];
-register float *magnoXOutputON_PTR= &_magnoXOutputON[0];
-register float *magnoXOutputOFF_PTR= &_magnoXOutputOFF[0];
-for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel)
+float *magnoYOutput= &(*_magnoYOutput)[0];
+float *magnoXOutputON_PTR= &_magnoXOutputON[0];
+float *magnoXOutputOFF_PTR= &_magnoXOutputOFF[0];
+for (unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel)
 *(magnoYOutput++)=*(magnoXOutputON_PTR++)+*(magnoXOutputOFF_PTR++);
 return (*_magnoYOutput);

@@ -212,12 +212,12 @@ namespace bioinspired
 :OPL_ON(OPL_ON_PTR), OPL_OFF(OPL_OFF_PTR), previousInput_ON(previousInput_ON_PTR), previousInput_OFF(previousInput_OFF_PTR), amacrinCellsTempOutput_ON(amacrinCellsTempOutput_ON_PTR), amacrinCellsTempOutput_OFF(amacrinCellsTempOutput_OFF_PTR), temporalCoefficient(temporalCoefficientVal) {}
 virtual void operator()( const Range& r ) const {
-register const float *OPL_ON_PTR=OPL_ON+r.start;
-register const float *OPL_OFF_PTR=OPL_OFF+r.start;
-register float *previousInput_ON_PTR= previousInput_ON+r.start;
-register float *previousInput_OFF_PTR= previousInput_OFF+r.start;
-register float *amacrinCellsTempOutput_ON_PTR= amacrinCellsTempOutput_ON+r.start;
-register float *amacrinCellsTempOutput_OFF_PTR= amacrinCellsTempOutput_OFF+r.start;
+const float *OPL_ON_PTR=OPL_ON+r.start;
+const float *OPL_OFF_PTR=OPL_OFF+r.start;
+float *previousInput_ON_PTR= previousInput_ON+r.start;
+float *previousInput_OFF_PTR= previousInput_OFF+r.start;
+float *amacrinCellsTempOutput_ON_PTR= amacrinCellsTempOutput_ON+r.start;
+float *amacrinCellsTempOutput_OFF_PTR= amacrinCellsTempOutput_OFF+r.start;
 for (int IDpixel=r.start ; IDpixel!=r.end; ++IDpixel)
 {

@@ -191,11 +191,11 @@ const std::valarray<float> &ParvoRetinaFilter::runFilter(const std::valarray<flo
 //
 //// loop that makes the difference between photoreceptor cells output and horizontal cells
 //// positive part goes on the ON way, negative pat goes on the OFF way
-register float *parvocellularOutputONminusOFF_PTR=&(*_parvocellularOutputONminusOFF)[0];
-register float *parvocellularOutputON_PTR=&_parvocellularOutputON[0];
-register float *parvocellularOutputOFF_PTR=&_parvocellularOutputOFF[0];
+float *parvocellularOutputONminusOFF_PTR=&(*_parvocellularOutputONminusOFF)[0];
+float *parvocellularOutputON_PTR=&_parvocellularOutputON[0];
+float *parvocellularOutputOFF_PTR=&_parvocellularOutputOFF[0];
-for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel)
+for (unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel)
 *(parvocellularOutputONminusOFF_PTR++)= (*(parvocellularOutputON_PTR++)-*(parvocellularOutputOFF_PTR++));
 }
 return (*_parvocellularOutputONminusOFF);
@@ -217,7 +217,7 @@ void ParvoRetinaFilter::_OPL_OnOffWaysComputing() // WARNING : this method requi
 float *parvocellularOutputOFF_PTR= &_parvocellularOutputOFF[0];
 // compute bipolar cells response equal to photoreceptors minus horizontal cells response
 // and copy the result on parvo cellular outputs... keeping time before their local contrast adaptation for final result
-for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel)
+for (unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel)
 {
 float pixelDifference = *(photoreceptorsOutput_PTR++) -*(horizontalCellsOutput_PTR++);
 // test condition to allow write pixelDifference in ON or OFF buffer and 0 in the over

@@ -293,6 +293,7 @@ private:
 bool _convertCvMat2ValarrayBuffer(InputArray inputMatToConvert, std::valarray<float> &outputValarrayMatrix);
+bool _wasOCLRunCalled;
 #ifdef HAVE_OPENCL
 ocl::RetinaOCLImpl* _ocl_retina;
@@ -305,12 +306,12 @@
 };
 // smart pointers allocation :
-Ptr<Retina> createRetina(Size inputSize)
+Ptr<Retina> Retina::create(Size inputSize)
 {
 return makePtr<RetinaImpl>(inputSize);
 }
-Ptr<Retina> createRetina(Size inputSize, const bool colorMode, int colorSamplingMethod, const bool useRetinaLogSampling, const float reductionFactor, const float samplingStrenght)
+Ptr<Retina> Retina::create(Size inputSize, const bool colorMode, int colorSamplingMethod, const bool useRetinaLogSampling, const float reductionFactor, const float samplingStrenght)
 {
 return makePtr<RetinaImpl>(inputSize, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
 }
@@ -334,7 +335,7 @@ RetinaImpl::RetinaImpl(const cv::Size inputSz, const bool colorMode, int colorSa
 _init(inputSz, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
 #ifdef HAVE_OPENCL
 _ocl_retina = 0;
-if (inputSz.width % 4 == 0)
+if (inputSz.width % 4 == 0 && !useRetinaLogSampling)
 _ocl_retina = new ocl::RetinaOCLImpl(inputSz, colorMode, colorSamplingMethod,
 useRetinaLogSampling, reductionFactor, samplingStrenght);
 #endif
@@ -564,6 +565,7 @@ void RetinaImpl::setupIPLMagnoChannel(const bool normaliseOutput, const float pa
 bool RetinaImpl::ocl_run(InputArray inputMatToConvert)
 {
 _ocl_retina->run(inputMatToConvert);
+_wasOCLRunCalled = true;
 return true;
 }
 #endif
@@ -572,6 +574,7 @@ void RetinaImpl::run(InputArray inputMatToConvert)
 {
 CV_OCL_RUN((_ocl_retina != 0), ocl_run(inputMatToConvert));
+_wasOCLRunCalled = false;
 // first convert input image to the compatible format : std::valarray<float>
 const bool colorMode = _convertCvMat2ValarrayBuffer(inputMatToConvert.getMat(), _inputBuffer);
 // process the retina
@@ -603,6 +606,7 @@ void RetinaImpl::applyFastToneMapping(InputArray inputImage, OutputArray outputT
 #ifdef HAVE_OPENCL
 bool RetinaImpl::ocl_getParvo(OutputArray retinaOutput_parvo)
 {
+CV_Assert(_wasOCLRunCalled);
 _ocl_retina->getParvo(retinaOutput_parvo);
 return true;
 }
@@ -611,6 +615,7 @@ bool RetinaImpl::ocl_getParvo(OutputArray retinaOutput_parvo)
 void RetinaImpl::getParvo(OutputArray retinaOutput_parvo)
 {
 CV_OCL_RUN((_ocl_retina != 0) && retinaOutput_parvo.isUMat(), ocl_getParvo(retinaOutput_parvo));
+CV_Assert(!_wasOCLRunCalled);
 if (_retinaFilter->getColorMode())
 {
@@ -627,6 +632,7 @@ void RetinaImpl::getParvo(OutputArray retinaOutput_parvo)
 #ifdef HAVE_OPENCL
 bool RetinaImpl::ocl_getMagno(OutputArray retinaOutput_magno)
 {
+CV_Assert(_wasOCLRunCalled);
 _ocl_retina->getMagno(retinaOutput_magno);
 return true;
 }
@@ -635,6 +641,7 @@ bool RetinaImpl::ocl_getMagno(OutputArray retinaOutput_magno)
 void RetinaImpl::getMagno(OutputArray retinaOutput_magno)
 {
 CV_OCL_RUN((_ocl_retina != 0) && retinaOutput_magno.isUMat(), ocl_getMagno(retinaOutput_magno));
+CV_Assert(!_wasOCLRunCalled);
 // reallocate output buffer (if necessary)
 _convertValarrayBuffer2cvMat(_retinaFilter->getMovingContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_magno);
@@ -644,6 +651,7 @@ void RetinaImpl::getMagno(OutputArray retinaOutput_magno)
 #ifdef HAVE_OPENCL
 bool RetinaImpl::ocl_getMagnoRAW(OutputArray magnoOutputBufferCopy)
 {
+CV_Assert(_wasOCLRunCalled);
 _ocl_retina->getMagnoRAW(magnoOutputBufferCopy);
 return true;
 }
@@ -653,7 +661,7 @@ bool RetinaImpl::ocl_getMagnoRAW(OutputArray magnoOutputBufferCopy)
 void RetinaImpl::getMagnoRAW(OutputArray magnoOutputBufferCopy){
 CV_OCL_RUN((_ocl_retina != 0) && magnoOutputBufferCopy.isUMat(), ocl_getMagnoRAW(magnoOutputBufferCopy));
+CV_Assert(!_wasOCLRunCalled);
 // get magno channel header
 const cv::Mat magnoChannel=cv::Mat(getMagnoRAW());
 // copy data
@@ -663,6 +671,7 @@ void RetinaImpl::getMagnoRAW(OutputArray magnoOutputBufferCopy){
 #ifdef HAVE_OPENCL
 bool RetinaImpl::ocl_getParvoRAW(OutputArray parvoOutputBufferCopy)
 {
+CV_Assert(_wasOCLRunCalled);
 _ocl_retina->getParvoRAW(parvoOutputBufferCopy);
 return true;
 }
@@ -671,7 +680,7 @@ bool RetinaImpl::ocl_getParvoRAW(OutputArray parvoOutputBufferCopy)
 void RetinaImpl::getParvoRAW(OutputArray parvoOutputBufferCopy){
 CV_OCL_RUN((_ocl_retina != 0) && parvoOutputBufferCopy.isUMat(), ocl_getParvoRAW(parvoOutputBufferCopy));
+CV_Assert(!_wasOCLRunCalled);
 // get parvo channel header
 const cv::Mat parvoChannel=cv::Mat(getParvoRAW());
 // copy data
@@ -680,12 +689,14 @@ void RetinaImpl::getParvoRAW(OutputArray parvoOutputBufferCopy){
 // original API level data accessors : get buffers addresses...
 const Mat RetinaImpl::getMagnoRAW() const {
+CV_Assert(!_wasOCLRunCalled);
 // create a cv::Mat header for the valarray
 return Mat((int)_retinaFilter->getMovingContours().size(),1, CV_32F, (void*)get_data(_retinaFilter->getMovingContours()));
 }
 const Mat RetinaImpl::getParvoRAW() const {
+CV_Assert(!_wasOCLRunCalled);
 if (_retinaFilter->getColorMode()) // check if color mode is enabled
 {
 // create a cv::Mat table (for RGB planes as a single vector)
@@ -699,6 +710,7 @@ const Mat RetinaImpl::getParvoRAW() const {
 // private method called by constructors
 void RetinaImpl::_init(const cv::Size inputSz, const bool colorMode, int colorSamplingMethod, const bool useRetinaLogSampling, const float reductionFactor, const float samplingStrenght)
 {
+_wasOCLRunCalled = false;
 // basic error check
 if (inputSz.height*inputSz.width <= 0)
 throw cv::Exception(-1, "Bad retina size setup : size height and with must be superior to zero", "RetinaImpl::setup", "Retina.cpp", 0);
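
Editor's note: the retina.cpp hunks above combine three changes: the factories become Retina::create, the OpenCL backend is no longer instantiated when log sampling is requested (the width `% 4` alignment test alone no longer suffices), and the new _wasOCLRunCalled flag guards every output accessor so CPU results are never fetched after an OpenCL run and vice versa. A minimal sketch of that guard pattern (hypothetical simplified class, plain assert instead of CV_Assert):

    #include <cassert>

    struct RetinaSketch {
        bool wasOCLRunCalled = false;
        void runCPU()      { wasOCLRunCalled = false; /* fill CPU buffers */ }
        void runOCL()      { wasOCLRunCalled = true;  /* fill OpenCL buffers */ }
        void getParvoCPU() { assert(!wasOCLRunCalled); /* read CPU buffers */ }
        void getParvoOCL() { assert(wasOCLRunCalled);  /* read OpenCL buffers */ }
    };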

@@ -239,7 +239,7 @@ void RetinaColor::_initColorSampling()
 _spatiotemporalLPfilter(&_RGBmosaic[0]+_filterOutput.getNBpixels(), &_colorLocalDensity[0]+_filterOutput.getNBpixels());
 _spatiotemporalLPfilter(&_RGBmosaic[0]+_filterOutput.getDoubleNBpixels(), &_colorLocalDensity[0]+_filterOutput.getDoubleNBpixels());
 unsigned int maxNBpixels=3*_filterOutput.getNBpixels();
-register float *colorLocalDensityPTR=&_colorLocalDensity[0];
+float *colorLocalDensityPTR=&_colorLocalDensity[0];
 for (unsigned int i=0;i<maxNBpixels;++i, ++colorLocalDensityPTR)
 *colorLocalDensityPTR=1.f/ *colorLocalDensityPTR;
@@ -258,8 +258,8 @@ void RetinaColor::runColorDemultiplexing(const std::valarray<float> &multiplexed
 // -> first set demultiplexed frame to 0
 _demultiplexedTempBuffer=0;
 // -> demultiplex process
-register unsigned int *colorSamplingPRT=&_colorSampling[0];
-register const float *multiplexedColorFramePtr=get_data(multiplexedColorFrame);
+unsigned int *colorSamplingPRT=&_colorSampling[0];
+const float *multiplexedColorFramePtr=get_data(multiplexedColorFrame);
 for (unsigned int indexa=0; indexa<_filterOutput.getNBpixels() ; ++indexa)
 _demultiplexedTempBuffer[*(colorSamplingPRT++)]=*(multiplexedColorFramePtr++);
@@ -280,9 +280,9 @@ void RetinaColor::runColorDemultiplexing(const std::valarray<float> &multiplexed
 }*/
 // normalize by the photoreceptors local density and retrieve the local luminance
-register float *chrominancePTR= &_chrominance[0];
-register float *colorLocalDensityPTR= &_colorLocalDensity[0];
-register float *luminance= &(*_luminance)[0];
+float *chrominancePTR= &_chrominance[0];
+float *colorLocalDensityPTR= &_colorLocalDensity[0];
+float *luminance= &(*_luminance)[0];
 if (!adaptiveFiltering)// compute the gradient on the luminance
 {
 if (_samplingMethod==RETINA_COLOR_RANDOM)
@@ -326,7 +326,7 @@ void RetinaColor::runColorDemultiplexing(const std::valarray<float> &multiplexed
 }else
 {
-register const float *multiplexedColorFramePTR= get_data(multiplexedColorFrame);
+const float *multiplexedColorFramePTR= get_data(multiplexedColorFrame);
 for (unsigned int indexc=0; indexc<_filterOutput.getNBpixels() ; ++indexc, ++chrominancePTR, ++colorLocalDensityPTR, ++luminance, ++multiplexedColorFramePTR)
 {
 // normalize by photoreceptors density
@@ -412,8 +412,8 @@ void RetinaColor::runColorDemultiplexing(const std::valarray<float> &multiplexed
 void RetinaColor::runColorMultiplexing(const std::valarray<float> &demultiplexedInputFrame, std::valarray<float> &multiplexedFrame)
 {
 // multiply each color layer by its bayer mask
-register unsigned int *colorSamplingPTR= &_colorSampling[0];
-register float *multiplexedFramePTR= &multiplexedFrame[0];
+unsigned int *colorSamplingPTR= &_colorSampling[0];
+float *multiplexedFramePTR= &multiplexedFrame[0];
 for (unsigned int indexp=0; indexp<_filterOutput.getNBpixels(); ++indexp)
 *(multiplexedFramePTR++)=demultiplexedInputFrame[*(colorSamplingPTR++)];
 }
@@ -440,8 +440,8 @@ void RetinaColor::clipRGBOutput_0_maxInputValue(float *inputOutputBuffer, const
 #ifdef MAKE_PARALLEL // call the TemplateBuffer TBB clipping method
 cv::parallel_for_(cv::Range(0,_filterOutput.getNBpixels()*3), Parallel_clipBufferValues<float>(inputOutputBuffer, 0, maxInputValue));
 #else
-register float *inputOutputBufferPTR=inputOutputBuffer;
-for (register unsigned int jf = 0; jf < _filterOutput.getNBpixels()*3; ++jf, ++inputOutputBufferPTR)
+float *inputOutputBufferPTR=inputOutputBuffer;
+for (unsigned int jf = 0; jf < _filterOutput.getNBpixels()*3; ++jf, ++inputOutputBufferPTR)
 {
 if (*inputOutputBufferPTR>maxInputValue)
 *inputOutputBufferPTR=maxInputValue;
@@ -587,12 +587,12 @@ void RetinaColor::_adaptiveHorizontalCausalFilter_addInput(const float *inputFra
 #ifdef MAKE_PARALLEL
 cv::parallel_for_(cv::Range(IDrowStart,IDrowEnd), Parallel_adaptiveHorizontalCausalFilter_addInput(inputFrame, outputFrame, &_imageGradient[0], _filterOutput.getNBcolumns()));
 #else
-register float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
-register const float* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns();
-register const float *imageGradientPTR= &_imageGradient[0]+IDrowStart*_filterOutput.getNBcolumns();
+float* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns();
+const float* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns();
+const float *imageGradientPTR= &_imageGradient[0]+IDrowStart*_filterOutput.getNBcolumns();
 for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow)
 {
-register float result=0;
+float result=0;
 for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index)
 {
 //std::cout<<(*imageGradientPTR)<<" ";
@@ -616,9 +616,9 @@ void RetinaColor::_adaptiveVerticalAnticausalFilter_multGain(float *outputFrame,
 for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn)
 {
-register float result=0;
-register float *outputPTR=outputOffset+IDcolumn;
-register float *imageGradientPTR=gradOffset+IDcolumn;
+float result=0;
+float *outputPTR=outputOffset+IDcolumn;
+float *imageGradientPTR=gradOffset+IDcolumn;
 for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index)
 {
 result = *(outputPTR) + (*(imageGradientPTR)) * result;

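Editor's note: these hunks drop the `register` storage-class specifier, which compilers have long ignored; it was deprecated in C++11 and removed in C++17, so the pointer-walking loops behave identically without it. A minimal standalone sketch of the same adaptive causal filter pattern (hypothetical names, not the module's actual API):

#include <cstddef>

// First-order adaptive causal filter over one row, as in the hunks above:
// each output sample feeds back through a per-pixel gradient coefficient.
static void adaptiveCausalFilterRow(const float *input, const float *gradient,
                                    float *output, std::size_t nbColumns)
{
    float result = 0.0f;
    for (std::size_t i = 0; i < nbColumns; ++i)
    {
        // result = in + g * previous result (a spatially varying IIR step)
        result = input[i] + gradient[i] * result;
        output[i] = result;
    }
}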
@ -291,12 +291,12 @@ namespace bioinspired
virtual void operator()( const Range& r ) const
{
register float* outputPTR=outputFrame+r.start*nbColumns;
register const float* inputPTR=inputFrame+r.start*nbColumns;
register const float *imageGradientPTR= imageGradient+r.start*nbColumns;
float* outputPTR=outputFrame+r.start*nbColumns;
const float* inputPTR=inputFrame+r.start*nbColumns;
const float *imageGradientPTR= imageGradient+r.start*nbColumns;
for (int IDrow=r.start; IDrow!=r.end; ++IDrow)
{
register float result=0;
float result=0;
for (unsigned int index=0; index<nbColumns; ++index)
{
result = *(inputPTR++) + (*imageGradientPTR++)* result;
@ -322,9 +322,9 @@ namespace bioinspired
const float* gradOffset= imageGradient+nbColumns*nbRows-nbColumns;
for (int IDcolumn=r.start; IDcolumn!=r.end; ++IDcolumn)
{
register float result=0;
register float *outputPTR=offset+IDcolumn;
register const float *imageGradientPTR=gradOffset+IDcolumn;
float result=0;
float *outputPTR=offset+IDcolumn;
const float *imageGradientPTR=gradOffset+IDcolumn;
for (unsigned int index=0; index<nbRows; ++index)
{
result = *(outputPTR) + *(imageGradientPTR) * result;

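Editor's note: the Parallel_* functors above are bodies for cv::parallel_for_: each worker receives a sub-Range and advances its own pointer offsets, so no synchronization is needed. A minimal sketch, assuming OpenCV's ParallelLoopBody interface as the base class (the functor name here is hypothetical):

#include <algorithm>
#include <opencv2/core.hpp>

// Clip a buffer to [lo, hi] in parallel; each invocation handles [r.start, r.end).
struct ClipRange : public cv::ParallelLoopBody
{
    float *buf; float lo, hi;
    ClipRange(float *b, float l, float h) : buf(b), lo(l), hi(h) {}
    virtual void operator()(const cv::Range &r) const
    {
        for (int i = r.start; i != r.end; ++i)
            buf[i] = std::min(std::max(buf[i], lo), hi);
    }
};
// usage: cv::parallel_for_(cv::Range(0, n), ClipRange(data, 0.f, maxValue));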
@ -309,7 +309,7 @@ void _runRGBToneMapping(const std::valarray<float> &RGBimageInput, std::valarray
};
CV_EXPORTS Ptr<RetinaFastToneMapping> createRetinaFastToneMapping(Size inputSize)
Ptr<RetinaFastToneMapping> RetinaFastToneMapping::create(Size inputSize)
{
return makePtr<RetinaFastToneMappingImpl>(inputSize);
}

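Editor's note: the free factory createRetinaFastToneMapping becomes the static RetinaFastToneMapping::create shown in the hunk, matching the Algorithm-style factories used elsewhere in the library. A hedged usage sketch (the size is arbitrary):

#include <opencv2/bioinspired.hpp>

void makeToneMapper()
{
    // New-style construction; replaces the old createRetinaFastToneMapping(inputSize).
    cv::Ptr<cv::bioinspired::RetinaFastToneMapping> tm =
        cv::bioinspired::RetinaFastToneMapping::create(cv::Size(640, 480));
    (void)tm;
}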
@ -469,10 +469,10 @@ namespace bioinspired
// return image with center Parvo and peripheral Magno channels
void RetinaFilter::_processRetinaParvoMagnoMapping()
{
register float *hybridParvoMagnoPTR= &_retinaParvoMagnoMappedFrame[0];
register const float *parvoOutputPTR= get_data(_ParvoRetinaFilter.getOutput());
register const float *magnoXOutputPTR= get_data(_MagnoRetinaFilter.getOutput());
register float *hybridParvoMagnoCoefTablePTR= &_retinaParvoMagnoMapCoefTable[0];
float *hybridParvoMagnoPTR= &_retinaParvoMagnoMappedFrame[0];
const float *parvoOutputPTR= get_data(_ParvoRetinaFilter.getOutput());
const float *magnoXOutputPTR= get_data(_MagnoRetinaFilter.getOutput());
float *hybridParvoMagnoCoefTablePTR= &_retinaParvoMagnoMapCoefTable[0];
for (unsigned int i=0 ; i<_photoreceptorsPrefilter.getNBpixels() ; ++i, hybridParvoMagnoCoefTablePTR+=2)
{
@ -491,9 +491,9 @@ namespace bioinspired
if (parvoFovealResponse.size() != _ParvoRetinaFilter.getNBpixels())
return false;
register const float *parvoOutputPTR= get_data(_ParvoRetinaFilter.getOutput());
register float *fovealParvoResponsePTR= &parvoFovealResponse[0];
register float *hybridParvoMagnoCoefTablePTR= &_retinaParvoMagnoMapCoefTable[0];
const float *parvoOutputPTR= get_data(_ParvoRetinaFilter.getOutput());
float *fovealParvoResponsePTR= &parvoFovealResponse[0];
float *hybridParvoMagnoCoefTablePTR= &_retinaParvoMagnoMapCoefTable[0];
for (unsigned int i=0 ; i<_photoreceptorsPrefilter.getNBpixels() ; ++i, hybridParvoMagnoCoefTablePTR+=2)
{
@ -511,9 +511,9 @@ namespace bioinspired
if (magnoParafovealResponse.size() != _MagnoRetinaFilter.getNBpixels())
return false;
register const float *magnoXOutputPTR= get_data(_MagnoRetinaFilter.getOutput());
register float *parafovealMagnoResponsePTR=&magnoParafovealResponse[0];
register float *hybridParvoMagnoCoefTablePTR=&_retinaParvoMagnoMapCoefTable[0]+1;
const float *magnoXOutputPTR= get_data(_MagnoRetinaFilter.getOutput());
float *parafovealMagnoResponsePTR=&magnoParafovealResponse[0];
float *hybridParvoMagnoCoefTablePTR=&_retinaParvoMagnoMapCoefTable[0]+1;
for (unsigned int i=0 ; i<_photoreceptorsPrefilter.getNBpixels() ; ++i, hybridParvoMagnoCoefTablePTR+=2)
{

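Editor's note: the mapping loops above walk a coefficient table with two entries per pixel (the magno pass starts at offset +1). A standalone sketch of that interleaved-weights layout, assuming the even slot holds the parvo weight and the odd slot the magno weight:

// Blend foveal (parvo) and parafoveal (magno) responses with per-pixel weights
// stored interleaved: coefTable[2*i] for parvo, coefTable[2*i + 1] for magno.
static void blendParvoMagno(const float *parvo, const float *magno,
                            const float *coefTable, float *out, unsigned int nbPixels)
{
    for (unsigned int i = 0; i < nbPixels; ++i, coefTable += 2)
        out[i] = parvo[i] * coefTable[0] + magno[i] * coefTable[1];
}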
@ -95,8 +95,8 @@ public:
: bufferToClip(bufferToProcess), minValue(min), maxValue(max) { }
virtual void operator()( const cv::Range &r ) const {
register type *inputOutputBufferPTR=bufferToClip+r.start;
for (register int jf = r.start; jf != r.end; ++jf, ++inputOutputBufferPTR)
type *inputOutputBufferPTR=bufferToClip+r.start;
for (int jf = r.start; jf != r.end; ++jf, ++inputOutputBufferPTR)
{
if (*inputOutputBufferPTR>maxValue)
*inputOutputBufferPTR=maxValue;
@ -430,8 +430,8 @@ public:
type maxValue=inputOutputBuffer[0], minValue=inputOutputBuffer[0];
// get the min and max value
register type *inputOutputBufferPTR=inputOutputBuffer;
for (register size_t j = 0; j<processedPixels; ++j)
type *inputOutputBufferPTR=inputOutputBuffer;
for (size_t j = 0; j<processedPixels; ++j)
{
type pixValue = *(inputOutputBufferPTR++);
if (maxValue < pixValue)
@ -445,7 +445,7 @@ public:
type offset = (type)(-minValue*factor);
inputOutputBufferPTR=inputOutputBuffer;
for (register size_t j = 0; j < processedPixels; ++j, ++inputOutputBufferPTR)
for (size_t j = 0; j < processedPixels; ++j, ++inputOutputBufferPTR)
*inputOutputBufferPTR=*(inputOutputBufferPTR)*factor+offset;
}
@ -460,10 +460,10 @@ public:
type X0cube=sensitivity*sensitivity*sensitivity;
register type *inputBufferPTR=inputBuffer;
register type *outputBufferPTR=outputBuffer;
type *inputBufferPTR=inputBuffer;
type *outputBufferPTR=outputBuffer;
for (register size_t j = 0; j < _NBpixels; ++j, ++inputBufferPTR)
for (size_t j = 0; j < _NBpixels; ++j, ++inputBufferPTR)
{
type currentCubeLuminance=*inputBufferPTR**inputBufferPTR**inputBufferPTR;
@ -485,10 +485,10 @@ public:
type X0=maxOutputValue/(sensitivity-(type)1.0);
register type *inputBufferPTR=inputBuffer;
register type *outputBufferPTR=outputBuffer;
type *inputBufferPTR=inputBuffer;
type *outputBufferPTR=outputBuffer;
for (register size_t j = 0; j < nbPixels; ++j, ++inputBufferPTR)
for (size_t j = 0; j < nbPixels; ++j, ++inputBufferPTR)
*(outputBufferPTR++)=(meanValue+(meanValue+X0)*(*(inputBufferPTR)-meanValue)/(_abs(*(inputBufferPTR)-meanValue)+X0));
}
@ -503,12 +503,12 @@ public:
type meanValue=0, stdValue=0;
// compute mean value
for (register size_t j = 0; j < _NBpixels; ++j)
for (size_t j = 0; j < _NBpixels; ++j)
meanValue+=inputOutputBuffer[j];
meanValue/=((type)_NBpixels);
// compute std value
register type *inputOutputBufferPTR=inputOutputBuffer;
type *inputOutputBufferPTR=inputOutputBuffer;
for (size_t index=0;index<_NBpixels;++index)
{
type inputMinusMean=*(inputOutputBufferPTR++)-meanValue;

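Editor's note: the normalization hunks above first scan for the min/max, then apply out = in*factor + offset with offset = -min*factor, which maps the minimum to zero. A standalone sketch, assuming factor = maxOutput/(max - min) (the factor computation sits outside the lines shown):

#include <algorithm>
#include <cstddef>

// Min-max rescaling to [0, maxOutput], as in the second hunk above.
static void rescaleBuffer(float *buf, std::size_t n, float maxOutput)
{
    float minV = buf[0], maxV = buf[0];
    for (std::size_t j = 0; j < n; ++j)
    {
        minV = std::min(minV, buf[j]);
        maxV = std::max(maxV, buf[j]);
    }
    const float factor = (maxV > minV) ? maxOutput / (maxV - minV) : 0.0f;
    const float offset = -minV * factor;
    for (std::size_t j = 0; j < n; ++j)
        buf[j] = buf[j] * factor + offset;
}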
@ -247,7 +247,7 @@ private:
* allocator
* @param inputSize : size of the images input to segment (output will be the same size)
*/
Ptr<TransientAreasSegmentationModule> createTransientAreasSegmentationModule(Size inputSize){
Ptr<TransientAreasSegmentationModule> TransientAreasSegmentationModule::create(Size inputSize){
return makePtr<TransientAreasSegmentationModuleImpl_>(inputSize);
}
@ -462,7 +462,7 @@ void TransientAreasSegmentationModuleImpl::_run(const std::valarray<float> &inpu
float*localMotionPTR=&_localMotion[0], *neighborhoodMotionPTR=&_neighborhoodMotion[0], *contextMotionPTR=&_contextMotionEnergy[0];
// float meanEnergy=LPfilter2.sum()/(float)_LPfilter2.size();
register bool *segmentationPicturePTR= &_segmentedAreas[0];
bool *segmentationPicturePTR= &_segmentedAreas[0];
for (unsigned int index=0; index<_filterOutput.getNBpixels() ; ++index, ++segmentationPicturePTR, ++localMotionPTR, ++neighborhoodMotionPTR, contextMotionPTR++)
{
float generalMotionContextDecision=*neighborhoodMotionPTR-*contextMotionPTR;

@ -76,7 +76,7 @@ OCL_TEST_P(Retina_OCL, Accuracy)
Mat input = imread(cvtest::TS::ptr()->get_data_path() + "shared/lena.png", colorMode);
CV_Assert(!input.empty());
Ptr<bioinspired::Retina> retina = bioinspired::createRetina(
Ptr<bioinspired::Retina> retina = bioinspired::Retina::create(
input.size(),
colorMode,
colorSamplingMethod,

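Editor's note: createRetina gives way to the static Retina::create with the same argument list as the test above. A hedged sketch; only the leading arguments appear in the hunk, and the trailing ones are left at whatever defaults the header declares:

#include <opencv2/bioinspired.hpp>

void makeRetina(const cv::Mat &input)
{
    // colorMode = true; remaining parameters as in the test above.
    cv::Ptr<cv::bioinspired::Retina> retina =
        cv::bioinspired::Retina::create(input.size(), true);
    (void)retina;
}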
@ -36,12 +36,12 @@ ocv_define_module(cnn_3dobj opencv_core opencv_imgproc ${Caffe_LIBS} ${Glog_LIBS
ocv_add_testdata(testdata/cv contrib/cnn_3dobj)
if(TARGET opencv_test_cnn_3dobj)
target_link_libraries(opencv_test_cnn_3dobj boost_system)
target_link_libraries(opencv_test_cnn_3dobj PUBLIC boost_system)
endif()
foreach(exe_TGT classify video sphereview_data model_analysis)
if(TARGET example_cnn_3dobj_${exe_TGT})
target_link_libraries(example_cnn_3dobj_${exe_TGT} boost_system)
target_link_libraries(example_cnn_3dobj_${exe_TGT} PUBLIC boost_system)
endif()
endforeach()
endif()

@ -6,8 +6,6 @@ if(${CMAKE_VERSION} VERSION_LESS 3.2)
return()
endif()
cmake_policy(SET CMP0028 OLD)
# Using cmake scripts and modules
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
@ -83,6 +81,12 @@ add_definitions(-DCNN_NO_SERIALIZATION -DCNN_USE_CAFFE_CONVERTER)
# this is not needed anymore.
find_package(Protobuf QUIET)
if(NOT ${Protobuf_FOUND})
message(STATUS "Module opencv_dnn_modern disabled because Protobuf is not found")
ocv_module_disable(dnn_modern)
return()
endif()
if(DEFINED PROTOBUF_PROTOC_EXECUTABLE AND EXISTS ${PROTOBUF_PROTOC_EXECUTABLE})
execute_process(COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} caffe.proto --cpp_out=./
WORKING_DIRECTORY ${TINYDNN_INCLUDE_DIRS}/tiny_dnn/io/caffe)
@ -157,6 +161,7 @@ elseif(MSVC)
# this is fine
add_definitions(-D _CRT_SECURE_NO_WARNINGS)
add_definitions(-D _SCL_SECURE_NO_WARNINGS)
add_definitions(-D NO_STRICT)
# probably powerless with a header-only project
set(EXTRA_C_FLAGS "${EXTRA_C_FLAGS} /MP")
endif()

@ -40,11 +40,12 @@
//M*/
#include <iostream>
#include <opencv2/opencv_modules.hpp>
#ifdef HAVE_OPENCV_FEATURES2D
#include <opencv2/line_descriptor.hpp>
#include "opencv2/core/utility.hpp"
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>

@ -40,11 +40,12 @@
//M*/
#include <iostream>
#include <opencv2/opencv_modules.hpp>
#ifdef HAVE_OPENCV_FEATURES2D
#include <opencv2/line_descriptor.hpp>
#include "opencv2/core/utility.hpp"
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>

@ -40,11 +40,12 @@
//M*/
#include <iostream>
#include <opencv2/opencv_modules.hpp>
#ifdef HAVE_OPENCV_FEATURES2D
#include <opencv2/line_descriptor.hpp>
#include "opencv2/core/utility.hpp"
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>

@ -40,11 +40,12 @@
//M*/
#include <iostream>
#include <opencv2/opencv_modules.hpp>
#ifdef HAVE_OPENCV_FEATURES2D
#include <opencv2/line_descriptor.hpp>
#include "opencv2/core/utility.hpp"
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>

@ -40,11 +40,12 @@
//M*/
#include <iostream>
#include <opencv2/opencv_modules.hpp>
#ifdef HAVE_OPENCV_FEATURES2D
#include <opencv2/line_descriptor.hpp>
#include "opencv2/core/utility.hpp"
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>

@ -40,12 +40,12 @@
//M*/
#include <iostream>
#include <opencv2/opencv_modules.hpp>
#ifdef HAVE_OPENCV_FEATURES2D
#include <opencv2/line_descriptor.hpp>
#include "opencv2/core/utility.hpp"
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>

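Editor's note: each of the six samples above now compiles its body only when opencv_features2d is present; the closing half of the guard lies outside the hunks shown. A sketch of the full pattern, with the fallback main assumed rather than copied from the samples:

#include <opencv2/opencv_modules.hpp>
#ifdef HAVE_OPENCV_FEATURES2D

#include <opencv2/line_descriptor.hpp>
int main() { /* sample body as in the files above */ return 0; }

#else

#include <iostream>
int main()
{
    std::cout << "This sample requires the opencv_features2d module." << std::endl;
    return 0;
}

#endif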
@ -114,6 +114,7 @@ int main( int argc, const char **argv )
std::cout << "Found " << corr.size() << " matches." << std::endl;
std::cout << "Time: " << meter.getTimeSec() << " sec." << std::endl;
double error = 0;
int totalCorrectFlowVectors = 0;
Mat dispErr = Mat::zeros( from.size(), CV_32FC3 );
dispErr = Scalar( 0, 0, 1 );
Mat disp = Mat::zeros( from.size(), CV_32FC3 );
@ -123,13 +124,22 @@ int main( int argc, const char **argv )
{
const Point2f a = corr[i].first;
const Point2f b = corr[i].second;
const Point2f c = a + gt.at< Point2f >( corr[i].first.y, corr[i].first.x );
error += normL2( b - c );
const Point2f gtDisplacement = gt.at< Point2f >( corr[i].first.y, corr[i].first.x );
// Check that flow vector is correct
if (!cvIsNaN(gtDisplacement.x) && !cvIsNaN(gtDisplacement.y) && gtDisplacement.x < 1e9 && gtDisplacement.y < 1e9)
{
const Point2f c = a + gtDisplacement;
error += normL2( b - c );
circle( dispErr, a, 3, getFlowColor( b - c, false, 32 ), -1 );
++totalCorrectFlowVectors;
}
circle( disp, a, 3, getFlowColor( b - a ), -1 );
circle( dispErr, a, 3, getFlowColor( b - c, false, 32 ), -1 );
}
error /= corr.size();
if (totalCorrectFlowVectors)
error /= totalCorrectFlowVectors;
std::cout << "Average endpoint error: " << error << " px." << std::endl;

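Editor's note: the change guards the endpoint-error sum against invalid ground truth (NaN components or the large sentinel used for unknown flow) and averages only over the vectors that passed the check. The same logic as a standalone helper (the function name is hypothetical):

#include <cmath>
#include <utility>
#include <vector>
#include <opencv2/core.hpp>

// Average endpoint error over valid ground-truth flow vectors only.
static double averageEndpointError(const std::vector< std::pair<cv::Point2f, cv::Point2f> > &corr,
                                   const cv::Mat &gt /* CV_32FC2 ground-truth flow */)
{
    double error = 0;
    int totalCorrect = 0;
    for (size_t i = 0; i < corr.size(); ++i)
    {
        const cv::Point2f a = corr[i].first, b = corr[i].second;
        const cv::Point2f d = gt.at<cv::Point2f>((int)a.y, (int)a.x);
        if (!cvIsNaN(d.x) && !cvIsNaN(d.y) && d.x < 1e9 && d.y < 1e9)
        {
            const cv::Point2f e = b - (a + d);
            error += std::sqrt((double)e.x * e.x + (double)e.y * e.y);
            ++totalCorrect;
        }
    }
    return totalCorrect ? error / totalCorrect : 0.0;
}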
@ -729,6 +729,9 @@ Ptr< GPCTrainingSamples > GPCTrainingSamples::create( InputArrayOfArrays imagesF
void GPCDetails::dropOutliers( std::vector< std::pair< Point2i, Point2i > > &corr )
{
if ( corr.size() == 0 )
return;
std::vector< float > mag( corr.size() );
for ( size_t i = 0; i < corr.size(); ++i )

@ -84,23 +84,33 @@ namespace cv
CV_WRAP virtual void setPlotGridColor(Scalar _plotGridColor) = 0;
CV_WRAP virtual void setPlotTextColor(Scalar _plotTextColor) = 0;
CV_WRAP virtual void setPlotSize(int _plotSizeWidth, int _plotSizeHeight) = 0;
CV_WRAP virtual void setShowGrid(bool needShowGrid) = 0;
CV_WRAP virtual void setShowText(bool needShowText) = 0;
CV_WRAP virtual void setGridLinesNumber(int gridLinesNumber) = 0;
/**
* @brief Sets the index of the point whose coordinates will be printed on the top left corner of the plot (if the ShowText flag is true).
*
* @param pointIdx index of the required point in data array.
*/
CV_WRAP virtual void setPointIdxToPrint(int pointIdx) = 0;
CV_WRAP virtual void render(OutputArray _plotResult) = 0;
};
/**
* @brief Creates Plot2d object
*
* @param data \f$1xN\f$ or \f$Nx1\f$ matrix containing \f$Y\f$ values of points to plot. \f$X\f$ values
* will be equal to indexes of corresponding elements in data matrix.
*/
CV_EXPORTS_W Ptr<Plot2d> createPlot2d(InputArray data);
/**
* @brief Creates Plot2d object
*
* @param dataX \f$1xN\f$ or \f$Nx1\f$ matrix containing \f$X\f$ values of points to plot.
* @param dataY \f$1xN\f$ or \f$Nx1\f$ matrix containing \f$Y\f$ values of points to plot.
*/
CV_EXPORTS_W Ptr<Plot2d> createPlot2d(InputArray dataX, InputArray dataY);
/**
* @brief Creates Plot2d object
*
* @param data \f$1xN\f$ or \f$Nx1\f$ matrix containing \f$Y\f$ values of points to plot. \f$X\f$ values
* will be equal to indexes of corresponding elements in data matrix.
*/
CV_WRAP static Ptr<Plot2d> create(InputArray data);
/**
* @brief Creates Plot2d object
*
* @param dataX \f$1xN\f$ or \f$Nx1\f$ matrix containing \f$X\f$ values of points to plot.
* @param dataY \f$1xN\f$ or \f$Nx1\f$ matrix containing \f$Y\f$ values of points to plot.
*/
CV_WRAP static Ptr<Plot2d> create(InputArray dataX, InputArray dataY);
};
//! @}
}
}

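Editor's note: a hedged usage sketch of the interface declared above, assuming the cv::plot namespace these declarations live in:

#include <opencv2/core.hpp>
#include <opencv2/plot.hpp>

void renderDemo()
{
    cv::Mat y = (cv::Mat_<double>(1, 5) << 1, 4, 9, 16, 25);
    cv::Ptr<cv::plot::Plot2d> p = cv::plot::Plot2d::create(y);
    p->setShowGrid(true);
    p->setGridLinesNumber(10);
    p->setShowText(true);
    p->setPointIdxToPrint(2);   // print the coordinates of the third point
    cv::Mat img;
    p->render(img);
}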
@ -168,7 +168,26 @@ namespace cv
else
plotSizeHeight = 300;
}
void setShowGrid(bool _needShowGrid)
{
needShowGrid = _needShowGrid;
}
void setShowText(bool _needShowText)
{
needShowText = _needShowText;
}
void setGridLinesNumber(int _gridLinesNumber)
{
if(_gridLinesNumber <= 0)
_gridLinesNumber = 1;
gridLinesNumber = _gridLinesNumber;
}
void setPointIdxToPrint(int _cursorPos)
{
if(_cursorPos >= plotDataX.rows || _cursorPos < 0)
_cursorPos = plotDataX.rows - 1;
cursorPos = _cursorPos;
}
//render the plotResult to a Mat
void render(OutputArray _plotResult)
{
@ -189,8 +208,8 @@ namespace cv
int ImageXzero = (int)InterpXdataFindZero.at<double>(NumVecElements,0);
int ImageYzero = (int)InterpYdataFindZero.at<double>(NumVecElements,0);
double CurrentX = plotDataX.at<double>(NumVecElements-1,0);
double CurrentY = plotDataY.at<double>(NumVecElements-1,0);
double CurrentX = plotDataX.at<double>(cursorPos,0);
double CurrentY = plotDataY.at<double>(cursorPos,0);
drawAxis(ImageXzero,ImageYzero, CurrentX, CurrentY, plotAxisColor, plotGridColor);
@ -245,6 +264,10 @@ namespace cv
double plotMinY_plusZero;
double plotMaxY_plusZero;
int plotLineWidth;
bool needShowGrid;
bool needShowText;
int gridLinesNumber;
int cursorPos;
//colors of each plot element
Scalar plotLineColor;
@ -319,22 +342,30 @@ namespace cv
setPlotBackgroundColor(Scalar(0, 0, 0));
setPlotLineColor(Scalar(0, 255, 255));
setPlotTextColor(Scalar(255, 255, 255));
setShowGrid(true);
setShowText(true);
setGridLinesNumber(10);
setPointIdxToPrint(-1);
}
void drawAxis(int ImageXzero, int ImageYzero, double CurrentX, double CurrentY, Scalar axisColor, Scalar gridColor)
{
drawValuesAsText(0, ImageXzero, ImageYzero, 10, 20);
drawValuesAsText(0, ImageXzero, ImageYzero, -20, 20);
drawValuesAsText(0, ImageXzero, ImageYzero, 10, -10);
drawValuesAsText(0, ImageXzero, ImageYzero, -20, -10);
drawValuesAsText("X = %g",CurrentX, 0, 0, 40, 20);
drawValuesAsText("Y = %g",CurrentY, 0, 20, 40, 20);
if(needShowText)
{
drawValuesAsText(0, ImageXzero, ImageYzero, 10, 20);
drawValuesAsText(0, ImageXzero, ImageYzero, -20, 20);
drawValuesAsText(0, ImageXzero, ImageYzero, 10, -10);
drawValuesAsText(0, ImageXzero, ImageYzero, -20, -10);
drawValuesAsText((format("X_%d = ", cursorPos) + "%g").c_str(), CurrentX, 0, 0, 40, 20);
drawValuesAsText((format("Y_%d = ", cursorPos) + "%g").c_str(), CurrentY, 0, 20, 40, 20);
}
//Horizontal X axis and equispaced horizontal lines
int LineSpace = 50;
int LineSpace = cvRound(plotSizeHeight / (float)gridLinesNumber);
int TraceSize = 5;
drawLine(0, plotSizeWidth, ImageYzero, ImageYzero, axisColor);
if(needShowGrid)
for(int i=-plotSizeHeight; i<plotSizeHeight; i=i+LineSpace){
if(i!=0){
@ -349,7 +380,9 @@ namespace cv
//Vertical Y axis
drawLine(ImageXzero, ImageXzero, 0, plotSizeHeight, axisColor);
LineSpace = cvRound(LineSpace * (float)plotSizeWidth / plotSizeHeight );
if(needShowGrid)
for(int i=-plotSizeWidth; i<plotSizeWidth; i=i+LineSpace){
if(i!=0){
@ -420,13 +453,13 @@ namespace cv
};
Ptr<Plot2d> createPlot2d(InputArray _plotData)
Ptr<Plot2d> Plot2d::create(InputArray _plotData)
{
return Ptr<Plot2dImpl> (new Plot2dImpl (_plotData));
}
Ptr<Plot2d> createPlot2d(InputArray _plotDataX, InputArray _plotDataY)
Ptr<Plot2d> Plot2d::create(InputArray _plotDataX, InputArray _plotDataY)
{
return Ptr<Plot2dImpl> (new Plot2dImpl (_plotDataX, _plotDataY));
}

@ -104,12 +104,14 @@ namespace rgbd
* ``Gradient Response Maps for Real-Time Detection of Texture-Less Objects``
* by S. Hinterstoisser, C. Cagniart, S. Ilic, P. Sturm, N. Navab, P. Fua, and V. Lepetit
*/
class CV_EXPORTS RgbdNormals: public Algorithm
class CV_EXPORTS_W RgbdNormals: public Algorithm
{
public:
enum RGBD_NORMALS_METHOD
{
RGBD_NORMALS_METHOD_FALS, RGBD_NORMALS_METHOD_LINEMOD, RGBD_NORMALS_METHOD_SRI
RGBD_NORMALS_METHOD_FALS = 0,
RGBD_NORMALS_METHOD_LINEMOD = 1,
RGBD_NORMALS_METHOD_SRI = 2
};
RgbdNormals()
@ -133,10 +135,13 @@ namespace rgbd
* @param method one of the methods to use: RGBD_NORMALS_METHOD_SRI, RGBD_NORMALS_METHOD_FALS
*/
RgbdNormals(int rows, int cols, int depth, InputArray K, int window_size = 5, int method =
RGBD_NORMALS_METHOD_FALS);
RgbdNormals::RGBD_NORMALS_METHOD_FALS);
~RgbdNormals();
CV_WRAP static Ptr<RgbdNormals> create(int rows, int cols, int depth, InputArray K, int window_size = 5, int method =
RgbdNormals::RGBD_NORMALS_METHOD_FALS);
/** Given a set of 3d points in a depth image, compute the normals at each point.
* @param points a rows x cols x 3 matrix of CV_32F/CV_64F or a rows x cols x 1 CV_16U
* @param normals a rows x cols x 3 matrix
@ -147,54 +152,54 @@ namespace rgbd
/** Initializes some data that is cached for later computation.
* If this function is not called, it will be called the first time normals are computed.
*/
void
CV_WRAP void
initialize() const;
int getRows() const
CV_WRAP int getRows() const
{
return rows_;
}
void setRows(int val)
CV_WRAP void setRows(int val)
{
rows_ = val;
}
int getCols() const
CV_WRAP int getCols() const
{
return cols_;
}
void setCols(int val)
CV_WRAP void setCols(int val)
{
cols_ = val;
}
int getWindowSize() const
CV_WRAP int getWindowSize() const
{
return window_size_;
}
void setWindowSize(int val)
CV_WRAP void setWindowSize(int val)
{
window_size_ = val;
}
int getDepth() const
CV_WRAP int getDepth() const
{
return depth_;
}
void setDepth(int val)
CV_WRAP void setDepth(int val)
{
depth_ = val;
}
cv::Mat getK() const
CV_WRAP cv::Mat getK() const
{
return K_;
}
void setK(const cv::Mat &val)
CV_WRAP void setK(const cv::Mat &val)
{
K_ = val;
}
int getMethod() const
CV_WRAP int getMethod() const
{
return method_;
}
void setMethod(int val)
CV_WRAP void setMethod(int val)
{
method_ = val;
}
@ -212,7 +217,7 @@ namespace rgbd
/** Object that can clean a noisy depth image
*/
class CV_EXPORTS DepthCleaner: public Algorithm
class CV_EXPORTS_W DepthCleaner: public Algorithm
{
public:
/** NIL method is from
@ -238,10 +243,12 @@ namespace rgbd
* @param window_size the window size to compute the normals: can only be 1,3,5 or 7
* @param method one of the methods to use: RGBD_NORMALS_METHOD_SRI, RGBD_NORMALS_METHOD_FALS
*/
DepthCleaner(int depth, int window_size = 5, int method = DEPTH_CLEANER_NIL);
DepthCleaner(int depth, int window_size = 5, int method = DepthCleaner::DEPTH_CLEANER_NIL);
~DepthCleaner();
CV_WRAP static Ptr<DepthCleaner> create(int depth, int window_size = 5, int method = DepthCleaner::DEPTH_CLEANER_NIL);
/** Given a set of 3d points in a depth image, compute the normals at each point.
* @param points a rows x cols x 3 matrix of CV_32F/CV_64F or a rows x cols x 1 CV_16U
* @param depth a rows x cols matrix of the cleaned up depth
@ -252,30 +259,30 @@ namespace rgbd
/** Initializes some data that is cached for later computation.
* If this function is not called, it will be called the first time normals are computed.
*/
void
CV_WRAP void
initialize() const;
int getWindowSize() const
CV_WRAP int getWindowSize() const
{
return window_size_;
}
void setWindowSize(int val)
CV_WRAP void setWindowSize(int val)
{
window_size_ = val;
}
int getDepth() const
CV_WRAP int getDepth() const
{
return depth_;
}
void setDepth(int val)
CV_WRAP void setDepth(int val)
{
depth_ = val;
}
int getMethod() const
CV_WRAP int getMethod() const
{
return method_;
}
void setMethod(int val)
CV_WRAP void setMethod(int val)
{
method_ = val;
}
@ -309,7 +316,7 @@ namespace rgbd
* @param registeredDepth the result of transforming the depth into the external camera
* @param depthDilation whether or not the depth is dilated to avoid holes and occlusion errors (optional)
*/
CV_EXPORTS
CV_EXPORTS_W
void
registerDepth(InputArray unregisteredCameraMatrix, InputArray registeredCameraMatrix, InputArray registeredDistCoeffs,
InputArray Rt, InputArray unregisteredDepth, const Size& outputImagePlaneSize,
@ -321,7 +328,7 @@ namespace rgbd
* @param in_points the list of xy coordinates
* @param points3d the resulting 3d points
*/
CV_EXPORTS
CV_EXPORTS_W
void
depthTo3dSparse(InputArray depth, InputArray in_K, InputArray in_points, OutputArray points3d);
@ -346,13 +353,13 @@ namespace rgbd
* @param depth the desired output depth (floats or double)
* @param out The rescaled float depth image
*/
CV_EXPORTS
CV_EXPORTS_W
void
rescaleDepth(InputArray in, int depth, OutputArray out);
/** Object that can compute planes in an image
*/
class CV_EXPORTS RgbdPlane: public Algorithm
class CV_EXPORTS_W RgbdPlane: public Algorithm
{
public:
enum RGBD_PLANE_METHOD
@ -360,7 +367,7 @@ namespace rgbd
RGBD_PLANE_METHOD_DEFAULT
};
RgbdPlane(RGBD_PLANE_METHOD method = RGBD_PLANE_METHOD_DEFAULT)
RgbdPlane(int method = RgbdPlane::RGBD_PLANE_METHOD_DEFAULT)
:
method_(method),
block_size_(40),
@ -393,59 +400,59 @@ namespace rgbd
void
operator()(InputArray points3d, OutputArray mask, OutputArray plane_coefficients);
int getBlockSize() const
CV_WRAP int getBlockSize() const
{
return block_size_;
}
void setBlockSize(int val)
CV_WRAP void setBlockSize(int val)
{
block_size_ = val;
}
int getMinSize() const
CV_WRAP int getMinSize() const
{
return min_size_;
}
void setMinSize(int val)
CV_WRAP void setMinSize(int val)
{
min_size_ = val;
}
int getMethod() const
CV_WRAP int getMethod() const
{
return method_;
}
void setMethod(int val)
CV_WRAP void setMethod(int val)
{
method_ = val;
}
double getThreshold() const
CV_WRAP double getThreshold() const
{
return threshold_;
}
void setThreshold(double val)
CV_WRAP void setThreshold(double val)
{
threshold_ = val;
}
double getSensorErrorA() const
CV_WRAP double getSensorErrorA() const
{
return sensor_error_a_;
}
void setSensorErrorA(double val)
CV_WRAP void setSensorErrorA(double val)
{
sensor_error_a_ = val;
}
double getSensorErrorB() const
CV_WRAP double getSensorErrorB() const
{
return sensor_error_b_;
}
void setSensorErrorB(double val)
CV_WRAP void setSensorErrorB(double val)
{
sensor_error_b_ = val;
}
double getSensorErrorC() const
CV_WRAP double getSensorErrorC() const
{
return sensor_error_c_;
}
void setSensorErrorC(double val)
CV_WRAP void setSensorErrorC(double val)
{
sensor_error_c_ = val;
}
@ -465,27 +472,29 @@ namespace rgbd
/** Object that contains frame data.
*/
struct CV_EXPORTS RgbdFrame
struct CV_EXPORTS_W RgbdFrame
{
RgbdFrame();
RgbdFrame(const Mat& image, const Mat& depth, const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);
virtual ~RgbdFrame();
virtual void
CV_WRAP static Ptr<RgbdFrame> create(const Mat& image=Mat(), const Mat& depth=Mat(), const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);
CV_WRAP virtual void
release();
int ID;
Mat image;
Mat depth;
Mat mask;
Mat normals;
CV_PROP int ID;
CV_PROP Mat image;
CV_PROP Mat depth;
CV_PROP Mat mask;
CV_PROP Mat normals;
};
/** Object that contains frame data that is possibly needed for the Odometry.
* It is used for efficiency (to pass precomputed/cached data of a frame that participates
* in the Odometry processing several times).
*/
struct CV_EXPORTS OdometryFrame : public RgbdFrame
struct CV_EXPORTS_W OdometryFrame : public RgbdFrame
{
/** These constants are used to set a type of cache which has to be prepared depending on the frame role:
* srcFrame or dstFrame (see compute method of the Odometry class). For the srcFrame and dstFrame different cache data may be required,
@ -502,29 +511,31 @@ namespace rgbd
OdometryFrame();
OdometryFrame(const Mat& image, const Mat& depth, const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);
virtual void
CV_WRAP static Ptr<OdometryFrame> create(const Mat& image=Mat(), const Mat& depth=Mat(), const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);
CV_WRAP virtual void
release();
void
CV_WRAP void
releasePyramids();
std::vector<Mat> pyramidImage;
std::vector<Mat> pyramidDepth;
std::vector<Mat> pyramidMask;
CV_PROP std::vector<Mat> pyramidImage;
CV_PROP std::vector<Mat> pyramidDepth;
CV_PROP std::vector<Mat> pyramidMask;
std::vector<Mat> pyramidCloud;
CV_PROP std::vector<Mat> pyramidCloud;
std::vector<Mat> pyramid_dI_dx;
std::vector<Mat> pyramid_dI_dy;
std::vector<Mat> pyramidTexturedMask;
CV_PROP std::vector<Mat> pyramid_dI_dx;
CV_PROP std::vector<Mat> pyramid_dI_dy;
CV_PROP std::vector<Mat> pyramidTexturedMask;
std::vector<Mat> pyramidNormals;
std::vector<Mat> pyramidNormalsMask;
CV_PROP std::vector<Mat> pyramidNormals;
CV_PROP std::vector<Mat> pyramidNormalsMask;
};
/** Base class for computation of odometry.
*/
class CV_EXPORTS Odometry: public Algorithm
class CV_EXPORTS_W Odometry: public Algorithm
{
public:
@ -534,32 +545,32 @@ namespace rgbd
ROTATION = 1, TRANSLATION = 2, RIGID_BODY_MOTION = 4
};
static inline float
CV_WRAP static inline float
DEFAULT_MIN_DEPTH()
{
return 0.f; // in meters
}
static inline float
CV_WRAP static inline float
DEFAULT_MAX_DEPTH()
{
return 4.f; // in meters
}
static inline float
CV_WRAP static inline float
DEFAULT_MAX_DEPTH_DIFF()
{
return 0.07f; // in meters
}
static inline float
CV_WRAP static inline float
DEFAULT_MAX_POINTS_PART()
{
return 0.07f; // in [0, 1]
}
static inline float
CV_WRAP static inline float
DEFAULT_MAX_TRANSLATION()
{
return 0.15f; // in meters
}
static inline float
CV_WRAP static inline float
DEFAULT_MAX_ROTATION()
{
return 15; // in degrees
@ -583,15 +594,15 @@ namespace rgbd
Rt is 4x4 matrix of CV_64FC1 type.
* @param initRt Initial transformation from the source frame to the destination one (optional)
*/
bool
CV_WRAP bool
compute(const Mat& srcImage, const Mat& srcDepth, const Mat& srcMask, const Mat& dstImage, const Mat& dstDepth,
const Mat& dstMask, Mat& Rt, const Mat& initRt = Mat()) const;
const Mat& dstMask, OutputArray Rt, const Mat& initRt = Mat()) const;
/** One more method to compute a transformation from the source frame to the destination one.
* It is designed to save on computing the frame data (image pyramids, normals, etc.).
*/
bool
compute(Ptr<OdometryFrame>& srcFrame, Ptr<OdometryFrame>& dstFrame, Mat& Rt, const Mat& initRt = Mat()) const;
CV_WRAP_AS(compute2) bool
compute(Ptr<OdometryFrame>& srcFrame, Ptr<OdometryFrame>& dstFrame, OutputArray Rt, const Mat& initRt = Mat()) const;
/** Prepare a cache for the frame. The function checks the precomputed/passed data (throwing an error if the data
* does not satisfy the requirements) and computes all remaining cache data needed for the frame. Returned size is a resolution
@ -599,32 +610,32 @@ namespace rgbd
* @param frame The odometry which will process the frame.
* @param cacheType The cache type: CACHE_SRC, CACHE_DST or CACHE_ALL.
*/
virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;
CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;
static Ptr<Odometry> create(const String & odometryType);
CV_WRAP static Ptr<Odometry> create(const String & odometryType);
/** @see setCameraMatrix */
virtual cv::Mat getCameraMatrix() const = 0;
CV_WRAP virtual cv::Mat getCameraMatrix() const = 0;
/** @copybrief getCameraMatrix @see getCameraMatrix */
virtual void setCameraMatrix(const cv::Mat &val) = 0;
CV_WRAP virtual void setCameraMatrix(const cv::Mat &val) = 0;
/** @see setTransformType */
virtual int getTransformType() const = 0;
CV_WRAP virtual int getTransformType() const = 0;
/** @copybrief getTransformType @see getTransformType */
virtual void setTransformType(int val) = 0;
CV_WRAP virtual void setTransformType(int val) = 0;
protected:
virtual void
checkParams() const = 0;
virtual bool
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt,
const Mat& initRt) const = 0;
};
/** Odometry based on the paper "Real-Time Visual Odometry from Dense RGB-D Images",
* F. Steinbrücker, J. Sturm, D. Cremers, ICCV, 2011.
*/
class CV_EXPORTS RgbdOdometry: public Odometry
class CV_EXPORTS_W RgbdOdometry: public Odometry
{
public:
RgbdOdometry();
@ -640,90 +651,95 @@ namespace rgbd
* @param maxPointsPart The method uses a random subset of pixels of size frameWidth x frameHeight x pointsPart
* @param transformType Class of transformation
*/
RgbdOdometry(const Mat& cameraMatrix, float minDepth = DEFAULT_MIN_DEPTH(), float maxDepth = DEFAULT_MAX_DEPTH(),
float maxDepthDiff = DEFAULT_MAX_DEPTH_DIFF(), const std::vector<int>& iterCounts = std::vector<int>(),
const std::vector<float>& minGradientMagnitudes = std::vector<float>(), float maxPointsPart = DEFAULT_MAX_POINTS_PART(),
int transformType = RIGID_BODY_MOTION);
RgbdOdometry(const Mat& cameraMatrix, float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), const std::vector<int>& iterCounts = std::vector<int>(),
const std::vector<float>& minGradientMagnitudes = std::vector<float>(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
int transformType = Odometry::RIGID_BODY_MOTION);
CV_WRAP static Ptr<RgbdOdometry> create(const Mat& cameraMatrix = Mat(), float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), const std::vector<int>& iterCounts = std::vector<int>(),
const std::vector<float>& minGradientMagnitudes = std::vector<float>(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
int transformType = Odometry::RIGID_BODY_MOTION);
virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;
CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;
cv::Mat getCameraMatrix() const
CV_WRAP cv::Mat getCameraMatrix() const
{
return cameraMatrix;
}
void setCameraMatrix(const cv::Mat &val)
CV_WRAP void setCameraMatrix(const cv::Mat &val)
{
cameraMatrix = val;
}
double getMinDepth() const
CV_WRAP double getMinDepth() const
{
return minDepth;
}
void setMinDepth(double val)
CV_WRAP void setMinDepth(double val)
{
minDepth = val;
}
double getMaxDepth() const
CV_WRAP double getMaxDepth() const
{
return maxDepth;
}
void setMaxDepth(double val)
CV_WRAP void setMaxDepth(double val)
{
maxDepth = val;
}
double getMaxDepthDiff() const
CV_WRAP double getMaxDepthDiff() const
{
return maxDepthDiff;
}
void setMaxDepthDiff(double val)
CV_WRAP void setMaxDepthDiff(double val)
{
maxDepthDiff = val;
}
cv::Mat getIterationCounts() const
CV_WRAP cv::Mat getIterationCounts() const
{
return iterCounts;
}
void setIterationCounts(const cv::Mat &val)
CV_WRAP void setIterationCounts(const cv::Mat &val)
{
iterCounts = val;
}
cv::Mat getMinGradientMagnitudes() const
CV_WRAP cv::Mat getMinGradientMagnitudes() const
{
return minGradientMagnitudes;
}
void setMinGradientMagnitudes(const cv::Mat &val)
CV_WRAP void setMinGradientMagnitudes(const cv::Mat &val)
{
minGradientMagnitudes = val;
}
double getMaxPointsPart() const
CV_WRAP double getMaxPointsPart() const
{
return maxPointsPart;
}
void setMaxPointsPart(double val)
CV_WRAP void setMaxPointsPart(double val)
{
maxPointsPart = val;
}
int getTransformType() const
CV_WRAP int getTransformType() const
{
return transformType;
}
void setTransformType(int val)
CV_WRAP void setTransformType(int val)
{
transformType = val;
}
double getMaxTranslation() const
CV_WRAP double getMaxTranslation() const
{
return maxTranslation;
}
void setMaxTranslation(double val)
CV_WRAP void setMaxTranslation(double val)
{
maxTranslation = val;
}
double getMaxRotation() const
CV_WRAP double getMaxRotation() const
{
return maxRotation;
}
void setMaxRotation(double val)
CV_WRAP void setMaxRotation(double val)
{
maxRotation = val;
}
@ -733,7 +749,7 @@ namespace rgbd
checkParams() const;
virtual bool
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt,
const Mat& initRt) const;
// Some params have commented desired type. It's due to AlgorithmInfo::addParams does not support it now.
@ -754,7 +770,7 @@ namespace rgbd
/** Odometry based on the paper "KinectFusion: Real-Time Dense Surface Mapping and Tracking",
* Richard A. Newcombe, Andrew Fitzgibbon, et al., SIGGRAPH, 2011.
*/
class ICPOdometry: public Odometry
class CV_EXPORTS_W ICPOdometry: public Odometry
{
public:
ICPOdometry();
@ -768,85 +784,89 @@ namespace rgbd
* @param iterCounts Count of iterations on each pyramid level.
* @param transformType Class of transformation
*/
ICPOdometry(const Mat& cameraMatrix, float minDepth = DEFAULT_MIN_DEPTH(), float maxDepth = DEFAULT_MAX_DEPTH(),
float maxDepthDiff = DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = DEFAULT_MAX_POINTS_PART(),
const std::vector<int>& iterCounts = std::vector<int>(), int transformType = RIGID_BODY_MOTION);
ICPOdometry(const Mat& cameraMatrix, float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
const std::vector<int>& iterCounts = std::vector<int>(), int transformType = Odometry::RIGID_BODY_MOTION);
virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;
CV_WRAP static Ptr<ICPOdometry> create(const Mat& cameraMatrix = Mat(), float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
const std::vector<int>& iterCounts = std::vector<int>(), int transformType = Odometry::RIGID_BODY_MOTION);
cv::Mat getCameraMatrix() const
CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;
CV_WRAP cv::Mat getCameraMatrix() const
{
return cameraMatrix;
}
void setCameraMatrix(const cv::Mat &val)
CV_WRAP void setCameraMatrix(const cv::Mat &val)
{
cameraMatrix = val;
}
double getMinDepth() const
CV_WRAP double getMinDepth() const
{
return minDepth;
}
void setMinDepth(double val)
CV_WRAP void setMinDepth(double val)
{
minDepth = val;
}
double getMaxDepth() const
CV_WRAP double getMaxDepth() const
{
return maxDepth;
}
void setMaxDepth(double val)
CV_WRAP void setMaxDepth(double val)
{
maxDepth = val;
}
double getMaxDepthDiff() const
CV_WRAP double getMaxDepthDiff() const
{
return maxDepthDiff;
}
void setMaxDepthDiff(double val)
CV_WRAP void setMaxDepthDiff(double val)
{
maxDepthDiff = val;
}
cv::Mat getIterationCounts() const
CV_WRAP cv::Mat getIterationCounts() const
{
return iterCounts;
}
void setIterationCounts(const cv::Mat &val)
CV_WRAP void setIterationCounts(const cv::Mat &val)
{
iterCounts = val;
}
double getMaxPointsPart() const
CV_WRAP double getMaxPointsPart() const
{
return maxPointsPart;
}
void setMaxPointsPart(double val)
CV_WRAP void setMaxPointsPart(double val)
{
maxPointsPart = val;
}
int getTransformType() const
CV_WRAP int getTransformType() const
{
return transformType;
}
void setTransformType(int val)
CV_WRAP void setTransformType(int val)
{
transformType = val;
}
double getMaxTranslation() const
CV_WRAP double getMaxTranslation() const
{
return maxTranslation;
}
void setMaxTranslation(double val)
CV_WRAP void setMaxTranslation(double val)
{
maxTranslation = val;
}
double getMaxRotation() const
CV_WRAP double getMaxRotation() const
{
return maxRotation;
}
void setMaxRotation(double val)
CV_WRAP void setMaxRotation(double val)
{
maxRotation = val;
}
Ptr<RgbdNormals> getNormalsComputer() const
CV_WRAP Ptr<RgbdNormals> getNormalsComputer() const
{
return normalsComputer;
}
@ -856,7 +876,7 @@ namespace rgbd
checkParams() const;
virtual bool
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt,
const Mat& initRt) const;
// Some params have commented desired type. It's due to AlgorithmInfo::addParams does not support it now.
@ -878,7 +898,7 @@ namespace rgbd
/** Odometry that merges RgbdOdometry and ICPOdometry by minimizing the sum of their energy functions.
*/
class RgbdICPOdometry: public Odometry
class CV_EXPORTS_W RgbdICPOdometry: public Odometry
{
public:
RgbdICPOdometry();
@ -894,95 +914,101 @@ namespace rgbd
* if they have gradient magnitude less than minGradientMagnitudes[level].
* @param transformType Class of transformation
*/
RgbdICPOdometry(const Mat& cameraMatrix, float minDepth = DEFAULT_MIN_DEPTH(), float maxDepth = DEFAULT_MAX_DEPTH(),
float maxDepthDiff = DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = DEFAULT_MAX_POINTS_PART(),
RgbdICPOdometry(const Mat& cameraMatrix, float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
const std::vector<int>& iterCounts = std::vector<int>(),
const std::vector<float>& minGradientMagnitudes = std::vector<float>(),
int transformType = Odometry::RIGID_BODY_MOTION);
CV_WRAP static Ptr<RgbdICPOdometry> create(const Mat& cameraMatrix = Mat(), float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
const std::vector<int>& iterCounts = std::vector<int>(),
const std::vector<float>& minGradientMagnitudes = std::vector<float>(),
int transformType = RIGID_BODY_MOTION);
int transformType = Odometry::RIGID_BODY_MOTION);
virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;
CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;
cv::Mat getCameraMatrix() const
CV_WRAP cv::Mat getCameraMatrix() const
{
return cameraMatrix;
}
void setCameraMatrix(const cv::Mat &val)
CV_WRAP void setCameraMatrix(const cv::Mat &val)
{
cameraMatrix = val;
}
double getMinDepth() const
CV_WRAP double getMinDepth() const
{
return minDepth;
}
void setMinDepth(double val)
CV_WRAP void setMinDepth(double val)
{
minDepth = val;
}
double getMaxDepth() const
CV_WRAP double getMaxDepth() const
{
return maxDepth;
}
void setMaxDepth(double val)
CV_WRAP void setMaxDepth(double val)
{
maxDepth = val;
}
double getMaxDepthDiff() const
CV_WRAP double getMaxDepthDiff() const
{
return maxDepthDiff;
}
void setMaxDepthDiff(double val)
CV_WRAP void setMaxDepthDiff(double val)
{
maxDepthDiff = val;
}
double getMaxPointsPart() const
CV_WRAP double getMaxPointsPart() const
{
return maxPointsPart;
}
void setMaxPointsPart(double val)
CV_WRAP void setMaxPointsPart(double val)
{
maxPointsPart = val;
}
cv::Mat getIterationCounts() const
CV_WRAP cv::Mat getIterationCounts() const
{
return iterCounts;
}
void setIterationCounts(const cv::Mat &val)
CV_WRAP void setIterationCounts(const cv::Mat &val)
{
iterCounts = val;
}
cv::Mat getMinGradientMagnitudes() const
CV_WRAP cv::Mat getMinGradientMagnitudes() const
{
return minGradientMagnitudes;
}
void setMinGradientMagnitudes(const cv::Mat &val)
CV_WRAP void setMinGradientMagnitudes(const cv::Mat &val)
{
minGradientMagnitudes = val;
}
int getTransformType() const
CV_WRAP int getTransformType() const
{
return transformType;
}
void setTransformType(int val)
CV_WRAP void setTransformType(int val)
{
transformType = val;
}
double getMaxTranslation() const
CV_WRAP double getMaxTranslation() const
{
return maxTranslation;
}
void setMaxTranslation(double val)
CV_WRAP void setMaxTranslation(double val)
{
maxTranslation = val;
}
double getMaxRotation() const
CV_WRAP double getMaxRotation() const
{
return maxRotation;
}
void setMaxRotation(double val)
CV_WRAP void setMaxRotation(double val)
{
maxRotation = val;
}
Ptr<RgbdNormals> getNormalsComputer() const
CV_WRAP Ptr<RgbdNormals> getNormalsComputer() const
{
return normalsComputer;
}
@ -992,7 +1018,7 @@ namespace rgbd
checkParams() const;
virtual bool
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt,
computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt,
const Mat& initRt) const;
// Some params have commented desired type. It's due to AlgorithmInfo::addParams does not support it now.

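Editor's note: with CV_EXPORTS_W/CV_WRAP these classes become visible to the binding generator; from C++ the new static factories are used the same way. A hedged sketch with arbitrary camera intrinsics:

#include <opencv2/core.hpp>
#include <opencv2/rgbd.hpp>

void makeNormalsComputer()
{
    cv::Mat K = (cv::Mat_<float>(3, 3) << 525.f, 0.f, 319.5f,
                                          0.f, 525.f, 239.5f,
                                          0.f, 0.f, 1.f);
    cv::Ptr<cv::rgbd::RgbdNormals> normals = cv::rgbd::RgbdNormals::create(
        480, 640, CV_32F, K, 5, cv::rgbd::RgbdNormals::RGBD_NORMALS_METHOD_FALS);
    (void)normals;
}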
@ -804,7 +804,7 @@ bool testDeltaTransformation(const Mat& deltaRt, double maxTranslation, double m
}
static
bool RGBDICPOdometryImpl(Mat& Rt, const Mat& initRt,
bool RGBDICPOdometryImpl(OutputArray _Rt, const Mat& initRt,
const Ptr<OdometryFrame>& srcFrame,
const Ptr<OdometryFrame>& dstFrame,
const Mat& cameraMatrix,
@ -920,8 +920,9 @@ bool RGBDICPOdometryImpl(Mat& Rt, const Mat& initRt,
isOk = true;
}
}
Rt = resultRt;
_Rt.create(resultRt.size(), resultRt.type());
Mat Rt = _Rt.getMat();
resultRt.copyTo(Rt);
if(isOk)
{
@ -991,6 +992,14 @@ warpFrameImpl(const Mat& image, const Mat& depth, const Mat& mask,
///////////////////////////////////////////////////////////////////////////////////////////////
Ptr<RgbdNormals> RgbdNormals::create(int rows_in, int cols_in, int depth_in, InputArray K_in, int window_size_in, int method_in) {
return makePtr<RgbdNormals>(rows_in, cols_in, depth_in, K_in, window_size_in, method_in);
}
Ptr<DepthCleaner> DepthCleaner::create(int depth_in, int window_size_in, int method_in) {
return makePtr<DepthCleaner>(depth_in, window_size_in, method_in);
}
RgbdFrame::RgbdFrame() : ID(-1)
{}
@ -1001,6 +1010,10 @@ RgbdFrame::RgbdFrame(const Mat& image_in, const Mat& depth_in, const Mat& mask_i
RgbdFrame::~RgbdFrame()
{}
Ptr<RgbdFrame> RgbdFrame::create(const Mat& image_in, const Mat& depth_in, const Mat& mask_in, const Mat& normals_in, int ID_in) {
return makePtr<RgbdFrame>(image_in, depth_in, mask_in, normals_in, ID_in);
}
void RgbdFrame::release()
{
ID = -1;
@ -1017,6 +1030,10 @@ OdometryFrame::OdometryFrame(const Mat& image_in, const Mat& depth_in, const Mat
: RgbdFrame(image_in, depth_in, mask_in, normals_in, ID_in)
{}
Ptr<OdometryFrame> OdometryFrame::create(const Mat& image_in, const Mat& depth_in, const Mat& mask_in, const Mat& normals_in, int ID_in) {
return makePtr<OdometryFrame>(image_in, depth_in, mask_in, normals_in, ID_in);
}
void OdometryFrame::release()
{
RgbdFrame::release();
@ -1041,7 +1058,7 @@ void OdometryFrame::releasePyramids()
bool Odometry::compute(const Mat& srcImage, const Mat& srcDepth, const Mat& srcMask,
const Mat& dstImage, const Mat& dstDepth, const Mat& dstMask,
Mat& Rt, const Mat& initRt) const
OutputArray Rt, const Mat& initRt) const
{
Ptr<OdometryFrame> srcFrame(new OdometryFrame(srcImage, srcDepth, srcMask));
Ptr<OdometryFrame> dstFrame(new OdometryFrame(dstImage, dstDepth, dstMask));
@ -1049,7 +1066,7 @@ bool Odometry::compute(const Mat& srcImage, const Mat& srcDepth, const Mat& srcM
return compute(srcFrame, dstFrame, Rt, initRt);
}
bool Odometry::compute(Ptr<OdometryFrame>& srcFrame, Ptr<OdometryFrame>& dstFrame, Mat& Rt, const Mat& initRt) const
bool Odometry::compute(Ptr<OdometryFrame>& srcFrame, Ptr<OdometryFrame>& dstFrame, OutputArray Rt, const Mat& initRt) const
{
checkParams();
@ -1116,6 +1133,13 @@ RgbdOdometry::RgbdOdometry(const Mat& _cameraMatrix,
}
}
Ptr<RgbdOdometry> RgbdOdometry::create(const Mat& _cameraMatrix, float _minDepth, float _maxDepth,
float _maxDepthDiff, const std::vector<int>& _iterCounts,
const std::vector<float>& _minGradientMagnitudes, float _maxPointsPart,
int _transformType) {
return makePtr<RgbdOdometry>(_cameraMatrix, _minDepth, _maxDepth, _maxDepthDiff, _iterCounts, _minGradientMagnitudes, _maxPointsPart, _transformType);
}
Size RgbdOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const
{
Odometry::prepareFrameCache(frame, cacheType);
@ -1177,7 +1201,7 @@ void RgbdOdometry::checkParams() const
CV_Assert(minGradientMagnitudes.size() == iterCounts.size() || minGradientMagnitudes.size() == iterCounts.t().size());
}
bool RgbdOdometry::computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt, const Mat& initRt) const
bool RgbdOdometry::computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt, const Mat& initRt) const
{
return RGBDICPOdometryImpl(Rt, initRt, srcFrame, dstFrame, cameraMatrix, (float)maxDepthDiff, iterCounts, maxTranslation, maxRotation, RGBD_ODOMETRY, transformType);
}
@ -1204,6 +1228,12 @@ ICPOdometry::ICPOdometry(const Mat& _cameraMatrix,
setDefaultIterCounts(iterCounts);
}
Ptr<ICPOdometry> ICPOdometry::create(const Mat& _cameraMatrix, float _minDepth, float _maxDepth,
float _maxDepthDiff, float _maxPointsPart, const std::vector<int>& _iterCounts,
int _transformType) {
return makePtr<ICPOdometry>(_cameraMatrix, _minDepth, _maxDepth, _maxDepthDiff, _maxPointsPart, _iterCounts, _transformType);
}
Size ICPOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const
{
Odometry::prepareFrameCache(frame, cacheType);
@ -1276,7 +1306,7 @@ void ICPOdometry::checkParams() const
CV_Assert(cameraMatrix.size() == Size(3,3) && (cameraMatrix.type() == CV_32FC1 || cameraMatrix.type() == CV_64FC1));
}
bool ICPOdometry::computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt, const Mat& initRt) const
bool ICPOdometry::computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt, const Mat& initRt) const
{
return RGBDICPOdometryImpl(Rt, initRt, srcFrame, dstFrame, cameraMatrix, (float)maxDepthDiff, iterCounts, maxTranslation, maxRotation, ICP_ODOMETRY, transformType);
}
@ -1309,6 +1339,13 @@ RgbdICPOdometry::RgbdICPOdometry(const Mat& _cameraMatrix,
}
}
Ptr<RgbdICPOdometry> RgbdICPOdometry::create(const Mat& _cameraMatrix, float _minDepth, float _maxDepth,
float _maxDepthDiff, float _maxPointsPart, const std::vector<int>& _iterCounts,
const std::vector<float>& _minGradientMagnitudes,
int _transformType) {
return makePtr<RgbdICPOdometry>(_cameraMatrix, _minDepth, _maxDepth, _maxDepthDiff, _maxPointsPart, _iterCounts, _minGradientMagnitudes, _transformType);
}
Size RgbdICPOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const
{
if(frame->image.empty())
@ -1397,7 +1434,7 @@ void RgbdICPOdometry::checkParams() const
CV_Assert(minGradientMagnitudes.size() == iterCounts.size() || minGradientMagnitudes.size() == iterCounts.t().size());
}
bool RgbdICPOdometry::computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, Mat& Rt, const Mat& initRt) const
bool RgbdICPOdometry::computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt, const Mat& initRt) const
{
return RGBDICPOdometryImpl(Rt, initRt, srcFrame, dstFrame, cameraMatrix, (float)maxDepthDiff, iterCounts, maxTranslation, maxRotation, MERGED_ODOMETRY, transformType);
}

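Editor's note: the Mat& to OutputArray migration above follows the standard OpenCV idiom: allocate the output through the proxy, then copy into its backing Mat. In isolation:

#include <opencv2/core.hpp>

// Write a computed result into an OutputArray, as RGBDICPOdometryImpl now does.
static void writeResult(const cv::Mat &resultRt, cv::OutputArray _Rt)
{
    _Rt.create(resultRt.size(), resultRt.type());
    resultRt.copyTo(_Rt.getMat());
}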
@ -79,17 +79,17 @@ public:
* @param setMarkers Allows setting markers on the patterns.
* @param markersLocation vector used to store marker locations on the patterns.
*/
struct CV_EXPORTS Params
struct CV_EXPORTS_W Params
{
Params();
int width;
int height;
int nbrOfPeriods;
float shiftValue;
int methodId;
int nbrOfPixelsBetweenMarkers;
bool horizontal;
bool setMarkers;
CV_WRAP Params();
CV_PROP_RW int width;
CV_PROP_RW int height;
CV_PROP_RW int nbrOfPeriods;
CV_PROP_RW float shiftValue;
CV_PROP_RW int methodId;
CV_PROP_RW int nbrOfPixelsBetweenMarkers;
CV_PROP_RW bool horizontal;
CV_PROP_RW bool setMarkers;
std::vector<Point2f> markersLocation;
};
/**
@ -97,8 +97,8 @@ public:
* @param parameters SinusoidalPattern parameters SinusoidalPattern::Params: width and height of the projector, and pattern parameters.
*
*/
static Ptr<SinusoidalPattern> create( const SinusoidalPattern::Params &parameters =
SinusoidalPattern::Params() );
CV_WRAP static Ptr<SinusoidalPattern> create( Ptr<SinusoidalPattern::Params> parameters =
makePtr<SinusoidalPattern::Params>() );
/**
* @brief Compute a wrapped phase map from sinusoidal patterns.
* @param patternImages Input data to compute the wrapped phase map.

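Editor's note: create now takes a Ptr<Params> (so the Params struct can be wrapped for the bindings); the implementation dereferences it, as the sinusoidalpattern.cpp hunk below shows. A hedged usage sketch:

#include <opencv2/core.hpp>
#include <opencv2/structured_light.hpp>

void makePattern()
{
    cv::Ptr<cv::structured_light::SinusoidalPattern::Params> params =
        cv::makePtr<cv::structured_light::SinusoidalPattern::Params>();
    params->methodId = 1; // PSP, following the test further below
    cv::Ptr<cv::structured_light::SinusoidalPattern> pattern =
        cv::structured_light::SinusoidalPattern::create(params);
    (void)pattern;
}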
@ -103,7 +103,8 @@ int main(int argc, char **argv)
String outputUnwrappedPhasePath = parser.get<String>(9);
String reliabilitiesPath = parser.get<String>(10);
Ptr<structured_light::SinusoidalPattern> sinus = structured_light::SinusoidalPattern::create(params);
Ptr<structured_light::SinusoidalPattern> sinus =
structured_light::SinusoidalPattern::create(makePtr<structured_light::SinusoidalPattern::Params>(params));
Ptr<phase_unwrapping::HistogramPhaseUnwrapping> phaseUnwrapping;
vector<Mat> patterns;
@ -332,4 +333,4 @@ int main(int argc, char **argv)
}
}
return 0;
}
}

@ -911,9 +911,9 @@ void SinusoidalPatternProfilometry_Impl::convertToAbsolutePhaseMap( InputArrayOf
computeCorrespondEpilines(params.markersLocation, 2, fundamental, epilines);
}
Ptr<SinusoidalPattern> SinusoidalPattern::create( const SinusoidalPattern::Params &params )
Ptr<SinusoidalPattern> SinusoidalPattern::create( Ptr<SinusoidalPattern::Params> params )
{
return makePtr<SinusoidalPatternProfilometry_Impl>(params);
return makePtr<SinusoidalPatternProfilometry_Impl>(*params);
}
}
}
}

@ -54,10 +54,13 @@ TEST( SinusoidalPattern, unwrapPhaseMap )
{
string folder = cvtest::TS::ptr()->get_data_path() + "/" + STRUCTURED_LIGHT_DIR + "/" + FOLDER_DATA + "/";
structured_light::SinusoidalPattern::Params paramsPsp, paramsFtp, paramsFaps;
paramsFtp.methodId = 0;
paramsPsp.methodId = 1;
paramsFaps.methodId = 2;
Ptr<structured_light::SinusoidalPattern::Params> paramsPsp, paramsFtp, paramsFaps;
paramsPsp = makePtr<structured_light::SinusoidalPattern::Params>();
paramsFtp = makePtr<structured_light::SinusoidalPattern::Params>();
paramsFaps = makePtr<structured_light::SinusoidalPattern::Params>();
paramsFtp->methodId = 0;
paramsPsp->methodId = 1;
paramsFaps->methodId = 2;
Ptr<structured_light::SinusoidalPattern> sinusPsp = structured_light::SinusoidalPattern::create(paramsPsp);
Ptr<structured_light::SinusoidalPattern> sinusFtp = structured_light::SinusoidalPattern::create(paramsFtp);

@ -0,0 +1,34 @@
@inproceedings{Neumann12,
author = {Neumann, L. and Matas, J.},
title = {Real-Time Scene Text Localization and Recognition},
booktitle = {Computer Vision and Pattern Recognition (CVPR), 2012 IEEE Conference on},
pages = {3538--3545},
year = {2012},
organization = {IEEE}
}
@inproceedings{Neumann11,
author = {Lukáš Neumann and Jiří Matas},
title = {Text localization in real-world images using efficiently pruned exhaustive search},
booktitle = {Document Analysis and Recognition (ICDAR), 2011 International Conference on},
organization = {IEEE},
year = {2011},
pages = {687--691}
}
@inproceedings{Gomez13,
author = {G\'{o}mez, Llu\'{\i}s and Karatzas, Dimosthenis},
title={Multi-script Text Extraction from Natural Scenes},
booktitle = {Proceedings of the 2013 12th International Conference on Document Analysis and Recognition},
series = {ICDAR '13},
year = {2013},
isbn = {978-0-7695-4999-6},
pages = {467--471},
publisher = {IEEE Computer Society}
}
@article{Gomez14,
author = {Lluis Gomez i Bigorda and
Dimosthenis Karatzas},
title = {A Fast Hierarchical Method for Multi-script and Arbitrary Oriented
Scene Text Extraction},
journal = {CoRR},
volume = {abs/1407.7504},
year = {2014},
}

@ -55,7 +55,7 @@ Class-specific Extremal Regions for Scene Text Detection
--------------------------------------------------------
The scene text detection algorithm described below was initially proposed by Lukáš Neumann &
Jiri Matas [Neumann12]. The main idea behind Class-specific Extremal Regions is similar to the MSER
Jiri Matas @cite Neumann11. The main idea behind Class-specific Extremal Regions is similar to the MSER
in that suitable Extremal Regions (ERs) are selected from the whole component tree of the image.
However, this technique differs from MSER in that selection of suitable ERs is done by a sequential
classifier trained for character detection, i.e. dropping the stability requirement of MSERs and
@ -88,9 +88,9 @@ order to increase the character localization recall.
After the ER filtering is done on each input channel, character candidates must be grouped in
high-level text blocks (i.e. words, text lines, paragraphs, ...). The opencv_text module implements
two different grouping algorithms: the Exhaustive Search algorithm proposed in [Neumann11] for
two different grouping algorithms: the Exhaustive Search algorithm proposed in @cite Neumann12 for
grouping horizontally aligned text, and the method proposed by Lluis Gomez and Dimosthenis Karatzas
in [Gomez13][Gomez14] for grouping arbitrary oriented text (see erGrouping).
in @cite Gomez13 @cite Gomez14 for grouping arbitrary oriented text (see erGrouping).
To see the text detector at work, have a look at the textdetection demo:
<https://github.com/Itseez/opencv_contrib/blob/master/modules/text/samples/textdetection.cpp>

@ -112,7 +112,7 @@ public:
ERStat* min_probability_ancestor;
};
/** @brief Base class for 1st and 2nd stages of Neumann and Matas scene text detection algorithm [Neumann12]. :
/** @brief Base class for 1st and 2nd stages of Neumann and Matas scene text detection algorithm @cite Neumann12. :
Extracts the component tree (if needed) and filters the extremal regions (ERs) using a given classifier.
*/
@ -164,31 +164,8 @@ public:
};
/*!
Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
The component tree of the image is extracted by a threshold increased step by step
from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness,
number of holes, and number of horizontal crossings) are computed for each ER
and used as features for a classifier which estimates the class-conditional
probability P(er|character). The value of P(er|character) is tracked using the inclusion
relation of ER across all thresholds and only the ERs which correspond to local maximum
of the probability P(er|character) are selected (if the local maximum of the
probability is above a global limit pmin and the difference between local maximum and
local minimum is greater than minProbabilityDiff).
@param cb Callback with the classifier. Default classifier can be implicitly loaded with the function
loadClassifierNM1(), e.g. from file in samples/cpp/trained_classifierNM1.xml
@param thresholdDelta Threshold step in subsequent thresholds when extracting the component tree
@param minArea The minimum area (% of image size) allowed for retrieved ERs
@param maxArea The maximum area (% of image size) allowed for retrieved ERs
@param minProbability The minimum probability P(er|character) allowed for retrieved ERs
@param nonMaxSuppression Whether non-maximum suppression is done over the branch probabilities
@param minProbabilityDiff The minimum probability difference between local maxima and local minima ERs
*/
/** @brief Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm [Neumann12].
/** @brief Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm @cite Neumann12.
@param cb : Callback with the classifier. The default classifier can be implicitly loaded with the function
loadClassifierNM1, e.g. from the file in samples/cpp/trained_classifierNM1.xml
@ -214,7 +191,7 @@ CV_EXPORTS_W Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb,
bool nonMaxSuppression = true,
float minProbabilityDiff = (float)0.1);
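The selection rule described in the long comment above can be made concrete. The following is an illustrative sketch, not the module's internal implementation: walking the probabilities along one inclusion branch of the component tree, it keeps the indices whose P(er|character) is a local maximum above pmin with prominence over the preceding local minimum greater than minProbabilityDiff.

#include <vector>
#include <cstddef>

// Illustrative only: p[i] = P(er|character) along one ER inclusion branch.
std::vector<size_t> selectLocalMaxima(const std::vector<float>& p,
                                      float pmin, float minProbabilityDiff)
{
    std::vector<size_t> kept;
    float lastMin = 0.0f;   // most recent local minimum seen so far
    for (size_t i = 0; i < p.size(); ++i)
    {
        bool isMax = (i == 0 || p[i] >= p[i-1]) &&
                     (i + 1 == p.size() || p[i] > p[i+1]);
        bool isMin = (i == 0 || p[i] <= p[i-1]) &&
                     (i + 1 == p.size() || p[i] < p[i+1]);
        if (isMax && p[i] > pmin && p[i] - lastMin > minProbabilityDiff)
            kept.push_back(i);
        if (isMin)
            lastMin = p[i];
    }
    return kept;
}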
/** @brief Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm [Neumann12].
/** @brief Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm @cite Neumann12.
@param cb : Callback with the classifier. The default classifier can be implicitly loaded with the function
loadClassifierNM2, e.g. from the file in samples/cpp/trained_classifierNM2.xml
@ -269,7 +246,7 @@ enum { ERFILTER_NM_RGBLGrad,
ERFILTER_NM_IHSGrad
};
/** @brief Compute the different channels to be processed independently in the N&M algorithm [Neumann12].
/** @brief Compute the different channels to be processed independently in the N&M algorithm @cite Neumann12.
@param _src Source image. Must be RGB CV_8UC3.
@ -290,7 +267,7 @@ CV_EXPORTS_W void computeNMChannels(InputArray _src, CV_OUT OutputArrayOfArrays
//! text::erGrouping operation modes
enum erGrouping_Modes {
/** Exhaustive Search algorithm proposed in [Neumann11] for grouping horizontally aligned text.
/** Exhaustive Search algorithm proposed in @cite Neumann11 for grouping horizontally aligned text.
The algorithm models a verification function for all the possible ER sequences. The
verification function for ER pairs consists of a set of threshold-based pairwise rules which
compare measurements of two regions (height ratio, centroid angle, and region distance). The
@ -301,7 +278,7 @@ enum erGrouping_Modes {
consistent.
*/
ERGROUPING_ORIENTATION_HORIZ,
/** Text grouping method proposed in [Gomez13][Gomez14] for grouping arbitrary oriented text. Regions
/** Text grouping method proposed in @cite Gomez13 @cite Gomez14 for grouping arbitrary oriented text. Regions
are agglomerated by Single Linkage Clustering in a weighted feature space that combines proximity
(x,y coordinates) and similarity measures (color, size, gradient magnitude, stroke width, etc.).
SLC provides a dendrogram where each node represents a text group hypothesis. Then the algorithm
@ -376,8 +353,8 @@ CV_EXPORTS_W void detectRegions(InputArray image, const Ptr<ERFilter>& er_filter
/** @brief Extracts text regions from image.
@param image Source image from which text blocks need to be extracted. Should be CV_8UC3 (color).
@param er_filter1 Extremal Region Filter for the 1st stage classifier of N&M algorithm [Neumann12]
@param er_filter2 Extremal Region Filter for the 2nd stage classifier of N&M algorithm [Neumann12]
@param er_filter1 Extremal Region Filter for the 1st stage classifier of N&M algorithm @cite Neumann12
@param er_filter2 Extremal Region Filter for the 2nd stage classifier of N&M algorithm @cite Neumann12
@param groups_rects Output list of rectangle blocks with text
@param method Grouping method (see text::erGrouping_Modes). Can be one of ERGROUPING_ORIENTATION_HORIZ, ERGROUPING_ORIENTATION_ANY.
@param filename The XML or YAML file with the classifier model (e.g. samples/trained_classifier_erGrouping.xml). Only to use when grouping method is ERGROUPING_ORIENTATION_ANY.
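A usage sketch for this convenience function, continuing the pipeline example above (er1, er2, src as created there; rectangle needs opencv2/imgproc.hpp). The parameter order follows the documentation in this hunk; treat it as a sketch rather than a verified signature:

// Group detected characters straight into word/line rectangles.
std::vector<Rect> words;
detectRegions(src, er1, er2, words, ERGROUPING_ORIENTATION_HORIZ);
for (size_t i = 0; i < words.size(); i++)
    rectangle(src, words[i], Scalar(0, 255, 0), 2);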

@ -164,6 +164,7 @@ public:
(e.g. words or text lines).
@param component_level OCR_LEVEL_WORD (by default), or OCR_LEVEL_TEXT_LINE.
*/
using BaseOCR::run;
virtual void run (Mat& image, std::string& output_text,
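The added using-declaration fixes a classic C++ pitfall rather than adding functionality: any run(...) overload declared in a derived class hides all base-class run(...) overloads. A minimal illustration with hypothetical signatures (not the module's actual classes):

struct BaseOCR
{
    virtual ~BaseOCR() {}
    virtual void run(int component_level) { (void)component_level; }
};

struct MyOCR : public BaseOCR
{
    using BaseOCR::run;                                   // re-exposes run(int)
    virtual void run(const char* text) { (void)text; }    // new overload
};

// Without the using-declaration, MyOCR().run(0) would fail to compile,
// because run(const char*) hides the inherited run(int).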

@ -46,6 +46,9 @@
#include "opencv2/text.hpp"
#ifdef HAVE_TESSERACT
#if !defined(USE_STD_NAMESPACE)
#define USE_STD_NAMESPACE
#endif
#include <tesseract/baseapi.h>
#include <tesseract/resultiterator.h>
#endif

@ -165,7 +165,7 @@ struct AlgoWrap
void plotLTRC(Mat &img) const
{
Ptr<plot::Plot2d> p_ = plot::createPlot2d(getLTRC());
Ptr<plot::Plot2d> p_ = plot::Plot2d::create(getLTRC());
p_->render(img);
}

@ -223,8 +223,8 @@ namespace cv{
// create gaussian response
y=Mat::zeros((int)roi.height,(int)roi.width,CV_64F);
for(unsigned i=0;i<roi.height;i++){
for(unsigned j=0;j<roi.width;j++){
for(unsigned i=0;i<(unsigned)roi.height;i++){
for(unsigned j=0;j<(unsigned)roi.width;j++){
y.at<double>(i,j)=(i-roi.height/2+1)*(i-roi.height/2+1)+(j-roi.width/2+1)*(j-roi.width/2+1);
}
}
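The casts above only silence signed/unsigned comparison warnings (Rect fields are int); the computed values are unchanged. For context, a squared-distance map y like the one built here is typically turned into the Gaussian regression target of KCF-style trackers along these lines. A hedged sketch continuing from the loop above (needs <cmath>; the sigma value is illustrative, not the module's default):

double sigma = 0.2 * std::sqrt((double)roi.width * roi.height);  // illustrative bandwidth
Mat target;
cv::exp(y * (-0.5 / (sigma * sigma)), target);  // target(i,j) = exp(-d2/(2*sigma^2))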

@ -1,6 +1,6 @@
set(the_description "Contributed/Experimental Algorithms for Salient 2D Features Detection")
ocv_define_module(xfeatures2d opencv_core opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_shape opencv_cudaarithm WRAP python java)
ocv_define_module(xfeatures2d opencv_core opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_shape opencv_ml opencv_cudaarithm WRAP python java)
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/download_vgg.cmake)
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/download_boostdesc.cmake)

@ -1,8 +1,8 @@
#include <iostream>
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_ML
#include "opencv2/opencv_modules.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

@ -2,6 +2,7 @@
* shape_context.cpp -- Shape context demo for shape matching
*/
#include <iostream>
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_SHAPE
@ -11,7 +12,7 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/core/utility.hpp"
#include <string>
using namespace std;

@ -6,6 +6,7 @@
*/
#include <iostream>
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_CALIB3D

@ -327,7 +327,7 @@ protected:
stringstream ss;
ss << "Max distance between valid and computed descriptors " << curMaxDist;
if( curMaxDist < maxDist )
if( curMaxDist <= maxDist )
ss << "." << endl;
else
{
@ -1020,7 +1020,7 @@ TEST( Features2d_Detector_Harris_Laplace_Affine, regression )
*/
TEST( Features2d_DescriptorExtractor_SIFT, regression )
{
CV_DescriptorExtractorTest<L2<float> > test( "descriptor-sift", 0.03f,
CV_DescriptorExtractorTest<L1<float> > test( "descriptor-sift", 1.0f,
SIFT::create() );
test.safe_run();
}

@ -163,14 +163,7 @@ namespace cv {
for (int r = 0; r < nb_segs; r++) {
// Generate mask
Mat mask = Mat(img.rows, img.cols, CV_8UC1);
int* regions_data = (int*)regions.data;
char* mask_data = (char*)mask.data;
for (unsigned int x = 0; x < regions.total(); x++) {
mask_data[x] = regions_data[x] == r ? 255 : 0;
}
Mat mask = regions == r;
// Compute histogram for each channel
float tt = 0;
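The replacement one-liner leans on Mat's element-wise comparison operators: for an integer label image, regions == r produces a CV_8UC1 mask holding 255 where the label matches and 0 elsewhere, which is exactly what the removed raw-pointer loop computed. A standalone illustration with made-up labels:

#include "opencv2/core.hpp"
using namespace cv;

int main()
{
    Mat labels = (Mat_<int>(2, 2) << 0, 1,
                                     1, 2);
    Mat mask = (labels == 1);   // CV_8UC1: [0 255; 255 0]
    return 0;
}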

@ -581,8 +581,8 @@ public:
{
int xysign = -((oxy[i] > 0) - (oxy[i] < 0));
o[i] = (atan((oyy[i] * xysign / (oxx[i] + 1e-5))) > 0) ? (float) fmod(
atan((oyy[i] * xysign / (oxx[i] + 1e-5))), M_PI) : (float) fmod(
atan((oyy[i] * xysign / (oxx[i] + 1e-5))) + M_PI, M_PI);
atan((oyy[i] * xysign / (oxx[i] + 1e-5))), CV_PI) : (float) fmod(
atan((oyy[i] * xysign / (oxx[i] + 1e-5))) + CV_PI, CV_PI);
}
}
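Two things happen in this hunk: M_PI (a POSIX extension that MSVC only exposes with _USE_MATH_DEFINES) is replaced by the always-available CV_PI from opencv2/core, and the ternary folds atan's (-pi/2, pi/2) output into [0, pi) so edge orientations are unsigned. The folding logic in isolation, as an illustrative helper equivalent to the expression above:

#include <opencv2/core.hpp>   // CV_PI
#include <cmath>

// Fold an angle from atan (range (-pi/2, pi/2)) into [0, pi).
static inline float foldOrientation(double a)
{
    double m = std::fmod(a, CV_PI);
    return (float)(m >= 0 ? m : m + CV_PI);
}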
@ -729,65 +729,73 @@ protected:
offsetY[n] = x2*features.cols*nchannels + y2*nchannels + z;
}
// lookup tables for mapping linear index to offset pairs
#ifdef _OPENMP
#pragma omp parallel for
#ifdef CV_CXX11
parallel_for_(cv::Range(0, height), [&](const cv::Range& range)
#else
const cv::Range range(0, height);
#endif
for (int i = 0; i < height; ++i)
{
float *regFeaturesPtr = regFeatures.ptr<float>(i*stride/shrink);
float *ssFeaturesPtr = ssFeatures.ptr<float>(i*stride/shrink);
int *indexPtr = indexes.ptr<int>(i);
for(int i = range.start; i < range.end; ++i) {
float *regFeaturesPtr = regFeatures.ptr<float>(i*stride/shrink);
float *ssFeaturesPtr = ssFeatures.ptr<float>(i*stride/shrink);
for (int j = 0, k = 0; j < width; ++k, j += !(k %= nTreesEval))
// for j,k in [0;width)x[0;nTreesEval)
{
int baseNode = ( ((i + j)%(2*nTreesEval) + k)%nTrees )*nTreesNodes;
int currentNode = baseNode;
// select root node of the tree to evaluate
int *indexPtr = indexes.ptr<int>(i);
int offset = (j*stride/shrink)*nchannels;
while ( __rf.childs[currentNode] != 0 )
for (int j = 0, k = 0; j < width; ++k, j += !(k %= nTreesEval))
// for j,k in [0;width)x[0;nTreesEval)
{
int currentId = __rf.featureIds[currentNode];
float currentFeature;
int baseNode = ( ((i + j)%(2*nTreesEval) + k)%nTrees )*nTreesNodes;
int currentNode = baseNode;
// select root node of the tree to evaluate
if (currentId >= nFeatures)
int offset = (j*stride/shrink)*nchannels;
while ( __rf.childs[currentNode] != 0 )
{
int xIndex = offsetX[currentId - nFeatures];
float A = ssFeaturesPtr[offset + xIndex];
int yIndex = offsetY[currentId - nFeatures];
float B = ssFeaturesPtr[offset + yIndex];
currentFeature = A - B;
int currentId = __rf.featureIds[currentNode];
float currentFeature;
if (currentId >= nFeatures)
{
int xIndex = offsetX[currentId - nFeatures];
float A = ssFeaturesPtr[offset + xIndex];
int yIndex = offsetY[currentId - nFeatures];
float B = ssFeaturesPtr[offset + yIndex];
currentFeature = A - B;
}
else
currentFeature = regFeaturesPtr[offset + offsetI[currentId]];
// compare feature to threshold and move left or right accordingly
if (currentFeature < __rf.thresholds[currentNode])
currentNode = baseNode + __rf.childs[currentNode] - 1;
else
currentNode = baseNode + __rf.childs[currentNode];
}
else
currentFeature = regFeaturesPtr[offset + offsetI[currentId]];
// compare feature to threshold and move left or right accordingly
if (currentFeature < __rf.thresholds[currentNode])
currentNode = baseNode + __rf.childs[currentNode] - 1;
else
currentNode = baseNode + __rf.childs[currentNode];
}
indexPtr[j*nTreesEval + k] = currentNode;
indexPtr[j*nTreesEval + k] = currentNode;
}
}
}
#ifdef CV_CXX11
);
#endif
NChannelsMat dstM(dst.size(),
CV_MAKETYPE(DataType<float>::type, outNum));
dstM.setTo(0);
float step = 2.0f * CV_SQR(stride) / CV_SQR(ipSize) / nTreesEval;
#ifdef _OPENMP
#pragma omp parallel for
#ifdef CV_CXX11
parallel_for_(cv::Range(0, height), [&](const cv::Range& range)
#endif
for (int i = 0; i < height; ++i)
{
int *pIndex = indexes.ptr<int>(i);
float *pDst = dstM.ptr<float>(i*stride);
for(int i = range.start; i < range.end; ++i)
{
int *pIndex = indexes.ptr<int>(i);
float *pDst = dstM.ptr<float>(i*stride);
for (int j = 0, k = 0; j < width; ++k, j += !(k %= nTreesEval))
{// for j,k in [0;width)x[0;nTreesEval)
@ -804,7 +812,11 @@ protected:
for (int p = start; p < finish; ++p)
pDst[offset + offsetE[__rf.edgeBins[p]]] += step;
}
}
}
#ifdef CV_CXX11
);
#endif
cv::reduce( dstM.reshape(1, int( dstM.total() ) ), dstM, 2, CV_REDUCE_SUM);
imsmooth( dstM.reshape(1, dst.rows), 1 ).copyTo(dst);
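The conversion pattern used in both loops above deserves a self-contained statement, since the preprocessor guards make it hard to read inside the diff: under CV_CXX11 the row loop body becomes a lambda handed to cv::parallel_for_, and otherwise the identical body runs serially over a single full-height Range. A skeleton, with a hypothetical work() standing in for the per-row computation:

#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>   // cv::parallel_for_

static void work(int row) { (void)row; /* per-row computation */ }

static void processRows(int height)
{
#ifdef CV_CXX11
    cv::parallel_for_(cv::Range(0, height), [&](const cv::Range& range)
#else
    const cv::Range range(0, height);
#endif
    {
        for (int i = range.start; i < range.end; ++i)
            work(i);
    }
#ifdef CV_CXX11
    );   // closes the lambda and the parallel_for_ call
#endif
}

Both expansions compile to the same loop body, so the parallel and serial builds stay in sync by construction; that is the design choice this hunk makes over the previous OpenMP pragma.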
