mirror of https://github.com/opencv/opencv.git
parent 04ebef1be2
commit 2842d34611
17 changed files with 6228 additions and 0 deletions

@@ -0,0 +1,222 @@
/*#******************************************************************************
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
**
** By downloading, copying, installing or using the software you agree to this license.
** If you do not agree to this license, do not download, install,
** copy or use the software.
**
**
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. The presented models originate from Jeanny Herault's original research and have been reused and adapted by the author & collaborators for computer vision applications since his thesis with Alice Caplier at Gipsa-Lab.
** Use: extract still image & image sequence features, from contour details to spatio-temporal motion features, etc., for high level visual scene analysis. It also contributes to image enhancement/compression such as tone mapping.
**
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications)
**
** Creation - enhancement process 2007-2011
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France
**
** These algorithms have been developed by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr).
** Refer to the following research paper for more information:
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
** This work has been carried out thanks to Jeanny Herault, whose research and great discussions are the basis of all this work; please take a look at his book:
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing), By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
**
** The retina filter includes the research contributions of PhD/research colleagues from which code has been redrawn by the author:
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene's color mosaicing/demosaicing and the reference paper:
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette's PhD with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.
** ====> more information in the above cited Jeanny Herault's book.
**
** License Agreement
** For Open Source Computer Vision Library
**
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
**
** For Human Visual System tools (hvstools)
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved.
**
** Third party copyrights are property of their respective owners.
**
** Redistribution and use in source and binary forms, with or without modification,
** are permitted provided that the following conditions are met:
**
** * Redistributions of source code must retain the above copyright notice,
** this list of conditions and the following disclaimer.
**
** * Redistributions in binary form must reproduce the above copyright notice,
** this list of conditions and the following disclaimer in the documentation
** and/or other materials provided with the distribution.
**
** * The name of the copyright holders may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** This software is provided by the copyright holders and contributors "as is" and
** any express or implied warranties, including, but not limited to, the implied
** warranties of merchantability and fitness for a particular purpose are disclaimed.
** In no event shall the Intel Corporation or contributors be liable for any direct,
** indirect, incidental, special, exemplary, or consequential damages
** (including, but not limited to, procurement of substitute goods or services;
** loss of use, data, or profits; or business interruption) however caused
** and on any theory of liability, whether in contract, strict liability,
** or tort (including negligence or otherwise) arising in any way out of
** the use of this software, even if advised of the possibility of such damage.
*******************************************************************************/
#ifndef __OPENCV_CONTRIB_RETINA_HPP__
#define __OPENCV_CONTRIB_RETINA_HPP__

/*
 * Retina.hpp
 *
 *  Created on: Jul 19, 2011
 *      Author: Alexandre Benoit
 */

#include "opencv2/core/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
#include <valarray>

namespace cv
{

enum RETINA_COLORSAMPLINGMETHOD
{
    RETINA_COLOR_RANDOM, /// each pixel position is either R, G or B in a random choice
    RETINA_COLOR_DIAGONAL,/// color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR...
    RETINA_COLOR_BAYER/// standard Bayer sampling
};

class RetinaFilter;
/**
 * @brief a wrapper class which allows the use of the Gipsa/Listic Labs retina model
 * @class Retina : a wrapper class which allows the Gipsa/Listic Labs model to be used.
 * This retina model allows spatio-temporal image processing (applied on still images as well as video sequences).
 * As a summary, these are the retina model properties:
 * => it applies a spectral whitening (mid-frequency details enhancement)
 * => high frequency spatio-temporal noise reduction
 * => low frequency luminance is reduced (luminance range compression)
 * => local logarithmic luminance compression allows details to be enhanced in low light conditions
 *
 * For more information, refer to the following papers:
 * Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing), By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
 */
class CV_EXPORTS Retina {

public:

    /**
     * Main constructor with the most common use setup : create an instance of a color-ready retina model
     * @param inputSize : the input frame size
     */
    Retina(const std::string parametersSaveFile, Size inputSize);

    /**
     * Complete Retina filter constructor which allows all basic structural parameters definition
     * @param inputSize : the input frame size
     * @param colorMode : the chosen processing mode : with or without color processing
     * @param colorSamplingMethod: specifies which kind of color sampling will be used
     * @param useRetinaLogSampling: activate retina log sampling; if true, the 2 following parameters can be used
     * @param reductionFactor: only useful if param useRetinaLogSampling=true, specifies the reduction factor of the output frame (as the center (fovea) is high resolution and corners can be underscaled, a reduction of the output is allowed without precision loss)
     * @param samplingStrenght: only useful if param useRetinaLogSampling=true, specifies the strength of the log scale that is applied
     */
    Retina(const std::string parametersSaveFile, Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);

    virtual ~Retina();

    /**
     * try to open an XML retina parameters file to adjust the current retina instance setup
     * => if the XML file does not exist, then the default setup is applied
     * => warning: exceptions are thrown if the read XML file is not valid
     * @param retinaParameterFile : the parameters filename
     */
    void setup(std::string retinaParameterFile="", const bool applyDefaultSetupOnFailure=true);
    /**
     * parameters setup display method
     * @return a string which contains formatted parameters information
     */
    const std::string printSetup();

    /**
     * setup the OPL and IPL parvo channels (see biological model)
     * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance (low frequency energy)
     * IPL parvo is the next processing stage after the OPL; it refers to the Inner Plexiform Layer of the retina and allows high contour sensitivity in foveal vision.
     * for more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
     * @param colorMode : specifies whether (true) color is processed or not (false); in the latter case gray level images are processed
     * @param normaliseOutput : specifies whether (true) output is rescaled between 0 and 255 or not (false)
     * @param photoreceptorsLocalAdaptationSensitivity: the photoreceptors sensitivity range is 0-1 (more log compression effect when the value increases)
     * @param photoreceptorsTemporalConstant: the time constant of the first order low pass filter of the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 1 frame
     * @param photoreceptorsSpatialConstant: the spatial constant of the first order low pass filter of the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 1 pixel
     * @param horizontalCellsGain: gain of the horizontal cells network; if 0, then the mean value of the output is zero; if the parameter is near 1, then the luminance is not filtered and is still reachable at the output, typical value is 0
     * @param HcellsTemporalConstant: the time constant of the first order low pass filter of the horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is frames, typical value is 1 frame, as for the photoreceptors
     * @param HcellsSpatialConstant: the spatial constant of the first order low pass filter of the horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels, typical value is 5 pixels; this value is also used for local contrast computing when computing the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular channel model)
     * @param ganglionCellsSensitivity: the compression strength of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... and the output saturates faster, recommended value: 230
     */
    void setupOPLandIPLParvoChannel(const bool colorMode=true, const bool normaliseOutput = true, const double photoreceptorsLocalAdaptationSensitivity=0.7, const double photoreceptorsTemporalConstant=0.5, const double photoreceptorsSpatialConstant=0.53, const double horizontalCellsGain=0, const double HcellsTemporalConstant=1, const double HcellsSpatialConstant=7, const double ganglionCellsSensitivity=0.7);
    /**
     * set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
     * this channel processes signals output from the OPL processing stage in peripheral vision; it allows motion information enhancement and is decorrelated from the details channel. See the reference paper for more details.
     * @param normaliseOutput : specifies whether (true) output is rescaled between 0 and 255 or not (false)
     * @param parasolCells_beta: the low pass filter gain used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), typical value is 0
     * @param parasolCells_tau: the low pass filter time constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is frames, typical value is 0 (immediate response)
     * @param parasolCells_k: the low pass filter spatial constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical value is 5
     * @param amacrinCellsTemporalCutFrequency: the time constant of the first order high pass filter of the magnocellular way (motion information channel), unit is frames, typical value is 5
     * @param V0CompressionParameter: the compression strength of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... and the output saturates faster, recommended value: 200
     * @param localAdaptintegration_tau: specifies the temporal constant of the low pass filter involved in the computation of the local "motion mean" for the local adaptation computation
     * @param localAdaptintegration_k: specifies the spatial constant of the low pass filter involved in the computation of the local "motion mean" for the local adaptation computation
     */
    void setupIPLMagnoChannel(const bool normaliseOutput = true, const double parasolCells_beta=0, const double parasolCells_tau=0, const double parasolCells_k=7, const double amacrinCellsTemporalCutFrequency=1.2, const double V0CompressionParameter=0.95, const double localAdaptintegration_tau=0, const double localAdaptintegration_k=7);

    /**
     * method which allows the retina to be applied on an input image
     * @param inputImage : the input image to be processed
     * once run, the encapsulated retina module is ready to deliver its outputs using the dedicated accessors; see the getParvo and getMagno methods
     */
    void run(const Mat &inputImage);
    /**
     * accessor of the details channel of the retina (models foveal vision)
     * @param retinaOutput_parvo : the output buffer (reallocated if necessary)
     */
    void getParvo(Mat &retinaOutput_parvo);

    /**
     * accessor of the motion channel of the retina (models peripheral vision)
     * @param retinaOutput_magno : the output buffer (reallocated if necessary)
     */
    void getMagno(Mat &retinaOutput_magno);

    void clearBuffers();
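    /* A minimal usage sketch (illustrative only, not part of the original header), assuming a camera
     * reachable through cv::VideoCapture and a hypothetical parameters file name; it simply chains
     * run() with the getParvo()/getMagno() accessors declared above:
     *
     *   #include "opencv2/opencv.hpp"   // assumed umbrella header pulling in this Retina declaration
     *   int main()
     *   {
     *       cv::VideoCapture capture(0);
     *       cv::Mat frame, parvo, magno;
     *       capture >> frame;                                     // grab one frame to learn the input size
     *       cv::Retina retina("retinaParams.xml", frame.size());  // color-ready instance (hypothetical file name)
     *       retina.setupOPLandIPLParvoChannel();                  // keep default parvo parameters (see docs above)
     *       retina.setupIPLMagnoChannel();                        // keep default magno parameters
     *       while (capture.read(frame))
     *       {
     *           retina.run(frame);        // feed the model with the new frame
     *           retina.getParvo(parvo);   // foveal details channel
     *           retina.getMagno(magno);   // peripheral motion channel
     *           cv::imshow("parvo", parvo);
     *           cv::imshow("magno", magno);
     *           if (cv::waitKey(5) >= 0) break;
     *       }
     *       return 0;
     *   }
     */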
protected:
    //// Parameters setup members
    // parameters file ... saved on instance delete
    FileStorage _parametersSaveFile;
    std::string _parametersSaveFileName;

    //// Retina model related modules
    // buffer that ensures library cross-compatibility
    std::valarray<double> _inputBuffer;

    // pointer to the retina model
    RetinaFilter* _retinaFilter;

    /**
     * exports a valarray buffer coming from HVStools objects to a cv::Mat in CV_8UC1 (gray level picture) or CV_8UC3 (color) format
     * @param grayMatrixToConvert the valarray to export to OpenCV
     * @param nbRows : the number of rows of the flattened valarray matrix
     * @param nbColumns : the number of columns of the flattened valarray matrix
     * @param colorMode : a flag which mentions if the matrix is color (true) or gray level (false)
     * @param outBuffer : the output matrix which is reallocated to satisfy Retina output buffer dimensions
     */
    void _convertValarrayGrayBuffer2cvMat(const std::valarray<double> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, Mat &outBuffer);

    // private method called by the constructors
    void _init(const std::string parametersSaveFile, Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);

};

}
#endif /* __OPENCV_CONTRIB_RETINA_HPP__ */
@@ -0,0 +1,867 @@
#include "precomp.hpp" |
||||
|
||||
#include <iostream> |
||||
#include <cstdlib> |
||||
#include "basicretinafilter.hpp" |
||||
#include <cmath> |
||||
|
||||
|
||||
namespace cv |
||||
{ |
||||
|
||||
// @author Alexandre BENOIT, benoit.alexandre.vision@gmail.com, LISTIC : www.listic.univ-savoie.fr Gipsa-Lab, France: www.gipsa-lab.inpg.fr/
|
||||
|
||||
//////////////////////////////////////////////////////////
|
||||
// BASIC RETINA FILTER
|
||||
//////////////////////////////////////////////////////////
|
||||
|
||||
// Constructor and destructor of the basic retina filter
|
||||
BasicRetinaFilter::BasicRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns, const unsigned int parametersListSize, const bool useProgressiveFilter) |
||||
:_filterOutput(NBrows, NBcolumns), |
||||
_localBuffer(NBrows*NBcolumns), |
||||
_filteringCoeficientsTable(3*parametersListSize), |
||||
_progressiveSpatialConstant(0),// pointer to a local table containing local spatial constant (allocated with the object)
|
||||
_progressiveGain(0) |
||||
{ |
||||
#ifdef T_BASIC_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"BasicRetinaFilter::BasicRetinaFilter: new filter, size="<<NBrows<<", "<<NBcolumns<<std::endl; |
||||
#endif |
||||
_halfNBrows=_filterOutput.getNBrows()/2; |
||||
_halfNBcolumns=_filterOutput.getNBcolumns()/2; |
||||
|
||||
if (useProgressiveFilter) |
||||
{ |
||||
#ifdef T_BASIC_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"BasicRetinaFilter::BasicRetinaFilter: _progressiveSpatialConstant_Tbuffer"<<std::endl; |
||||
#endif |
||||
_progressiveSpatialConstant.resize(_filterOutput.size()); |
||||
#ifdef T_BASIC_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"BasicRetinaFilter::BasicRetinaFilter: new _progressiveGain_Tbuffer"<<NBrows<<", "<<NBcolumns<<std::endl; |
||||
#endif |
||||
_progressiveGain.resize(_filterOutput.size()); |
||||
} |
||||
#ifdef T_BASIC_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"BasicRetinaFilter::BasicRetinaFilter: new filter, size="<<NBrows<<", "<<NBcolumns<<std::endl; |
||||
#endif |
||||
|
||||
// set default values
|
||||
_maxInputValue=256.0; |
||||
|
||||
// reset all buffers
|
||||
clearAllBuffers(); |
||||
|
||||
#ifdef T_BASIC_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"BasicRetinaFilter::Init BasicRetinaElement at specified frame size OK, size="<<this->size()<<std::endl; |
||||
#endif |
||||
|
||||
} |
||||
|
||||
BasicRetinaFilter::~BasicRetinaFilter() |
||||
{ |
||||
|
||||
#ifdef BASIC_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"BasicRetinaFilter::BasicRetinaElement Deleted OK"<<std::endl; |
||||
#endif |
||||
|
||||
} |
||||
|
||||
////////////////////////////////////
|
||||
// functions of the basic filter
|
||||
////////////////////////////////////
|
||||
|
||||
|
||||
// resize all allocated buffers
|
||||
void BasicRetinaFilter::resize(const unsigned int NBrows, const unsigned int NBcolumns) |
||||
{ |
||||
|
||||
std::cout<<"BasicRetinaFilter::resize( "<<NBrows<<", "<<NBcolumns<<")"<<std::endl; |
||||
|
||||
// resizing buffers
|
||||
_filterOutput.resizeBuffer(NBrows, NBcolumns); |
||||
|
||||
// updating variables
|
||||
_halfNBrows=_filterOutput.getNBrows()/2; |
||||
_halfNBcolumns=_filterOutput.getNBcolumns()/2; |
||||
|
||||
_localBuffer.resize(_filterOutput.size()); |
||||
// in case of spatial adapted filter
|
||||
if (_progressiveSpatialConstant.size()>0) |
||||
{ |
||||
_progressiveSpatialConstant.resize(_filterOutput.size()); |
||||
_progressiveGain.resize(_filterOutput.size()); |
||||
} |
||||
// reset buffers
|
||||
clearAllBuffers(); |
||||
} |
||||
|
||||
// Change coefficients table
|
||||
void BasicRetinaFilter::setLPfilterParameters(const double beta, const double tau, const double desired_k, const unsigned int filterIndex) |
||||
{ |
||||
double _beta = beta+tau; |
||||
double k=desired_k; |
||||
// check if the spatial constant is correct (avoid 0 value to avoid division by 0)
|
||||
if (desired_k<=0) |
||||
{ |
||||
k=0.001; |
||||
std::cerr<<"BasicRetinaFilter::spatial constant of the low pass filter must be greater than zero !!! correcting parameter setting to 0.001"<<std::endl;
||||
} |
||||
|
||||
double _alpha = k*k; |
||||
double _mu = 0.8; |
||||
unsigned int tableOffset=filterIndex*3; |
||||
if (k<=0) |
||||
{ |
||||
std::cerr<<"BasicRetinaFilter::spatial filtering coefficient must be superior to zero, correcting value to 0.01"<<std::endl; |
||||
_alpha=0.0001; |
||||
} |
||||
|
||||
double _temp = (1.0+_beta)/(2.0*_mu*_alpha); |
||||
double _a = _filteringCoeficientsTable[tableOffset] = 1.0 + _temp - sqrt( (1.0+_temp)*(1.0+_temp) - 1.0); |
||||
_filteringCoeficientsTable[1+tableOffset]=(1.0-_a)*(1.0-_a)*(1.0-_a)*(1.0-_a)/(1.0+_beta); |
||||
_filteringCoeficientsTable[2+tableOffset] =tau; |
||||
|
||||
//std::cout<<"BasicRetinaFilter::normal:"<<(1.0-_a)*(1.0-_a)*(1.0-_a)*(1.0-_a)/(1.0+_beta)<<" -> old:"<<(1-_a)*(1-_a)*(1-_a)*(1-_a)/(1+_beta)<<std::endl;
|
||||
|
||||
//std::cout<<"BasicRetinaFilter::_a="<<_a<<", gain="<<_filteringCoeficientsTable[1+tableOffset]<<", tau="<<tau<<std::endl;
|
||||
} |
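/* Illustrative sketch (not part of the original file): the three coefficients stored per filter slot
 * above drive a first order recursive smoother: a = table[offset] is the spatial recursion coefficient,
 * gain = table[offset+1] = (1-a)^4/(1+beta) normalises the cascade of four 1D sweeps, and
 * tau = table[offset+2] weights the value already stored in the output buffer (the previous frame's
 * result), which gives the filter its temporal memory. A self-contained 1D analogue of one causal plus
 * one anticausal sweep, under those assumptions:
 *
 *   #include <vector>
 *   static std::vector<double> lowPass1D(const std::vector<double> &x, double a, double gain)
 *   {
 *       std::vector<double> y(x.size(), 0.0);
 *       double r = 0.0;
 *       for (size_t n = 0; n < x.size(); ++n)          // causal sweep
 *           y[n] = r = x[n] + a * r;
 *       r = 0.0;
 *       for (size_t n = x.size(); n-- > 0; )           // anticausal sweep, scaled by the gain
 *           y[n] = gain * (r = y[n] + a * r);
 *       return y;
 *   }
 */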
||||
|
||||
void BasicRetinaFilter::setProgressiveFilterConstants_CentredAccuracy(const double beta, const double tau, const double alpha0, const unsigned int filterIndex) |
||||
{ |
||||
// check if dedicated buffers are already allocated, if not create them
|
||||
if (_progressiveSpatialConstant.size()!=_filterOutput.size()) |
||||
{ |
||||
_progressiveSpatialConstant.resize(_filterOutput.size()); |
||||
_progressiveGain.resize(_filterOutput.size()); |
||||
} |
||||
|
||||
double _beta = beta+tau; |
||||
double _mu=0.8; |
||||
if (alpha0<=0) |
||||
{ |
||||
std::cerr<<"BasicRetinaFilter::spatial filtering coefficient must be superior to zero, correcting value to 0.01"<<std::endl; |
||||
//alpha0=0.0001;
|
||||
} |
||||
|
||||
unsigned int tableOffset=filterIndex*3; |
||||
|
||||
double _alpha=0.8; |
||||
double _temp = (1.0+_beta)/(2.0*_mu*_alpha); |
||||
double _a=_filteringCoeficientsTable[tableOffset] = 1.0 + _temp - sqrt( (1.0+_temp)*(1.0+_temp) - 1.0); |
||||
_filteringCoeficientsTable[tableOffset+1]=(1.0-_a)*(1.0-_a)*(1.0-_a)*(1.0-_a)/(1.0+_beta); |
||||
_filteringCoeficientsTable[tableOffset+2] =tau; |
||||
|
||||
double commonFactor=alpha0/sqrt((double)(_halfNBcolumns*_halfNBcolumns+_halfNBrows*_halfNBrows)+1.0); |
||||
//memset(_progressiveSpatialConstant, 255, _filterOutput.getNBpixels());
|
||||
for (unsigned int idColumn=0;idColumn<_halfNBcolumns; ++idColumn) |
||||
for (unsigned int idRow=0;idRow<_halfNBrows; ++idRow) |
||||
{ |
||||
// computing local spatial constant
|
||||
double localSpatialConstantValue=commonFactor*sqrt((double)(idColumn*idColumn)+(double)(idRow*idRow)); |
||||
if (localSpatialConstantValue>1) |
||||
localSpatialConstantValue=1; |
||||
|
||||
_progressiveSpatialConstant[_halfNBcolumns-1+idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1+idRow)]=localSpatialConstantValue; |
||||
_progressiveSpatialConstant[_halfNBcolumns-1-idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1+idRow)]=localSpatialConstantValue; |
||||
_progressiveSpatialConstant[_halfNBcolumns-1+idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1-idRow)]=localSpatialConstantValue; |
||||
_progressiveSpatialConstant[_halfNBcolumns-1-idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1-idRow)]=localSpatialConstantValue; |
||||
|
||||
// computing local gain
|
||||
double localGain=(1-localSpatialConstantValue)*(1-localSpatialConstantValue)*(1-localSpatialConstantValue)*(1-localSpatialConstantValue)/(1+_beta); |
||||
_progressiveGain[_halfNBcolumns-1+idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1+idRow)]=localGain; |
||||
_progressiveGain[_halfNBcolumns-1-idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1+idRow)]=localGain; |
||||
_progressiveGain[_halfNBcolumns-1+idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1-idRow)]=localGain; |
||||
_progressiveGain[_halfNBcolumns-1-idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1-idRow)]=localGain; |
||||
|
||||
//std::cout<<commonFactor<<", "<<sqrt((_halfNBcolumns-1-idColumn)+(_halfNBrows-idRow-1))<<", "<<(_halfNBcolumns-1-idColumn)<<", "<<(_halfNBrows-idRow-1)<<", "<<localSpatialConstantValue<<std::endl;
|
||||
} |
||||
} |
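/* Illustrative sketch (not part of the original file): the loop above fills the per-pixel spatial
 * constant with a value that grows linearly with the distance to the image centre (alpha0 fixes the
 * slope and the value is clamped to 1 towards the corners), while the matching per-pixel gain keeps
 * the response normalised via (1-k)^4/(1+beta). A compact standalone version of that map computation,
 * under those assumptions (the original code mirrors the four quadrants instead of computing each
 * pixel directly):
 *
 *   #include <vector>
 *   #include <cmath>
 *   static void buildRadialConstants(unsigned int rows, unsigned int cols, double alpha0, double beta,
 *                                    std::vector<double> &spatialConstant, std::vector<double> &gain)
 *   {
 *       spatialConstant.assign(rows * cols, 0.0);
 *       gain.assign(rows * cols, 0.0);
 *       const double halfR = rows / 2.0, halfC = cols / 2.0;
 *       const double commonFactor = alpha0 / std::sqrt(halfR * halfR + halfC * halfC + 1.0);
 *       for (unsigned int r = 0; r < rows; ++r)
 *           for (unsigned int c = 0; c < cols; ++c)
 *           {
 *               double k = commonFactor * std::sqrt((r - halfR) * (r - halfR) + (c - halfC) * (c - halfC));
 *               if (k > 1.0) k = 1.0;
 *               spatialConstant[c + r * cols] = k;
 *               gain[c + r * cols] = (1.0 - k) * (1.0 - k) * (1.0 - k) * (1.0 - k) / (1.0 + beta);
 *           }
 *   }
 */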
||||
|
||||
void BasicRetinaFilter::setProgressiveFilterConstants_CustomAccuracy(const double beta, const double tau, const double k, const std::valarray<double> &accuracyMap, const unsigned int filterIndex) |
||||
{ |
||||
|
||||
if (accuracyMap.size()!=_filterOutput.size()) |
||||
{ |
||||
std::cerr<<"BasicRetinaFilter::setProgressiveFilterConstants_CustomAccuracy: error: input accuracy map does not match filter size, init skipped"<<std::endl;
||||
return ; |
||||
} |
||||
|
||||
// check if dedicated buffers are already allocated, if not create them
|
||||
if (_progressiveSpatialConstant.size()!=_filterOutput.size()) |
||||
{ |
||||
_progressiveSpatialConstant.resize(accuracyMap.size()); |
||||
_progressiveGain.resize(accuracyMap.size()); |
||||
} |
||||
|
||||
double _beta = beta+tau; |
||||
double _alpha=k*k; |
||||
double _mu=0.8; |
||||
if (k<=0) |
||||
{ |
||||
std::cerr<<"BasicRetinaFilter::spatial filtering coefficient must be superior to zero, correcting value to 0.01"<<std::endl; |
||||
//alpha0=0.0001;
|
||||
} |
||||
unsigned int tableOffset=filterIndex*3; |
||||
double _temp = (1.0+_beta)/(2.0*_mu*_alpha); |
||||
double _a=_filteringCoeficientsTable[tableOffset] = 1.0 + _temp - sqrt( (1.0+_temp)*(1.0+_temp) - 1.0); |
||||
_filteringCoeficientsTable[tableOffset+1]=(1.0-_a)*(1.0-_a)*(1.0-_a)*(1.0-_a)/(1.0+_beta); |
||||
_filteringCoeficientsTable[tableOffset+2] =tau; |
||||
|
||||
//memset(_progressiveSpatialConstant, 255, _filterOutput.getNBpixels());
|
||||
for (unsigned int idColumn=0;idColumn<_filterOutput.getNBcolumns(); ++idColumn) |
||||
for (unsigned int idRow=0;idRow<_filterOutput.getNBrows(); ++idRow) |
||||
{ |
||||
// computing local spatial constant
|
||||
unsigned int index=idColumn+idRow*_filterOutput.getNBcolumns(); |
||||
double localSpatialConstantValue=_a*accuracyMap[index]; |
||||
if (localSpatialConstantValue>1) |
||||
localSpatialConstantValue=1; |
||||
|
||||
_progressiveSpatialConstant[index]=localSpatialConstantValue; |
||||
|
||||
// computing local gain
|
||||
double localGain=(1-localSpatialConstantValue)*(1-localSpatialConstantValue)*(1-localSpatialConstantValue)*(1-localSpatialConstantValue)/(1+_beta); |
||||
_progressiveGain[index]=localGain; |
||||
|
||||
//std::cout<<commonFactor<<", "<<sqrt((_halfNBcolumns-1-idColumn)+(_halfNBrows-idRow-1))<<", "<<(_halfNBcolumns-1-idColumn)<<", "<<(_halfNBrows-idRow-1)<<", "<<localSpatialConstantValue<<std::endl;
|
||||
} |
||||
} |
||||
|
||||
///////////////////////////////////////////////////////////////////////
|
||||
/// Local luminance adaptation functions
|
||||
// run local adaptation filter and save result in _filterOutput
|
||||
const std::valarray<double> &BasicRetinaFilter::runFilter_LocalAdapdation(const std::valarray<double> &inputFrame, const std::valarray<double> &localLuminance) |
||||
{ |
||||
_localLuminanceAdaptation(&inputFrame[0], &localLuminance[0], &_filterOutput[0]); |
||||
return _filterOutput; |
||||
} |
||||
// run local adaptation filter at a specific output address
|
||||
void BasicRetinaFilter::runFilter_LocalAdapdation(const std::valarray<double> &inputFrame, const std::valarray<double> &localLuminance, std::valarray<double> &outputFrame) |
||||
{ |
||||
_localLuminanceAdaptation(&inputFrame[0], &localLuminance[0], &outputFrame[0]); |
||||
} |
||||
// run local adaptation filter and save result in _filterOutput with autonomous low pass filtering before adaptation
|
||||
const std::valarray<double> &BasicRetinaFilter::runFilter_LocalAdapdation_autonomous(const std::valarray<double> &inputFrame) |
||||
{ |
||||
_spatiotemporalLPfilter(&inputFrame[0], &_filterOutput[0]); |
||||
_localLuminanceAdaptation(&inputFrame[0], &_filterOutput[0], &_filterOutput[0]); |
||||
return _filterOutput; |
||||
} |
||||
// run local adaptation filter at a specific output address with autonomous low pass filtering before adaptation
|
||||
void BasicRetinaFilter::runFilter_LocalAdapdation_autonomous(const std::valarray<double> &inputFrame, std::valarray<double> &outputFrame) |
||||
{ |
||||
_spatiotemporalLPfilter(&inputFrame[0], &_filterOutput[0]); |
||||
_localLuminanceAdaptation(&inputFrame[0], &_filterOutput[0], &outputFrame[0]); |
||||
} |
||||
// local luminance adaptation of the input with regard to the localLuminance buffer
|
||||
void BasicRetinaFilter::_localLuminanceAdaptation(const double *inputFrame, const double *localLuminance, double *outputFrame) |
||||
{ |
||||
double meanLuminance=0; |
||||
const double *luminancePTR=inputFrame; |
||||
for (unsigned int i=0;i<_filterOutput.getNBpixels();++i) |
||||
meanLuminance+=*(luminancePTR++); |
||||
meanLuminance/=_filterOutput.getNBpixels(); |
||||
//double tempMeanValue=meanLuminance+_meanInputValue*_tau;
|
||||
|
||||
updateCompressionParameter(meanLuminance); |
||||
//std::cout<<meanLuminance<<std::endl;
|
||||
const double *localLuminancePTR=localLuminance; |
||||
const double *inputFramePTR=inputFrame; |
||||
double *outputFramePTR=outputFrame; |
||||
for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel, ++inputFramePTR) |
||||
{ |
||||
double X0=*(localLuminancePTR++)*_localLuminanceFactor+_localLuminanceAddon; |
||||
*(outputFramePTR++) = (_maxInputValue+X0)**inputFramePTR/(*inputFramePTR +X0); |
||||
//std::cout<<"BasicRetinaFilter::inputFrame[IDpixel]=%f, X0=%f, outputFrame[IDpixel]=%f\n", inputFrame[IDpixel], X0, outputFrame[IDpixel]);
|
||||
} |
||||
} |
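/* Illustrative note (not part of the original file): the adaptation above is a Michaelis-Menten like
 * compression. For each pixel, X0 = localLuminance * _localLuminanceFactor + _localLuminanceAddon sets
 * a local half-saturation point and the output is out = (maxInputValue + X0) * in / (in + X0), so dark
 * pixels in a dark neighbourhood are amplified while bright regions saturate smoothly towards
 * maxInputValue. A minimal scalar version, under those assumptions:
 *
 *   static double adaptPixel(double in, double localLuminance, double maxInputValue,
 *                            double luminanceFactor, double luminanceAddon)
 *   {
 *       const double X0 = localLuminance * luminanceFactor + luminanceAddon;
 *       return (maxInputValue + X0) * in / (in + X0);
 *   }
 */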
||||
|
||||
// local adaptation applied on a range of values which can be positive and negative
|
||||
void BasicRetinaFilter::_localLuminanceAdaptationPosNegValues(const double *inputFrame, const double *localLuminance, double *outputFrame) |
||||
{ |
||||
const double *localLuminancePTR=localLuminance; |
||||
const double *inputFramePTR=inputFrame; |
||||
double *outputFramePTR=outputFrame; |
||||
double factor=_maxInputValue*2/CV_PI; |
||||
for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel, ++inputFramePTR) |
||||
{ |
||||
double X0=*(localLuminancePTR++)*_localLuminanceFactor+_localLuminanceAddon; |
||||
*(outputFramePTR++) = factor*atan(*inputFramePTR/X0);//(_maxInputValue+X0)**inputFramePTR/(*inputFramePTR +X0);
|
||||
//std::cout<<"BasicRetinaFilter::inputFrame[IDpixel]=%f, X0=%f, outputFrame[IDpixel]=%f\n", inputFrame[IDpixel], X0, outputFrame[IDpixel]);
|
||||
} |
||||
} |
||||
|
||||
// local luminance adaptation of the input with regard to the localLuminance buffer; the input is rewritten in place and becomes the output
|
||||
void BasicRetinaFilter::_localLuminanceAdaptation(double *inputOutputFrame, const double *localLuminance) |
||||
{ |
||||
/*double meanLuminance=0;
|
||||
const double *luminancePTR=inputOutputFrame; |
||||
for (unsigned int i=0;i<_filterOutput.getNBpixels();++i) |
||||
meanLuminance+=*(luminancePTR++); |
||||
meanLuminance/=_filterOutput.getNBpixels(); |
||||
//double tempMeanValue=meanLuminance+_meanInputValue*_tau;
|
||||
|
||||
updateCompressionParameter(meanLuminance); |
||||
*/ |
||||
const double *localLuminancePTR=localLuminance; |
||||
double *inputOutputFramePTR=inputOutputFrame; |
||||
|
||||
for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel, ++inputOutputFramePTR) |
||||
{ |
||||
double X0=*(localLuminancePTR++)*_localLuminanceFactor+_localLuminanceAddon; |
||||
*(inputOutputFramePTR) = (_maxInputValue+X0)**inputOutputFramePTR/(*inputOutputFramePTR +X0); |
||||
} |
||||
} |
||||
///////////////////////////////////////////////////////////////////////
|
||||
/// Spatio temporal Low Pass filter functions
|
||||
// run LP filter and save result in the basic retina element buffer
|
||||
const std::valarray<double> &BasicRetinaFilter::runFilter_LPfilter(const std::valarray<double> &inputFrame, const unsigned int filterIndex) |
||||
{ |
||||
_spatiotemporalLPfilter(&inputFrame[0], &_filterOutput[0], filterIndex); |
||||
return _filterOutput; |
||||
} |
||||
|
||||
// run LP filter for a new frame input and save result at a specific output address
|
||||
void BasicRetinaFilter::runFilter_LPfilter(const std::valarray<double> &inputFrame, std::valarray<double> &outputFrame, const unsigned int filterIndex) |
||||
{ |
||||
_spatiotemporalLPfilter(&inputFrame[0], &outputFrame[0], filterIndex); |
||||
} |
||||
|
||||
// run LP filter on the input data and rewrite it
|
||||
void BasicRetinaFilter::runFilter_LPfilter_Autonomous(std::valarray<double> &inputOutputFrame, const unsigned int filterIndex) |
||||
{ |
||||
unsigned int coefTableOffset=filterIndex*3; |
||||
|
||||
/**********/ |
||||
_a=_filteringCoeficientsTable[coefTableOffset]; |
||||
_gain=_filteringCoeficientsTable[1+coefTableOffset]; |
||||
_tau=_filteringCoeficientsTable[2+coefTableOffset]; |
||||
|
||||
// launch the series of 1D directional filters in order to compute the 2D low pass filter
|
||||
_horizontalCausalFilter(&inputOutputFrame[0], 0, _filterOutput.getNBrows()); |
||||
_horizontalAnticausalFilter(&inputOutputFrame[0], 0, _filterOutput.getNBrows()); |
||||
_verticalCausalFilter(&inputOutputFrame[0], 0, _filterOutput.getNBcolumns()); |
||||
_verticalAnticausalFilter_multGain(&inputOutputFrame[0], 0, _filterOutput.getNBcolumns()); |
||||
|
||||
} |
||||
// run LP filter for a new frame input and save result at a specific output address
|
||||
void BasicRetinaFilter::_spatiotemporalLPfilter(const double *inputFrame, double *outputFrame, const unsigned int filterIndex) |
||||
{ |
||||
unsigned int coefTableOffset=filterIndex*3; |
||||
/**********/ |
||||
_a=_filteringCoeficientsTable[coefTableOffset]; |
||||
_gain=_filteringCoeficientsTable[1+coefTableOffset]; |
||||
_tau=_filteringCoeficientsTable[2+coefTableOffset]; |
||||
|
||||
// launch the series of 1D directional filters in order to compute the 2D low pass filter
|
||||
_horizontalCausalFilter_addInput(inputFrame, outputFrame, 0,_filterOutput.getNBrows()); |
||||
_horizontalAnticausalFilter(outputFrame, 0, _filterOutput.getNBrows()); |
||||
_verticalCausalFilter(outputFrame, 0, _filterOutput.getNBcolumns()); |
||||
_verticalAnticausalFilter_multGain(outputFrame, 0, _filterOutput.getNBcolumns()); |
||||
|
||||
} |
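/* Illustrative sketch (not part of the original file): the 2D low pass filter is separable and is
 * obtained by chaining four 1D recursive sweeps, namely horizontal causal (with the input and temporal
 * feedback added in), horizontal anticausal, vertical causal, and vertical anticausal with the
 * normalising gain. A compact standalone 2D version over a row-major buffer, under those assumptions
 * (temporal feedback omitted for clarity):
 *
 *   #include <vector>
 *   static void lowPass2D(std::vector<double> &img, unsigned int rows, unsigned int cols, double a, double gain)
 *   {
 *       for (unsigned int r = 0; r < rows; ++r)                 // horizontal causal + anticausal sweeps
 *       {
 *           double acc = 0;
 *           for (unsigned int c = 0; c < cols; ++c)  img[r*cols + c] = acc = img[r*cols + c] + a * acc;
 *           acc = 0;
 *           for (unsigned int c = cols; c-- > 0; )   img[r*cols + c] = acc = img[r*cols + c] + a * acc;
 *       }
 *       for (unsigned int c = 0; c < cols; ++c)                 // vertical causal + anticausal (with gain)
 *       {
 *           double acc = 0;
 *           for (unsigned int r = 0; r < rows; ++r)  img[r*cols + c] = acc = img[r*cols + c] + a * acc;
 *           acc = 0;
 *           for (unsigned int r = rows; r-- > 0; )   img[r*cols + c] = gain * (acc = img[r*cols + c] + a * acc);
 *       }
 *   }
 */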
||||
|
||||
// run SQUARING LP filter for a new frame input and save result at a specific output address
|
||||
const double BasicRetinaFilter::_squaringSpatiotemporalLPfilter(const double *inputFrame, double *outputFrame, const unsigned int filterIndex) |
||||
{ |
||||
unsigned int coefTableOffset=filterIndex*3; |
||||
/**********/ |
||||
_a=_filteringCoeficientsTable[coefTableOffset]; |
||||
_gain=_filteringCoeficientsTable[1+coefTableOffset]; |
||||
_tau=_filteringCoeficientsTable[2+coefTableOffset]; |
||||
|
||||
// launch the series of 1D directional filters in order to compute the 2D low pass filter
|
||||
|
||||
_squaringHorizontalCausalFilter(inputFrame, outputFrame, 0, _filterOutput.getNBrows()); |
||||
_horizontalAnticausalFilter(outputFrame, 0, _filterOutput.getNBrows()); |
||||
_verticalCausalFilter(outputFrame, 0, _filterOutput.getNBcolumns()); |
||||
return _verticalAnticausalFilter_returnMeanValue(outputFrame, 0, _filterOutput.getNBcolumns()); |
||||
} |
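/* Illustrative note (not part of the original file): this variant low pass filters the squared input,
 * i.e. a local energy estimate, and returns the global mean of the filtered result; the mean is
 * accumulated directly inside the final vertical sweep (_verticalAnticausalFilter_returnMeanValue
 * below) to avoid a second pass over the buffer.
 */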
||||
|
||||
/////////////////////////////////////////////////
|
||||
// standard version of the 1D low pass filters
|
||||
|
||||
// horizontal causal filter (basic way, no input added)
|
||||
void BasicRetinaFilter::_horizontalCausalFilter(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd) |
||||
{ |
||||
|
||||
|
||||
//#pragma omp parallel for
|
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double* outputPTR=outputFrame+(IDrowStart+IDrow)*_filterOutput.getNBcolumns(); |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
result = *(outputPTR)+ _a* result; |
||||
*(outputPTR++) = result; |
||||
} |
||||
} |
||||
} |
||||
// horizontal causal filter which adds the input inside
|
||||
void BasicRetinaFilter::_horizontalCausalFilter_addInput(const double *inputFrame, double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd) |
||||
{ |
||||
//#pragma omp parallel for
|
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double* outputPTR=outputFrame+(IDrowStart+IDrow)*_filterOutput.getNBcolumns(); |
||||
register const double* inputPTR=inputFrame+(IDrowStart+IDrow)*_filterOutput.getNBcolumns(); |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
result = *(inputPTR++) + _tau**(outputPTR)+ _a* result; |
||||
*(outputPTR++) = result; |
||||
} |
||||
} |
||||
|
||||
} |
||||
|
||||
// horizontal anticausal filter (basic way, no add on)
|
||||
void BasicRetinaFilter::_horizontalAnticausalFilter(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd) |
||||
{ |
||||
|
||||
//#pragma omp parallel for
|
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double* outputPTR=outputFrame+(IDrowEnd-IDrow)*(_filterOutput.getNBcolumns())-1; |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
result = *(outputPTR)+ _a* result; |
||||
*(outputPTR--) = result; |
||||
} |
||||
} |
||||
|
||||
|
||||
} |
||||
// horizontal anticausal filter which multiplies the output by _gain
|
||||
void BasicRetinaFilter::_horizontalAnticausalFilter_multGain(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd) |
||||
{ |
||||
|
||||
//#pragma omp parallel for
|
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double* outputPTR=outputFrame+(IDrowEnd-IDrow)*(_filterOutput.getNBcolumns())-1; |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
result = *(outputPTR)+ _a* result; |
||||
*(outputPTR--) = _gain*result; |
||||
} |
||||
} |
||||
} |
||||
|
||||
// vertical causal filter
|
||||
void BasicRetinaFilter::_verticalCausalFilter(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd) |
||||
{ |
||||
//#pragma omp parallel for
|
||||
for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn) |
||||
{ |
||||
register double result=0; |
||||
register double *outputPTR=outputFrame+IDcolumn; |
||||
|
||||
for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index) |
||||
{ |
||||
result = *(outputPTR) + _a * result; |
||||
*(outputPTR) = result; |
||||
outputPTR+=_filterOutput.getNBcolumns(); |
||||
|
||||
} |
||||
} |
||||
} |
||||
|
||||
|
||||
// vertical anticausal filter (basic way, no add on)
|
||||
void BasicRetinaFilter::_verticalAnticausalFilter(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd) |
||||
{ |
||||
double* offset=outputFrame+_filterOutput.getNBpixels()-_filterOutput.getNBcolumns(); |
||||
//#pragma omp parallel for
|
||||
for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn) |
||||
{ |
||||
register double result=0; |
||||
register double *outputPTR=offset+IDcolumn; |
||||
|
||||
for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index) |
||||
{ |
||||
result = *(outputPTR) + _a * result; |
||||
*(outputPTR) = result; |
||||
outputPTR-=_filterOutput.getNBcolumns(); |
||||
|
||||
} |
||||
} |
||||
} |
||||
|
||||
// vertical anticausal filter which multiplies the output by _gain
|
||||
void BasicRetinaFilter::_verticalAnticausalFilter_multGain(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd) |
||||
{ |
||||
double* offset=outputFrame+_filterOutput.getNBpixels()-_filterOutput.getNBcolumns(); |
||||
//#pragma omp parallel for
|
||||
for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn) |
||||
{ |
||||
register double result=0; |
||||
register double *outputPTR=offset+IDcolumn; |
||||
|
||||
for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index) |
||||
{ |
||||
result = *(outputPTR) + _a * result; |
||||
*(outputPTR) = _gain*result; |
||||
outputPTR-=_filterOutput.getNBcolumns(); |
||||
|
||||
} |
||||
} |
||||
|
||||
} |
||||
|
||||
/////////////////////////////////////////
|
||||
// specific modifications of 1D filters
|
||||
|
||||
// -> squaring horizontal causal filter
|
||||
void BasicRetinaFilter::_squaringHorizontalCausalFilter(const double *inputFrame, double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd) |
||||
{ |
||||
register double* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns(); |
||||
register const double* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns(); |
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
result = *(inputPTR)**(inputPTR) + _tau**(outputPTR)+ _a* result; |
||||
*(outputPTR++) = result; |
||||
++inputPTR; |
||||
} |
||||
} |
||||
} |
||||
|
||||
// vertical anticausal filter that returns the mean value of its result
|
||||
const double BasicRetinaFilter::_verticalAnticausalFilter_returnMeanValue(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd) |
||||
{ |
||||
register double meanValue=0; |
||||
double* offset=outputFrame+_filterOutput.getNBpixels()-_filterOutput.getNBcolumns(); |
||||
for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn) |
||||
{ |
||||
register double result=0; |
||||
register double *outputPTR=offset+IDcolumn; |
||||
|
||||
for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index) |
||||
{ |
||||
result = *(outputPTR) + _a * result; |
||||
*(outputPTR) = _gain*result; |
||||
meanValue+=*(outputPTR); |
||||
outputPTR-=_filterOutput.getNBcolumns(); |
||||
|
||||
} |
||||
} |
||||
|
||||
return meanValue/(double)_filterOutput.getNBpixels(); |
||||
} |
||||
|
||||
// LP filter with integration restricted to specific areas (where a binary mask image is true)
|
||||
void BasicRetinaFilter::_localSquaringSpatioTemporalLPfilter(const double *inputFrame, double *LPfilterOutput, const unsigned int *integrationAreas, const unsigned int filterIndex) |
||||
{ |
||||
unsigned int coefTableOffset=filterIndex*3; |
||||
_a=_filteringCoeficientsTable[coefTableOffset+0]; |
||||
_gain=_filteringCoeficientsTable[coefTableOffset+1]; |
||||
_tau=_filteringCoeficientsTable[coefTableOffset+2]; |
||||
// launch the series of 1D directional filters in order to compute the 2D low pass filter
|
||||
|
||||
_local_squaringHorizontalCausalFilter(inputFrame, LPfilterOutput, 0, _filterOutput.getNBrows(), integrationAreas); |
||||
_local_horizontalAnticausalFilter(LPfilterOutput, 0, _filterOutput.getNBrows(), integrationAreas); |
||||
_local_verticalCausalFilter(LPfilterOutput, 0, _filterOutput.getNBcolumns(), integrationAreas); |
||||
_local_verticalAnticausalFilter_multGain(LPfilterOutput, 0, _filterOutput.getNBcolumns(), integrationAreas); |
||||
|
||||
} |
||||
|
||||
// LP filter on specific parts of the picture instead of the whole image
// same functions (some of them) but taking a binary flag to allow integration; a false flag means no data change at the output...

// this function takes an image as input and squares it before filtering
||||
void BasicRetinaFilter::_local_squaringHorizontalCausalFilter(const double *inputFrame, double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd, const unsigned int *integrationAreas) |
||||
{ |
||||
register double* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns(); |
||||
register const double* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns(); |
||||
const unsigned int *integrationAreasPTR=integrationAreas; |
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
if (*(integrationAreasPTR++)) |
||||
result = *(inputPTR)**(inputPTR) + _tau**(outputPTR)+ _a* result; |
||||
else |
||||
result=0; |
||||
*(outputPTR++) = result; |
||||
++inputPTR; |
||||
|
||||
} |
||||
} |
||||
} |
||||
|
||||
void BasicRetinaFilter::_local_horizontalAnticausalFilter(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd, const unsigned int *integrationAreas) |
||||
{ |
||||
|
||||
register double* outputPTR=outputFrame+IDrowEnd*(_filterOutput.getNBcolumns())-1; |
||||
const unsigned int *integrationAreasPTR=integrationAreas; |
||||
|
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
if (*(integrationAreasPTR++)) |
||||
result = *(outputPTR)+ _a* result; |
||||
else |
||||
result=0; |
||||
*(outputPTR--) = result; |
||||
} |
||||
} |
||||
|
||||
} |
||||
|
||||
void BasicRetinaFilter::_local_verticalCausalFilter(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd, const unsigned int *integrationAreas) |
||||
{ |
||||
const unsigned int *integrationAreasPTR=integrationAreas; |
||||
|
||||
for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn) |
||||
{ |
||||
register double result=0; |
||||
register double *outputPTR=outputFrame+IDcolumn; |
||||
|
||||
for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index) |
||||
{ |
||||
if (*(integrationAreasPTR++)) |
||||
result = *(outputPTR)+ _a* result; |
||||
else |
||||
result=0; |
||||
*(outputPTR) = result; |
||||
outputPTR+=_filterOutput.getNBcolumns(); |
||||
|
||||
} |
||||
} |
||||
} |
||||
// this function applies _gain at the output
|
||||
void BasicRetinaFilter::_local_verticalAnticausalFilter_multGain(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd, const unsigned int *integrationAreas) |
||||
{ |
||||
const unsigned int *integrationAreasPTR=integrationAreas; |
||||
double* offset=outputFrame+_filterOutput.getNBpixels()-_filterOutput.getNBcolumns(); |
||||
|
||||
for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn) |
||||
{ |
||||
register double result=0; |
||||
register double *outputPTR=offset+IDcolumn; |
||||
|
||||
for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index) |
||||
{ |
||||
if (*(integrationAreasPTR++)) |
||||
result = *(outputPTR)+ _a* result; |
||||
else |
||||
result=0; |
||||
*(outputPTR) = _gain*result; |
||||
outputPTR-=_filterOutput.getNBcolumns(); |
||||
|
||||
} |
||||
} |
||||
} |
||||
|
||||
////////////////////////////////////////////////////
|
||||
// run LP filter for a new frame input and save result at a specific output address
|
||||
// -> USE IRREGULAR SPATIAL CONSTANT
|
||||
|
||||
// irregular filter which runs on a buffer and rewrites it in place
|
||||
void BasicRetinaFilter::_spatiotemporalLPfilter_Irregular(double *inputOutputFrame, const unsigned int filterIndex) |
||||
{ |
||||
if (_progressiveGain.size()==0) |
||||
{ |
||||
std::cerr<<"BasicRetinaFilter::runProgressiveFilter: cannot perform filtering, no progressive filter set up"<<std::endl;
||||
return; |
||||
} |
||||
unsigned int coefTableOffset=filterIndex*3; |
||||
/**********/ |
||||
//_a=_filteringCoeficientsTable[coefTableOffset];
|
||||
_tau=_filteringCoeficientsTable[2+coefTableOffset]; |
||||
|
||||
// launch the series of 1D directional filters in order to compute the 2D low pass filter
|
||||
_horizontalCausalFilter_Irregular(inputOutputFrame, 0, (int)_filterOutput.getNBrows()); |
||||
_horizontalAnticausalFilter_Irregular(inputOutputFrame, 0, (int)_filterOutput.getNBrows()); |
||||
_verticalCausalFilter_Irregular(inputOutputFrame, 0, (int)_filterOutput.getNBcolumns()); |
||||
_verticalAnticausalFilter_Irregular_multGain(inputOutputFrame, 0, (int)_filterOutput.getNBcolumns()); |
||||
|
||||
} |
||||
// irregular filter which runs on a buffer and writes the result to another
|
||||
void BasicRetinaFilter::_spatiotemporalLPfilter_Irregular(const double *inputFrame, double *outputFrame, const unsigned int filterIndex) |
||||
{ |
||||
if (_progressiveGain.size()==0) |
||||
{ |
||||
std::cerr<<"BasicRetinaFilter::runProgressiveFilter: cannot perform filtering, no progressive filter set up"<<std::endl;
||||
return; |
||||
} |
||||
unsigned int coefTableOffset=filterIndex*3; |
||||
/**********/ |
||||
//_a=_filteringCoeficientsTable[coefTableOffset];
|
||||
_tau=_filteringCoeficientsTable[2+coefTableOffset]; |
||||
|
||||
// launch the series of 1D directional filters in order to compute the 2D low pass filter
|
||||
_horizontalCausalFilter_Irregular_addInput(inputFrame, outputFrame, 0, (int)_filterOutput.getNBrows()); |
||||
_horizontalAnticausalFilter_Irregular(outputFrame, 0, (int)_filterOutput.getNBrows()); |
||||
_verticalCausalFilter_Irregular(outputFrame, 0, (int)_filterOutput.getNBcolumns()); |
||||
_verticalAnticausalFilter_Irregular_multGain(outputFrame, 0, (int)_filterOutput.getNBcolumns()); |
||||
|
||||
} |
||||
// 1D filters with irregular spatial constant
|
||||
// horizontal causal filter which runs on its input buffer
|
||||
void BasicRetinaFilter::_horizontalCausalFilter_Irregular(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd) |
||||
{ |
||||
register double* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns(); |
||||
register const double* spatialConstantPTR=&_progressiveSpatialConstant[0]+IDrowStart*_filterOutput.getNBcolumns(); |
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
result = *(outputPTR)+ *(spatialConstantPTR++)* result; |
||||
*(outputPTR++) = result; |
||||
} |
||||
} |
||||
} |
||||
|
||||
// horizontal causal filter with add input
|
||||
void BasicRetinaFilter::_horizontalCausalFilter_Irregular_addInput(const double *inputFrame, double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd) |
||||
{ |
||||
register double* outputPTR=outputFrame+IDrowStart*_filterOutput.getNBcolumns(); |
||||
register const double* inputPTR=inputFrame+IDrowStart*_filterOutput.getNBcolumns(); |
||||
register const double* spatialConstantPTR=&_progressiveSpatialConstant[0]+IDrowStart*_filterOutput.getNBcolumns(); |
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
result = *(inputPTR++) + _tau**(outputPTR)+ *(spatialConstantPTR++)* result; |
||||
*(outputPTR++) = result; |
||||
} |
||||
} |
||||
|
||||
} |
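// Editorial sketch of the recurrence implemented just above (not part of the original
// sources): for each row, writing x[n] for the input sample, yPrev[n] for the sample already
// stored in outputFrame, k[n] for the per-pixel spatial constant and tau for the temporal
// constant, the causal pass computes
//
//     y[n] = x[n] + tau * yPrev[n] + k[n] * y[n-1]
//
// i.e. a first order recursive low pass whose coefficient k[n] changes from pixel to pixel,
// plus a temporal feedback term weighted by _tau.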
||||
|
||||
// horizontal anticausal filter (basic way, no add on)
|
||||
void BasicRetinaFilter::_horizontalAnticausalFilter_Irregular(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd) |
||||
{ |
||||
register double* outputPTR=outputFrame+IDrowEnd*(_filterOutput.getNBcolumns())-1; |
||||
register const double* spatialConstantPTR=&_progressiveSpatialConstant[0]+IDrowEnd*(_filterOutput.getNBcolumns())-1; |
||||
|
||||
for (unsigned int IDrow=IDrowStart; IDrow<IDrowEnd; ++IDrow) |
||||
{ |
||||
register double result=0; |
||||
for (unsigned int index=0; index<_filterOutput.getNBcolumns(); ++index) |
||||
{ |
||||
result = *(outputPTR)+ *(spatialConstantPTR--)* result; |
||||
*(outputPTR--) = result; |
||||
} |
||||
} |
||||
|
||||
|
||||
} |
||||
|
||||
// vertical causal filter
|
||||
void BasicRetinaFilter::_verticalCausalFilter_Irregular(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd) |
||||
{ |
||||
for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn) |
||||
{ |
||||
register double result=0; |
||||
register double *outputPTR=outputFrame+IDcolumn; |
||||
register const double *spatialConstantPTR=&_progressiveSpatialConstant[0]+IDcolumn; |
||||
for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index) |
||||
{ |
||||
result = *(outputPTR) + *(spatialConstantPTR) * result; |
||||
*(outputPTR) = result; |
||||
outputPTR+=_filterOutput.getNBcolumns(); |
||||
spatialConstantPTR+=_filterOutput.getNBcolumns(); |
||||
} |
||||
} |
||||
} |
||||
|
||||
// vertical anticausal filter which multiplies the output by the local progressive gain
|
||||
void BasicRetinaFilter::_verticalAnticausalFilter_Irregular_multGain(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd) |
||||
{ |
||||
double* outputOffset=outputFrame+_filterOutput.getNBpixels()-_filterOutput.getNBcolumns(); |
||||
const double* constantOffset=&_progressiveSpatialConstant[0]+_filterOutput.getNBpixels()-_filterOutput.getNBcolumns(); |
||||
const double* gainOffset=&_progressiveGain[0]+_filterOutput.getNBpixels()-_filterOutput.getNBcolumns(); |
||||
for (unsigned int IDcolumn=IDcolumnStart; IDcolumn<IDcolumnEnd; ++IDcolumn) |
||||
{ |
||||
register double result=0; |
||||
register double *outputPTR=outputOffset+IDcolumn; |
||||
register const double *spatialConstantPTR=constantOffset+IDcolumn; |
||||
register const double *progressiveGainPTR=gainOffset+IDcolumn; |
||||
for (unsigned int index=0; index<_filterOutput.getNBrows(); ++index) |
||||
{ |
||||
result = *(outputPTR) + *(spatialConstantPTR) * result; |
||||
*(outputPTR) = *(progressiveGainPTR)*result; |
||||
outputPTR-=_filterOutput.getNBcolumns(); |
||||
spatialConstantPTR-=_filterOutput.getNBcolumns(); |
||||
progressiveGainPTR-=_filterOutput.getNBcolumns(); |
||||
} |
||||
} |
||||
|
||||
} |
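// Editorial note: this final pass runs bottom-up over each column with the same per-pixel
// recurrence as the previous passes and, in addition, multiplies every output sample by the
// matching entry of _progressiveGain, so the overall gain of the 2D filter can also vary
// across the image.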
||||
} |
@ -0,0 +1,438 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
#ifndef BASICRETINAELEMENT_HPP_ |
||||
#define BASICRETINAELEMENT_HPP_ |
||||
|
||||
#include <cstring> |
||||
|
||||
|
||||
/**
|
||||
* @class BasicRetinaFilter |
||||
* @brief Brief overview, this class provides tools for low level image processing: |
||||
* --> this class is able to perform: |
||||
* -> first order Low pass optimized filtering |
||||
* -> local luminance adaptation (able to correct back light problems and contrast enhancement) |
||||
* -> progressive low pass filter filtering (higher filtering on the borders than on the center) |
||||
* -> image data resampling between 0 and 255 with different options (linear rescaling, sigmoid) |
||||
* |
||||
* TYPICAL USE: |
||||
* |
||||
* // create object at a specified picture size
|
||||
* BasicRetinaFilter *_photoreceptorsPrefilter; |
||||
* _photoreceptorsPrefilter =new BasicRetinaFilter(sizeRows, sizeColumns); |
||||
* |
||||
* // init gain, spatial and temporal parameters:
|
||||
* _photoreceptorsPrefilter->setCoefficientsTable(gain,temporalConstant, spatialConstant); |
||||
* |
||||
* // during program execution, call the filter for local luminance correction or low pass filtering for an input picture called "FrameBuffer":
|
||||
* _photoreceptorsPrefilter->runFilter_LocalAdapdation(FrameBuffer); |
||||
* // or (Low pass first order filter)
|
||||
* _photoreceptorsPrefilter->runFilter_LPfilter(FrameBuffer); |
||||
* // get output frame and its size:
|
||||
* const unsigned int output_nbRows=_photoreceptorsPrefilter->getNBrows(); |
||||
* const unsigned int output_nbColumns=_photoreceptorsPrefilter->getNBcolumns(); |
||||
* const double *outputFrame=_photoreceptorsPrefilter->getOutput(); |
||||
* |
||||
* // at the end of the program, destroy object:
|
||||
* delete _photoreceptorsPrefilter; |
||||
|
||||
* @author Alexandre BENOIT, benoit.alexandre.vision@gmail.com, LISTIC : www.listic.univ-savoie.fr, Gipsa-Lab, France: www.gipsa-lab.inpg.fr/ |
||||
* Creation date 2007 |
||||
* synthesis of the work described in Alexandre BENOIT thesis: "Le systeme visuel humain au secours de la vision par ordinateur" |
||||
*/ |
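/* Editorial usage sketch for the progressive (irregular) filtering mode (not from the
 * original sources; variable names and parameter values are purely illustrative):
 *
 *   cv::BasicRetinaFilter filter(nbRows, nbColumns, 1, true); // enable progressive filtering
 *   filter.setProgressiveFilterConstants_CentredAccuracy(0.0, 0.0, 0.99); // beta, tau, alpha0
 *   filter.runProgressiveFilter(frameBuffer); // filters the std::valarray<double> in place
 *
 * See the method documentation below for the meaning of each parameter.
 */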
||||
|
||||
#include <iostream> |
||||
#include "templatebuffer.hpp" |
||||
|
||||
//#define __BASIC_RETINA_ELEMENT_DEBUG
|
||||
|
||||
//using namespace std;
|
||||
namespace cv |
||||
{ |
||||
class BasicRetinaFilter |
||||
{ |
||||
|
||||
public: |
||||
|
||||
/**
|
||||
* constructor of the base bio-inspired toolbox, parameters are only linked to image input size and the number of filtering capabilities of the object |
||||
* @param NBrows: number of rows of the input image |
||||
* @param NBcolumns: number of columns of the input image |
||||
* @param parametersListSize: specifies the number of parameters set (each parameters set represents a specific low pass spatio-temporal filter) |
||||
* @param useProgressiveFilter: specifies if the filter has irregular (progressive) filtering capabilities (this can be activated later using setProgressiveFilterConstants_xxx methods) |
||||
*/ |
||||
BasicRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns, const unsigned int parametersListSize=1, const bool useProgressiveFilter=false); |
||||
|
||||
/**
|
||||
* standard destructor |
||||
*/ |
||||
~BasicRetinaFilter(); |
||||
|
||||
/**
|
||||
* function which clears the output buffer of the object |
||||
*/ |
||||
inline void clearOutputBuffer(){_filterOutput=0;}; |
||||
|
||||
/**
|
||||
* function which clears the secondary buffer of the object |
||||
*/ |
||||
inline void clearSecondaryBuffer(){_localBuffer=0;}; |
||||
|
||||
/**
|
||||
* function which clears the output and the secondary buffer of the object |
||||
*/ |
||||
inline void clearAllBuffers(){clearOutputBuffer();clearSecondaryBuffer();}; |
||||
|
||||
/**
|
||||
* resize basic retina filter object (resizes all allocated buffers) |
||||
* @param NBrows: the new height size |
||||
* @param NBcolumns: the new width size |
||||
*/ |
||||
void resize(const unsigned int NBrows, const unsigned int NBcolumns); |
||||
|
||||
/**
|
||||
* forbidden method inherited from parent std::valarray |
||||
* prefer not to use this method since the filter matrices would become plain vectors |
||||
*/ |
||||
void resize(const unsigned int NBpixels){std::cerr<<"error, not accessible method"<<std::endl;}; |
||||
|
||||
/**
|
||||
* low pass filter call and run (models the homogeneous cells network at the retina level, for example horizontal cells or photoreceptors) |
||||
* @param inputFrame: the input image to be processed |
||||
* @param filterIndex: the offset which specifies the parameter set that should be used for the filtering |
||||
* @return the processed image, the output is reachable later by using function getOutput() |
||||
*/ |
||||
const std::valarray<double> &runFilter_LPfilter(const std::valarray<double> &inputFrame, const unsigned int filterIndex=0); // run the LP filter for a new frame input and save result in _filterOutput
|
||||
|
||||
/**
|
||||
* low pass filter call and run (models the homogeneous cells network at the retina level, for example horizontal cells or photoreceptors) |
||||
* @param inputFrame: the input image to be processed |
||||
* @param outputFrame: the output buffer in which the result is written |
||||
* @param filterIndex: the offset which specifies the parameter set that should be used for the filtering |
||||
*/ |
||||
void runFilter_LPfilter(const std::valarray<double> &inputFrame, std::valarray<double> &outputFrame, const unsigned int filterIndex=0); // run LP filter on a specific output address
|
||||
|
||||
/**
|
||||
* low pass filter call and run (models the homogeneous cells network at the retina level, for example horizontal cells or photoreceptors) |
||||
* @param inputOutputFrame: the input image to be processed, on which the result is rewritten |
||||
* @param filterIndex: the offset which specifies the parameter set that should be used for the filtering |
||||
*/ |
||||
void runFilter_LPfilter_Autonomous(std::valarray<double> &inputOutputFrame, const unsigned int filterIndex=0);// run LP filter on the input data and rewrite it
|
||||
|
||||
/**
|
||||
* local luminance adaptation call and run (contrast enhancement property of the photoreceptors) |
||||
* @param inputOutputFrame: the input image to be processed |
||||
* @param localLuminance: an image which represents the local luminance of the inputFrame parameter, in general, it is its low pass spatial filtering |
||||
* @return the processed image, the output is reachable later by using function getOutput() |
||||
*/ |
||||
const std::valarray<double> &runFilter_LocalAdapdation(const std::valarray<double> &inputOutputFrame, const std::valarray<double> &localLuminance);// run local adaptation filter and save result in _filterOutput
|
||||
|
||||
/**
|
||||
* local luminance adaptation call and run (contrast enhancement property of the photoreceptors) |
||||
* @param inputFrame: the input image to be processed |
||||
* @param localLuminance: an image which represents the local luminance of the inputFrame parameter, in general, it is its low pass spatial filtering |
||||
* @param outputFrame: the output buffer in which the result is written |
||||
*/ |
||||
void runFilter_LocalAdapdation(const std::valarray<double> &inputFrame, const std::valarray<double> &localLuminance, std::valarray<double> &outputFrame); // run local adaptation filter on a specific output address
|
||||
|
||||
/**
|
||||
* local luminance adaptation call and run (contrast enhancement property of the photoreceptors) |
||||
* @param inputFrame: the input image to be processed |
||||
* @return the processed image, the output is reachable later by using function getOutput() |
||||
*/ |
||||
const std::valarray<double> &runFilter_LocalAdapdation_autonomous(const std::valarray<double> &inputFrame);// run local adaptation filter and save result in _filterOutput
|
||||
|
||||
/**
|
||||
* local luminance adaptation call and run (contrast enhancement property of the photoreceptors) |
||||
* @param inputFrame: the input image to be processed |
||||
* @param outputFrame: the output buffer in which the result is written |
||||
*/ |
||||
void runFilter_LocalAdapdation_autonomous(const std::valarray<double> &inputFrame, std::valarray<double> &outputFrame); // run local adaptation filter on a specific output address
|
||||
|
||||
/**
|
||||
* run low pass filtering with progressive parameters (models the retina log sampling of the photoreceptors and its consequent low pass filtering effect: stronger low pass filtering on the corners) |
||||
* @param inputFrame: the input image to be processed |
||||
* @param filterIndex: the index which specifies the parameter set that should be used for the filtering |
||||
* @return the processed image, the output is reachable later by using function getOutput() if outputFrame is NULL |
||||
*/ |
||||
inline void runProgressiveFilter(std::valarray<double> &inputFrame, const unsigned int filterIndex=0){_spatiotemporalLPfilter_Irregular(&inputFrame[0], filterIndex);}; |
||||
|
||||
/**
|
||||
* run low pass filtering with progressive parameters (models the retina log sampling of the photoreceptors and its consequent low pass filtering effect: stronger low pass filtering on the corners) |
||||
* @param inputFrame: the input image to be processed |
||||
* @param outputFrame: the output buffer in which the result is written |
||||
* @param filterIndex: the index which specifies the parameter set that should be used for the filtering |
||||
*/ |
||||
inline void runProgressiveFilter(const std::valarray<double> &inputFrame, std::valarray<double> &outputFrame, const unsigned int filterIndex=0){_spatiotemporalLPfilter_Irregular(&inputFrame[0], &outputFrame[0], filterIndex);}; |
||||
|
||||
/**
|
||||
* first order spatio-temporal low pass filter setup function |
||||
* @param beta: gain of the filter (generally set to zero) |
||||
* @param tau: time constant of the filter (unit is frame for video processing) |
||||
* @param k: spatial constant of the filter (unit is pixels) |
||||
* @param filterIndex: the index which specifies the parameter set that should be used for the filtering |
||||
*/ |
||||
void setLPfilterParameters(const double beta, const double tau, const double k, const unsigned int filterIndex=0); // change the parameters of the filter
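// Editorial example (illustrative values only): a mild smoothing could be configured with
// setLPfilterParameters(0.0, 0.5, 5.0), i.e. beta=0 (no extra gain), tau=0.5 frame
// (temporal constant) and k=5 pixels (spatial constant).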
|
||||
|
||||
/**
|
||||
* first order spatio-temporal low pass filter setup function |
||||
* @param beta: gain of the filter (generally set to zero) |
||||
* @param tau: time constant of the filter (unit is frame for video processing) |
||||
* @param alpha0: spatial constant of the filter (unit is pixels) on the border of the image |
||||
* @param filterIndex: the index which specifies the parameter set that should be used for the filtering |
||||
*/ |
||||
void setProgressiveFilterConstants_CentredAccuracy(const double beta, const double tau, const double alpha0, const unsigned int filterIndex=0); |
||||
|
||||
/**
|
||||
* first order spatio-temporal low pass filter setup function |
||||
* @param beta: gain of the filter (generally set to zero) |
||||
* @param tau: time constant of the filter (unit is frame for video processing) |
||||
* @param alpha0: spatial constant of the filter (unit is pixels) on the border of the image |
||||
* @param accuracyMap: an image (double format) whose values range between 0 and 1, where 0 means apply no filtering and 1 means apply the filtering as specified in the parameter set; intermediate values allow smooth variations of the filtering strength |
||||
* @param filterIndex: the index which specifies the parameter set that should be used for the filtering |
||||
*/ |
||||
void setProgressiveFilterConstants_CustomAccuracy(const double beta, const double tau, const double alpha0, const std::valarray<double> &accuracyMap, const unsigned int filterIndex=0); |
||||
|
||||
/**
|
||||
* local luminance adaptation setup, this function should be applied for normal local adaptation (not for tone mapping operation) |
||||
* @param v0: compression effect for the local luminance adaptation processing, set a value between 0.6 and 0.9 for best results; a high value yields a high compression effect |
||||
* @param maxInputValue: the maximum amplitude value measured after local adaptation processing (c.f. function runFilter_LocalAdapdation & runFilter_LocalAdapdation_autonomous) |
||||
* @param meanLuminance: the a priori mean luminance of the input data (should be 128 for 8-bit images but can vary greatly in the case of High Dynamic Range Images (HDRI)) |
||||
*/ |
||||
void setV0CompressionParameter(const double v0, const double maxInputValue, const double meanLuminance){ _v0=v0*maxInputValue; _localLuminanceFactor=v0; _localLuminanceAddon=maxInputValue*(1.0-v0); _maxInputValue=maxInputValue;}; |
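// Editorial worked example (values are illustrative): with v0=0.7 and maxInputValue=255 the
// setter above yields _v0 = 0.7*255 = 178.5, _localLuminanceFactor = 0.7 and
// _localLuminanceAddon = 255*(1.0-0.7) = 76.5; note that meanLuminance is not used by this
// overload.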
||||
|
||||
/**
|
||||
* update local luminance adaptation setup, initial maxInputValue is kept. This function should be applied for normal local adaptation (not for tone mapping operation) |
||||
* @param v0: compression effect for the local luminance adaptation processing, set a value between 0.6 and 0.9 for best results; a high value yields a high compression effect |
||||
* @param meanLuminance: the a priori mean luminance of the input data (should be 128 for 8-bit images but can vary greatly in the case of High Dynamic Range Images (HDRI)) |
||||
*/ |
||||
void setV0CompressionParameter(const double v0, const double meanLuminance){ this->setV0CompressionParameter(v0, _maxInputValue, meanLuminance);}; |
||||
|
||||
/**
|
||||
* local luminance adaptation setup, this function should be applied for normal local adaptation (not for tone mapping operation) |
||||
* @param v0: compression effect for the local luminance adaptation processing, set a value between 0.6 and 0.9 for best results; a high value yields a high compression effect |
||||
*/ |
||||
void setV0CompressionParameter(const double v0){ _v0=v0*_maxInputValue; _localLuminanceFactor=v0; _localLuminanceAddon=_maxInputValue*(1.0-v0);}; |
||||
|
||||
/**
|
||||
* local luminance adaptation setup, this function should be applied for local adaptation applied to tone mapping operation |
||||
* @param v0: compression effect for the local luminance adaptation processing, set a value between 0.6 and 0.9 for best results; a high value yields a high compression effect |
||||
* @param maxInputValue: the maximum amplitude value measured after local adaptation processing (c.f. function runFilter_LocalAdapdation & runFilter_LocalAdapdation_autonomous) |
||||
* @param meanLuminance: the a priori mean luminance of the input data (should be 128 for 8-bit images but can vary greatly in the case of High Dynamic Range Images (HDRI)) |
||||
*/ |
||||
void setV0CompressionParameterToneMapping(const double v0, const double maxInputValue, const double meanLuminance=128.0){ _v0=v0*maxInputValue; _localLuminanceFactor=1; _localLuminanceAddon=meanLuminance*_v0; _maxInputValue=maxInputValue;}; |
||||
|
||||
/**
|
||||
* update compression parameters while keeping v0 parameter value |
||||
* @param meanLuminance the input frame mean luminance |
||||
*/ |
||||
inline void updateCompressionParameter(const double meanLuminance){_localLuminanceFactor=1; _localLuminanceAddon=meanLuminance*_v0;}; |
||||
|
||||
/**
|
||||
* @return the v0 compression parameter used to compute the local adaptation |
||||
*/ |
||||
const double getV0CompressionParameter(){ return _v0/_maxInputValue;}; |
||||
|
||||
/**
|
||||
* @return the output result of the object |
||||
*/ |
||||
inline const std::valarray<double> &getOutput() const {return _filterOutput;}; |
||||
|
||||
/**
|
||||
* @return number of rows of the filter |
||||
*/ |
||||
inline const unsigned int getNBrows(){return _filterOutput.getNBrows();}; |
||||
|
||||
/**
|
||||
* @return number of columns of the filter |
||||
*/ |
||||
inline const unsigned int getNBcolumns(){return _filterOutput.getNBcolumns();}; |
||||
|
||||
/**
|
||||
* @return number of pixels of the filter |
||||
*/ |
||||
inline const unsigned int getNBpixels(){return _filterOutput.getNBpixels();}; |
||||
|
||||
/**
|
||||
* force filter output to be normalized between 0 and maxValue |
||||
* @param maxValue: the maximum output value that is required |
||||
*/ |
||||
inline void normalizeGrayOutput_0_maxOutputValue(const double maxValue){_filterOutput.normalizeGrayOutput_0_maxOutputValue(maxValue);}; |
||||
|
||||
/**
|
||||
* force filter output to be normalized around 0 and rescaled with a sigmoid effect (extreme values saturation) |
||||
* @param maxValue: the maximum output value that is required |
||||
*/ |
||||
inline void normalizeGrayOutputCentredSigmoide(){_filterOutput.normalizeGrayOutputCentredSigmoide();}; |
||||
|
||||
/**
|
||||
* force filter output to be normalized : data centering and std normalisation |
||||
* @param maxValue: the maximum output value that is required |
||||
*/ |
||||
inline void centerReductImageLuminance(){_filterOutput.centerReductImageLuminance();}; |
||||
|
||||
/**
|
||||
* @return the maximum input buffer value |
||||
*/ |
||||
inline const double getMaxInputValue(){return this->_maxInputValue;}; |
||||
|
||||
/**
|
||||
* @param newMaxInputValue: the new maximum input buffer value |
||||
*/ |
||||
inline void setMaxInputValue(const double newMaxInputValue){this->_maxInputValue=newMaxInputValue;}; |
||||
|
||||
protected: |
||||
|
||||
/////////////////////////
|
||||
// data buffers
|
||||
TemplateBuffer<double> _filterOutput; // primary buffer (contains processing outputs)
|
||||
std::valarray<double> _localBuffer; // local secondary buffer
|
||||
/////////////////////////
|
||||
// PARAMETERS
|
||||
unsigned int _halfNBrows; |
||||
unsigned int _halfNBcolumns; |
||||
|
||||
// parameters buffers
|
||||
std::valarray <double>_filteringCoeficientsTable; |
||||
std::valarray <double>_progressiveSpatialConstant;// local table containing the local spatial constants (allocated with the object)
|
||||
std::valarray <double>_progressiveGain;// local table containing the local gain values (allocated with the object)
|
||||
|
||||
// local adaptation filtering parameters
|
||||
double _v0; //value used for local luminance adaptation function
|
||||
double _maxInputValue; |
||||
double _meanInputValue; |
||||
double _localLuminanceFactor; |
||||
double _localLuminanceAddon; |
||||
|
||||
// protected data related to standard low pass filters parameters
|
||||
double _a; |
||||
double _tau; |
||||
double _gain; |
||||
|
||||
/////////////////////////
|
||||
// FILTERS METHODS
|
||||
|
||||
// Basic spatio-temporal low pass filter used by each retina filter
|
||||
void _spatiotemporalLPfilter(const double *inputFrame, double *LPfilterOutput, const unsigned int coefTableOffset=0); |
||||
const double _squaringSpatiotemporalLPfilter(const double *inputFrame, double *outputFrame, const unsigned int filterIndex=0); |
||||
|
||||
// LP filter with an irregular spatial filtering
|
||||
|
||||
// -> rewrites the input buffer
|
||||
void _spatiotemporalLPfilter_Irregular(double *inputOutputFrame, const unsigned int filterIndex=0); |
||||
// writes the output on another buffer
|
||||
void _spatiotemporalLPfilter_Irregular(const double *inputFrame, double *outputFrame, const unsigned int filterIndex=0); |
||||
// LP filter that squares the input and computes the output ONLY on the areas where the integrationAreas map is TRUE
|
||||
void _localSquaringSpatioTemporalLPfilter(const double *inputFrame, double *LPfilterOutput, const unsigned int *integrationAreas, const unsigned int filterIndex=0); |
||||
|
||||
// local luminance adaptation of the input with regard to the localLuminance buffer
|
||||
void _localLuminanceAdaptation(const double *inputFrame, const double *localLuminance, double *outputFrame); |
||||
// local luminance adaptation of the input with regard to the localLuminance buffer; the input is rewritten and becomes the output
|
||||
void _localLuminanceAdaptation(double *inputOutputFrame, const double *localLuminance); |
||||
// local adaptation applied on a range of values which can be positive and negative
|
||||
void _localLuminanceAdaptationPosNegValues(const double *inputFrame, const double *localLuminance, double *outputFrame); |
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////
|
||||
// 1D directional filters used for the 2D low pass filtering
|
||||
|
||||
// 1D filters with image input
|
||||
void _horizontalCausalFilter_addInput(const double *inputFrame, double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd); |
||||
// 1D filters with image input that is squared in the function
|
||||
void _squaringHorizontalCausalFilter(const double *inputFrame, double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd); |
||||
// vertical anticausal filter that returns the mean value of its result
|
||||
const double _verticalAnticausalFilter_returnMeanValue(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd); |
||||
|
||||
// most simple functions: only perform 1D filtering with output=input (no add on)
|
||||
void _horizontalCausalFilter(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd); |
||||
void _horizontalAnticausalFilter(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd); |
||||
void _verticalCausalFilter(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd); |
||||
void _verticalAnticausalFilter(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd); |
||||
|
||||
// perform 1D filtering with a spatially varying coefficient
|
||||
void _horizontalCausalFilter_Irregular(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd); |
||||
void _horizontalCausalFilter_Irregular_addInput(const double *inputFrame, double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd); |
||||
void _horizontalAnticausalFilter_Irregular(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd); |
||||
void _verticalCausalFilter_Irregular(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd); |
||||
void _verticalAnticausalFilter_Irregular_multGain(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd); |
||||
|
||||
|
||||
// 1D filters in which the output is multiplied by _gain
|
||||
void _verticalAnticausalFilter_multGain(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd); // this function applies _gain to the output
|
||||
void _horizontalAnticausalFilter_multGain(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd); // this function applies _gain to the output
|
||||
|
||||
// LP filter on specific parts of the picture instead of all the image
|
||||
// same functions (some of them), but they take a binary flag to enable integration; a false flag means 0 at the output...
|
||||
void _local_squaringHorizontalCausalFilter(const double *inputFrame, double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd, const unsigned int *integrationAreas); |
||||
void _local_horizontalAnticausalFilter(double *outputFrame, unsigned int IDrowStart, unsigned int IDrowEnd, const unsigned int *integrationAreas); |
||||
void _local_verticalCausalFilter(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd, const unsigned int *integrationAreas); |
||||
void _local_verticalAnticausalFilter_multGain(double *outputFrame, unsigned int IDcolumnStart, unsigned int IDcolumnEnd, const unsigned int *integrationAreas); // this function applies _gain to the output
|
||||
|
||||
}; |
||||
|
||||
} |
||||
#endif |
||||
|
||||
|
@ -0,0 +1,449 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
#include "precomp.hpp" |
||||
#include "imagelogpolprojection.hpp" |
||||
|
||||
#include <cmath> |
||||
#include <iostream> |
||||
|
||||
// @author Alexandre BENOIT, benoit.alexandre.vision@gmail.com, LISTIC : www.listic.univ-savoie.fr, Gipsa-Lab, France: www.gipsa-lab.inpg.fr/
|
||||
|
||||
namespace cv |
||||
{ |
||||
|
||||
// constructor
|
||||
ImageLogPolProjection::ImageLogPolProjection(const unsigned int nbRows, const unsigned int nbColumns, const PROJECTIONTYPE projection, const bool colorModeCapable) |
||||
:BasicRetinaFilter(nbRows, nbColumns), |
||||
_sampledFrame(0), |
||||
_tempBuffer(_localBuffer), |
||||
_transformTable(0), |
||||
_irregularLPfilteredFrame(_filterOutput) |
||||
{ |
||||
_inputDoubleNBpixels=nbRows*nbColumns*2; |
||||
_selectedProjection = projection; |
||||
_reductionFactor=0; |
||||
_initOK=false; |
||||
_usefullpixelIndex=0; |
||||
_colorModeCapable=colorModeCapable; |
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::allocating"<<std::endl; |
||||
#endif |
||||
if (_colorModeCapable) |
||||
{ |
||||
_tempBuffer.resize(nbRows*nbColumns*3); |
||||
} |
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::done"<<std::endl; |
||||
#endif |
||||
|
||||
clearAllBuffers(); |
||||
} |
||||
|
||||
// destructor
|
||||
ImageLogPolProjection::~ImageLogPolProjection() |
||||
{ |
||||
|
||||
} |
||||
|
||||
|
||||
// reset buffers method
|
||||
void ImageLogPolProjection::clearAllBuffers() |
||||
{ |
||||
_sampledFrame=0; |
||||
_tempBuffer=0; |
||||
BasicRetinaFilter::clearAllBuffers(); |
||||
} |
||||
|
||||
/**
|
||||
* resize the image log polar projection object (resizes all allocated buffers) |
||||
* @param NBrows: the new height size |
||||
* @param NBcolumns: the new width size |
||||
*/ |
||||
void ImageLogPolProjection::resize(const unsigned int NBrows, const unsigned int NBcolumns) |
||||
{ |
||||
BasicRetinaFilter::resize(NBrows, NBcolumns); |
||||
initProjection(_reductionFactor, _samplingStrenght); |
||||
|
||||
// reset buffers method
|
||||
clearAllBuffers(); |
||||
|
||||
} |
||||
|
||||
// init functions depending on the projection type
|
||||
bool ImageLogPolProjection::initProjection(const double reductionFactor, const double samplingStrenght) |
||||
{ |
||||
switch(_selectedProjection) |
||||
{ |
||||
case RETINALOGPROJECTION: |
||||
return _initLogRetinaSampling(reductionFactor, samplingStrenght); |
||||
break; |
||||
case CORTEXLOGPOLARPROJECTION: |
||||
return _initLogPolarCortexSampling(reductionFactor, samplingStrenght); |
||||
break; |
||||
default: |
||||
std::cout<<"ImageLogPolProjection::no projection setted up... performing default retina projection... take care"<<std::endl; |
||||
return _initLogRetinaSampling(reductionFactor, samplingStrenght); |
||||
break; |
||||
} |
||||
} |
||||
|
||||
// -> private init functions dedicated to each projection
|
||||
bool ImageLogPolProjection::_initLogRetinaSampling(const double reductionFactor, const double samplingStrenght) |
||||
{ |
||||
_initOK=false; |
||||
|
||||
if (_selectedProjection!=RETINALOGPROJECTION) |
||||
{ |
||||
std::cerr<<"ImageLogPolProjection::initLogRetinaSampling: could not initialize logPolar projection for a log projection system\n -> you probably chose the wrong init function, use initLogPolarCortexSampling() instead"<<std::endl; |
||||
return false; |
||||
} |
||||
if (reductionFactor<1.0) |
||||
{ |
||||
std::cerr<<"ImageLogPolProjection::initLogRetinaSampling: reduction factor must be superior to 0, skeeping initialisation..."<<std::endl; |
||||
return false; |
||||
} |
||||
|
||||
// compute image output size
|
||||
_outputNBrows=predictOutputSize(this->getNBrows(), reductionFactor); |
||||
_outputNBcolumns=predictOutputSize(this->getNBcolumns(), reductionFactor); |
||||
_outputNBpixels=_outputNBrows*_outputNBcolumns; |
||||
_outputDoubleNBpixels=_outputNBrows*_outputNBcolumns*2; |
||||
|
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: Log resampled image resampling factor: "<<reductionFactor<<", strenght:"<<samplingStrenght<<std::endl; |
||||
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: Log resampled image size: "<<_outputNBrows<<"*"<<_outputNBcolumns<<std::endl; |
||||
#endif |
||||
|
||||
// setup progressive prefilter that will be applied BEFORE log sampling
|
||||
setProgressiveFilterConstants_CentredAccuracy(0.0, 0.0, 0.99); |
||||
|
||||
// (re)create the image output buffer and transform table if the reduction factor changed
|
||||
_sampledFrame.resize(_outputNBpixels*(1+(unsigned int)_colorModeCapable*2)); |
||||
|
||||
// specifying the new reduction factor after preliminary checks
|
||||
_reductionFactor=reductionFactor; |
||||
_samplingStrenght=samplingStrenght; |
||||
|
||||
// compute rlim for symmetric rows/columns sampling; rlim is based on the smallest dimension
|
||||
_minDimension=(double)(_filterOutput.getNBrows() < _filterOutput.getNBcolumns() ? _filterOutput.getNBrows() : _filterOutput.getNBcolumns()); |
||||
|
||||
// input frame dimensions dependent log sampling:
|
||||
//double rlim=1.0/reductionFactor*(minDimension/2.0+samplingStrenght);
|
||||
|
||||
// input frame dimensions INdependent log sampling:
|
||||
_azero=(1.0+reductionFactor*sqrt(samplingStrenght))/(reductionFactor*reductionFactor*samplingStrenght-1.0); |
||||
_alim=(1.0+_azero)/reductionFactor; |
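// Editorial note, restating the two assignments above as formulas
// (r = reductionFactor, s = samplingStrenght):
//     azero = (1 + r*sqrt(s)) / (r*r*s - 1)
//     alim  = (1 + azero) / r
// both values are independent of the input frame dimensions, hence the
// "input frame dimensions INdependent" sampling mentioned above.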
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: rlim= "<<rlim<<std::endl; |
||||
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: alim= "<<alim<<std::endl; |
||||
#endif |
||||
|
||||
// get half frame size
|
||||
unsigned int halfOutputRows = _outputNBrows/2-1; |
||||
unsigned int halfOutputColumns = _outputNBcolumns/2-1; |
||||
unsigned int halfInputRows = _filterOutput.getNBrows()/2-1; |
||||
unsigned int halfInputColumns = _filterOutput.getNBcolumns()/2-1; |
||||
|
||||
// computing log sampling matrix by computing quarters of images
|
||||
// the original new image center (_filterOutput.getNBrows()/2, _filterOutput.getNBcolumns()/2) being at coordinate (_filterOutput.getNBrows()/(2*_reductionFactor), _filterOutput.getNBcolumns()/(2*_reductionFactor))
|
||||
|
||||
// -> use a temporary transform table which is bigger than the final one; we only report pixel coordinates that are included in the sampled picture
|
||||
std::valarray<unsigned int> tempTransformTable(2*_outputNBpixels); // the structure would be: (pixelOutputCoordinate n)(pixelInputCoordinate n)(pixelOutputCoordinate n+1)(pixelInputCoordinate n+1)
|
||||
_usefullpixelIndex=0; |
||||
|
||||
double rMax=0; |
||||
rMax = (halfInputRows<halfInputColumns) ? (double)(halfInputRows*halfInputRows) : (double)(halfInputColumns*halfInputColumns); |
||||
|
||||
for (unsigned int idRow=0;idRow<halfOutputRows; ++idRow) |
||||
{ |
||||
for (unsigned int idColumn=0;idColumn<halfOutputColumns; ++idColumn) |
||||
{ |
||||
// get the pixel position in the original picture
|
||||
|
||||
// -> input frame dimensions dependent log sampling:
|
||||
//double scale = samplingStrenght/(rlim-(double)sqrt(idRow*idRow+idColumn*idColumn));
|
||||
|
||||
// -> input frame dimensions INdependent log sampling:
|
||||
double scale=getOriginalRadiusLength((double)sqrt((double)(idRow*idRow+idColumn*idColumn))); |
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: scale= "<<scale<<std::endl; |
||||
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: scale2= "<<scale2<<std::endl; |
||||
#endif |
||||
if (scale < 0) ///check it later
|
||||
scale = 10000; |
||||
|
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
// std::cout<<"ImageLogPolProjection::initLogRetinaSampling: scale= "<<scale<<std::endl;
|
||||
#endif |
||||
|
||||
unsigned int u=(unsigned int)floor((double)idRow*scale); |
||||
unsigned int v=(unsigned int)floor((double)idColumn*scale); |
||||
|
||||
// manage border effects
|
||||
double length=u*u+v*v; |
||||
double radiusRatio=sqrt(rMax/length); |
||||
|
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::(inputH, inputW)="<<halfInputRows<<", "<<halfInputColumns<<", Rmax2="<<rMax<<std::endl; |
||||
std::cout<<"before ==> ImageLogPolProjection::(u, v)="<<u<<", "<<v<<", r="<<u*u+v*v<<std::endl; |
||||
std::cout<<"ratio ="<<radiusRatio<<std::endl; |
||||
#endif |
||||
|
||||
if (radiusRatio < 1.0) |
||||
{ |
||||
u=(unsigned int)floor(radiusRatio*double(u)); |
||||
v=(unsigned int)floor(radiusRatio*double(v)); |
||||
} |
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"after ==> ImageLogPolProjection::(u, v)="<<u<<", "<<v<<", r="<<u*u+v*v<<std::endl; |
||||
std::cout<<"ImageLogPolProjection::("<<(halfOutputRows-idRow)<<", "<<idColumn+halfOutputColumns<<") <- ("<<halfInputRows-u<<", "<<v+halfInputColumns<<")"<<std::endl; |
||||
std::cout<<(halfOutputRows-idRow)+(halfOutputColumns+idColumn)*_outputNBrows<<" -> "<<(halfInputRows-u)+_filterOutput.getNBrows()*(halfInputColumns+v)<<std::endl; |
||||
#endif |
||||
|
||||
if ((u<halfInputRows)&&(v<halfInputColumns)) |
||||
{ |
||||
|
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"*** VALID ***"<<std::endl; |
||||
#endif |
||||
|
||||
// set pixel coordinate of the input picture in the transform table at the current log sampled pixel
|
||||
// 1st quadrant
|
||||
tempTransformTable[_usefullpixelIndex++]=(halfOutputColumns+idColumn)+(halfOutputRows-idRow)*_outputNBcolumns; |
||||
tempTransformTable[_usefullpixelIndex++]=_filterOutput.getNBcolumns()*(halfInputRows-u)+(halfInputColumns+v); |
||||
// 2nd quadrant
|
||||
tempTransformTable[_usefullpixelIndex++]=(halfOutputColumns+idColumn)+(halfOutputRows+idRow)*_outputNBcolumns; |
||||
tempTransformTable[_usefullpixelIndex++]=_filterOutput.getNBcolumns()*(halfInputRows+u)+(halfInputColumns+v); |
||||
// 3rd quadrant
|
||||
tempTransformTable[_usefullpixelIndex++]=(halfOutputColumns-idColumn)+(halfOutputRows-idRow)*_outputNBcolumns; |
||||
tempTransformTable[_usefullpixelIndex++]=_filterOutput.getNBcolumns()*(halfInputRows-u)+(halfInputColumns-v); |
||||
// 4th quadrant
|
||||
tempTransformTable[_usefullpixelIndex++]=(halfOutputColumns-idColumn)+(halfOutputRows+idRow)*_outputNBcolumns; |
||||
tempTransformTable[_usefullpixelIndex++]=_filterOutput.getNBcolumns()*(halfInputRows+u)+(halfInputColumns-v); |
||||
} |
||||
} |
||||
} |
||||
|
||||
// (re)creating and filling the transform table
|
||||
_transformTable.resize(_usefullpixelIndex); |
||||
memcpy(&_transformTable[0], &tempTransformTable[0], sizeof(unsigned int)*_usefullpixelIndex); |
||||
|
||||
// reset all buffers
|
||||
clearAllBuffers(); |
||||
|
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: init done successfully"<<std::endl; |
||||
#endif |
||||
_initOK=true; |
||||
return _initOK; |
||||
} |
||||
|
||||
bool ImageLogPolProjection::_initLogPolarCortexSampling(const double reductionFactor, const double samplingStrenght) |
||||
{ |
||||
_initOK=false; |
||||
|
||||
if (_selectedProjection!=CORTEXLOGPOLARPROJECTION) |
||||
{ |
||||
std::cerr<<"ImageLogPolProjection::could not initialize log projection for a logPolar projection system\n -> you probably chose the wrong init function, use initLogRetinaSampling() instead"<<std::endl; |
||||
return false; |
||||
} |
||||
|
||||
if (reductionFactor<1.0) |
||||
{ |
||||
std::cerr<<"ImageLogPolProjection::reduction factor must be superior to 0, skeeping initialisation..."<<std::endl; |
||||
return false; |
||||
} |
||||
|
||||
// compute the smallest image size
|
||||
unsigned int minDimension=(_filterOutput.getNBrows() < _filterOutput.getNBcolumns() ? _filterOutput.getNBrows() : _filterOutput.getNBcolumns()); |
||||
// specifying the new reduction factor after preliminary checks
|
||||
_reductionFactor=reductionFactor; |
||||
// compute image output size
|
||||
_outputNBrows=(unsigned int)((double)minDimension/reductionFactor); |
||||
_outputNBcolumns=(unsigned int)((double)minDimension/reductionFactor); |
||||
_outputNBpixels=_outputNBrows*_outputNBcolumns; |
||||
_outputDoubleNBpixels=_outputNBrows*_outputNBcolumns*2; |
||||
|
||||
// get half frame size
|
||||
//unsigned int halfOutputRows = _outputNBrows/2-1;
|
||||
//unsigned int halfOutputColumns = _outputNBcolumns/2-1;
|
||||
unsigned int halfInputRows = _filterOutput.getNBrows()/2-1; |
||||
unsigned int halfInputColumns = _filterOutput.getNBcolumns()/2-1; |
||||
|
||||
|
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::Log resampled image size: "<<_outputNBrows<<"*"<<_outputNBcolumns<<std::endl; |
||||
#endif |
||||
|
||||
// setup progressive prefilter that will be applied BEFORE log sampling
|
||||
setProgressiveFilterConstants_CentredAccuracy(0.0, 0.0, 0.99); |
||||
|
||||
// (re)create the image output buffer and transform table if the reduction factor changed
|
||||
_sampledFrame.resize(_outputNBpixels*(1+(unsigned int)_colorModeCapable*2)); |
||||
|
||||
// create the radius and orientation axes and fill them, radius in [0;2.3[, orientation covering 2*pi
|
||||
std::valarray<double> radiusAxis(_outputNBcolumns); |
||||
double radiusStep=2.30/(double)_outputNBcolumns; |
||||
for (unsigned int i=0;i<_outputNBcolumns;++i) |
||||
{ |
||||
radiusAxis[i]=i*radiusStep; |
||||
} |
||||
std::valarray<double> orientationAxis(_outputNBrows); |
||||
double orientationStep=-2.0*CV_PI/(double)_outputNBrows; |
||||
for (unsigned int io=0;io<_outputNBrows;++io) |
||||
{ |
||||
orientationAxis[io]=io*orientationStep; |
||||
} |
||||
// -> use a temporary transform table which is bigger than the final one; we only report pixel coordinates that are included in the sampled picture
|
||||
std::valarray<unsigned int> tempTransformTable(2*_outputNBpixels); // the structure would be: (pixelOutputCoordinate n)(pixelInputCoordinate n)(pixelOutputCoordinate n+1)(pixelInputCoordinate n+1)
|
||||
_usefullpixelIndex=0; |
||||
|
||||
//std::cout<<"ImageLogPolProjection::Starting cortex projection"<<std::endl;
|
||||
// compute transformation, get theta and Radius with regard to the output sampled pixel
|
||||
double diagonalLenght=sqrt((double)(_outputNBcolumns*_outputNBcolumns+_outputNBrows*_outputNBrows)); |
||||
for (unsigned int radiusIndex=0;radiusIndex<_outputNBcolumns;++radiusIndex) |
||||
for(unsigned int orientationIndex=0;orientationIndex<_outputNBrows;++orientationIndex) |
||||
{ |
||||
double x=1.0+sinh(radiusAxis[radiusIndex])*cos(orientationAxis[orientationIndex]); |
||||
double y=sinh(radiusAxis[radiusIndex])*sin(orientationAxis[orientationIndex]); |
||||
// get the input picture coordinate
|
||||
double R=diagonalLenght*sqrt(x*x+y*y)/(5.0+sqrt(x*x+y*y)); |
||||
double theta=atan2(y,x); |
||||
// convert input polar coordinates into cartesian/C compatible coordinates
|
||||
unsigned int columnIndex=(unsigned int)(cos(theta)*R)+halfInputColumns; |
||||
unsigned int rowIndex=(unsigned int)(sin(theta)*R)+halfInputRows; |
||||
//std::cout<<"ImageLogPolProjection::R="<<R<<" / Theta="<<theta<<" / (x, y)="<<columnIndex<<", "<<rowIndex<<std::endl;
|
||||
if ((columnIndex<_filterOutput.getNBcolumns())&&(columnIndex>0)&&(rowIndex<_filterOutput.getNBrows())&&(rowIndex>0)) |
||||
{ |
||||
// set coordinate
|
||||
tempTransformTable[_usefullpixelIndex++]=radiusIndex+orientationIndex*_outputNBcolumns; |
||||
tempTransformTable[_usefullpixelIndex++]= columnIndex+rowIndex*_filterOutput.getNBcolumns(); |
||||
} |
||||
} |
||||
|
||||
// (re)creating and filling the transform table
|
||||
_transformTable.resize(_usefullpixelIndex); |
||||
memcpy(&_transformTable[0], &tempTransformTable[0], sizeof(unsigned int)*_usefullpixelIndex); |
||||
|
||||
// reset all buffers
|
||||
clearAllBuffers(); |
||||
_initOK=true; |
||||
return true; |
||||
} |
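// Editorial summary of the cortex mapping built above (not part of the original sources):
// each output pixel (radiusIndex, orientationIndex) is first mapped to
//     x = 1 + sinh(radius)*cos(orientation),   y = sinh(radius)*sin(orientation)
// with radius in [0, 2.3[ and orientation spanning 2*pi, and is then projected back into the
// input frame through
//     R = diagonalLenght * sqrt(x*x + y*y) / (5 + sqrt(x*x + y*y)),   theta = atan2(y, x)
// only the (column, row) pairs that fall inside the input frame are written into the
// transform table.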
||||
|
||||
// action function
|
||||
std::valarray<double> &ImageLogPolProjection::runProjection(const std::valarray<double> &inputFrame, const double colorMode) |
||||
{ |
||||
if (_colorModeCapable&&colorMode) |
||||
{ |
||||
// progressive filtering and storage of the result in _tempBuffer
|
||||
_spatiotemporalLPfilter_Irregular(&inputFrame[0], &_irregularLPfilteredFrame[0]); |
||||
_spatiotemporalLPfilter_Irregular(&_irregularLPfilteredFrame[0], &_tempBuffer[0]); // warning: temporal issues may occur if the temporal constant is not null !!!
|
||||
|
||||
_spatiotemporalLPfilter_Irregular(&inputFrame[0]+_filterOutput.getNBpixels(), &_irregularLPfilteredFrame[0]); |
||||
_spatiotemporalLPfilter_Irregular(&_irregularLPfilteredFrame[0], &_tempBuffer[0]+_filterOutput.getNBpixels()); |
||||
|
||||
_spatiotemporalLPfilter_Irregular(&inputFrame[0]+_filterOutput.getNBpixels()*2, &_irregularLPfilteredFrame[0]); |
||||
_spatiotemporalLPfilter_Irregular(&_irregularLPfilteredFrame[0], &_tempBuffer[0]+_filterOutput.getNBpixels()*2); |
||||
|
||||
// applying image projection/resampling
|
||||
register unsigned int *transformTablePTR=&_transformTable[0]; |
||||
for (unsigned int i=0 ; i<_usefullpixelIndex ; i+=2, transformTablePTR+=2) |
||||
{ |
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::i:"<<i<<"output(max="<<_outputNBpixels<<")="<<_transformTable[i]<<" / intput(max="<<_filterOutput.getNBpixels()<<")="<<_transformTable[i+1]<<std::endl; |
||||
#endif |
||||
_sampledFrame[*(transformTablePTR)]=_tempBuffer[*(transformTablePTR+1)]; |
||||
_sampledFrame[*(transformTablePTR)+_outputNBpixels]=_tempBuffer[*(transformTablePTR+1)+_filterOutput.getNBpixels()]; |
||||
_sampledFrame[*(transformTablePTR)+_outputDoubleNBpixels]=_tempBuffer[*(transformTablePTR+1)+_inputDoubleNBpixels]; |
||||
} |
||||
|
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::runProjection: color image projection OK"<<std::endl; |
||||
#endif |
||||
//normalizeGrayOutput_0_maxOutputValue(_sampledFrame, _outputNBpixels);
|
||||
}else |
||||
{ |
||||
_spatiotemporalLPfilter_Irregular(&inputFrame[0], &_irregularLPfilteredFrame[0]); |
||||
_spatiotemporalLPfilter_Irregular(&_irregularLPfilteredFrame[0], &_irregularLPfilteredFrame[0]); |
||||
// applying image projection/resampling
|
||||
register unsigned int *transformTablePTR=&_transformTable[0]; |
||||
for (unsigned int i=0 ; i<_usefullpixelIndex ; i+=2, transformTablePTR+=2) |
||||
{ |
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"i:"<<i<<"output(max="<<_outputNBpixels<<")="<<_transformTable[i]<<" / intput(max="<<_filterOutput.getNBpixels()<<")="<<_transformTable[i+1]<<std::endl; |
||||
#endif |
||||
_sampledFrame[*(transformTablePTR)]=_irregularLPfilteredFrame[*(transformTablePTR+1)]; |
||||
} |
||||
//normalizeGrayOutput_0_maxOutputValue(_sampledFrame, _outputNBpixels);
|
||||
#ifdef IMAGELOGPOLPROJECTION_DEBUG |
||||
std::cout<<"ImageLogPolProjection::runProjection: gray level image projection OK"<<std::endl; |
||||
#endif |
||||
} |
||||
|
||||
return _sampledFrame; |
||||
} |
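// Editorial usage sketch (illustrative, not from the original sources; parameter values are
// examples and RETINALOGPROJECTION is assumed to be accessible as shown): a gray level
// projection could be driven as follows, with nbRows/nbColumns describing the input frame:
//
//   cv::ImageLogPolProjection projection(nbRows, nbColumns,
//                                        cv::ImageLogPolProjection::RETINALOGPROJECTION, false);
//   projection.initProjection(2.0, 10.0); // reductionFactor, samplingStrenght
//   const std::valarray<double> &sampled = projection.runProjection(inputFrame, false);
//
// runProjection() first low pass filters the input with the irregular filter and then
// resamples it through the precomputed transform table.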
||||
|
||||
} |
@ -0,0 +1,236 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
#ifndef IMAGELOGPOLPROJECTION_H_ |
||||
#define IMAGELOGPOLPROJECTION_H_ |
||||
|
||||
/**
|
||||
* @class ImageLogPolProjection |
||||
* @brief class able to perform a log sampling of an image input (models the log sampling of the photoreceptors of the retina) |
||||
* or a log polar projection which models the retina information projection on the primary visual cortex: a linear projection in the center for detail analysis and a log projection of the borders (low spatial frequency motion information in general) |
||||
* |
||||
* collaboration: Barthelemy DURETTE who experimented with the retina log projection |
||||
-> "Traitement visuels Bio mimétiques pour la suppléance perceptive", internal technical report, May 2005, Gipsa-lab/DIS, Grenoble, FRANCE |
||||
* |
||||
* * TYPICAL USE: |
||||
* |
||||
* // create object, here for a log sampling (keyword:RETINALOGPROJECTION): (dynamic object allocation sample)
|
||||
* ImageLogPolProjection *imageSamplingTool; |
||||
* imageSamplingTool = new ImageLogPolProjection(frameSizeRows, frameSizeColumns, RETINALOGPROJECTION); |
||||
* |
||||
* // init log projection:
|
||||
* imageSamplingTool->initProjection(1.0, 15.0); |
||||
* |
||||
* // during program execution, call the log transform applied to a frame called "FrameBuffer" :
|
||||
* imageSamplingTool->runProjection(FrameBuffer); |
||||
* // get output frame and its size:
|
||||
* const unsigned int logSampledFrame_nbRows=imageSamplingTool->getOutputNBrows(); |
||||
* const unsigned int logSampledFrame_nbColumns=imageSamplingTool->getOutputNBcolumns(); |
||||
* const double *logSampledFrame=imageSamplingTool->getSampledFrame(); |
||||
* |
||||
* // at the end of the program, destroy object:
|
||||
* delete imageSamplingTool; |
||||
* |
||||
* @author Alexandre BENOIT, benoit.alexandre.vision@gmail.com, LISTIC : www.listic.univ-savoie.fr, Gipsa-Lab, France: www.gipsa-lab.inpg.fr/ |
||||
* Creation date 2007 |
||||
*/ |
||||
|
||||
//#define IMAGELOGPOLPROJECTION_DEBUG // used for std output debug information
|
||||
|
||||
#include "basicretinafilter.hpp" |
||||
|
||||
|
||||
namespace cv |
||||
{ |
||||
|
||||
class ImageLogPolProjection:public BasicRetinaFilter |
||||
{ |
||||
public: |
||||
|
||||
enum PROJECTIONTYPE{RETINALOGPROJECTION, CORTEXLOGPOLARPROJECTION}; |
||||
|
||||
/**
|
||||
* constructor, just specifies the image input size and the projection type, no projection initialisation is done |
||||
* -> use initProjection() for that |
||||
* @param nbRows: number of rows of the input image |
||||
* @param nbColumns: number of columns of the input image |
||||
* @param projection: the type of projection, RETINALOGPROJECTION or CORTEXLOGPOLARPROJECTION |
||||
* @param colorMode: specifies if the projection is applied on a grayscale image (false) or color images (3 layers) (true) |
||||
*/ |
||||
ImageLogPolProjection(const unsigned int nbRows, const unsigned int nbColumns, const PROJECTIONTYPE projection, const bool colorMode=false); |
||||
|
||||
/**
|
||||
* standard destructor |
||||
*/ |
||||
virtual ~ImageLogPolProjection(); |
||||
|
||||
/**
|
||||
* function that clears all buffers of the object |
||||
*/ |
||||
void clearAllBuffers(); |
||||
|
||||
/**
|
||||
* resize retina color filter object (resize all allocated buffers) |
||||
* @param NBrows: the new height size |
||||
* @param NBcolumns: the new width size |
||||
*/ |
||||
void resize(const unsigned int NBrows, const unsigned int NBcolumns); |
||||
|
||||
/**
|
||||
* init function depending on the projection type |
||||
* @param reductionFactor: the size reduction factor of the output image with regard to the size of the input image, must be greater than 1 |
||||
* @param samplingStrenght: specifies the strength of the log compression effect (magnifying coefficient) |
||||
* @return true if the init was performed without any errors |
||||
*/ |
||||
bool initProjection(const double reductionFactor, const double samplingStrenght); |
||||
|
||||
/**
|
||||
* main function of the class: run the projection |
||||
* @param inputFrame: the input frame to be processed |
||||
* @return the output frame |
||||
*/ |
||||
std::valarray<double> &runProjection(const std::valarray<double> &inputFrame, const double colorMode=false); |
||||
|
||||
/**
|
||||
* @return the numbers of rows (height) of the images OUTPUTS of the object |
||||
*/ |
||||
inline const unsigned int getOutputNBrows(){return _outputNBrows;}; |
||||
|
||||
/**
|
||||
* @return the numbers of columns (width) of the images OUTPUTS of the object |
||||
*/ |
||||
inline const unsigned int getOutputNBcolumns(){return _outputNBcolumns;}; |
||||
|
||||
/**
|
||||
* utility function: predict the output dimension obtained for one input dimension and a given reduction factor |
||||
* @param size: one of the input frame initial dimensions to be processed |
||||
* @return the output frame dimension |
||||
*/ |
||||
inline static const unsigned int predictOutputSize(const unsigned int size, const double reductionFactor){return (unsigned int)((double)size/reductionFactor);}; |
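// e.g. predictOutputSize(640, 2.0) returns 320: the expected output width for a 640 pixels wide
// input and a reduction factor of 2 (hypothetical values, shown for illustration only)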
||||
|
||||
/**
|
||||
* @return the output of the filter which applies an irregular low pass spatial filter to the image input (see the _spatiotemporalLPfilter_Irregular() function) |
||||
*/ |
||||
inline const std::valarray<double> &getIrregularLPfilteredInputFrame() const {return _irregularLPfilteredFrame;}; |
||||
|
||||
/**
|
||||
* function which allows retrieval of the output frame updated by the last call to the runProjection(...) function |
||||
* @return the projection result |
||||
*/ |
||||
inline const std::valarray<double> &getSampledFrame() const {return _sampledFrame;}; |
||||
|
||||
/**
|
||||
* function which gives the transformation table, its size is (getNBrows()*getNBcolumns()*2) |
||||
* @return the transformation matrix [outputPixIndex_i, inputPixIndex_i, outputPixIndex_i+1, inputPixIndex_i+1....] |
||||
*/ |
||||
inline const std::valarray<unsigned int> &getSamplingMap() const {return _transformTable;}; |
||||
|
||||
inline const double getOriginalRadiusLength(const double projectedRadiusLength){return _azero/(_alim-projectedRadiusLength*2.0/_minDimension);}; |
||||
|
||||
// unsigned int getInputPixelIndex(const unsigned int ){ return _transformTable[index*2+1]};
|
||||
|
||||
private: |
||||
PROJECTIONTYPE _selectedProjection; |
||||
|
||||
// size of the image output
|
||||
unsigned int _outputNBrows; |
||||
unsigned int _outputNBcolumns; |
||||
unsigned int _outputNBpixels; |
||||
unsigned int _outputDoubleNBpixels; |
||||
unsigned int _inputDoubleNBpixels; |
||||
|
||||
// is the object able to manage color flag
|
||||
bool _colorModeCapable; |
||||
// sampling strenght factor
|
||||
double _samplingStrenght; |
||||
// sampling reduction factor
|
||||
double _reductionFactor; |
||||
|
||||
// log sampling parameters
|
||||
double _azero; |
||||
double _alim; |
||||
double _minDimension; |
||||
|
||||
// template buffers
|
||||
std::valarray<double>_sampledFrame; |
||||
std::valarray<double>&_tempBuffer; |
||||
std::valarray<unsigned int>_transformTable; |
||||
|
||||
std::valarray<double> &_irregularLPfilteredFrame; // just a reference for easier understanding
|
||||
unsigned int _usefullpixelIndex; |
||||
|
||||
// init transformation tables
|
||||
bool _computeLogProjection(); |
||||
bool _computeLogPolarProjection(); |
||||
|
||||
// specifies if init was done correctly
|
||||
bool _initOK; |
||||
// private init projections functions called by "initProjection(...)" function
|
||||
bool _initLogRetinaSampling(const double reductionFactor, const double samplingStrenght); |
||||
bool _initLogPolarCortexSampling(const double reductionFactor, const double samplingStrenght); |
||||
|
||||
}; |
||||
|
||||
} |
||||
#endif /*IMAGELOGPOLPROJECTION_H_*/ |
@ -0,0 +1,207 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
#include "precomp.hpp" |
||||
|
||||
#include <iostream> |
||||
|
||||
#include "magnoretinafilter.hpp" |
||||
|
||||
#include <cmath> |
||||
|
||||
namespace cv |
||||
{ |
||||
// Constructor and destructor of the IPL magno retina filter
|
||||
MagnoRetinaFilter::MagnoRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns) |
||||
:BasicRetinaFilter(NBrows, NBcolumns, 2), |
||||
_previousInput_ON(NBrows*NBcolumns), |
||||
_previousInput_OFF(NBrows*NBcolumns), |
||||
_amacrinCellsTempOutput_ON(NBrows*NBcolumns), |
||||
_amacrinCellsTempOutput_OFF(NBrows*NBcolumns), |
||||
_magnoXOutputON(NBrows*NBcolumns), |
||||
_magnoXOutputOFF(NBrows*NBcolumns), |
||||
_localProcessBufferON(NBrows*NBcolumns), |
||||
_localProcessBufferOFF(NBrows*NBcolumns) |
||||
{ |
||||
_magnoYOutput=&_filterOutput; |
||||
_magnoYsaturated=&_localBuffer; |
||||
|
||||
|
||||
clearAllBuffers(); |
||||
|
||||
#ifdef IPL_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"MagnoRetinaFilter::Init IPL retina filter at specified frame size OK"<<std::endl; |
||||
#endif |
||||
} |
||||
|
||||
MagnoRetinaFilter::~MagnoRetinaFilter() |
||||
{ |
||||
#ifdef IPL_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"MagnoRetinaFilter::Delete IPL retina filter OK"<<std::endl; |
||||
#endif |
||||
} |
||||
|
||||
// function that clears all buffers of the object
|
||||
void MagnoRetinaFilter::clearAllBuffers() |
||||
{ |
||||
BasicRetinaFilter::clearAllBuffers(); |
||||
_previousInput_ON=0; |
||||
_previousInput_OFF=0; |
||||
_amacrinCellsTempOutput_ON=0; |
||||
_amacrinCellsTempOutput_OFF=0; |
||||
_magnoXOutputON=0; |
||||
_magnoXOutputOFF=0; |
||||
_localProcessBufferON=0; |
||||
_localProcessBufferOFF=0; |
||||
|
||||
} |
||||
|
||||
/**
|
||||
* resize retina magno filter object (resize all allocated buffers) |
||||
* @param NBrows: the new height size |
||||
* @param NBcolumns: the new width size |
||||
*/ |
||||
void MagnoRetinaFilter::resize(const unsigned int NBrows, const unsigned int NBcolumns) |
||||
{ |
||||
BasicRetinaFilter::resize(NBrows, NBcolumns); |
||||
_previousInput_ON.resize(NBrows*NBcolumns); |
||||
_previousInput_OFF.resize(NBrows*NBcolumns); |
||||
_amacrinCellsTempOutput_ON.resize(NBrows*NBcolumns); |
||||
_amacrinCellsTempOutput_OFF.resize(NBrows*NBcolumns); |
||||
_magnoXOutputON.resize(NBrows*NBcolumns); |
||||
_magnoXOutputOFF.resize(NBrows*NBcolumns); |
||||
_localProcessBufferON.resize(NBrows*NBcolumns); |
||||
_localProcessBufferOFF.resize(NBrows*NBcolumns); |
||||
|
||||
// to be sure, relink buffers
|
||||
_magnoYOutput=&_filterOutput; |
||||
_magnoYsaturated=&_localBuffer; |
||||
|
||||
// reset all buffers
|
||||
clearAllBuffers(); |
||||
} |
||||
|
||||
void MagnoRetinaFilter::setCoefficientsTable(const double parasolCells_beta, const double parasolCells_tau, const double parasolCells_k, const double amacrinCellsTemporalCutFrequency, const double localAdaptIntegration_tau, const double localAdaptIntegration_k ) |
||||
{ |
||||
_temporalCoefficient=exp(-1.0/amacrinCellsTemporalCutFrequency); |
||||
// the first set of parameters is dedicated to the low pass filtering property of the ganglion cells
|
||||
BasicRetinaFilter::setLPfilterParameters(parasolCells_beta, parasolCells_tau, parasolCells_k, 0); |
||||
// the second set of parameters is dedicated to the ganglion cells output integration for their local adaptation property
|
||||
BasicRetinaFilter::setLPfilterParameters(0, localAdaptIntegration_tau, localAdaptIntegration_k, 1); |
||||
} |
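// Note on the temporal coefficient computed above: with the cut "frequency" expressed in frames,
// the recursive coefficient is a = exp(-1/amacrinCellsTemporalCutFrequency); for the typical value
// of 5 frames documented in magnoretinafilter.hpp this gives a = exp(-0.2), about 0.82, so the
// amacrine cells high pass filter forgets its past input with a time constant of about 5 frames.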
||||
|
||||
void MagnoRetinaFilter::_amacrineCellsComputing(const double *OPL_ON, const double *OPL_OFF) |
||||
{ |
||||
register const double *OPL_ON_PTR=OPL_ON; |
||||
register const double *OPL_OFF_PTR=OPL_OFF; |
||||
register double *previousInput_ON_PTR= &_previousInput_ON[0]; |
||||
register double *previousInput_OFF_PTR= &_previousInput_OFF[0]; |
||||
register double *amacrinCellsTempOutput_ON_PTR= &_amacrinCellsTempOutput_ON[0]; |
||||
register double *amacrinCellsTempOutput_OFF_PTR= &_amacrinCellsTempOutput_OFF[0]; |
||||
|
||||
for (unsigned int IDpixel=0 ; IDpixel<this->getNBpixels(); ++IDpixel) |
||||
{ |
||||
|
||||
/* Compute ON and OFF amacrin cells high pass temporal filter */ |
||||
double magnoXonPixelResult = _temporalCoefficient*(*amacrinCellsTempOutput_ON_PTR+ *OPL_ON_PTR-*previousInput_ON_PTR); |
||||
*(amacrinCellsTempOutput_ON_PTR++)=((double)(magnoXonPixelResult>0))*magnoXonPixelResult; |
||||
|
||||
double magnoXoffPixelResult = _temporalCoefficient*(*amacrinCellsTempOutput_OFF_PTR+ *OPL_OFF_PTR-*previousInput_OFF_PTR); |
||||
*(amacrinCellsTempOutput_OFF_PTR++)=((double)(magnoXoffPixelResult>0))*magnoXoffPixelResult; |
||||
|
||||
/* prepare next loop */ |
||||
*(previousInput_ON_PTR++)=*(OPL_ON_PTR++); |
||||
*(previousInput_OFF_PTR++)=*(OPL_OFF_PTR++); |
||||
|
||||
} |
||||
} |
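// The loop above implements, for each of the ON and OFF ways, a first order high pass temporal
// filter followed by a half wave rectification:
//   y[n] = a * ( y[n-1] + x[n] - x[n-1] ),  with a = _temporalCoefficient and y[n-1] the
//   previously stored (rectified) output; the stored result is max(y[n], 0).
// Only increases of the OPL (bipolar cells) signal are kept, which is what makes this stage
// respond to moving contours rather than to static ones.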
||||
|
||||
// launch the filter that runs the whole IPL magno filter
|
||||
const std::valarray<double> &MagnoRetinaFilter::runFilter(const std::valarray<double> &OPL_ON, const std::valarray<double> &OPL_OFF) |
||||
{ |
||||
// Compute the high pass temporal filter
|
||||
_amacrineCellsComputing(&OPL_ON[0], &OPL_OFF[0]); |
||||
|
||||
// apply low pass filtering on ON and OFF ways after temporal high pass filtering
|
||||
_spatiotemporalLPfilter(&_amacrinCellsTempOutput_ON[0], &_magnoXOutputON[0], 0); |
||||
_spatiotemporalLPfilter(&_amacrinCellsTempOutput_OFF[0], &_magnoXOutputOFF[0], 0); |
||||
|
||||
// local adaptation of the ganglion cells to the local contrast of the moving contours
|
||||
_spatiotemporalLPfilter(&_magnoXOutputON[0], &_localProcessBufferON[0], 1); |
||||
_localLuminanceAdaptation(&_magnoXOutputON[0], &_localProcessBufferON[0]); |
||||
_spatiotemporalLPfilter(&_magnoXOutputOFF[0], &_localProcessBufferOFF[0], 1); |
||||
_localLuminanceAdaptation(&_magnoXOutputOFF[0], &_localProcessBufferOFF[0]); |
||||
|
||||
/* Compute MagnoY */ |
||||
register double *magnoYOutput= &(*_magnoYOutput)[0]; |
||||
register double *magnoXOutputON_PTR= &_magnoXOutputON[0]; |
||||
register double *magnoXOutputOFF_PTR= &_magnoXOutputOFF[0]; |
||||
for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel) |
||||
*(magnoYOutput++)=*(magnoXOutputON_PTR++)+*(magnoXOutputOFF_PTR++); |
||||
|
||||
return (*_magnoYOutput); |
||||
} |
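// Minimal usage sketch (hypothetical names; "parvo" stands for a ParvoRetinaFilter already run on
// the current frame, and the setCoefficientsTable values are only illustrative, loosely based on
// the typical values documented in magnoretinafilter.hpp):
//
//   MagnoRetinaFilter magno(rows, cols);
//   magno.setCoefficientsTable(0, 0, 5, 5, 0, 5);
//   const std::valarray<double> &motion = magno.runFilter(parvo.getBipolarCellsON(), parvo.getBipolarCellsOFF());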
||||
} |
||||
|
||||
|
@ -0,0 +1,203 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
#ifndef MagnoRetinaFilter_H_ |
||||
#define MagnoRetinaFilter_H_ |
||||
|
||||
/**
|
||||
* @class MagnoRetinaFilter |
||||
* @brief class which describes the magnocellular channel of the retina: |
||||
* -> performs a moving contours extraction with powerful local data enhancement |
||||
* |
||||
* TYPICAL USE: |
||||
* |
||||
* // create object at a specified picture size
|
||||
* MagnoRetinaFilter *movingContoursExtractor; |
||||
* movingContoursExtractor =new MagnoRetinaFilter(frameSizeRows, frameSizeColumns); |
||||
* |
||||
* // init gain, spatial and temporal parameters:
|
||||
* movingContoursExtractor->setCoefficientsTable(0, 0.7, 5, 3); |
||||
* |
||||
* // during program execution, call the filter for moving contours extraction, feeding it the ON and OFF bipolar cells outputs of a ParvoRetinaFilter:
|
||||
* movingContoursExtractor->runFilter(OPL_ON, OPL_OFF); |
||||
* |
||||
* // get the output frame, check in the class description below for more outputs:
|
||||
* const double *movingContours=movingContoursExtractor->getMagnoYsaturated(); |
||||
* |
||||
* // at the end of the program, destroy object:
|
||||
* delete movingContoursExtractor; |
||||
|
||||
* @author Alexandre BENOIT, benoit.alexandre.vision@gmail.com, LISTIC : www.listic.univ-savoie.fr, Gipsa-Lab, France: www.gipsa-lab.inpg.fr/ |
||||
* Creation date 2007 |
||||
* Based on Alexandre BENOIT thesis: "Le système visuel humain au secours de la vision par ordinateur" |
||||
*/ |
||||
|
||||
#include "basicretinafilter.hpp" |
||||
|
||||
//#define IPL_RETINA_ELEMENT_DEBUG
|
||||
|
||||
namespace cv |
||||
{ |
||||
|
||||
class MagnoRetinaFilter: public BasicRetinaFilter |
||||
{ |
||||
public: |
||||
/**
|
||||
* constructor parameters are only linked to image input size |
||||
* @param NBrows: number of rows of the input image |
||||
* @param NBcolumns: number of columns of the input image |
||||
*/ |
||||
MagnoRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns); |
||||
|
||||
|
||||
/**
|
||||
* destructor |
||||
*/ |
||||
virtual ~MagnoRetinaFilter(); |
||||
|
||||
/**
|
||||
* function that clears all buffers of the object |
||||
*/ |
||||
void clearAllBuffers(); |
||||
|
||||
/**
|
||||
* resize retina magno filter object (resize all allocated buffers) |
||||
* @param NBrows: the new height size |
||||
* @param NBcolumns: the new width size |
||||
*/ |
||||
void resize(const unsigned int NBrows, const unsigned int NBcolumns); |
||||
|
||||
/**
|
||||
* set parameters values |
||||
* @param parasolCells_beta: the low pass filter gain used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), typical value is 0 |
||||
* @param parasolCells_tau: the low pass filter time constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical value is 0 (immediate response) |
||||
* @param parasolCells_k: the low pass filter spatial constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical value is 5 |
||||
* @param amacrinCellsTemporalCutFrequency: the time constant of the first order high pass filter of the magnocellular way (motion information channel), unit is frames, typical value is 5 |
||||
* @param localAdaptIntegration_tau: specifies the temporal constant of the low pass filter involved in the computation of the local "motion mean" for the local adaptation computation |
||||
* @param localAdaptIntegration_k: specifies the spatial constant of the low pass filter involved in the computation of the local "motion mean" for the local adaptation computation |
||||
*/ |
||||
void setCoefficientsTable(const double parasolCells_beta, const double parasolCells_tau, const double parasolCells_k, const double amacrinCellsTemporalCutFrequency, const double localAdaptIntegration_tau, const double localAdaptIntegration_k); |
||||
|
||||
/**
|
||||
* launch filter that runs all the IPL magno filter (model of the magnocellular channel of the Inner Plexiform Layer of the retina) |
||||
* @param OPL_ON: the output of the bipolar ON cells of the retina (available from the ParvoRetinaFilter class, getBipolarCellsON() function) |
||||
* @param OPL_OFF: the output of the bipolar OFF cells of the retina (available from the ParvoRetinaFilter class, getBipolarCellsOFF() function) |
||||
* @return the processed result without post-processing |
||||
*/ |
||||
const std::valarray<double> &runFilter(const std::valarray<double> &OPL_ON, const std::valarray<double> &OPL_OFF); |
||||
|
||||
/**
|
||||
* @return the Magnocellular ON channel filtering output |
||||
*/ |
||||
inline const std::valarray<double> &getMagnoON() const {return _magnoXOutputON;}; |
||||
|
||||
/**
|
||||
* @return the Magnocellular OFF channel filtering output |
||||
*/ |
||||
inline const std::valarray<double> &getMagnoOFF() const {return _magnoXOutputOFF;}; |
||||
|
||||
/**
|
||||
* @return the saturated Magnocellular Y (sum of the ON and OFF magno channels) filtering output |
||||
*/ |
||||
inline const std::valarray<double> &getMagnoYsaturated() const {return *_magnoYsaturated;}; |
||||
|
||||
/**
|
||||
* applies an image normalization which saturates the high output values by the use of an asymmetric sigmoid |
||||
*/ |
||||
inline void normalizeGrayOutputNearZeroCentreredSigmoide(){_filterOutput.normalizeGrayOutputNearZeroCentreredSigmoide(&(*_magnoYOutput)[0], &(*_magnoYsaturated)[0]);}; |
||||
|
||||
/**
|
||||
* @return the horizontal cells' temporal constant |
||||
*/ |
||||
inline const double getTemporalConstant(){return this->_filteringCoeficientsTable[2];}; |
||||
|
||||
private: |
||||
|
||||
// related pointers to these buffers
|
||||
std::valarray<double> _previousInput_ON; |
||||
std::valarray<double> _previousInput_OFF; |
||||
std::valarray<double> _amacrinCellsTempOutput_ON; |
||||
std::valarray<double> _amacrinCellsTempOutput_OFF; |
||||
std::valarray<double> _magnoXOutputON; |
||||
std::valarray<double> _magnoXOutputOFF; |
||||
std::valarray<double> _localProcessBufferON; |
||||
std::valarray<double> _localProcessBufferOFF; |
||||
// references to parent buffers, kept to improve readability
|
||||
TemplateBuffer<double> *_magnoYOutput; |
||||
std::valarray<double> *_magnoYsaturated; |
||||
|
||||
// variables
|
||||
double _temporalCoefficient; |
||||
|
||||
// amacrine cells filter : high pass temporal filter
|
||||
void _amacrineCellsComputing(const double *ONinput, const double *OFFinput); |
||||
|
||||
|
||||
}; |
||||
|
||||
} |
||||
|
||||
#endif /*MagnoRetinaFilter_H_*/ |
||||
|
||||
|
@ -0,0 +1,227 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
#include "precomp.hpp" |
||||
|
||||
#include "parvoretinafilter.hpp" |
||||
|
||||
// @author Alexandre BENOIT, benoit.alexandre.vision@gmail.com, LISTIC : www.listic.univ-savoie.fr, Gipsa-Lab, France: www.gipsa-lab.inpg.fr/
|
||||
|
||||
#include <iostream> |
||||
#include <cmath> |
||||
|
||||
namespace cv |
||||
{ |
||||
//////////////////////////////////////////////////////////
|
||||
// OPL RETINA FILTER
|
||||
//////////////////////////////////////////////////////////
|
||||
|
||||
// Constructor and destructor of the OPL retina filter
|
||||
|
||||
ParvoRetinaFilter::ParvoRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns) |
||||
:BasicRetinaFilter(NBrows, NBcolumns, 3), |
||||
_photoreceptorsOutput(NBrows*NBcolumns), |
||||
_horizontalCellsOutput(NBrows*NBcolumns), |
||||
_parvocellularOutputON(NBrows*NBcolumns), |
||||
_parvocellularOutputOFF(NBrows*NBcolumns), |
||||
_bipolarCellsOutputON(NBrows*NBcolumns), |
||||
_bipolarCellsOutputOFF(NBrows*NBcolumns), |
||||
_localAdaptationOFF(NBrows*NBcolumns) |
||||
{ |
||||
// link to the required local parent adaptation buffers
|
||||
_localAdaptationON=&_localBuffer; |
||||
_parvocellularOutputONminusOFF=&_filterOutput; |
||||
// (*_localAdaptationON)=&_localBuffer;
|
||||
// (*_parvocellularOutputONminusOFF)=&(BasicRetinaFilter::TemplateBuffer);
|
||||
|
||||
// init: set all the values to 0
|
||||
clearAllBuffers(); |
||||
|
||||
|
||||
#ifdef OPL_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"ParvoRetinaFilter::Init OPL retina filter at specified frame size OK\n"<<std::endl; |
||||
#endif |
||||
|
||||
} |
||||
|
||||
ParvoRetinaFilter::~ParvoRetinaFilter() |
||||
{ |
||||
|
||||
#ifdef OPL_RETINA_ELEMENT_DEBUG |
||||
std::cout<<"ParvoRetinaFilter::Delete OPL retina filter OK"<<std::endl; |
||||
#endif |
||||
} |
||||
|
||||
////////////////////////////////////
|
||||
// functions of the PARVO filter
|
||||
////////////////////////////////////
|
||||
|
||||
// function that clears all buffers of the object
|
||||
void ParvoRetinaFilter::clearAllBuffers() |
||||
{ |
||||
BasicRetinaFilter::clearAllBuffers(); |
||||
_photoreceptorsOutput=0; |
||||
_horizontalCellsOutput=0; |
||||
_parvocellularOutputON=0; |
||||
_parvocellularOutputOFF=0; |
||||
_bipolarCellsOutputON=0; |
||||
_bipolarCellsOutputOFF=0; |
||||
_localAdaptationOFF=0; |
||||
} |
||||
|
||||
/**
|
||||
* resize parvo retina filter object (resize all allocated buffers) |
||||
* @param NBrows: the new height size |
||||
* @param NBcolumns: the new width size |
||||
*/ |
||||
void ParvoRetinaFilter::resize(const unsigned int NBrows, const unsigned int NBcolumns) |
||||
{ |
||||
BasicRetinaFilter::resize(NBrows, NBcolumns); |
||||
_photoreceptorsOutput.resize(NBrows*NBcolumns); |
||||
_horizontalCellsOutput.resize(NBrows*NBcolumns); |
||||
_parvocellularOutputON.resize(NBrows*NBcolumns); |
||||
_parvocellularOutputOFF.resize(NBrows*NBcolumns); |
||||
_bipolarCellsOutputON.resize(NBrows*NBcolumns); |
||||
_bipolarCellsOutputOFF.resize(NBrows*NBcolumns); |
||||
_localAdaptationOFF.resize(NBrows*NBcolumns); |
||||
|
||||
// link to the required local parent adaptation buffers
|
||||
_localAdaptationON=&_localBuffer; |
||||
_parvocellularOutputONminusOFF=&_filterOutput; |
||||
|
||||
// clean buffers
|
||||
clearAllBuffers(); |
||||
} |
||||
|
||||
// change the parameters of the filter
|
||||
void ParvoRetinaFilter::setOPLandParvoFiltersParameters(const double beta1, const double tau1, const double k1, const double beta2, const double tau2, const double k2) |
||||
{ |
||||
// init photoreceptors low pass filter
|
||||
setLPfilterParameters(beta1, tau1, k1); |
||||
// init horizontal cells low pass filter
|
||||
setLPfilterParameters(beta2, tau2, k2, 1); |
||||
// init parasol ganglion cells low pass filter (default parameters)
|
||||
setLPfilterParameters(0, tau1, k1, 2); |
||||
|
||||
} |
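// Illustrative call (the "parvoFilter" instance name is hypothetical) using the typical values
// documented in parvoretinafilter.hpp (tau1=1 frame, k1=1 pixel, beta2=0, tau2=1 frame,
// k2=5 pixels), with beta1 set to 0 so that the mean value of the output is zero:
//
//   parvoFilter.setOPLandParvoFiltersParameters(0, 1, 1, 0, 1, 5);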
||||
|
||||
// update/set size of the frames
|
||||
|
||||
// run filter for a new frame input
|
||||
// output return is (*_parvocellularOutputONminusOFF)
|
||||
const std::valarray<double> &ParvoRetinaFilter::runFilter(const std::valarray<double> &inputFrame, const bool useParvoOutput) |
||||
{ |
||||
_spatiotemporalLPfilter(&inputFrame[0], &_photoreceptorsOutput[0]); |
||||
_spatiotemporalLPfilter(&_photoreceptorsOutput[0], &_horizontalCellsOutput[0], 1); |
||||
_OPL_OnOffWaysComputing(); |
||||
|
||||
if (useParvoOutput) |
||||
{ |
||||
// local adaptation processes on ON and OFF ways
|
||||
_spatiotemporalLPfilter(&_bipolarCellsOutputON[0], &(*_localAdaptationON)[0], 2); |
||||
_localLuminanceAdaptation(&_parvocellularOutputON[0], &(*_localAdaptationON)[0]); |
||||
|
||||
_spatiotemporalLPfilter(&_bipolarCellsOutputOFF[0], &_localAdaptationOFF[0], 2); |
||||
_localLuminanceAdaptation(&_parvocellularOutputOFF[0], &_localAdaptationOFF[0]); |
||||
|
||||
//// Final loop that computes the main output of this filter
|
||||
//
|
||||
//// loop that makes the difference between photoreceptor cells output and horizontal cells
|
||||
//// positive part goes on the ON way, negative part goes on the OFF way
|
||||
register double *parvocellularOutputONminusOFF_PTR=&(*_parvocellularOutputONminusOFF)[0]; |
||||
register double *parvocellularOutputON_PTR=&_parvocellularOutputON[0]; |
||||
register double *parvocellularOutputOFF_PTR=&_parvocellularOutputOFF[0]; |
||||
|
||||
for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel) |
||||
*(parvocellularOutputONminusOFF_PTR++)= (*(parvocellularOutputON_PTR++)-*(parvocellularOutputOFF_PTR++)); |
||||
} |
||||
return (*_parvocellularOutputONminusOFF); |
||||
} |
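// Note: when useParvoOutput is false, the call above only refreshes the photoreceptors, horizontal
// cells and bipolar cells buffers; the returned ON minus OFF frame keeps its previous content.
// Callers that only need the bipolar cells outputs (for instance to feed a MagnoRetinaFilter)
// can therefore skip the local adaptation stage by passing false.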
||||
|
||||
void ParvoRetinaFilter::_OPL_OnOffWaysComputing() |
||||
{ |
||||
// loop that makes the difference between photoreceptor cells output and horizontal cells
|
||||
// positive part goes on the ON way, negative part goes on the OFF way
|
||||
register double *photoreceptorsOutput_PTR= &_photoreceptorsOutput[0]; |
||||
register double *horizontalCellsOutput_PTR= &_horizontalCellsOutput[0]; |
||||
register double *bipolarCellsON_PTR = &_bipolarCellsOutputON[0]; |
||||
register double *bipolarCellsOFF_PTR = &_bipolarCellsOutputOFF[0]; |
||||
register double *parvocellularOutputON_PTR= &_parvocellularOutputON[0]; |
||||
register double *parvocellularOutputOFF_PTR= &_parvocellularOutputOFF[0]; |
||||
|
||||
// compute bipolar cells response equal to photoreceptors minus horizontal cells response
|
||||
// and copy the result on parvo cellular outputs... keeping time before their local contrast adaptation for final result
|
||||
for (register unsigned int IDpixel=0 ; IDpixel<_filterOutput.getNBpixels() ; ++IDpixel) |
||||
{ |
||||
double pixelDifference = *(photoreceptorsOutput_PTR++) -*(horizontalCellsOutput_PTR++); |
||||
// test condition used to write pixelDifference in the ON or OFF buffer and 0 in the other
|
||||
double isPositive=(double) (pixelDifference>0); |
||||
|
||||
// ON and OFF channels writing step
|
||||
*(parvocellularOutputON_PTR++)=*(bipolarCellsON_PTR++) = isPositive*pixelDifference; |
||||
*(parvocellularOutputOFF_PTR++)=*(bipolarCellsOFF_PTR++)= (isPositive-1.0)*pixelDifference; |
||||
} |
||||
} |
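// In other words, for each pixel the loop above splits the bipolar cells signal
//   difference = photoreceptors output - horizontal cells output
// into two rectified channels:
//   ON  = max( difference, 0)
//   OFF = max(-difference, 0)
// and copies both values into the parvocellular ON/OFF buffers, before their local contrast adaptation.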
||||
} |
||||
|
@ -0,0 +1,222 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
#ifndef ParvoRetinaFilter_H_ |
||||
#define ParvoRetinaFilter_H_ |
||||
|
||||
/**
|
||||
* @class ParvoRetinaFilter |
||||
* @brief class which describes the OPL retina model and the Inner Plexiform Layer parvocellular channel of the retina: |
||||
* -> performs a contours extraction with powerful local data enhancement as at the retina level |
||||
* -> spectrum whitening occurs at the OPL (Outer Plexiform Layer) of the retina: corrects the 1/f spectrum tendency of natural images |
||||
* ---> enhances details with mid spatial frequencies, attenuates low spatial frequencies (luminance), attenuates high temporal frequencies and high spatial frequencies, etc. |
||||
* |
||||
* TYPICAL USE: |
||||
* |
||||
* // create object at a specified picture size
|
||||
* ParvoRetinaFilter *contoursExtractor; |
||||
* contoursExtractor =new ParvoRetinaFilter(frameSizeRows, frameSizeColumns); |
||||
* |
||||
* // init gain, spatial and temporal parameters:
|
||||
* contoursExtractor->setOPLandParvoFiltersParameters(0, 0.7, 1, 0, 7, 1); |
||||
* |
||||
* // during program execution, call the filter for contours extraction for an input picture called "FrameBuffer":
|
||||
* contoursExtractor->runFilter(FrameBuffer, true); |
||||
* |
||||
* // get the output frame, check in the class description below for more outputs:
|
||||
* const double *contours=contoursExtractor->getParvoONminusOFF(); |
||||
* |
||||
* // at the end of the program, destroy object:
|
||||
* delete contoursExtractor; |
||||
|
||||
* @author Alexandre BENOIT, benoit.alexandre.vision@gmail.com, LISTIC : www.listic.univ-savoie.fr, Gipsa-Lab, France: www.gipsa-lab.inpg.fr/ |
||||
* Creation date 2007 |
||||
* Based on Alexandre BENOIT thesis: "Le système visuel humain au secours de la vision par ordinateur" |
||||
* |
||||
*/ |
||||
|
||||
#include "basicretinafilter.hpp" |
||||
|
||||
|
||||
//#define OPL_RETINA_ELEMENT_DEBUG
|
||||
|
||||
namespace cv |
||||
{ |
||||
//retina classes that derivate from the Basic Retrina class
|
||||
class ParvoRetinaFilter: public BasicRetinaFilter |
||||
{ |
||||
|
||||
public: |
||||
/**
|
||||
* constructor parameters are only linked to image input size |
||||
* @param NBrows: number of rows of the input image |
||||
* @param NBcolumns: number of columns of the input image |
||||
*/ |
||||
ParvoRetinaFilter(const unsigned int NBrows=480, const unsigned int NBcolumns=640); |
||||
|
||||
/**
|
||||
* standard destructor |
||||
*/ |
||||
virtual ~ParvoRetinaFilter(); |
||||
|
||||
/**
|
||||
* resize method, keeps initial parameters, all buffers are flushed |
||||
* @param NBrows: number of rows of the input image |
||||
* @param NBcolumns: number of columns of the input image |
||||
*/ |
||||
void resize(const unsigned int NBrows, const unsigned int NBcolumns); |
||||
|
||||
/**
|
||||
* function that clears all buffers of the object |
||||
*/ |
||||
void clearAllBuffers(); |
||||
|
||||
/**
|
||||
* setup the OPL and IPL parvo channels |
||||
* @param beta1: gain of the horizontal cells network, if 0, then the mean value of the output is zero, if the parameter is near 1, the amplitude is boosted but it should only be used for values rescaling... if needed |
||||
* @param tau1: the time constant of the first order low pass filter of the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 1 frame |
||||
* @param k1: the spatial constant of the first order low pass filter of the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 1 pixel |
||||
* @param beta2: gain of the horizontal cells network, if 0, then the mean value of the output is zero, if the parameter is near 1, the luminance is not filtered and is still reachable at the output, typical value is 0
||||
* @param tau2: the time constant of the first order low pass filter of the horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is frames, typical value is 1 frame, as the photoreceptors |
||||
* @param k2: the spatial constant of the first order low pass filter of the horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels, typical value is 5 pixels, this value is also used when computing the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular channel model)
||||
*/ |
||||
void setOPLandParvoFiltersParameters(const double beta1, const double tau1, const double k1, const double beta2, const double tau2, const double k2); |
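// Example (hedged sketch, not from the original sources): a call using the typical values
// quoted in the doc comment above (tau1 = 1 frame, k1 = 1 pixel, tau2 = 1 frame, k2 = 5 pixels,
// gains beta1 and beta2 left at 0):
//
//   ParvoRetinaFilter parvo(480, 640);
//   parvo.setOPLandParvoFiltersParameters(0.0, 1.0, 1.0, 0.0, 1.0, 5.0);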
||||
|
||||
/**
|
||||
* set up the low pass filter used by the ganglion cells for local luminance adaptation
||||
* @param tau: time constant of the filter (unit is frame for video processing) |
||||
* @param k: spatial constant of the filter (unit is pixels) |
||||
*/ |
||||
void setGanglionCellsLocalAdaptationLPfilterParameters(const double tau, const double k){BasicRetinaFilter::setLPfilterParameters(0, tau, k, 2);}; // change the parameters of the filter
|
||||
|
||||
|
||||
/**
|
||||
* launches the filter: runs the OPL spatiotemporal filtering and optionally finalizes the IPL Parvo filter (model of the Parvocellular channel of the Inner Plexiform Layer of the retina)
* @param inputFrame: the input image to be processed; this can be the direct gray level input frame, but better results are expected if the input is first processed by a photoreceptors local adaptation step, which can be achieved with the help of a BasicRetinaFilter object
* @param useParvoOutput: set true if the final IPL filtering step has to be computed (local contrast enhancement)
* @return the processed Parvocellular channel output (updated only if useParvoOutput is true)
* @details: in any case, after this function call, photoreceptors and horizontal cells outputs are updated, use getPhotoreceptorsLPfilteringOutput() and getHorizontalCellsOutput() to get them
* also, bipolar cells outputs are accessible (difference between photoreceptors and horizontal cells, ON output has positive values, OFF output has negative values), use the access methods getBipolarCellsON() and getBipolarCellsOFF()
* if useParvoOutput is true, the complete Parvocellular channel is computed, more outputs are updated and can be accessed through getParvoON(), getParvoOFF() and their difference with getOutput()
||||
*/ |
||||
const std::valarray<double> &runFilter(const std::valarray<double> &inputFrame, const bool useParvoOutput=true); // output return is _parvocellularOutputONminusOFF
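// Hedged usage sketch (assumes a ParvoRetinaFilter instance named "parvo" and an input
// std::valarray<double> named "frame" of the expected size): after runFilter() the
// intermediate outputs documented above can be read back through the getters below.
//
//   const std::valarray<double> &contours = parvo.runFilter(frame, true);
//   const std::valarray<double> &photoreceptors = parvo.getPhotoreceptorsLPfilteringOutput();
//   const std::valarray<double> &bipolarON = parvo.getBipolarCellsON();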
|
||||
|
||||
/**
|
||||
* @return the output of the photoreceptors filtering step (high cut frequency spatio-temporal low pass filter) |
||||
*/ |
||||
inline const std::valarray<double> &getPhotoreceptorsLPfilteringOutput() const {return _photoreceptorsOutput;}; |
||||
|
||||
/**
|
||||
* @return the output of the photoreceptors filtering step (low cut frequency spatio-temporal low pass filter) |
||||
*/ |
||||
inline const std::valarray<double> &getHorizontalCellsOutput() const { return _horizontalCellsOutput;}; |
||||
|
||||
/**
|
||||
* @return the output Parvocellular ON channel of the retina model |
||||
*/ |
||||
inline const std::valarray<double> &getParvoON() const {return _parvocellularOutputON;}; |
||||
|
||||
/**
|
||||
* @return the output Parvocellular OFF channel of the retina model |
||||
*/ |
||||
inline const std::valarray<double> &getParvoOFF() const {return _parvocellularOutputOFF;}; |
||||
|
||||
/**
|
||||
* @return the output of the Bipolar cells of the ON channel of the retina model same as function getParvoON() but without luminance local adaptation |
||||
*/ |
||||
inline const std::valarray<double> &getBipolarCellsON() const {return _bipolarCellsOutputON;}; |
||||
|
||||
/**
|
||||
* @return the output of the Bipolar cells of the OFF channel of the retina model, same as function getParvoOFF() but without luminance local adaptation
||||
*/ |
||||
inline const std::valarray<double> &getBipolarCellsOFF() const {return _bipolarCellsOutputOFF;}; |
||||
|
||||
/**
|
||||
* @return the photoreceptors' temporal constant
||||
*/ |
||||
inline const double getPhotoreceptorsTemporalConstant(){return this->_filteringCoeficientsTable[2];}; |
||||
|
||||
/**
|
||||
* @return the horizontal cells' temporal constant |
||||
*/ |
||||
inline const double getHcellsTemporalConstant(){return this->_filteringCoeficientsTable[5];}; |
||||
|
||||
private: |
||||
// template buffers
|
||||
std::valarray <double>_photoreceptorsOutput; |
||||
std::valarray <double>_horizontalCellsOutput; |
||||
std::valarray <double>_parvocellularOutputON; |
||||
std::valarray <double>_parvocellularOutputOFF; |
||||
std::valarray <double>_bipolarCellsOutputON; |
||||
std::valarray <double>_bipolarCellsOutputOFF; |
||||
std::valarray <double>_localAdaptationOFF; |
||||
std::valarray <double> *_localAdaptationON; |
||||
TemplateBuffer<double> *_parvocellularOutputONminusOFF; |
||||
// private functions
|
||||
void _OPL_OnOffWaysComputing(); |
||||
|
||||
}; |
||||
} |
||||
#endif |
||||
|
@ -0,0 +1,409 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
** |
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
** |
||||
** |
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
** |
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
** |
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
** |
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
** |
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
** |
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
** |
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
** |
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
** |
||||
** Third party copyrights are property of their respective owners. |
||||
** |
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
** |
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
** |
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
** |
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
** |
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
/*
|
||||
* Retina.cpp |
||||
* |
||||
* Created on: Jul 19, 2011 |
||||
* Author: Alexandre Benoit |
||||
*/ |
||||
#include "precomp.hpp" |
||||
#include "retinafilter.hpp" |
||||
#include <iostream> |
||||
|
||||
namespace cv |
||||
{ |
||||
|
||||
Retina::Retina(const std::string parametersSaveFile, const cv::Size inputSize) |
||||
{ |
||||
_retinaFilter = 0; |
||||
_init(parametersSaveFile, inputSize, true, RETINA_COLOR_BAYER, false); |
||||
} |
||||
|
||||
Retina::Retina(const std::string parametersSaveFile, const cv::Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght) |
||||
{ |
||||
_retinaFilter = 0; |
||||
_init(parametersSaveFile, inputSize, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght); |
||||
}; |
||||
|
||||
Retina::~Retina() |
||||
{ |
||||
delete _retinaFilter; |
||||
} |
||||
|
||||
void Retina::setup(std::string retinaParameterFile, const bool applyDefaultSetupOnFailure) |
||||
{ |
||||
// open specified parameters file
|
||||
std::cout<<"Retina::setup: setting up retina from parameter file : "<<retinaParameterFile<<std::endl; |
||||
|
||||
// very UGLY cases processing... to be updated...
|
||||
|
||||
try |
||||
{ |
||||
|
||||
// rewriting a new parameter file...
|
||||
if (_parametersSaveFile.isOpened()) |
||||
_parametersSaveFile.release(); |
||||
_parametersSaveFile.open(_parametersSaveFileName, cv::FileStorage::WRITE); |
||||
// opening retinaParameterFile in read mode
|
||||
cv::FileStorage fs(retinaParameterFile, cv::FileStorage::READ); |
||||
// read parameters file if it exists or apply default setup if asked for
|
||||
if (!fs.isOpened()) |
||||
{ |
||||
std::cout<<"Retina::setup: provided parameters file could not be open... skeeping configuration"<<std::endl; |
||||
return; |
||||
// implicit else case : retinaParameterFile could be open (it exists at least)
|
||||
} |
||||
|
||||
// preparing parameter setup
|
||||
bool colorMode, normaliseOutput; |
||||
double photoreceptorsLocalAdaptationSensitivity, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, hcellsTemporalConstant, hcellsSpatialConstant, ganglionCellsSensitivity; |
||||
// OPL and Parvo init first
|
||||
cv::FileNode rootFn = fs.root(), currFn=rootFn["OPLandIPLparvo"]; |
||||
currFn["colorMode"]>>colorMode; |
||||
currFn["normaliseOutput"]>>normaliseOutput; |
||||
currFn["photoreceptorsLocalAdaptationSensitivity"]>>photoreceptorsLocalAdaptationSensitivity; |
||||
currFn["photoreceptorsTemporalConstant"]>>photoreceptorsTemporalConstant; |
||||
currFn["photoreceptorsSpatialConstant"]>>photoreceptorsSpatialConstant; |
||||
currFn["horizontalCellsGain"]>>horizontalCellsGain; |
||||
currFn["hcellsTemporalConstant"]>>hcellsTemporalConstant; |
||||
currFn["hcellsSpatialConstant"]>>hcellsSpatialConstant; |
||||
currFn["ganglionCellsSensitivity"]>>ganglionCellsSensitivity; |
||||
setupOPLandIPLParvoChannel(colorMode, normaliseOutput, photoreceptorsLocalAdaptationSensitivity, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, hcellsTemporalConstant, hcellsSpatialConstant, ganglionCellsSensitivity); |
||||
|
||||
// init retina IPL magno setup
|
||||
currFn=rootFn["IPLmagno"]; |
||||
currFn["normaliseOutput"]>>normaliseOutput; |
||||
double parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k; |
||||
currFn["parasolCells_beta"]>>parasolCells_beta; |
||||
currFn["parasolCells_tau"]>>parasolCells_tau; |
||||
currFn["parasolCells_k"]>>parasolCells_k; |
||||
currFn["amacrinCellsTemporalCutFrequency"]>>amacrinCellsTemporalCutFrequency; |
||||
currFn["V0CompressionParameter"]>>V0CompressionParameter; |
||||
currFn["localAdaptintegration_tau"]>>localAdaptintegration_tau; |
||||
currFn["localAdaptintegration_k"]>>localAdaptintegration_k; |
||||
|
||||
setupIPLMagnoChannel(normaliseOutput, parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, |
||||
V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k); |
||||
|
||||
}catch(Exception &e) |
||||
{ |
||||
std::cout<<"Retina::setup: resetting retina with default parameters"<<std::endl; |
||||
if (applyDefaultSetupOnFailure) |
||||
{ |
||||
setupOPLandIPLParvoChannel(); |
||||
setupIPLMagnoChannel(); |
||||
} |
||||
std::cout<<"Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>"<<e.what()<<std::endl; |
||||
std::cout<<"=> keeping current parameters"<<std::endl; |
||||
} |
||||
_parametersSaveFile.release(); // close file after setup
|
||||
// report current configuration
|
||||
std::cout<<printSetup()<<std::endl; |
||||
} |
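// Hedged sketch (not from the original sources): the parameter file read by setup() above is a
// cv::FileStorage document with two map nodes; the key names listed here are exactly those read
// in the function, no default values are implied:
//
//   OPLandIPLparvo:
//     colorMode, normaliseOutput, photoreceptorsLocalAdaptationSensitivity,
//     photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain,
//     hcellsTemporalConstant, hcellsSpatialConstant, ganglionCellsSensitivity
//   IPLmagno:
//     normaliseOutput, parasolCells_beta, parasolCells_tau, parasolCells_k,
//     amacrinCellsTemporalCutFrequency, V0CompressionParameter,
//     localAdaptintegration_tau, localAdaptintegration_k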
||||
|
||||
const std::string Retina::printSetup() |
||||
{ |
||||
std::stringstream outmessage; |
||||
|
||||
|
||||
try |
||||
{ |
||||
cv::FileStorage parametersReader(_parametersSaveFileName, cv::FileStorage::READ); |
||||
if (!parametersReader.isOpened()) |
||||
{ |
||||
outmessage<<"Retina is not already settled up"; |
||||
} |
||||
else |
||||
{ |
||||
// accessing xml parameters nodes
|
||||
cv::FileNode rootFn = parametersReader.root(); |
||||
cv::FileNode currFn=rootFn["OPLandIPLparvo"]; |
||||
|
||||
// displaying OPL and IPL parvo setup
|
||||
outmessage<<"Current Retina instance setup :" |
||||
<<"\nOPLandIPLparvo"<<"{" |
||||
<< "\n==> colorMode : " << currFn["colorMode"].operator int() |
||||
<< "\n==> normalizeParvoOutput :" << currFn["normaliseOutput"].operator int() |
||||
<< "\n==> photoreceptorsLocalAdaptationSensitivity : " << currFn["photoreceptorsLocalAdaptationSensitivity"].operator float() |
||||
<< "\n==> photoreceptorsTemporalConstant : " << currFn["photoreceptorsTemporalConstant"].operator float() |
||||
<< "\n==> photoreceptorsSpatialConstant : " << currFn["photoreceptorsSpatialConstant"].operator float() |
||||
<< "\n==> horizontalCellsGain : " << currFn["horizontalCellsGain"].operator float() |
||||
<< "\n==> hcellsTemporalConstant : " << currFn["hcellsTemporalConstant"].operator float() |
||||
<< "\n==> hcellsSpatialConstant : " << currFn["hcellsSpatialConstant"].operator float() |
||||
<< "\n==> parvoGanglionCellsSensitivity : " << currFn["ganglionCellsSensitivity"].operator float() |
||||
<<"}\n"; |
||||
|
||||
// displaying IPL magno setup
|
||||
currFn=rootFn["IPLmagno"]; |
||||
outmessage<<"Current Retina instance setup :" |
||||
<<"\nIPLmagno"<<"{" |
||||
<< "\n==> normaliseOutput : " << currFn["normaliseOutput"].operator int() |
||||
<< "\n==> parasolCells_beta : " << currFn["parasolCells_beta"].operator float() |
||||
<< "\n==> parasolCells_tau : " << currFn["parasolCells_tau"].operator float() |
||||
<< "\n==> parasolCells_k : " << currFn["parasolCells_k"].operator float() |
||||
<< "\n==> amacrinCellsTemporalCutFrequency : " << currFn["amacrinCellsTemporalCutFrequency"].operator float() |
||||
<< "\n==> V0CompressionParameter : " << currFn["V0CompressionParameter"].operator float() |
||||
<< "\n==> localAdaptintegration_tau : " << currFn["localAdaptintegration_tau"].operator float() |
||||
<< "\n==> localAdaptintegration_k : " << currFn["localAdaptintegration_k"].operator float() |
||||
<<"}"; |
||||
} |
||||
}catch(cv::Exception &e) |
||||
{ |
||||
outmessage<<"Error reading parameters configuration file : "<<e.what()<<std::endl; |
||||
} |
||||
return outmessage.str(); |
||||
} |
||||
|
||||
void Retina::setupOPLandIPLParvoChannel(const bool colorMode, const bool normaliseOutput, const double photoreceptorsLocalAdaptationSensitivity, const double photoreceptorsTemporalConstant, const double photoreceptorsSpatialConstant, const double horizontalCellsGain, const double HcellsTemporalConstant, const double HcellsSpatialConstant, const double ganglionCellsSensitivity) |
||||
{ |
||||
// parameters setup (default setup)
|
||||
_retinaFilter->setColorMode(colorMode); |
||||
_retinaFilter->setPhotoreceptorsLocalAdaptationSensitivity(photoreceptorsLocalAdaptationSensitivity); |
||||
_retinaFilter->setOPLandParvoParameters(0, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, HcellsTemporalConstant, HcellsSpatialConstant, ganglionCellsSensitivity); |
||||
_retinaFilter->setParvoGanglionCellsLocalAdaptationSensitivity(ganglionCellsSensitivity); |
||||
_retinaFilter->activateNormalizeParvoOutput_0_maxOutputValue(normaliseOutput); |
||||
|
||||
// save parameters in the xml parameters tree... if parameters file is already open
|
||||
if (!_parametersSaveFile.isOpened()) |
||||
return; |
||||
_parametersSaveFile<<"OPLandIPLparvo"<<"{"; |
||||
_parametersSaveFile << "colorMode" << colorMode; |
||||
_parametersSaveFile << "normaliseOutput" << normaliseOutput; |
||||
_parametersSaveFile << "photoreceptorsLocalAdaptationSensitivity" << photoreceptorsLocalAdaptationSensitivity; |
||||
_parametersSaveFile << "photoreceptorsTemporalConstant" << photoreceptorsTemporalConstant; |
||||
_parametersSaveFile << "photoreceptorsSpatialConstant" << photoreceptorsSpatialConstant; |
||||
_parametersSaveFile << "horizontalCellsGain" << horizontalCellsGain; |
||||
_parametersSaveFile << "hcellsTemporalConstant" << HcellsTemporalConstant; |
||||
_parametersSaveFile << "hcellsSpatialConstant" << HcellsSpatialConstant; |
||||
_parametersSaveFile << "ganglionCellsSensitivity" << ganglionCellsSensitivity; |
||||
_parametersSaveFile << "}"; |
||||
} |
||||
|
||||
void Retina::setupIPLMagnoChannel(const bool normaliseOutput, const double parasolCells_beta, const double parasolCells_tau, const double parasolCells_k, const double amacrinCellsTemporalCutFrequency, const double V0CompressionParameter, const double localAdaptintegration_tau, const double localAdaptintegration_k) |
||||
{ |
||||
|
||||
_retinaFilter->setMagnoCoefficientsTable(parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k); |
||||
_retinaFilter->activateNormalizeMagnoOutput_0_maxOutputValue(normaliseOutput); |
||||
|
||||
// save parameters in the xml parameters tree... if parameters file is already open
|
||||
if (!_parametersSaveFile.isOpened()) |
||||
return; |
||||
_parametersSaveFile<<"IPLmagno"<<"{"; |
||||
_parametersSaveFile << "normaliseOutput" << normaliseOutput; |
||||
_parametersSaveFile << "parasolCells_beta" << parasolCells_beta; |
||||
_parametersSaveFile << "parasolCells_tau" << parasolCells_tau; |
||||
_parametersSaveFile << "parasolCells_k" << parasolCells_k; |
||||
_parametersSaveFile << "amacrinCellsTemporalCutFrequency" << amacrinCellsTemporalCutFrequency; |
||||
_parametersSaveFile << "V0CompressionParameter" << V0CompressionParameter; |
||||
_parametersSaveFile << "localAdaptintegration_tau" << localAdaptintegration_tau; |
||||
_parametersSaveFile << "localAdaptintegration_k" << localAdaptintegration_k; |
||||
_parametersSaveFile<<"}"; |
||||
|
||||
} |
||||
|
||||
void Retina::run(const cv::Mat &inputImage) |
||||
{ |
||||
|
||||
// first check input consistency
|
||||
if (inputImage.empty()) |
||||
throw cv::Exception(-1, "Retina cannot be applied, input buffer is empty", "Retina::run", "Retina.h", 0); |
||||
|
||||
// retrieve color mode from image input
|
||||
bool colorMode = inputImage.channels() >=3; |
||||
|
||||
// TODO : ensure input color image is CV_BGR coded
|
||||
//if (inputImage.flags!=CV_BGR)
|
||||
// throw cv::Exception(-1, "Retina color input must be BGR coded", "Retina::run", "Retina.h", 0);
|
||||
|
||||
// first convert input image to the compatible format : std::valarray<double>
|
||||
double *imagePTR=&_inputBuffer[0]; |
||||
|
||||
if (!colorMode) |
||||
{ |
||||
for (int i=0;i<inputImage.size().height;++i) |
||||
{ |
||||
const unsigned char *linePTR = inputImage.ptr<unsigned char>(i); |
||||
for (int j=0;j<inputImage.size().width;++j) |
||||
*(imagePTR++) =(double)*(linePTR++); |
||||
} |
||||
}else |
||||
{ |
||||
const unsigned int doubleNBpixelsPerLayer=_retinaFilter->getInputNBpixels()*2; |
||||
for (int i=0;i<inputImage.size().height;++i) |
||||
{ |
||||
for (int j=0;j<inputImage.size().width;++j,++imagePTR) |
||||
{ |
||||
cv::Point2d pixel(j,i); |
||||
cv::Vec3b pixelValue=inputImage.at<cv::Vec3b>(pixel); |
||||
*(imagePTR) =(double)pixelValue[2]; |
||||
*(imagePTR+_retinaFilter->getInputNBpixels()) =(double)pixelValue[1]; |
||||
*(imagePTR+doubleNBpixelsPerLayer ) =(double)pixelValue[0]; |
||||
} |
||||
} |
||||
} |
||||
|
||||
// process the retina
|
||||
if (!_retinaFilter->runFilter(_inputBuffer, colorMode, false, colorMode, false)) |
||||
throw cv::Exception(-1, "Retina cannot be applied, wrong input buffer size", "Retina::run", "Retina.h", 0); |
||||
} |
||||
|
||||
void Retina::getParvo(cv::Mat &retinaOutput_parvo) |
||||
{ |
||||
if (_retinaFilter->getColorMode()) |
||||
{ |
||||
// reallocate output buffer (if necessary)
|
||||
_convertValarrayGrayBuffer2cvMat(_retinaFilter->getColorOutput(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), true, retinaOutput_parvo); |
||||
}else |
||||
{ |
||||
// reallocate output buffer (if necessary)
|
||||
_convertValarrayGrayBuffer2cvMat(_retinaFilter->getContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_parvo); |
||||
} |
||||
//retinaOutput_parvo/=255.0;
|
||||
} |
||||
void Retina::getMagno(cv::Mat &retinaOutput_magno) |
||||
{ |
||||
// reallocate output buffer (if necessary)
|
||||
_convertValarrayGrayBuffer2cvMat(_retinaFilter->getMovingContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_magno); |
||||
//retinaOutput_magno/=255.0;
|
||||
} |
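// Hedged end-to-end usage sketch (assumes an 8-bit BGR cv::Mat named "inputFrame" and a
// writable parameter file path; "retinaParams.xml" is only a placeholder name; only methods
// defined in this translation unit are used):
//
//   cv::Retina retina("retinaParams.xml", inputFrame.size());
//   retina.run(inputFrame);            // feed one frame
//   cv::Mat parvo, magno;
//   retina.getParvo(parvo);            // details / contours channel
//   retina.getMagno(magno);            // transient / motion channel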
||||
|
||||
// private method called by constructors
|
||||
void Retina::_init(const std::string parametersSaveFile, const cv::Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght) |
||||
{ |
||||
_parametersSaveFileName = parametersSaveFile; |
||||
|
||||
// basic error check
|
||||
if (inputSize.height*inputSize.width <= 0) |
||||
throw cv::Exception(-1, "Bad retina size setup : size height and with must be superior to zero", "Retina::setup", "Retina.h", 0); |
||||
|
||||
unsigned int nbPixels=inputSize.height*inputSize.width; |
||||
// resize buffers if size does not match
|
||||
_inputBuffer.resize(nbPixels*3); // buffer supports gray images but also 3 channels color buffers... (larger is better...)
|
||||
|
||||
// allocate the retina model
|
||||
delete _retinaFilter; |
||||
_retinaFilter = new RetinaFilter(inputSize.height, inputSize.width, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght); |
||||
|
||||
// prepare the parameter XML tree
|
||||
_parametersSaveFile.open(parametersSaveFile, cv::FileStorage::WRITE ); |
||||
|
||||
_parametersSaveFile<<"InputSize"<<"{"; |
||||
_parametersSaveFile<<"height"<<inputSize.height; |
||||
_parametersSaveFile<<"width"<<inputSize.width; |
||||
_parametersSaveFile<<"}"; |
||||
|
||||
// clear all retina buffers
|
||||
// apply default setup
|
||||
setupOPLandIPLParvoChannel(); |
||||
setupIPLMagnoChannel(); |
||||
|
||||
// write current parameters to params file
|
||||
_parametersSaveFile.release(); |
||||
|
||||
// init retina
|
||||
_retinaFilter->clearAllBuffers(); |
||||
|
||||
// report current configuration
|
||||
std::cout<<printSetup()<<std::endl; |
||||
} |
||||
|
||||
void Retina::_convertValarrayGrayBuffer2cvMat(const std::valarray<double> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, cv::Mat &outBuffer) |
||||
{ |
||||
// fill output buffer with the valarray buffer
|
||||
const double *valarrayPTR=&grayMatrixToConvert[0]; |
||||
if (!colorMode) |
||||
{ |
||||
outBuffer.create(cv::Size(nbColumns, nbRows), CV_8U); |
||||
for (unsigned int i=0;i<nbRows;++i) |
||||
{ |
||||
for (unsigned int j=0;j<nbColumns;++j) |
||||
{ |
||||
cv::Point2d pixel(j,i); |
||||
outBuffer.at<unsigned char>(pixel)=(unsigned char)*(valarrayPTR++); |
||||
} |
||||
} |
||||
}else |
||||
{ |
||||
const unsigned int doubleNBpixels=_retinaFilter->getOutputNBpixels()*2; |
||||
outBuffer.create(cv::Size(nbColumns, nbRows), CV_8UC3); |
||||
for (unsigned int i=0;i<nbRows;++i) |
||||
{ |
||||
for (unsigned int j=0;j<nbColumns;++j,++valarrayPTR) |
||||
{ |
||||
cv::Point2d pixel(j,i); |
||||
cv::Vec3b pixelValues; |
||||
pixelValues[2]=(unsigned char)*(valarrayPTR); |
||||
pixelValues[1]=(unsigned char)*(valarrayPTR+_retinaFilter->getOutputNBpixels()); |
||||
pixelValues[0]=(unsigned char)*(valarrayPTR+doubleNBpixels); |
||||
|
||||
outBuffer.at<cv::Vec3b>(pixel)=pixelValues; |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
void Retina::clearBuffers() {_retinaFilter->clearAllBuffers();} |
||||
|
||||
} // end of namespace cv
|
@ -0,0 +1,266 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
/**
|
||||
* @class RetinaColor a color multiplexing/demultiplexing (demosaicing) model inspired by the human visual system. Different mosaicing strategies can be used, including random sampling!
* => please take a look at the nice and efficient demosaicing strategy introduced by B. Chaix de Lavarene, see the cited paper for more mathematical details
* @brief Retina color sampling model which allows classical Bayer sampling, random sampling and potentially several other methods! Low color errors on corners!
||||
* -> Based on the research of: |
||||
* .Brice Chaix Lavarene (chaix@lis.inpg.fr) |
||||
* .Jeanny Herault (herault@lis.inpg.fr) |
||||
* .David Alleyson (david.alleyson@upmf-grenoble.fr) |
||||
* .collaboration: alexandre benoit (benoit.alexandre.vision@gmail.com or benoit@lis.inpg.fr) |
||||
* Please cite: B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
* @author Alexandre BENOIT, benoit.alexandre.vision@gmail.com, LISTIC / Gipsa-Lab, France: www.gipsa-lab.inpg.fr/ |
||||
* Creation date 2007 |
||||
*/ |
||||
|
||||
#ifndef RETINACOLOR_HPP_ |
||||
#define RETINACOLOR_HPP_ |
||||
|
||||
#include "basicretinafilter.hpp" |
||||
|
||||
//#define __RETINACOLORDEBUG //define RETINACOLORDEBUG in order to display debug data
|
||||
|
||||
namespace cv |
||||
{ |
||||
|
||||
class RetinaColor: public BasicRetinaFilter |
||||
{ |
||||
public: |
||||
/**
|
||||
* @typedef used to select the type of photoreceptors color sampling
||||
*/ |
||||
|
||||
/**
|
||||
* constructor of the retina color processing model |
||||
* @param NBrows: number of rows of the input image |
||||
* @param NBcolumns: number of columns of the input image |
||||
* @param samplingMethod: the chosen color sampling method |
||||
*/ |
||||
RetinaColor(const unsigned int NBrows, const unsigned int NBcolumns, const RETINA_COLORSAMPLINGMETHOD samplingMethod=RETINA_COLOR_DIAGONAL); |
||||
|
||||
/**
|
||||
* standard destructor |
||||
*/ |
||||
virtual ~RetinaColor(); |
||||
|
||||
/**
|
||||
* function that clears all buffers of the object |
||||
*/ |
||||
void clearAllBuffers(); |
||||
|
||||
/**
|
||||
* resize retina color filter object (resize all allocated buffers) |
||||
* @param NBrows: the new height size |
||||
* @param NBcolumns: the new width size |
||||
*/ |
||||
void resize(const unsigned int NBrows, const unsigned int NBcolumns); |
||||
|
||||
|
||||
/**
|
||||
* color multiplexing function: a demultiplexed RGB frame of size M*N*3 is transformed into a multiplexed M*N*1 pixels frame where each pixel is either Red, or Green or Blue |
||||
* @param inputRGBFrame: the input RGB frame to be processed |
||||
* @return nothing, but the multiplexed frame is available through the getMultiplexedFrame() function
||||
*/ |
||||
inline void runColorMultiplexing(const std::valarray<double> &inputRGBFrame){runColorMultiplexing(inputRGBFrame, *_multiplexedFrame);}; |
||||
|
||||
/**
|
||||
* color multiplexing function: a demultiplexed RGB frame of size M*N*3 is transformed into a multiplexed M*N*1 pixels frame where each pixel is either Red, Green or Blue when using RGB images
||||
* @param demultiplexedInputFrame: the demultiplexed input frame to be processed of size M*N*3 |
||||
* @param multiplexedFrame: the resulting multiplexed frame |
||||
*/ |
||||
void runColorMultiplexing(const std::valarray<double> &demultiplexedInputFrame, std::valarray<double> &multiplexedFrame); |
||||
|
||||
/**
|
||||
* color demultiplexing function: a multiplexed frame of size M*N*1 pixels is transformed into a RGB demultiplexed M*N*3 pixels frame |
||||
* @param multiplexedColorFrame: the input multiplexed frame to be processed |
||||
* @param adaptiveFiltering: specifies if an adaptive filtering has to be performed rather than standard filtering (adaptive filtering allows a better rendering)
* @param maxInputValue: the maximum input data value (should be 255 for 8 bit images but it can change in the case of High Dynamic Range Images (HDRI))
* @return nothing, but the output demultiplexed frame is available through the getDemultiplexedColorFrame() function; also use getLuminance() and getChrominance() in order to retrieve either luminance or chrominance
||||
*/ |
||||
void runColorDemultiplexing(const std::valarray<double> &multiplexedColorFrame, const bool adaptiveFiltering=false, const double maxInputValue=255.0); |
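// Hedged round-trip sketch (assumes a RetinaColor instance named "colorEngine" and an RGB
// std::valarray<double> named "rgbFrame" of size rows*columns*3, laid out as the class expects):
//
//   colorEngine.runColorMultiplexing(rgbFrame);                 // result via getMultiplexedFrame()
//   colorEngine.runColorDemultiplexing(colorEngine.getMultiplexedFrame(), true, 255.0);
//   const std::valarray<double> &rgbOut = colorEngine.getDemultiplexedColorFrame();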
||||
|
||||
/**
|
||||
* activate color saturation as the final step of the color demultiplexing process |
||||
* -> this saturation is a sigmoid function applied to each channel of the demultiplexed image.
* @param saturateColors: boolean that activates color saturation (if true) or deactivates it (if false)
||||
* @param colorSaturationValue: the saturation factor |
||||
* */ |
||||
void setColorSaturation(const bool saturateColors=true, const double colorSaturationValue=4.0){_saturateColors=saturateColors; _colorSaturationValue=colorSaturationValue;}; |
||||
|
||||
/**
|
||||
* set parameters of the low pass spatio-temporal filter used to retrieve the low chrominance
||||
* @param beta: gain of the filter (generally set to zero) |
||||
* @param tau: time constant of the filter (unit is frame for video processing), typically 0 when considering static processing, 1 or more if a temporal smoothing effect is required |
||||
* @param k: spatial constant of the filter (unit is pixels), typical value is 2.5 |
||||
*/ |
||||
void setChrominanceLPfilterParameters(const double beta, const double tau, const double k){setLPfilterParameters(beta, tau, k);}; |
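// Example (hedged sketch): a call using the typical values quoted in the doc above, for static
// image processing (beta = 0, tau = 0 i.e. no temporal smoothing, k = 2.5 pixels):
//
//   colorEngine.setChrominanceLPfilterParameters(0.0, 0.0, 2.5);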
||||
|
||||
/**
|
||||
* apply to the retina color output the Krauskopf transformation which leads to an opponent color system: the output colorspace is Acr1cr2 if the retina input was in the LMS color space
||||
* @param result: the input buffer to fill with the transformed colorspace retina output |
||||
* @return true if process ended successfully |
||||
*/ |
||||
const bool applyKrauskopfLMS2Acr1cr2Transform(std::valarray<double> &result); |
||||
|
||||
/**
|
||||
* apply to the retina color output the CIE Lab color transformation |
||||
* @param result: the input buffer to fill with the transformed colorspace retina output |
||||
* @return true if process ended successfully |
||||
*/ |
||||
const bool applyLMS2LabTransform(std::valarray<double> &result); |
||||
|
||||
/**
|
||||
* @return the multiplexed frame result (use this after function runColorMultiplexing) |
||||
*/ |
||||
inline const std::valarray<double> &getMultiplexedFrame() const {return *_multiplexedFrame;}; |
||||
|
||||
/**
|
||||
* @return the demultiplexed frame result (use this after function runColorDemultiplexing) |
||||
*/ |
||||
inline const std::valarray<double> &getDemultiplexedColorFrame() const {return _demultiplexedColorFrame;}; |
||||
|
||||
/**
|
||||
* @return the luminance of the processed frame (use this after function runColorDemultiplexing) |
||||
*/ |
||||
inline const std::valarray<double> &getLuminance() const {return *_luminance;}; |
||||
|
||||
/**
|
||||
* @return the chrominance of the processed frame (use this after function runColorDemultiplexing) |
||||
*/ |
||||
inline const std::valarray<double> &getChrominance() const {return _chrominance;}; |
||||
|
||||
/**
|
||||
* standard 0 to 255 image clipping function applied to RGB images (of size M*N*3 pixels)
* @param inputOutputBuffer: the image to be normalized (rewrites the input); if no parameter is given, the built-in buffer reachable by the getOutput() function is normalized
* @param maxOutputValue: the maximum value allowed at the output (values greater than it are clipped)
||||
*/ |
||||
void clipRGBOutput_0_maxInputValue(double *inputOutputBuffer, const double maxOutputValue=255.0); |
||||
|
||||
/**
|
||||
* standard 0 to 255 image normalization function applied to RGB images (of size M*N*3 pixels)
* @param maxOutputValue: the maximum value allowed at the output (values greater than it are clipped)
||||
*/ |
||||
void normalizeRGBOutput_0_maxOutputValue(const double maxOutputValue=255.0); |
||||
|
||||
/**
|
||||
* return the color sampling map: an Nrows*Mcolumns image in which each pixel value is the offset address of the sampled pixel in an Nrows*Mcolumns*3 color image ordered by layers: layer1, layer2, layer3
||||
*/ |
||||
inline const std::valarray<unsigned int> &getSamplingMap() const {return _colorSampling;}; |
||||
|
||||
/**
|
||||
* function used (to bypass processing) to manually set the color output |
||||
* @param demultiplexedImage: the color image (luminance+chrominance) which has to be written in the object buffer |
||||
*/ |
||||
inline void setDemultiplexedColorFrame(const std::valarray<double> &demultiplexedImage){_demultiplexedColorFrame=demultiplexedImage;}; |
||||
|
||||
protected: |
||||
|
||||
// protected data
||||
RETINA_COLORSAMPLINGMETHOD _samplingMethod; |
||||
bool _saturateColors; |
||||
double _colorSaturationValue; |
||||
// links to parent buffers (more convenient names)
|
||||
TemplateBuffer<double> *_luminance; |
||||
std::valarray<double> *_multiplexedFrame; |
||||
// instance buffers
|
||||
std::valarray<unsigned int> _colorSampling; // table of size (_nbRows*_nbColumns) which specifies the color of each pixel
|
||||
std::valarray<double> _RGBmosaic; |
||||
std::valarray<double> _tempMultiplexedFrame; |
||||
std::valarray<double> _demultiplexedTempBuffer; |
||||
std::valarray<double> _demultiplexedColorFrame; |
||||
std::valarray<double> _chrominance; |
||||
std::valarray<double> _colorLocalDensity;// buffer which contains the local density of the R, G and B photoreceptors for a normalization use
|
||||
std::valarray<double> _imageGradient; |
||||
|
||||
// variables
|
||||
double _pR, _pG, _pB; // probabilities of color R, G and B
|
||||
bool _objectInit; |
||||
|
||||
// protected functions
|
||||
void _initColorSampling(); |
||||
void _interpolateImageDemultiplexedImage(double *inputOutputBuffer); |
||||
void _interpolateSingleChannelImage111(double *inputOutputBuffer); |
||||
void _interpolateBayerRGBchannels(double *inputOutputBuffer); |
||||
void _applyRIFfilter(const double *sourceBuffer, double *destinationBuffer); |
||||
void _getNormalizedContoursImage(const double *inputFrame, double *outputFrame); |
||||
// -> special adaptive filters dedicated to low pass filtering on the chrominance (skips filtering on the edges)
|
||||
void _adaptiveSpatialLPfilter(const double *inputFrame, double *outputFrame); |
||||
void _adaptiveHorizontalCausalFilter_addInput(const double *inputFrame, double *outputFrame, const unsigned int IDrowStart, const unsigned int IDrowEnd); |
||||
void _adaptiveHorizontalAnticausalFilter(double *outputFrame, const unsigned int IDrowStart, const unsigned int IDrowEnd); |
||||
void _adaptiveVerticalCausalFilter(double *outputFrame, const unsigned int IDcolumnStart, const unsigned int IDcolumnEnd); |
||||
void _adaptiveVerticalAnticausalFilter_multGain(double *outputFrame, const unsigned int IDcolumnStart, const unsigned int IDcolumnEnd); |
||||
void _computeGradient(const double *luminance); |
||||
void _normalizeOutputs_0_maxOutputValue(void); |
||||
|
||||
// color space transform
|
||||
void _applyImageColorSpaceConversion(const std::valarray<double> &inputFrame, std::valarray<double> &outputFrame, const double *transformTable); |
||||
|
||||
}; |
||||
} |
||||
|
||||
#endif /*RETINACOLOR_HPP_*/ |
||||
|
||||
|
@ -0,0 +1,531 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
#include "precomp.hpp" |
||||
|
||||
#include "retinafilter.hpp" |
||||
|
||||
// @author Alexandre BENOIT, benoit.alexandre.vision@gmail.com, LISTIC : www.listic.univ-savoie.fr, Gipsa-Lab, France: www.gipsa-lab.inpg.fr/
|
||||
|
||||
#include <iostream> |
||||
#include <cmath> |
||||
|
||||
namespace cv |
||||
{ |
||||
// standard constructor without any log sampling of the input frame
|
||||
RetinaFilter::RetinaFilter(const unsigned int sizeRows, const unsigned int sizeColumns, const bool colorMode, const RETINA_COLORSAMPLINGMETHOD samplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght) |
||||
: |
||||
_retinaParvoMagnoMappedFrame(0), |
||||
_retinaParvoMagnoMapCoefTable(0), |
||||
_photoreceptorsPrefilter((1-(int)useRetinaLogSampling)*sizeRows+useRetinaLogSampling*ImageLogPolProjection::predictOutputSize(sizeRows, reductionFactor), (1-(int)useRetinaLogSampling)*sizeColumns+useRetinaLogSampling*ImageLogPolProjection::predictOutputSize(sizeColumns, reductionFactor), 4), |
||||
_ParvoRetinaFilter((1-(int)useRetinaLogSampling)*sizeRows+useRetinaLogSampling*ImageLogPolProjection::predictOutputSize(sizeRows, reductionFactor), (1-(int)useRetinaLogSampling)*sizeColumns+useRetinaLogSampling*ImageLogPolProjection::predictOutputSize(sizeColumns, reductionFactor)), |
||||
_MagnoRetinaFilter((1-(int)useRetinaLogSampling)*sizeRows+useRetinaLogSampling*ImageLogPolProjection::predictOutputSize(sizeRows, reductionFactor), (1-(int)useRetinaLogSampling)*sizeColumns+useRetinaLogSampling*ImageLogPolProjection::predictOutputSize(sizeColumns, reductionFactor)), |
||||
_colorEngine((1-(int)useRetinaLogSampling)*sizeRows+useRetinaLogSampling*ImageLogPolProjection::predictOutputSize(sizeRows, reductionFactor), (1-(int)useRetinaLogSampling)*sizeColumns+useRetinaLogSampling*ImageLogPolProjection::predictOutputSize(sizeColumns, reductionFactor), samplingMethod), |
||||
// configure retina photoreceptors log sampling... if necessary
|
||||
_photoreceptorsLogSampling(NULL) |
||||
{ |
||||
|
||||
#ifdef RETINADEBUG |
||||
std::cout<<"RetinaFilter::size( "<<_photoreceptorsPrefilter.getNBrows()<<", "<<_photoreceptorsPrefilter.getNBcolumns()<<")"<<" =? "<<_photoreceptorsPrefilter.getNBpixels()<<std::endl; |
||||
#endif |
||||
if (useRetinaLogSampling) |
||||
{ |
||||
_photoreceptorsLogSampling = new ImageLogPolProjection(sizeRows, sizeColumns, ImageLogPolProjection::RETINALOGPROJECTION, true); |
||||
if (!_photoreceptorsLogSampling->initProjection(reductionFactor, samplingStrenght)) |
||||
{ |
||||
std::cerr<<"RetinaFilter::Problem initializing photoreceptors log sampling, could not setup retina filter"<<std::endl; |
||||
delete _photoreceptorsLogSampling; |
||||
_photoreceptorsLogSampling=NULL; |
||||
} |
||||
else |
||||
{ |
||||
#ifdef RETINADEBUG |
||||
std::cout<<"_photoreceptorsLogSampling::size( "<<_photoreceptorsLogSampling->getNBrows()<<", "<<_photoreceptorsLogSampling->getNBcolumns()<<")"<<" =? "<<_photoreceptorsLogSampling->getNBpixels()<<std::endl; |
||||
#endif |
||||
} |
||||
} |
||||
|
||||
// set default processing activities
|
||||
_useParvoOutput=true; |
||||
_useMagnoOutput=true; |
||||
|
||||
_useColorMode=colorMode; |
||||
|
||||
// create hybrid output and related coefficient table
|
||||
_createHybridTable(); |
||||
|
||||
// set default parameters
|
||||
setGlobalParameters(); |
||||
|
||||
// stability controls values init
|
||||
_setInitPeriodCount(); |
||||
_globalTemporalConstant=25; |
||||
|
||||
// reset all buffers
|
||||
clearAllBuffers(); |
||||
|
||||
|
||||
// std::cout<<"RetinaFilter::size( "<<this->getNBrows()<<", "<<this->getNBcolumns()<<")"<<_filterOutput.size()<<" =? "<<_filterOutput.getNBpixels()<<std::endl;
|
||||
|
||||
} |
||||
|
||||
// destructor
|
||||
RetinaFilter::~RetinaFilter() |
||||
{ |
||||
if (_photoreceptorsLogSampling!=NULL) |
||||
delete _photoreceptorsLogSampling; |
||||
} |
||||
|
||||
// function that clears all buffers of the object
|
||||
void RetinaFilter::clearAllBuffers() |
||||
{ |
||||
_photoreceptorsPrefilter.clearAllBuffers(); |
||||
_ParvoRetinaFilter.clearAllBuffers(); |
||||
_MagnoRetinaFilter.clearAllBuffers(); |
||||
_colorEngine.clearAllBuffers(); |
||||
if (_photoreceptorsLogSampling!=NULL) |
||||
_photoreceptorsLogSampling->clearAllBuffers(); |
||||
// stability controls value init
|
||||
_setInitPeriodCount(); |
||||
} |
||||
|
||||
/**
|
||||
* resize retina filter object (resize all allocated buffers)
||||
* @param NBrows: the new height size |
||||
* @param NBcolumns: the new width size |
||||
*/ |
||||
void RetinaFilter::resize(const unsigned int NBrows, const unsigned int NBcolumns) |
||||
{ |
||||
unsigned int rows=NBrows, cols=NBcolumns; |
||||
|
||||
// resize optional member and adjust other modules' size if required
|
||||
if (_photoreceptorsLogSampling) |
||||
{ |
||||
_photoreceptorsLogSampling->resize(NBrows, NBcolumns); |
||||
rows=_photoreceptorsLogSampling->getOutputNBrows(); |
||||
cols=_photoreceptorsLogSampling->getOutputNBcolumns(); |
||||
} |
||||
|
||||
_photoreceptorsPrefilter.resize(rows, cols); |
||||
_ParvoRetinaFilter.resize(rows, cols); |
||||
_MagnoRetinaFilter.resize(rows, cols); |
||||
_colorEngine.resize(rows, cols); |
||||
|
||||
// reset parvo magno mapping
|
||||
_createHybridTable(); |
||||
|
||||
// clean buffers
|
||||
clearAllBuffers(); |
||||
|
||||
} |
||||
|
||||
// stability controls value init
|
||||
void RetinaFilter::_setInitPeriodCount() |
||||
{ |
||||
|
||||
// find out the maximum temporal constant value and apply a safety factor
// a rough overestimate (deliberately too long) but appropriate for simple use
|
||||
_globalTemporalConstant=(unsigned int)(_ParvoRetinaFilter.getPhotoreceptorsTemporalConstant()+_ParvoRetinaFilter.getHcellsTemporalConstant()+_MagnoRetinaFilter.getTemporalConstant()); |
||||
// reset frame counter
|
||||
_ellapsedFramesSinceLastReset=0; |
||||
} |
||||
|
||||
void RetinaFilter::_createHybridTable() |
||||
{ |
||||
// create hybrid output and related coefficient table
|
||||
_retinaParvoMagnoMappedFrame.resize(_photoreceptorsPrefilter.getNBpixels()); |
||||
|
||||
_retinaParvoMagnoMapCoefTable.resize(_photoreceptorsPrefilter.getNBpixels()*2); |
||||
|
||||
// fill _hybridParvoMagnoCoefTable
|
||||
int i, j, halfRows=_photoreceptorsPrefilter.getNBrows()/2, halfColumns=_photoreceptorsPrefilter.getNBcolumns()/2; |
||||
double *hybridParvoMagnoCoefTablePTR= &_retinaParvoMagnoMapCoefTable[0]; |
||||
double minDistance=MIN(halfRows, halfColumns)*0.7; |
||||
for (i=0;i<(int)_photoreceptorsPrefilter.getNBrows();++i) |
||||
{ |
||||
for (j=0;j<(int)_photoreceptorsPrefilter.getNBcolumns();++j) |
||||
{ |
||||
double distanceToCenter=sqrt(((double)(i-halfRows)*(i-halfRows)+(j-halfColumns)*(j-halfColumns))); |
||||
if (distanceToCenter<minDistance) |
||||
{ |
||||
double a=*(hybridParvoMagnoCoefTablePTR++)=0.5+0.5*cos(CV_PI*distanceToCenter/minDistance); |
||||
*(hybridParvoMagnoCoefTablePTR++)=1.0-a; |
||||
}else |
||||
{ |
||||
*(hybridParvoMagnoCoefTablePTR++)=0; |
||||
*(hybridParvoMagnoCoefTablePTR++)=1.0; |
||||
} |
||||
} |
||||
} |
||||
} |
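// Illustration (not part of the filter): the coefficient table filled above stores, for every
// pixel, a (parvoWeight, magnoWeight) pair. Inside a disc of radius 0.7*min(halfRows, halfColumns)
// around the image centre the parvo weight follows a raised cosine and the magno weight is its
// complement; outside that disc only the magno channel contributes. A minimal sketch of the
// per-pixel weighting, kept in a comment so it does not affect the build:
/*
static void computeParvoMagnoWeights(int row, int col, int nbRows, int nbColumns,
                                     double &parvoWeight, double &magnoWeight)
{
    const int halfRows = nbRows / 2, halfColumns = nbColumns / 2;
    const double minDistance = MIN(halfRows, halfColumns) * 0.7; // foveal radius
    const double d = sqrt((double)((row - halfRows) * (row - halfRows)
                        + (col - halfColumns) * (col - halfColumns)));
    if (d < minDistance)
    {
        parvoWeight = 0.5 + 0.5 * cos(CV_PI * d / minDistance); // 1 at the centre, 0 at the foveal border
        magnoWeight = 1.0 - parvoWeight;
    }
    else
    {
        parvoWeight = 0.0; // pure magno response in the periphery
        magnoWeight = 1.0;
    }
}
*/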
||||
|
||||
// setup parameters function and global data filling
|
||||
void RetinaFilter::setGlobalParameters(const double OPLspatialResponse1, const double OPLtemporalresponse1, const double OPLassymetryGain, const double OPLspatialResponse2, const double OPLtemporalresponse2, const double LPfilterSpatialResponse, const double LPfilterGain, const double LPfilterTemporalresponse, const double MovingContoursExtractorCoefficient, const bool normalizeParvoOutput_0_maxOutputValue, const bool normalizeMagnoOutput_0_maxOutputValue, const double maxOutputValue, const double maxInputValue, const double meanValue) |
||||
{ |
||||
_normalizeParvoOutput_0_maxOutputValue=normalizeParvoOutput_0_maxOutputValue; |
||||
_normalizeMagnoOutput_0_maxOutputValue=normalizeMagnoOutput_0_maxOutputValue; |
||||
_maxOutputValue=maxOutputValue; |
||||
_photoreceptorsPrefilter.setV0CompressionParameter(0.9, maxInputValue, meanValue); |
||||
_photoreceptorsPrefilter.setLPfilterParameters(10, 0, 1.5, 1); // keeps low pass filter with high cut frequency in memory (useful for the tone mapping function)
|
||||
_photoreceptorsPrefilter.setLPfilterParameters(10, 0, 3.0, 2); // keeps low pass filter with low cut frequency in memory (useful for the tone mapping function)
|
||||
_photoreceptorsPrefilter.setLPfilterParameters(0, 0, 10, 3); // keeps low pass filter with low cut frequency in memory (useful for the tone mapping function)
|
||||
//this->setV0CompressionParameter(0.6, maxInputValue, meanValue); // keeps log compression sensitivity parameter (useful for the tone mapping function)
|
||||
_ParvoRetinaFilter.setOPLandParvoFiltersParameters(0,OPLtemporalresponse1, OPLspatialResponse1, OPLassymetryGain, OPLtemporalresponse2, OPLspatialResponse2); |
||||
_ParvoRetinaFilter.setV0CompressionParameter(0.9, maxInputValue, meanValue); |
||||
_MagnoRetinaFilter.setCoefficientsTable(LPfilterGain, LPfilterTemporalresponse, LPfilterSpatialResponse, MovingContoursExtractorCoefficient, 0, 2.0*LPfilterSpatialResponse); |
||||
_MagnoRetinaFilter.setV0CompressionParameter(0.7, maxInputValue, meanValue); |
||||
|
||||
// stability controls value init
|
||||
_setInitPeriodCount(); |
||||
} |
||||
|
||||
const bool RetinaFilter::checkInput(const std::valarray<double> &input, const bool colorMode) |
||||
{ |
||||
|
||||
BasicRetinaFilter *inputTarget=&_photoreceptorsPrefilter; |
||||
if (_photoreceptorsLogSampling) |
||||
inputTarget=_photoreceptorsLogSampling; |
||||
|
||||
bool test=input.size()==inputTarget->getNBpixels() || input.size()==(inputTarget->getNBpixels()*3) ; |
||||
if (!test) |
||||
{ |
||||
std::cerr<<"RetinaFilter::checkInput: input buffer does not match retina buffer size, conversion aborted"<<std::endl; |
||||
std::cout<<"RetinaFilter::checkInput: input size="<<input.size()<<" / "<<"retina size="<<inputTarget->getNBpixels()<<std::endl; |
||||
return false; |
||||
} |
||||
|
||||
return true; |
||||
} |
||||
|
||||
// main function that runs the filter for a given input frame
|
||||
const bool RetinaFilter::runFilter(const std::valarray<double> &imageInput, const bool useAdaptiveFiltering, const bool processRetinaParvoMagnoMapping, const bool useColorMode, const bool inputIsColorMultiplexed) |
||||
{ |
||||
// preliminary check
|
||||
bool processSuccess=true; |
||||
if (!checkInput(imageInput, useColorMode)) |
||||
return false; |
||||
|
||||
// run the color multiplexing if needed and compute each sub-filter of the retina:
|
||||
// -> local adaptation
|
||||
// -> contours OPL extraction
|
||||
// -> moving contours extraction
|
||||
|
||||
// stability controls value update
|
||||
++_ellapsedFramesSinceLastReset; |
||||
|
||||
_useColorMode=useColorMode; |
||||
|
||||
/* pointer to the appropriate input data:
|
||||
* by default (gray-level mode), the raw input is processed, |
||||
* if color or any other mode must be considered, specific preprocessing is applied first |
||||
*/ |
||||
|
||||
const std::valarray<double> *selectedPhotoreceptorsLocalAdaptationInput= &imageInput; |
||||
const std::valarray<double> *selectedPhotoreceptorsColorInput=&imageInput; |
||||
|
||||
//********** Following is input data specific photoreceptors processing
|
||||
if (_photoreceptorsLogSampling) |
||||
{ |
||||
_photoreceptorsLogSampling->runProjection(imageInput, useColorMode); |
||||
selectedPhotoreceptorsColorInput=selectedPhotoreceptorsLocalAdaptationInput=&(_photoreceptorsLogSampling->getSampledFrame()); |
||||
} |
||||
|
||||
if (useColorMode&& (!inputIsColorMultiplexed)) // not multiplexed color input case
|
||||
{ |
||||
_colorEngine.runColorMultiplexing(*selectedPhotoreceptorsColorInput); |
||||
selectedPhotoreceptorsLocalAdaptationInput=&(_colorEngine.getMultiplexedFrame()); |
||||
} |
||||
|
||||
//********** Following is generic Retina processing
|
||||
|
||||
// photoreceptors local adaptation
|
||||
_photoreceptorsPrefilter.runFilter_LocalAdapdation(*selectedPhotoreceptorsLocalAdaptationInput, _ParvoRetinaFilter.getHorizontalCellsOutput()); |
||||
// safety pixel values checks
|
||||
//_photoreceptorsPrefilter.normalizeGrayOutput_0_maxOutputValue(_maxOutputValue);
|
||||
|
||||
// run parvo filter
|
||||
_ParvoRetinaFilter.runFilter(_photoreceptorsPrefilter.getOutput(), _useParvoOutput); |
||||
|
||||
if (_useParvoOutput) |
||||
{ |
||||
_ParvoRetinaFilter.normalizeGrayOutputCentredSigmoide(); // models the saturation of the cells, useful for visualisation of the ON-OFF Parvo output; bipolar cells outputs do not change !!!
|
||||
_ParvoRetinaFilter.centerReductImageLuminance(); // best for further spectrum analysis
|
||||
|
||||
if (_normalizeParvoOutput_0_maxOutputValue) |
||||
_ParvoRetinaFilter.normalizeGrayOutput_0_maxOutputValue(_maxOutputValue); |
||||
} |
||||
|
||||
if (_useParvoOutput&&_useMagnoOutput) |
||||
{ |
||||
_MagnoRetinaFilter.runFilter(_ParvoRetinaFilter.getBipolarCellsON(), _ParvoRetinaFilter.getBipolarCellsOFF()); |
||||
if (_normalizeMagnoOutput_0_maxOutputValue) |
||||
{ |
||||
_MagnoRetinaFilter.normalizeGrayOutput_0_maxOutputValue(_maxOutputValue); |
||||
} |
||||
_MagnoRetinaFilter.normalizeGrayOutputNearZeroCentreredSigmoide(); |
||||
} |
||||
|
||||
if (_useParvoOutput&&_useMagnoOutput&&processRetinaParvoMagnoMapping) |
||||
{ |
||||
_processRetinaParvoMagnoMapping(); |
||||
if (_useColorMode) |
||||
_colorEngine.runColorDemultiplexing(_retinaParvoMagnoMappedFrame, useAdaptiveFiltering, _maxOutputValue);//_ColorEngine->getMultiplexedFrame());//_ParvoRetinaFilter->getPhotoreceptorsLPfilteringOutput());
|
||||
|
||||
return processSuccess; |
||||
} |
||||
|
||||
if (_useParvoOutput&&_useColorMode) |
||||
{ |
||||
_colorEngine.runColorDemultiplexing(_ParvoRetinaFilter.getOutput(), useAdaptiveFiltering, _maxOutputValue);//_ColorEngine->getMultiplexedFrame());//_ParvoRetinaFilter->getPhotoreceptorsLPfilteringOutput());
|
||||
// compute A Cr1 Cr2 to LMS color space conversion
|
||||
//if (true)
|
||||
// _applyImageColorSpaceConversion(_ColorEngine->getChrominance(), lmsTempBuffer.Buffer(), _LMStoACr1Cr2);
|
||||
} |
||||
|
||||
return processSuccess; |
||||
} |
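// A hedged usage sketch (not part of the library): typical per-frame use of the public methods
// defined in this file, assuming a RetinaFilter instance has already been constructed elsewhere
// with the desired size and colour mode, and that "frame" matches the retina input size:
/*
void processOneFrame(RetinaFilter &retina, const std::valarray<double> &frame)
{
    // gray-level processing: no adaptive colour filtering, no parvo/magno hybrid mapping,
    // no colour mode, input not colour multiplexed
    if (!retina.runFilter(frame, false, false, false, false))
        return; // size mismatch already reported by checkInput()

    // luminance / contours channel (parvocellular pathway, or colour engine luminance in colour mode)
    const std::valarray<double> &contours = retina.getContours();
    (void)contours; // consume the output here
}
*/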
||||
|
||||
const std::valarray<double> &RetinaFilter::getContours() |
||||
{ |
||||
if (_useColorMode) |
||||
return _colorEngine.getLuminance(); |
||||
else |
||||
return _ParvoRetinaFilter.getOutput(); |
||||
} |
||||
|
||||
// run the initialized retina filter in order to perform gray image tone mapping; after this call all retina outputs are updated
|
||||
void RetinaFilter::runGrayToneMapping(const std::valarray<double> &grayImageInput, std::valarray<double> &grayImageOutput, const double PhotoreceptorsCompression, const double ganglionCellsCompression) |
||||
{ |
||||
// preliminary check
|
||||
if (!checkInput(grayImageInput, false)) |
||||
return; |
||||
|
||||
this->_runGrayToneMapping(grayImageInput, grayImageOutput, PhotoreceptorsCompression, ganglionCellsCompression); |
||||
} |
||||
|
||||
// run the initialized retina filter in order to perform gray image tone mapping; after this call all retina outputs are updated
|
||||
void RetinaFilter::_runGrayToneMapping(const std::valarray<double> &grayImageInput, std::valarray<double> &grayImageOutput, const double PhotoreceptorsCompression, const double ganglionCellsCompression) |
||||
{ |
||||
// stability controls value update
|
||||
++_ellapsedFramesSinceLastReset; |
||||
|
||||
std::valarray<double> temp2(grayImageInput.size()); |
||||
|
||||
// apply tone mapping on the multiplexed image
|
||||
// -> photoreceptors local adaptation (large area adaptation)
|
||||
_photoreceptorsPrefilter.runFilter_LPfilter(grayImageInput, grayImageOutput, 2); // compute low pass filtering modeling the horizontal cells filtering to access local luminance
|
||||
_photoreceptorsPrefilter.setV0CompressionParameterToneMapping(PhotoreceptorsCompression, grayImageOutput.sum()/(double)_photoreceptorsPrefilter.getNBpixels()); |
||||
_photoreceptorsPrefilter.runFilter_LocalAdapdation(grayImageInput, grayImageOutput, temp2); // adapt contrast to local luminance
|
||||
|
||||
// high pass filter
|
||||
//_spatiotemporalLPfilter(_localBuffer, _filterOutput, 2); // compute low pass filtering (high cut frequency (remove spatio-temporal noise)
|
||||
|
||||
//for (unsigned int i=0;i<_NBpixels;++i)
|
||||
// _localBuffer[i]-= _filterOutput[i]/2.0;
|
||||
|
||||
// -> ganglion cells local adaptation (short area adaptation)
|
||||
_photoreceptorsPrefilter.runFilter_LPfilter(temp2, grayImageOutput, 1); // compute low pass filtering (high cut frequency (remove spatio-temporal noise)
|
||||
_photoreceptorsPrefilter.setV0CompressionParameterToneMapping(ganglionCellsCompression, temp2.max(), temp2.sum()/(double)_photoreceptorsPrefilter.getNBpixels()); |
||||
_photoreceptorsPrefilter.runFilter_LocalAdapdation(temp2, grayImageOutput, grayImageOutput); // adapt contrast to local luminance
|
||||
|
||||
} |
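// Summary of the gray tone mapping above: a first, large-area local adaptation is performed at
// the photoreceptors level (low pass filter index 2, compression driven by the mean of the
// low-passed image), then a second, short-area adaptation models the ganglion cells (low pass
// filter index 1, compression driven by the max and mean of the photoreceptors output). Both
// stages reuse the photoreceptors prefilter object, so only the "temp2" buffer is allocated here.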
||||
// run the initialized retina filter in order to perform color tone mapping; after this call all retina outputs are updated
|
||||
void RetinaFilter::runRGBToneMapping(const std::valarray<double> &RGBimageInput, std::valarray<double> &RGBimageOutput, const bool useAdaptiveFiltering, const double PhotoreceptorsCompression, const double ganglionCellsCompression) |
||||
{ |
||||
// preliminary check
|
||||
if (!checkInput(RGBimageInput, true)) |
||||
return; |
||||
|
||||
// multiplex the image with the color sampling method specified in the constructor
|
||||
_colorEngine.runColorMultiplexing(RGBimageInput); |
||||
|
||||
// apply tone mapping on the multiplexed image
|
||||
_runGrayToneMapping(_colorEngine.getMultiplexedFrame(), RGBimageOutput, PhotoreceptorsCompression, ganglionCellsCompression); |
||||
|
||||
// demultiplex tone mapped image
|
||||
_colorEngine.runColorDemultiplexing(RGBimageOutput, useAdaptiveFiltering, _photoreceptorsPrefilter.getMaxInputValue());//_ColorEngine->getMultiplexedFrame());//_ParvoRetinaFilter->getPhotoreceptorsLPfilteringOutput());
|
||||
|
||||
// rescaling result between 0 and 255
|
||||
_colorEngine.normalizeRGBOutput_0_maxOutputValue(255.0); |
||||
|
||||
// return the result
|
||||
RGBimageOutput=_colorEngine.getDemultiplexedColorFrame(); |
||||
} |
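// A hedged usage sketch (not part of the library) for the colour tone mapping entry point above,
// assuming a RetinaFilter built in colour mode and an RGB buffer of size 3*getNBpixels() laid out
// as expected by the colour engine; the two compression factors are illustrative values only:
/*
void toneMapRGB(RetinaFilter &retina, const std::valarray<double> &rgbIn, std::valarray<double> &rgbOut)
{
    retina.runRGBToneMapping(rgbIn, rgbOut, true, 0.6, 0.8);
    // rgbOut now holds the demultiplexed colour frame, rescaled to [0, 255]
}
*/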
||||
|
||||
void RetinaFilter::runLMSToneMapping(const std::valarray<double> &LMSimageInput, std::valarray<double> &imageOuput, const bool useAdaptiveFiltering, const double PhotoreceptorsCompression, const double ganglionCellsCompression) |
||||
{ |
||||
std::cerr<<"not working, sorry"<<std::endl; |
||||
|
||||
/* // preliminary check
|
||||
const std::valarray<double> &bufferInput=checkInput(LMSimageInput, true); |
||||
if (!bufferInput) |
||||
return NULL; |
||||
|
||||
if (!_useColorMode) |
||||
std::cerr<<"RetinaFilter::Can not call tone mapping oeration if the retina filter was created for gray scale images"<<std::endl; |
||||
|
||||
// create a temporary buffer of size nrows, Mcolumns, 3 layers
|
||||
std::valarray<double> lmsTempBuffer(LMSimageInput); |
||||
std::cout<<"RetinaFilter::--->min LMS value="<<lmsTempBuffer.min()<<std::endl; |
||||
|
||||
// setup local adaptation parameter at the photoreceptors level
|
||||
setV0CompressionParameter(PhotoreceptorsCompression, _maxInputValue); |
||||
// get the local energy of each color channel
|
||||
// ->L
|
||||
_spatiotemporalLPfilter(LMSimageInput, _filterOutput, 1); |
||||
setV0CompressionParameterToneMapping(PhotoreceptorsCompression, _maxInputValue, this->sum()/_NBpixels); |
||||
_localLuminanceAdaptation(LMSimageInput, _filterOutput, lmsTempBuffer.Buffer()); |
||||
// ->M
|
||||
_spatiotemporalLPfilter(LMSimageInput+_NBpixels, _filterOutput, 1); |
||||
setV0CompressionParameterToneMapping(PhotoreceptorsCompression, _maxInputValue, this->sum()/_NBpixels); |
||||
_localLuminanceAdaptation(LMSimageInput+_NBpixels, _filterOutput, lmsTempBuffer.Buffer()+_NBpixels); |
||||
// ->S
|
||||
_spatiotemporalLPfilter(LMSimageInput+_NBpixels*2, _filterOutput, 1); |
||||
setV0CompressionParameterToneMapping(PhotoreceptorsCompression, _maxInputValue, this->sum()/_NBpixels); |
||||
_localLuminanceAdaptation(LMSimageInput+_NBpixels*2, _filterOutput, lmsTempBuffer.Buffer()+_NBpixels*2); |
||||
|
||||
// eliminate negative values
|
||||
for (unsigned int i=0;i<lmsTempBuffer.size();++i) |
||||
if (lmsTempBuffer.Buffer()[i]<0) |
||||
lmsTempBuffer.Buffer()[i]=0; |
||||
std::cout<<"RetinaFilter::->min LMS value="<<lmsTempBuffer.min()<<std::endl; |
||||
|
||||
// compute LMS to A Cr1 Cr2 color space conversion
|
||||
_applyImageColorSpaceConversion(lmsTempBuffer.Buffer(), lmsTempBuffer.Buffer(), _LMStoACr1Cr2); |
||||
|
||||
TemplateBuffer <double> acr1cr2TempBuffer(_NBrows, _NBcolumns, 3); |
||||
memcpy(acr1cr2TempBuffer.Buffer(), lmsTempBuffer.Buffer(), sizeof(double)*_NBpixels*3); |
||||
|
||||
// compute A Cr1 Cr2 to LMS color space conversion
|
||||
_applyImageColorSpaceConversion(acr1cr2TempBuffer.Buffer(), lmsTempBuffer.Buffer(), _ACr1Cr2toLMS); |
||||
|
||||
// eliminate negative values
|
||||
for (unsigned int i=0;i<lmsTempBuffer.size();++i) |
||||
if (lmsTempBuffer.Buffer()[i]<0) |
||||
lmsTempBuffer.Buffer()[i]=0; |
||||
|
||||
// rewrite output to the appropriate buffer
|
||||
_colorEngine->setDemultiplexedColorFrame(lmsTempBuffer.Buffer()); |
||||
*/ |
||||
} |
||||
|
||||
// return image with center Parvo and peripheral Magno channels
|
||||
void RetinaFilter::_processRetinaParvoMagnoMapping() |
||||
{ |
||||
register double *hybridParvoMagnoPTR= &_retinaParvoMagnoMappedFrame[0]; |
||||
register const double *parvoOutputPTR= &(_ParvoRetinaFilter.getOutput()[0]); |
||||
register const double *magnoXOutputPTR= &(_MagnoRetinaFilter.getOutput()[0]); |
||||
register double *hybridParvoMagnoCoefTablePTR= &_retinaParvoMagnoMapCoefTable[0]; |
||||
|
||||
for (unsigned int i=0 ; i<_photoreceptorsPrefilter.getNBpixels() ; ++i, hybridParvoMagnoCoefTablePTR+=2) |
||||
{ |
||||
double hybridValue=*(parvoOutputPTR++)**(hybridParvoMagnoCoefTablePTR)+*(magnoXOutputPTR++)**(hybridParvoMagnoCoefTablePTR+1); |
||||
*(hybridParvoMagnoPTR++)=hybridValue; |
||||
} |
||||
|
||||
TemplateBuffer<double>::normalizeGrayOutput_0_maxOutputValue(&_retinaParvoMagnoMappedFrame[0], _photoreceptorsPrefilter.getNBpixels()); |
||||
|
||||
} |
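// Per pixel, the hybrid frame computed above is simply
//   hybrid(i) = parvo(i) * parvoWeight(i) + magno(i) * magnoWeight(i)
// with the weight pairs precomputed by _createHybridTable(), followed by a 0..max normalization.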
||||
|
||||
const bool RetinaFilter::getParvoFoveaResponse(std::valarray<double> &parvoFovealResponse) |
||||
{ |
||||
if (!_useParvoOutput) |
||||
return false; |
||||
if (parvoFovealResponse.size() != _ParvoRetinaFilter.getNBpixels()) |
||||
return false; |
||||
|
||||
register const double *parvoOutputPTR= &(_ParvoRetinaFilter.getOutput()[0]); |
||||
register double *fovealParvoResponsePTR= &parvoFovealResponse[0]; |
||||
register double *hybridParvoMagnoCoefTablePTR= &_retinaParvoMagnoMapCoefTable[0]; |
||||
|
||||
for (unsigned int i=0 ; i<_photoreceptorsPrefilter.getNBpixels() ; ++i, hybridParvoMagnoCoefTablePTR+=2) |
||||
{ |
||||
*(fovealParvoResponsePTR++)=*(parvoOutputPTR++)**(hybridParvoMagnoCoefTablePTR); |
||||
} |
||||
|
||||
return true; |
||||
} |
||||
|
||||
// method to retrieve the parafoveal magnocellular pathway response (no energy motion in fovea)
|
||||
const bool RetinaFilter::getMagnoParaFoveaResponse(std::valarray<double> &magnoParafovealResponse) |
||||
{ |
||||
if (!_useMagnoOutput) |
||||
return false; |
||||
if (magnoParafovealResponse.size() != _MagnoRetinaFilter.getNBpixels()) |
||||
return false; |
||||
|
||||
register const double *magnoXOutputPTR= &(_MagnoRetinaFilter.getOutput()[0]); |
||||
register double *parafovealMagnoResponsePTR=&magnoParafovealResponse[0]; |
||||
register double *hybridParvoMagnoCoefTablePTR=&_retinaParvoMagnoMapCoefTable[0]+1; |
||||
|
||||
for (unsigned int i=0 ; i<_photoreceptorsPrefilter.getNBpixels() ; ++i, hybridParvoMagnoCoefTablePTR+=2) |
||||
{ |
||||
*(parafovealMagnoResponsePTR++)=*(magnoXOutputPTR++)**(hybridParvoMagnoCoefTablePTR); |
||||
} |
||||
|
||||
return true; |
||||
} |
||||
|
||||
|
||||
} |
@ -0,0 +1,520 @@ |
||||
/*#******************************************************************************
|
||||
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
||||
**
|
||||
** By downloading, copying, installing or using the software you agree to this license. |
||||
** If you do not agree to this license, do not download, install, |
||||
** copy or use the software. |
||||
**
|
||||
**
|
||||
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. |
||||
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. |
||||
**
|
||||
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) |
||||
**
|
||||
** Creation - enhancement process 2007-2011 |
||||
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France |
||||
**
|
||||
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). |
||||
** Refer to the following research paper for more information: |
||||
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
|
||||
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: |
||||
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. |
||||
**
|
||||
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : |
||||
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: |
||||
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 |
||||
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. |
||||
** ====> more informations in the above cited Jeanny Heraults's book. |
||||
**
|
||||
** License Agreement |
||||
** For Open Source Computer Vision Library |
||||
**
|
||||
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
||||
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. |
||||
**
|
||||
** For Human Visual System tools (hvstools) |
||||
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. |
||||
**
|
||||
** Third party copyrights are property of their respective owners. |
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without modification, |
||||
** are permitted provided that the following conditions are met: |
||||
**
|
||||
** * Redistributions of source code must retain the above copyright notice, |
||||
** this list of conditions and the following disclaimer. |
||||
**
|
||||
** * Redistributions in binary form must reproduce the above copyright notice, |
||||
** this list of conditions and the following disclaimer in the documentation |
||||
** and/or other materials provided with the distribution. |
||||
**
|
||||
** * The name of the copyright holders may not be used to endorse or promote products |
||||
** derived from this software without specific prior written permission. |
||||
**
|
||||
** This software is provided by the copyright holders and contributors "as is" and |
||||
** any express or implied warranties, including, but not limited to, the implied |
||||
** warranties of merchantability and fitness for a particular purpose are disclaimed. |
||||
** In no event shall the Intel Corporation or contributors be liable for any direct, |
||||
** indirect, incidental, special, exemplary, or consequential damages |
||||
** (including, but not limited to, procurement of substitute goods or services; |
||||
** loss of use, data, or profits; or business interruption) however caused |
||||
** and on any theory of liability, whether in contract, strict liability, |
||||
** or tort (including negligence or otherwise) arising in any way out of |
||||
** the use of this software, even if advised of the possibility of such damage. |
||||
*******************************************************************************/ |
||||
|
||||
#ifndef __TEMPLATEBUFFER_HPP__ |
||||
#define __TEMPLATEBUFFER_HPP__ |
||||
|
||||
#include <valarray> |
||||
#include <cstdlib> |
||||
#include <iostream> |
||||
#include <cmath> |
||||
|
||||
//#define __TEMPLATEBUFFERDEBUG //define TEMPLATEBUFFERDEBUG in order to display debug information
|
||||
|
||||
namespace cv |
||||
{ |
||||
/**
|
||||
* @class TemplateBuffer |
||||
* @brief this class is a simple template memory buffer which contains basic functions to get information on or normalize the buffer content |
||||
* note that thanks to the parent STL template class "valarray", it is possible to perform easily operations on the full array such as addition, product etc. |
||||
* @author Alexandre BENOIT (benoit.alexandre.vision@gmail.com), helped by Gelu IONESCU (gelu.ionescu@lis.inpg.fr) |
||||
* creation date: september 2007 |
||||
*/ |
||||
template <class type> class TemplateBuffer : public std::valarray<type> |
||||
{ |
||||
public: |
||||
|
||||
/**
|
||||
* constructor for monodimensional array |
||||
* @param dim: the size of the vector |
||||
*/ |
||||
TemplateBuffer(const size_t dim=0) |
||||
: std::valarray<type>((type)0, dim) |
||||
{ |
||||
_NBrows=1; |
||||
_NBcolumns=dim; |
||||
_NBdepths=1; |
||||
_NBpixels=dim; |
||||
_doubleNBpixels=2*dim; |
||||
} |
||||
|
||||
/**
|
||||
* constructor by copy for monodimensional array |
||||
* @param pVal: the pointer to a buffer to copy |
||||
* @param dim: the size of the vector |
||||
*/ |
||||
TemplateBuffer(const type* pVal, const size_t dim) |
||||
: std::valarray<type>(pVal, dim) |
||||
{ |
||||
_NBrows=1; |
||||
_NBcolumns=dim; |
||||
_NBdepths=1; |
||||
_NBpixels=dim; |
||||
_doubleNBpixels=2*dim; |
||||
} |
||||
|
||||
/**
|
||||
* constructor for bidimensional array |
||||
* @param dimRows: the size of the vector |
||||
* @param dimColumns: the size of the vector |
||||
* @param depth: the number of layers of the buffer in its third dimension (3 for color images, 1 for gray images) |
||||
*/ |
||||
TemplateBuffer(const size_t dimRows, const size_t dimColumns, const size_t depth=1) |
||||
: std::valarray<type>((type)0, dimRows*dimColumns*depth) |
||||
{ |
||||
#ifdef TEMPLATEBUFFERDEBUG |
||||
std::cout<<"TemplateBuffer::TemplateBuffer: new buffer, size="<<dimRows<<", "<<dimColumns<<", "<<depth<<"valarraySize="<<this->size()<<std::endl; |
||||
#endif |
||||
_NBrows=dimRows; |
||||
_NBcolumns=dimColumns; |
||||
_NBdepths=depth; |
||||
_NBpixels=dimRows*dimColumns; |
||||
_doubleNBpixels=2*dimRows*dimColumns; |
||||
//_createTableIndex();
|
||||
#ifdef TEMPLATEBUFFERDEBUG |
||||
std::cout<<"TemplateBuffer::TemplateBuffer: construction successful"<<std::endl; |
||||
#endif |
||||
|
||||
} |
||||
|
||||
/**
|
||||
* copy constructor |
||||
* @param toCopy |
||||
* @return the constructed instance |
||||
* TemplateBuffer(const TemplateBuffer &toCopy) |
||||
:_NBrows(toCopy.getNBrows()),_NBcolumns(toCopy.getNBcolumns()),_NBdepths(toCopy.getNBdephs()), _NBpixels(toCopy.getNBpixels()), _doubleNBpixels(toCopy.getNBpixels()*2) |
||||
//std::valarray<type>(toCopy)
|
||||
{ |
||||
memcpy(Buffer(), toCopy.Buffer(), this->size()); |
||||
}*/ |
||||
/**
|
||||
* destructor |
||||
*/ |
||||
virtual ~TemplateBuffer() |
||||
{ |
||||
#ifdef TEMPLATEBUFFERDEBUG |
||||
std::cout<<"~TemplateBuffer"<<std::endl; |
||||
#endif |
||||
} |
||||
|
||||
/**
|
||||
* delete the buffer content (set zeros) |
||||
*/ |
||||
inline void setZero(){std::valarray<type>::operator=(0);};//memset(Buffer(), 0, sizeof(type)*_NBpixels);};
|
||||
|
||||
/**
|
||||
* @return the numbers of rows (height) of the images used by the object |
||||
*/ |
||||
inline unsigned int getNBrows(){return _NBrows;}; |
||||
|
||||
/**
|
||||
* @return the numbers of columns (width) of the images used by the object |
||||
*/ |
||||
inline unsigned int getNBcolumns(){return _NBcolumns;}; |
||||
|
||||
/**
|
||||
* @return the numbers of pixels (width*height) of the images used by the object |
||||
*/ |
||||
inline unsigned int getNBpixels(){return _NBpixels;}; |
||||
|
||||
/**
|
||||
* @return the numbers of pixels (width*height) of the images used by the object |
||||
*/ |
||||
inline unsigned int getDoubleNBpixels(){return _doubleNBpixels;}; |
||||
|
||||
/**
|
||||
* @return the numbers of depths (3rd dimension: 1 for gray images, 3 for rgb images) of the images used by the object |
||||
*/ |
||||
inline unsigned int getDepthSize(){return _NBdepths;}; |
||||
|
||||
/**
|
||||
* resize the buffer and recompute table index etc. |
||||
*/ |
||||
void resizeBuffer(const size_t dimRows, const size_t dimColumns, const size_t depth=1) |
||||
{ |
||||
this->resize(dimRows*dimColumns*depth); |
||||
_NBrows=dimRows; |
||||
_NBcolumns=dimColumns; |
||||
_NBdepths=depth; |
||||
_NBpixels=dimRows*dimColumns; |
||||
_doubleNBpixels=2*dimRows*dimColumns; |
||||
} |
||||
|
||||
inline TemplateBuffer<type> & operator=(const std::valarray<type> &b) |
||||
{ |
||||
//std::cout<<"TemplateBuffer<type> & operator= affect vector: "<<std::endl;
|
||||
std::valarray<type>::operator=(b); |
||||
return *this; |
||||
} |
||||
|
||||
inline TemplateBuffer<type> & operator=(const type &b) |
||||
{ |
||||
//std::cout<<"TemplateBuffer<type> & operator= affect value: "<<b<<std::endl;
|
||||
std::valarray<type>::operator=(b); |
||||
return *this; |
||||
} |
||||
|
||||
/* inline const type &operator[](const unsigned int &b)
|
||||
{ |
||||
return (*this)[b]; |
||||
} |
||||
*/ |
||||
/**
|
||||
* @return the buffer address in non-const mode |
||||
*/ |
||||
inline type* Buffer() { return &(*this)[0]; } |
||||
|
||||
///////////////////////////////////////////////////////
|
||||
// Standard Image manipulation functions
|
||||
|
||||
/**
|
||||
* standard 0 to 255 image normalization function |
||||
* @param inputOutputBuffer: the image to be normalized (rewrites the input), if no parameter, then, the built in buffer reachable by getOutput() function is normalized |
||||
* @param nbPixels: specifies the number of pixel on which the normalization should be performed, if 0, then all pixels specified in the constructor are processed |
||||
* @param maxOutputValue: the maximum output value |
||||
*/ |
||||
static void normalizeGrayOutput_0_maxOutputValue(type *inputOutputBuffer, const size_t nbPixels, const type maxOutputValue=(type)255.0); |
||||
|
||||
/**
|
||||
* standard 0 to 255 image normalization function |
||||
* @param inputOutputBuffer: the image to be normalized (rewrites the input), if no parameter, then, the built in buffer reachable by getOutput() function is normalized |
||||
* @param nbPixels: specifies the number of pixel on which the normalization should be performed, if 0, then all pixels specified in the constructor are processed |
||||
* @param maxOutputValue: the maximum output value |
||||
*/ |
||||
void normalizeGrayOutput_0_maxOutputValue(const type maxOutputValue=(type)255.0){normalizeGrayOutput_0_maxOutputValue(this->Buffer(), this->size(), maxOutputValue);}; |
||||
|
||||
/**
|
||||
* sigmoid image normalization function (saturates min and max values) |
||||
* @param meanValue: specifies the mean value of the pixels to be processed |
||||
* @param sensitivity: strength of the sigmoid |
||||
* @param inputPicture: the image to be normalized; if no parameter is given, the built-in buffer reachable by the getOutput() function is used |
||||
* @param outputBuffer: the output buffer to which the result is written; if no parameter is given, the built-in buffer reachable by the getOutput() function is used |
||||
* @param maxOutputValue: the maximum output value |
||||
*/ |
||||
static void normalizeGrayOutputCentredSigmoide(const type meanValue, const type sensitivity, const type maxOutputValue, type *inputPicture, type *outputBuffer, const unsigned int nbPixels); |
||||
|
||||
/**
|
||||
* sigmoid image normalization function on the current buffer (saturates min and max values) |
||||
* @param meanValue: specifies the mean value of the pixels to be processed |
||||
* @param sensitivity: strength of the sigmoid |
||||
* @param maxOutputValue: the maximum output value |
||||
*/ |
||||
inline void normalizeGrayOutputCentredSigmoide(const type meanValue=(type)0.0, const type sensitivity=(type)2.0, const type maxOutputValue=(type)255.0){normalizeGrayOutputCentredSigmoide(meanValue, sensitivity, 255.0, this->Buffer(), this->Buffer(), this->getNBpixels());}; |
||||
|
||||
/**
|
||||
* sigmoid image normalization function (saturates min and max values); in this function, the sigmoid is centered on low values (high saturation of the medium and high values) |
||||
* @param inputPicture: the image to be normalized; if no parameter is given, the built-in buffer reachable by the getOutput() function is used |
||||
* @param outputBuffer: the output buffer to which the result is written; if no parameter is given, the built-in buffer reachable by the getOutput() function is used |
||||
* @param sensitivity: strength of the sigmoid |
||||
* @param maxOutputValue: the maximum output value |
||||
*/ |
||||
void normalizeGrayOutputNearZeroCentreredSigmoide(type *inputPicture=(type*)NULL, type *outputBuffer=(type*)NULL, const type sensitivity=(type)40, const type maxOutputValue=(type)255.0); |
||||
|
||||
/**
|
||||
* center and reduce the image: (image-mean)/std |
||||
* @param inputOutputBuffer: the image to be normalized; if no parameter is given, the result is rewritten in place |
||||
*/ |
||||
void centerReductImageLuminance(type *inputOutputBuffer=(type*)NULL); |
||||
|
||||
/**
|
||||
* @return standard deviation of the buffer |
||||
*/ |
||||
const double getStandardDeviation() |
||||
{ |
||||
double standardDeviation=0; |
||||
double meanValue=getMean(); |
||||
|
||||
type *bufferPTR=Buffer(); |
||||
for (unsigned int i=0;i<this->size();++i) |
||||
{ |
||||
double diff=(*(bufferPTR++)-meanValue); |
||||
standardDeviation+=diff*diff; |
||||
} |
||||
return sqrt(standardDeviation/this->size()); |
||||
}; |
||||
|
||||
/**
|
||||
* Clip buffer histogram |
||||
* @param minRatio: the minimum ratio of the lower pixel values, range=[0,1] and lower than maxRatio |
||||
* @param maxRatio: the maximum ratio of the higher pixel values, range=[0,1] and higher than minRatio |
||||
*/ |
||||
void clipHistogram(double minRatio, double maxRatio, double maxOutputValue) |
||||
{ |
||||
|
||||
if (minRatio>=maxRatio) |
||||
{ |
||||
std::cerr<<"TemplateBuffer::clipHistogram: minRatio must be inferior to maxRatio, buffer unchanged"<<std::endl; |
||||
return; |
||||
} |
||||
|
||||
/* minRatio=min(max(minRatio, 1.0),0.0);
|
||||
maxRatio=max(max(maxRatio, 0.0),1.0); |
||||
*/ |
||||
|
||||
// find the pixel value just above the threshold
|
||||
const double maxThreshold=this->max()*maxRatio; |
||||
const double minThreshold=(this->max()-this->min())*minRatio+this->min(); |
||||
|
||||
type *bufferPTR=this->Buffer(); |
||||
|
||||
double deltaH=maxThreshold; |
||||
double deltaL=maxThreshold; |
||||
|
||||
double updatedHighValue=maxThreshold; |
||||
double updatedLowValue=maxThreshold; |
||||
|
||||
for (unsigned int i=0;i<this->size();++i) |
||||
{ |
||||
double curentValue=(double)*(bufferPTR++); |
||||
|
||||
// updating "closest to the high threshold" pixel value
|
||||
double highValueTest=maxThreshold-curentValue; |
||||
if (highValueTest>0) |
||||
{ |
||||
if (deltaH>highValueTest) |
||||
{ |
||||
deltaH=highValueTest; |
||||
updatedHighValue=curentValue; |
||||
} |
||||
} |
||||
|
||||
// updating "closest to the low threshold" pixel value
|
||||
double lowValueTest=curentValue-minThreshold; |
||||
if (lowValueTest>0) |
||||
{ |
||||
if (deltaL>lowValueTest) |
||||
{ |
||||
deltaL=lowValueTest; |
||||
updatedLowValue=curentValue; |
||||
} |
||||
} |
||||
} |
||||
|
||||
std::cout<<"Tdebug"<<std::endl; |
||||
std::cout<<"deltaL="<<deltaL<<", deltaH="<<deltaH<<std::endl; |
||||
std::cout<<"this->max()"<<this->max()<<"maxThreshold="<<maxThreshold<<"updatedHighValue="<<updatedHighValue<<std::endl; |
||||
std::cout<<"this->min()"<<this->min()<<"minThreshold="<<minThreshold<<"updatedLowValue="<<updatedLowValue<<std::endl; |
||||
// clipping values outside than the updated thresholds
|
||||
bufferPTR=this->Buffer(); |
||||
for (unsigned int i=0;i<this->size();++i, ++bufferPTR) |
||||
{ |
||||
if (*bufferPTR<updatedLowValue) |
||||
*bufferPTR=updatedLowValue; |
||||
else if (*bufferPTR>updatedHighValue) |
||||
*bufferPTR=updatedHighValue; |
||||
} |
||||
|
||||
normalizeGrayOutput_0_maxOutputValue(this->Buffer(), this->size(), maxOutputValue); |
||||
|
||||
} |
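// Note on the clipping above: instead of clipping at the raw thresholds, the first loop searches
// for the pixel values closest to (and inside) the high and low thresholds, and the buffer is
// then clipped to those actual values before the final 0..maxOutputValue normalization.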
||||
|
||||
/**
|
||||
* @return the mean value of the vector |
||||
*/ |
||||
inline const double getMean(){return this->sum()/this->size();}; |
||||
|
||||
protected: |
||||
size_t _NBrows; |
||||
size_t _NBcolumns; |
||||
size_t _NBdepths; |
||||
size_t _NBpixels; |
||||
size_t _doubleNBpixels; |
||||
// utilities
|
||||
static type _abs(const type x); |
||||
|
||||
}; |
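// A minimal usage sketch of the buffer class above (hypothetical values, shown only to illustrate
// the public interface; TemplateBuffer inherits all std::valarray operations), kept in a comment:
/*
void templateBufferExample()
{
    cv::TemplateBuffer<double> image(480, 640);         // 480 rows x 640 columns, 1 layer, zero filled
    image[0] = 10.0;                                    // element access inherited from std::valarray
    image += 1.0;                                       // whole-buffer arithmetic inherited from std::valarray
    image.normalizeGrayOutput_0_maxOutputValue(255.0);  // rescale the content to [0, 255]
    const double mean = image.getMean();                // average value of the buffer
    const double stdDev = image.getStandardDeviation(); // standard deviation of the buffer
    (void)mean; (void)stdDev;
}
*/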
||||
|
||||
///////////////////////////////////////////////////////////////////////
|
||||
/// normalize output between 0 and 255; can be applied to images of a different size than the declared one if the nbPixels parameter is set
|
||||
template <class type> |
||||
void TemplateBuffer<type>::normalizeGrayOutput_0_maxOutputValue(type *inputOutputBuffer, const size_t processedPixels, const type maxOutputValue) |
||||
{ |
||||
type maxValue=inputOutputBuffer[0], minValue=inputOutputBuffer[0]; |
||||
|
||||
// get the min and max value
|
||||
register type *inputOutputBufferPTR=inputOutputBuffer; |
||||
for (register size_t j = 0; j<processedPixels; ++j) |
||||
{ |
||||
type pixValue = *(inputOutputBufferPTR++); |
||||
if (maxValue < pixValue) |
||||
maxValue = pixValue; |
||||
else if (minValue > pixValue) |
||||
minValue = pixValue; |
||||
} |
||||
// change the range of the data to 0->255
|
||||
|
||||
type factor = maxOutputValue/(maxValue-minValue); |
||||
type offset = -1.0*minValue*factor; |
||||
|
||||
inputOutputBufferPTR=inputOutputBuffer; |
||||
for (register size_t j = 0; j < processedPixels; ++j, ++inputOutputBufferPTR) |
||||
*inputOutputBufferPTR=*(inputOutputBufferPTR)*factor+offset; |
||||
|
||||
} |
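// Worked example of the affine rescaling above: with minValue=-2, maxValue=6 and maxOutputValue=255,
// factor = 255/8 = 31.875 and offset = 63.75, so -2 maps to 0, 0 maps to 63.75 and 6 maps to 255.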
||||
// normalize data with a sigmoid centered close to 0 (saturates values above 0)
|
||||
template <class type> |
||||
void TemplateBuffer<type>::normalizeGrayOutputNearZeroCentreredSigmoide(type *inputBuffer, type *outputBuffer, const type sensitivity, const type maxOutputValue) |
||||
{ |
||||
if (inputBuffer==NULL) |
||||
inputBuffer=Buffer(); |
||||
if (outputBuffer==NULL) |
||||
outputBuffer=Buffer(); |
||||
|
||||
type X0cube=sensitivity*sensitivity*sensitivity; |
||||
|
||||
register type *inputBufferPTR=inputBuffer; |
||||
register type *outputBufferPTR=outputBuffer; |
||||
|
||||
for (register size_t j = 0; j < _NBpixels; ++j, ++inputBufferPTR) |
||||
{ |
||||
|
||||
type currentCubeLuminance=*inputBufferPTR**inputBufferPTR**inputBufferPTR; |
||||
*(outputBufferPTR++)=maxOutputValue*currentCubeLuminance/(currentCubeLuminance+X0cube); |
||||
} |
||||
} |
||||
|
||||
// normalize and adjust luminance with a sigmoid centered on 128
|
||||
template <class type> |
||||
void TemplateBuffer<type>::normalizeGrayOutputCentredSigmoide(const type meanValue, const type sensitivity, const type maxOutputValue, type *inputBuffer, type *outputBuffer, const unsigned int nbPixels) |
||||
{ |
||||
|
||||
if (sensitivity==1.0) |
||||
{ |
||||
std::cerr<<"TemplateBuffer::TemplateBuffer<type>::normalizeGrayOutputCentredSigmoide error: 2nd parameter (sensitivity) must not equal 0, copying original data..."<<std::endl; |
||||
memcpy(outputBuffer, inputBuffer, sizeof(type)*nbPixels); |
||||
return; |
||||
} |
||||
|
||||
type X0=maxOutputValue/(sensitivity-(type)1.0); |
||||
|
||||
register type *inputBufferPTR=inputBuffer; |
||||
register type *outputBufferPTR=outputBuffer; |
||||
|
||||
for (register size_t j = 0; j < nbPixels; ++j, ++inputBufferPTR) |
||||
*(outputBufferPTR++)=(meanValue+(meanValue+X0)*(*(inputBufferPTR)-meanValue)/(_abs(*(inputBufferPTR)-meanValue)+X0)); |
||||
|
||||
} |
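// In formula form, the mapping above is y = meanValue + (meanValue + X0) * (x - meanValue) / (|x - meanValue| + X0)
// with X0 = maxOutputValue / (sensitivity - 1): values close to the mean are passed with a nearly
// linear gain of (meanValue + X0) / X0, while values far from the mean saturate towards
// meanValue +/- (meanValue + X0).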
||||
|
||||
// center and reduce the image: (image-mean)/std
|
||||
template <class type> |
||||
void TemplateBuffer<type>::centerReductImageLuminance(type *inputOutputBuffer) |
||||
{ |
||||
// if inputOutputBuffer is unassigned, rewrite the buffer in place
|
||||
if (inputOutputBuffer==NULL) |
||||
inputOutputBuffer=Buffer(); |
||||
type meanValue=0, stdValue=0; |
||||
|
||||
// compute mean value
|
||||
for (register size_t j = 0; j < _NBpixels; ++j) |
||||
meanValue+=inputOutputBuffer[j]; |
||||
meanValue/=((type)_NBpixels); |
||||
|
||||
// compute std value
|
||||
register type *inputOutputBufferPTR=inputOutputBuffer; |
||||
for (size_t index=0;index<_NBpixels;++index) |
||||
{ |
||||
type inputMinusMean=*(inputOutputBufferPTR++)-meanValue; |
||||
stdValue+=inputMinusMean*inputMinusMean; |
||||
} |
||||
|
||||
stdValue=sqrt(stdValue/((type)_NBpixels)); |
||||
// adjust luminance with regard to the mean and std values
|
||||
inputOutputBufferPTR=inputOutputBuffer; |
||||
for (size_t index=0;index<_NBpixels;++index, ++inputOutputBufferPTR) |
||||
*inputOutputBufferPTR=(*(inputOutputBufferPTR)-meanValue)/stdValue; |
||||
} |
||||
|
||||
|
||||
template <class type> |
||||
type TemplateBuffer<type>::_abs(const type x) |
||||
{ |
||||
|
||||
if (x>0) |
||||
return x; |
||||
else |
||||
return -x; |
||||
} |
||||
|
||||
template < > |
||||
inline int TemplateBuffer<int>::_abs(const int x) |
||||
{ |
||||
return std::abs(x); |
||||
} |
||||
template < > |
||||
inline double TemplateBuffer<double>::_abs(const double x) |
||||
{ |
||||
return std::fabs(x); |
||||
} |
||||
|
||||
template < > |
||||
inline float TemplateBuffer<float>::_abs(const float x) |
||||
{ |
||||
return std::fabs(x); |
||||
} |
||||
|
||||
} |
||||
#endif |
||||
|
||||
|
||||
|
@ -0,0 +1,146 @@ |
||||
//============================================================================
|
||||
// Name : retinademo.cpp
|
||||
// Author : Alexandre Benoit, benoit.alexandre.vision@gmail.com
|
||||
// Version : 0.1
|
||||
// Copyright : LISTIC/GIPSA French Labs, july 2011
|
||||
// Description : Gipsa/LISTIC Labs retina demo in C++, Ansi-style
|
||||
//============================================================================
|
||||
|
||||
#include <iostream> |
||||
#include <cstring> |
||||
|
||||
#include "HVStools/retina.hpp" // for retina processing |
||||
#include <opencv/highgui.h> // image IO |
||||
|
||||
void help(std::string errorMessage) |
||||
{ |
||||
std::cout<<"Program init error : "<<errorMessage<<std::endl; |
||||
std::cout<<"\nProgram call procedure : retinaDemo [processing mode] [Optional : media target] [Optional LAST parameter: \"log\" to activate retina log sampling]"<<std::endl; |
||||
std::cout<<"\t[processing mode] :"<<std::endl; |
||||
std::cout<<"\t -image : for still image processing"<<std::endl; |
||||
std::cout<<"\t -video : for video stream processing"<<std::endl; |
||||
std::cout<<"\t[Optional : media target] :"<<std::endl; |
||||
std::cout<<"\t if processing an image or video file, then, specify the path and filename of the target to process"<<std::endl; |
||||
std::cout<<"\t leave empty if processing video stream coming from a connected video device"<<std::endl; |
||||
std::cout<<"\t[Optional : activate retina log sampling] : an optional last parameter can be specified for retina spatial log sampling"<<std::endl; |
||||
std::cout<<"\t set \"log\" without quotes to activate this sampling, output frame size will be divided by 4"<<std::endl; |
||||
std::cout<<"\nExamples:"<<std::endl; |
||||
std::cout<<"\t-Image processing : ./retinaDemo -image lena.jpg"<<std::endl; |
||||
std::cout<<"\t-Image processing with log sampling : ./retinaDemo -image lena.jpg log"<<std::endl; |
||||
std::cout<<"\t-Video processing : ./retinaDemo -video myMovie.mp4"<<std::endl; |
||||
std::cout<<"\t-Live video processing : ./retinaDemo -video"<<std::endl; |
||||
std::cout<<"\nPlease start again with new parameters"<<std::endl; |
||||
} |
||||
|
||||
int main(int argc, char* argv[]) { |
||||
// welcome message
|
||||
std::cout<<"****************************************************"<<std::endl; |
||||
std::cout<<"* Retina demonstration : demonstrates the use of is a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl; |
||||
std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl; |
||||
std::cout<<"* As a summary, these are the retina model properties:"<<std::endl; |
||||
std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl; |
||||
std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl; |
||||
std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl; |
||||
std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl; |
||||
std::cout<<"* for more information, reer to the following papers :"<<std::endl; |
||||
std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl; |
||||
std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl; |
||||
std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl; |
||||
std::cout<<"****************************************************"<<std::endl; |
||||
|
||||
// basic input arguments checking
|
||||
if (argc<2) |
||||
{ |
||||
help("bad number of parameter"); |
||||
return -1; |
||||
} |
||||
|
||||
bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
|
||||
|
||||
std::string inputMediaType=argv[1]; |
||||
|
||||
// declare the retina input buffer... that will be fed differently in regard of the input media
|
||||
cv::Mat inputFrame; |
||||
cv::VideoCapture videoCapture; // in case a video media is used, its manager is declared here
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// checking input media type (still image, video file, live video acquisition)
|
||||
if (!strcmp(inputMediaType.c_str(), "-image") && argc >= 3) |
||||
{ |
||||
std::cout<<"RetinaDemo: processing image "<<argv[2]<<std::endl; |
||||
// image processing case
|
||||
inputFrame = cv::imread(std::string(argv[2]), 1); // load image in color mode
|
||||
}else |
||||
if (!strcmp(inputMediaType.c_str(), "-video")) |
||||
{ |
||||
if (argc == 2 || (argc == 3 && useLogSampling)) // attempt to grab images from a video capture device
|
||||
{ |
||||
videoCapture.open(0); |
||||
}else// attempt to grab images from a video filestream
|
||||
{ |
||||
std::cout<<"RetinaDemo: processing video stream "<<argv[2]<<std::endl; |
||||
videoCapture.open(argv[2]); |
||||
} |
||||
|
||||
// grab a first frame to check if everything is ok
|
||||
videoCapture>>inputFrame; |
||||
}else |
||||
{ |
||||
// bad command parameter
|
||||
help("bad command parameter"); |
||||
return -1; |
||||
} |
||||
|
||||
if (inputFrame.empty()) |
||||
{ |
||||
help("Input media could not be loaded, aborting"); |
||||
return -1; |
||||
} |
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// Program start in a try/catch safety context (Retina may throw errors)
|
||||
try |
||||
{ |
||||
// create a retina instance with default parameters setup, uncomment the initialisation you want to test
|
||||
cv::Ptr<cv::Retina> myRetina; |
||||
|
||||
// if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
|
||||
if (useLogSampling) |
||||
myRetina = new cv::Retina("params.xml", inputFrame.size(), true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0); |
||||
else// -> else allocate "classical" retina :
|
||||
myRetina = new cv::Retina("params.xml", inputFrame.size()); |
||||
|
||||
// declare retina output buffers
|
||||
cv::Mat retinaOutput_parvo; |
||||
cv::Mat retinaOutput_magno; |
||||
|
||||
// processing loop with stop condition
|
||||
bool continueProcessing=true; // FIXME : not yet managed during process...
|
||||
while(continueProcessing) |
||||
{ |
||||
// if using video stream, then, grabbing a new frame, else, input remains the same
|
||||
if (videoCapture.isOpened()) |
||||
videoCapture>>inputFrame; |
||||
|
||||
// run retina filter
|
||||
myRetina->run(inputFrame); |
||||
// Retrieve and display retina output
|
||||
myRetina->getParvo(retinaOutput_parvo); |
||||
myRetina->getMagno(retinaOutput_magno); |
||||
cv::imshow("retina input", inputFrame); |
||||
cv::imshow("Retina Parvo", retinaOutput_parvo); |
||||
cv::imshow("Retina Magno", retinaOutput_magno); |
||||
cv::waitKey(10); |
||||
} |
||||
}catch(const cv::Exception &e) |
||||
{ |
||||
std::cerr<<"Error using Retina : "<<e.what()<<std::endl; |
||||
} |
||||
|
||||
// Program end message
|
||||
std::cout<<"Retina demo end"<<std::endl; |
||||
|
||||
return 0; |
||||
} |
||||
|