Super Resolution module

pull/532/head
Vladislav Vinogradov 12 years ago
parent 620c699456
commit 7a0d6f7733
Changed files (27; lines changed in parentheses):
  1. modules/java/generator/rst_parser.py (4)
  2. modules/superres/CMakeLists.txt (33)
  3. modules/superres/doc/super_resolution.rst (84)
  4. modules/superres/doc/superres.rst (8)
  5. modules/superres/include/opencv2/superres/optical_flow.hpp (73)
  6. modules/superres/include/opencv2/superres/superres.hpp (98)
  7. modules/superres/perf/perf_main.cpp (3)
  8. modules/superres/perf/perf_precomp.cpp (1)
  9. modules/superres/perf/perf_precomp.hpp (26)
  10. modules/superres/perf/perf_superres.cpp (153)
  11. modules/superres/src/btv_l1.cpp (619)
  12. modules/superres/src/btv_l1_gpu.cpp (580)
  13. modules/superres/src/cuda/btv_l1_gpu.cu (234)
  14. modules/superres/src/frame_source.cpp (255)
  15. modules/superres/src/input_array_utility.cpp (273)
  16. modules/superres/src/input_array_utility.hpp (63)
  17. modules/superres/src/optical_flow.cpp (721)
  18. modules/superres/src/precomp.cpp (43)
  19. modules/superres/src/precomp.hpp (78)
  20. modules/superres/src/ring_buffer.hpp (79)
  21. modules/superres/src/super_resolution.cpp (85)
  22. modules/superres/test/test_main.cpp (3)
  23. modules/superres/test/test_precomp.cpp (1)
  24. modules/superres/test/test_precomp.hpp (23)
  25. modules/superres/test/test_superres.cpp (236)
  26. samples/gpu/CMakeLists.txt (2)
  27. samples/gpu/super_resolution.cpp (152)

@@ -1,7 +1,7 @@
#!/usr/bin/env python
import os, sys, re, string, fnmatch
allmodules = ["core", "flann", "imgproc", "ml", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "gpu", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "ocl"]
allmodules = ["core", "flann", "imgproc", "ml", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "gpu", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "ocl", "superres"]
verbose = False
show_warnings = True
show_errors = True
@@ -380,7 +380,7 @@ class RstParser(object):
@classmethod
def parse_namespace(cls, func, section_name):
known_namespaces = ["cv", "gpu", "flann"]
known_namespaces = ["cv", "gpu", "flann", "superres"]
l = section_name.strip()
for namespace in known_namespaces:
if l.startswith(namespace + "::"):

@@ -0,0 +1,33 @@
if(ANDROID OR IOS)
ocv_module_disable(superres)
endif()
set(the_description "Super Resolution")
ocv_add_module(superres opencv_imgproc opencv_video OPTIONAL opencv_gpu opencv_highgui)
ocv_module_include_directories()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef /wd4127)
if(HAVE_CUDA)
string(REPLACE "-Wsign-promo" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
ocv_source_group("Src\\Cuda" GLOB "src/cuda/*.cu")
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpu/include" ${CUDA_INCLUDE_DIRS})
file(GLOB lib_cuda "src/cuda/*.cu")
ocv_cuda_compile(cuda_objs ${lib_cuda})
set(cuda_link_libs ${CUDA_LIBRARIES})
else()
set(lib_cuda "")
set(cuda_objs "")
set(cuda_link_libs "")
endif()
ocv_glob_module_sources(SOURCES ${lib_cuda} ${cuda_objs})
ocv_create_module(${cuda_link_libs})
ocv_add_precompiled_headers(${the_module})
ocv_add_accuracy_tests()
ocv_add_perf_tests()

@@ -0,0 +1,84 @@
Super Resolution
================
.. highlight:: cpp
The Super Resolution module contains a set of functions and classes that can be used to solve the problem of resolution enhancement. A few methods are implemented; most of them are described in the papers [Farsiu03]_ and [Mitzel09]_.
superres::SuperResolution
-------------------------
Base class for Super Resolution algorithms.
.. ocv:class:: superres::SuperResolution : public Algorithm, public superres::FrameSource
The class is only used to define the common interface for the whole family of Super Resolution algorithms.
superres::SuperResolution::setInput
-----------------------------------
Set the input frame source for the Super Resolution algorithm.
.. ocv:function:: void superres::SuperResolution::setInput(const Ptr<FrameSource>& frameSource)
:param frameSource: Input frame source
superres::SuperResolution::nextFrame
------------------------------------
Process the next frame from the input source and return the output result.
.. ocv:function:: void superres::SuperResolution::nextFrame(OutputArray frame)
:param frame: Output result
superres::SuperResolution::collectGarbage
-----------------------------------------
Clear all internal buffers.
.. ocv:function:: void superres::SuperResolution::collectGarbage()
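The typical call sequence is: set the input source, then pull frames until an empty result is returned. Below is a minimal usage sketch (not part of this patch), assuming a video file readable by ``createFrameSource_Video`` and an OpenCV build with highgui available for display::

    #include "opencv2/superres/superres.hpp"
    #include "opencv2/highgui/highgui.hpp"

    using namespace cv;
    using namespace cv::superres;

    int main()
    {
        // "input.avi" is a placeholder file name
        Ptr<FrameSource> source = createFrameSource_Video("input.avi");

        Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();
        superRes->setInput(source);

        for (;;)
        {
            Mat frame;
            superRes->nextFrame(frame);   // an empty result signals the end of the stream
            if (frame.empty())
                break;

            imshow("Super Resolution", frame);
            if (waitKey(30) >= 0)
                break;
        }

        superRes->collectGarbage();       // release internal buffers
        return 0;
    }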
superres::createSuperResolution_BTVL1
-------------------------------------
Create the Bilateral TV-L1 Super Resolution algorithm.
.. ocv:function:: Ptr<SuperResolution> superres::createSuperResolution_BTVL1()
.. ocv:function:: Ptr<SuperResolution> superres::createSuperResolution_BTVL1_GPU()
This class implements the Super Resolution algorithm described in the papers [Farsiu03]_ and [Mitzel09]_.
The following important members of the class control the algorithm; they can be set after constructing the class instance (see the usage sketch after this list):
* **int scale** Scale factor.
* **int iterations** Iteration count.
* **double tau** Asymptotic value of steepest descent method.
* **double lambda** Weight parameter to balance data term and smoothness term.
* **double alpha** Parameter of spatial distribution in Bilateral-TV.
* **int btvKernelSize** Kernel size of Bilateral-TV filter.
* **int blurKernelSize** Gaussian blur kernel size.
* **double blurSigma** Gaussian blur sigma.
* **int temporalAreaRadius** Radius of the temporal search area.
* **Ptr<DenseOpticalFlowExt> opticalFlow** Dense optical flow algorithm.
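These members are registered through the ``cv::Algorithm`` parameter machinery (see the ``CV_INIT_ALGORITHM`` block in ``btv_l1.cpp``), so they are set and read by name. A short configuration sketch, not part of this patch; the values shown are the implementation defaults, and the Dual TV-L1 optical flow declared in ``optical_flow.hpp`` is swapped in for illustration::

    Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();

    // values shown are the defaults set in the BTVL1_Base/BTVL1 constructors
    superRes->set("scale", 4);
    superRes->set("iterations", 180);
    superRes->set("tau", 1.3);
    superRes->set("lambda", 0.03);
    superRes->set("alpha", 0.7);
    superRes->set("btvKernelSize", 7);
    superRes->set("blurKernelSize", 5);
    superRes->set("blurSigma", 0.0);
    superRes->set("temporalAreaRadius", 4);

    // the dense optical flow algorithm can be replaced, e.g. by the Dual TV-L1 implementation
    superRes->set("opticalFlow", createOptFlow_DualTVL1());

    // parameters can be read back by name as well
    const int scale = superRes->get<int>("scale");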
.. [Farsiu03] S. Farsiu, D. Robinson, M. Elad, P. Milanfar. Fast and robust Super-Resolution. Proc. 2003 IEEE Int. Conf. on Image Processing, pp. 291–294, 2003.
.. [Mitzel09] D. Mitzel, T. Pock, T. Schoenemann, D. Cremers. Video super resolution using duality based TV-L1 optical flow. DAGM, 2009.

@@ -0,0 +1,8 @@
**************************
superres. Super Resolution
**************************
.. toctree::
:maxdepth: 2
super_resolution

@@ -0,0 +1,73 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__
#define __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__
#include "opencv2/core/core.hpp"
namespace cv
{
namespace superres
{
class CV_EXPORTS DenseOpticalFlowExt : public cv::Algorithm
{
public:
virtual void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2 = noArray()) = 0;
virtual void collectGarbage() = 0;
};
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_GPU();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_GPU();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Brox_GPU();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_GPU();
}
}
#endif // __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__

@@ -0,0 +1,98 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_SUPERRES_HPP__
#define __OPENCV_SUPERRES_HPP__
#include "opencv2/core/core.hpp"
namespace cv
{
namespace superres
{
CV_EXPORTS bool initModule_superres();
class CV_EXPORTS FrameSource
{
public:
virtual ~FrameSource();
virtual void nextFrame(OutputArray frame) = 0;
virtual void reset() = 0;
};
CV_EXPORTS Ptr<FrameSource> createFrameSource_Empty();
CV_EXPORTS Ptr<FrameSource> createFrameSource_Video(const std::string& fileName);
CV_EXPORTS Ptr<FrameSource> createFrameSource_Video_GPU(const std::string& fileName);
CV_EXPORTS Ptr<FrameSource> createFrameSource_Camera(int deviceId = 0);
class CV_EXPORTS SuperResolution : public cv::Algorithm, public FrameSource
{
public:
void setInput(const Ptr<FrameSource>& frameSource);
void nextFrame(OutputArray frame);
void reset();
virtual void collectGarbage();
protected:
SuperResolution();
virtual void initImpl(Ptr<FrameSource>& frameSource) = 0;
virtual void processImpl(Ptr<FrameSource>& frameSource, OutputArray output) = 0;
private:
Ptr<FrameSource> frameSource_;
bool firstCall_;
};
// S. Farsiu, D. Robinson, M. Elad, P. Milanfar. Fast and robust multiframe super resolution.
// Dennis Mitzel, Thomas Pock, Thomas Schoenemann, Daniel Cremers. Video Super Resolution using Duality Based TV-L1 Optical Flow.
CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1();
CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_GPU();
}
}
#endif // __OPENCV_SUPERRES_HPP__

@@ -0,0 +1,3 @@
#include "perf_precomp.hpp"
CV_PERF_TEST_MAIN(superres)

@@ -0,0 +1 @@
#include "perf_precomp.hpp"

@@ -0,0 +1,26 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
#include "opencv2/core/core.hpp"
#include "opencv2/core/gpumat.hpp"
#include "opencv2/ts/ts_perf.hpp"
#include "opencv2/superres/superres.hpp"
#include "opencv2/superres/optical_flow.hpp"
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
#endif

@@ -0,0 +1,153 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace std::tr1;
using namespace testing;
using namespace perf;
using namespace cv;
using namespace cv::superres;
using namespace cv::gpu;
#define GPU_SANITY_CHECK(mat, ...) \
do{ \
Mat gpu_##mat(mat); \
SANITY_CHECK(gpu_##mat, ## __VA_ARGS__); \
} while(0)
#define CPU_SANITY_CHECK(mat, ...) \
do{ \
Mat cpu_##mat(mat); \
SANITY_CHECK(cpu_##mat, ## __VA_ARGS__); \
} while(0)
namespace
{
class OneFrameSource_CPU : public FrameSource
{
public:
explicit OneFrameSource_CPU(const Mat& frame) : frame_(frame) {}
void nextFrame(OutputArray frame)
{
frame.getMatRef() = frame_;
}
void reset()
{
}
private:
Mat frame_;
};
class OneFrameSource_GPU : public FrameSource
{
public:
explicit OneFrameSource_GPU(const GpuMat& frame) : frame_(frame) {}
void nextFrame(OutputArray frame)
{
frame.getGpuMatRef() = frame_;
}
void reset()
{
}
private:
GpuMat frame_;
};
class ZeroOpticalFlow : public DenseOpticalFlowExt
{
public:
void calc(InputArray frame0, InputArray, OutputArray flow1, OutputArray flow2)
{
cv::Size size = frame0.size();
if (!flow2.needed())
{
flow1.create(size, CV_32FC2);
if (flow1.kind() == cv::_InputArray::GPU_MAT)
flow1.getGpuMatRef().setTo(cv::Scalar::all(0));
else
flow1.getMatRef().setTo(cv::Scalar::all(0));
}
else
{
flow1.create(size, CV_32FC1);
flow2.create(size, CV_32FC1);
if (flow1.kind() == cv::_InputArray::GPU_MAT)
flow1.getGpuMatRef().setTo(cv::Scalar::all(0));
else
flow1.getMatRef().setTo(cv::Scalar::all(0));
if (flow2.kind() == cv::_InputArray::GPU_MAT)
flow2.getGpuMatRef().setTo(cv::Scalar::all(0));
else
flow2.getMatRef().setTo(cv::Scalar::all(0));
}
}
void collectGarbage()
{
}
};
}
PERF_TEST_P(Size_MatType, SuperResolution_BTVL1,
Combine(Values(szSmall64, szSmall128),
Values(MatType(CV_8UC1), MatType(CV_8UC3))))
{
declare.time(5 * 60);
const Size size = get<0>(GetParam());
const int type = get<1>(GetParam());
Mat frame(size, type);
declare.in(frame, WARMUP_RNG);
const int scale = 2;
const int iterations = 50;
const int temporalAreaRadius = 1;
Ptr<DenseOpticalFlowExt> opticalFlow(new ZeroOpticalFlow);
if (PERF_RUN_GPU())
{
Ptr<SuperResolution> superRes = createSuperResolution_BTVL1_GPU();
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->set("opticalFlow", opticalFlow);
superRes->setInput(new OneFrameSource_GPU(GpuMat(frame)));
GpuMat dst;
superRes->nextFrame(dst);
TEST_CYCLE_N(10) superRes->nextFrame(dst);
GPU_SANITY_CHECK(dst);
}
else
{
Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->set("opticalFlow", opticalFlow);
superRes->setInput(new OneFrameSource_CPU(frame));
Mat dst;
superRes->nextFrame(dst);
TEST_CYCLE_N(10) superRes->nextFrame(dst);
CPU_SANITY_CHECK(dst);
}
}

@@ -0,0 +1,619 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// S. Farsiu, D. Robinson, M. Elad, P. Milanfar. Fast and robust multiframe super resolution.
// Dennis Mitzel, Thomas Pock, Thomas Schoenemann, Daniel Cremers. Video Super Resolution using Duality Based TV-L1 Optical Flow.
#include "precomp.hpp"
using namespace std;
using namespace cv;
using namespace cv::superres;
using namespace cv::superres::detail;
namespace
{
void calcRelativeMotions(const vector<Mat>& forwardMotions, const vector<Mat>& backwardMotions,
vector<Mat>& relForwardMotions, vector<Mat>& relBackwardMotions,
int baseIdx, Size size)
{
const int count = static_cast<int>(forwardMotions.size());
relForwardMotions.resize(count);
relForwardMotions[baseIdx].create(size, CV_32FC2);
relForwardMotions[baseIdx].setTo(Scalar::all(0));
relBackwardMotions.resize(count);
relBackwardMotions[baseIdx].create(size, CV_32FC2);
relBackwardMotions[baseIdx].setTo(Scalar::all(0));
for (int i = baseIdx - 1; i >= 0; --i)
{
add(relForwardMotions[i + 1], forwardMotions[i], relForwardMotions[i]);
add(relBackwardMotions[i + 1], backwardMotions[i + 1], relBackwardMotions[i]);
}
for (int i = baseIdx + 1; i < count; ++i)
{
add(relForwardMotions[i - 1], backwardMotions[i], relForwardMotions[i]);
add(relBackwardMotions[i - 1], forwardMotions[i - 1], relBackwardMotions[i]);
}
}
void upscaleMotions(const vector<Mat>& lowResMotions, vector<Mat>& highResMotions, int scale)
{
highResMotions.resize(lowResMotions.size());
for (size_t i = 0; i < lowResMotions.size(); ++i)
{
resize(lowResMotions[i], highResMotions[i], Size(), scale, scale, INTER_CUBIC);
multiply(highResMotions[i], Scalar::all(scale), highResMotions[i]);
}
}
void buildMotionMaps(const Mat& forwardMotion, const Mat& backwardMotion, Mat& forwardMap, Mat& backwardMap)
{
forwardMap.create(forwardMotion.size(), CV_32FC2);
backwardMap.create(forwardMotion.size(), CV_32FC2);
for (int y = 0; y < forwardMotion.rows; ++y)
{
const Point2f* forwardMotionRow = forwardMotion.ptr<Point2f>(y);
const Point2f* backwardMotionRow = backwardMotion.ptr<Point2f>(y);
Point2f* forwardMapRow = forwardMap.ptr<Point2f>(y);
Point2f* backwardMapRow = backwardMap.ptr<Point2f>(y);
for (int x = 0; x < forwardMotion.cols; ++x)
{
Point2f base(static_cast<float>(x), static_cast<float>(y));
forwardMapRow[x] = base + backwardMotionRow[x];
backwardMapRow[x] = base + forwardMotionRow[x];
}
}
}
template <typename T>
void upscaleImpl(const Mat& src, Mat& dst, int scale)
{
dst.create(src.rows * scale, src.cols * scale, src.type());
dst.setTo(Scalar::all(0));
for (int y = 0, Y = 0; y < src.rows; ++y, Y += scale)
{
const T* srcRow = src.ptr<T>(y);
T* dstRow = dst.ptr<T>(Y);
for (int x = 0, X = 0; x < src.cols; ++x, X += scale)
dstRow[X] = srcRow[x];
}
}
void upscale(const Mat& src, Mat& dst, int scale)
{
typedef void (*func_t)(const Mat& src, Mat& dst, int scale);
static const func_t funcs[] =
{
0, upscaleImpl<float>, 0, upscaleImpl<Point3f>
};
CV_Assert( src.channels() == 1 || src.channels() == 3 ); // funcs[] has no 4-channel implementation
const func_t func = funcs[src.channels()];
func(src, dst, scale);
}
float diffSign(float a, float b)
{
return a > b ? 1.0f : a < b ? -1.0f : 0.0f;
}
Point3f diffSign(Point3f a, Point3f b)
{
return Point3f(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f
);
}
void diffSign(const Mat& src1, const Mat& src2, Mat& dst)
{
const int count = src1.cols * src1.channels();
dst.create(src1.size(), src1.type());
for (int y = 0; y < src1.rows; ++y)
{
const float* src1Ptr = src1.ptr<float>(y);
const float* src2Ptr = src2.ptr<float>(y);
float* dstPtr = dst.ptr<float>(y);
for (int x = 0; x < count; ++x)
dstPtr[x] = diffSign(src1Ptr[x], src2Ptr[x]);
}
}
void calcBtvWeights(int btvKernelSize, double alpha, vector<float>& btvWeights)
{
const size_t size = btvKernelSize * btvKernelSize;
btvWeights.resize(size);
const int ksize = (btvKernelSize - 1) / 2;
const float alpha_f = static_cast<float>(alpha);
for (int m = 0, ind = 0; m <= ksize; ++m)
{
for (int l = ksize; l + m >= 0; --l, ++ind)
btvWeights[ind] = pow(alpha_f, std::abs(m) + std::abs(l));
}
}
template <typename T>
struct BtvRegularizationBody : ParallelLoopBody
{
void operator ()(const Range& range) const;
Mat src;
mutable Mat dst;
int ksize;
const float* btvWeights;
};
template <typename T>
void BtvRegularizationBody<T>::operator ()(const Range& range) const
{
for (int i = range.start; i < range.end; ++i)
{
const T* srcRow = src.ptr<T>(i);
T* dstRow = dst.ptr<T>(i);
for(int j = ksize; j < src.cols - ksize; ++j)
{
const T srcVal = srcRow[j];
for (int m = 0, ind = 0; m <= ksize; ++m)
{
const T* srcRow2 = src.ptr<T>(i - m);
const T* srcRow3 = src.ptr<T>(i + m);
for (int l = ksize; l + m >= 0; --l, ++ind)
{
dstRow[j] += btvWeights[ind] * (diffSign(srcVal, srcRow3[j + l]) - diffSign(srcRow2[j - l], srcVal));
}
}
}
}
}
template <typename T>
void calcBtvRegularizationImpl(const Mat& src, Mat& dst, int btvKernelSize, const vector<float>& btvWeights)
{
dst.create(src.size(), src.type());
dst.setTo(Scalar::all(0));
const int ksize = (btvKernelSize - 1) / 2;
BtvRegularizationBody<T> body;
body.src = src;
body.dst = dst;
body.ksize = ksize;
body.btvWeights = &btvWeights[0];
parallel_for_(Range(ksize, src.rows - ksize), body);
}
void calcBtvRegularization(const Mat& src, Mat& dst, int btvKernelSize, const vector<float>& btvWeights)
{
typedef void (*func_t)(const Mat& src, Mat& dst, int btvKernelSize, const vector<float>& btvWeights);
static const func_t funcs[] =
{
0, calcBtvRegularizationImpl<float>, 0, calcBtvRegularizationImpl<Point3f>
};
const func_t func = funcs[src.channels()];
func(src, dst, btvKernelSize, btvWeights);
}
class BTVL1_Base
{
public:
BTVL1_Base();
void process(const vector<Mat>& src, Mat& dst,
const vector<Mat>& forwardMotions, const vector<Mat>& backwardMotions,
int baseIdx);
void collectGarbage();
protected:
int scale_;
int iterations_;
double tau_;
double lambda_;
double alpha_;
int btvKernelSize_;
int blurKernelSize_;
double blurSigma_;
Ptr<DenseOpticalFlowExt> opticalFlow_;
private:
Ptr<FilterEngine> filter_;
int curBlurKernelSize_;
double curBlurSigma_;
int curSrcType_;
vector<float> btvWeights_;
int curBtvKernelSize_;
double curAlpha_;
vector<Mat> lowResForwardMotions_;
vector<Mat> lowResBackwardMotions_;
vector<Mat> highResForwardMotions_;
vector<Mat> highResBackwardMotions_;
vector<Mat> forwardMaps_;
vector<Mat> backwardMaps_;
Mat highRes_;
Mat diffTerm_, regTerm_;
Mat a_, b_, c_;
};
BTVL1_Base::BTVL1_Base()
{
scale_ = 4;
iterations_ = 180;
lambda_ = 0.03;
tau_ = 1.3;
alpha_ = 0.7;
btvKernelSize_ = 7;
blurKernelSize_ = 5;
blurSigma_ = 0.0;
opticalFlow_ = createOptFlow_Farneback();
curBlurKernelSize_ = -1;
curBlurSigma_ = -1.0;
curSrcType_ = -1;
curBtvKernelSize_ = -1;
curAlpha_ = -1.0;
}
void BTVL1_Base::process(const vector<Mat>& src, Mat& dst, const vector<Mat>& forwardMotions, const vector<Mat>& backwardMotions, int baseIdx)
{
CV_Assert( scale_ > 1 );
CV_Assert( iterations_ > 0 );
CV_Assert( tau_ > 0.0 );
CV_Assert( alpha_ > 0.0 );
CV_Assert( btvKernelSize_ > 0 );
CV_Assert( blurKernelSize_ > 0 );
CV_Assert( blurSigma_ >= 0.0 );
// update blur filter and btv weights
if (filter_.empty() || blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
{
filter_ = createGaussianFilter(src[0].type(), Size(blurKernelSize_, blurKernelSize_), blurSigma_);
curBlurKernelSize_ = blurKernelSize_;
curBlurSigma_ = blurSigma_;
curSrcType_ = src[0].type();
}
if (btvWeights_.empty() || btvKernelSize_ != curBtvKernelSize_ || alpha_ != curAlpha_)
{
calcBtvWeights(btvKernelSize_, alpha_, btvWeights_);
curBtvKernelSize_ = btvKernelSize_;
curAlpha_ = alpha_;
}
// calc high res motions
calcRelativeMotions(forwardMotions, backwardMotions, lowResForwardMotions_, lowResBackwardMotions_, baseIdx, src[0].size());
upscaleMotions(lowResForwardMotions_, highResForwardMotions_, scale_);
upscaleMotions(lowResBackwardMotions_, highResBackwardMotions_, scale_);
forwardMaps_.resize(highResForwardMotions_.size());
backwardMaps_.resize(highResForwardMotions_.size());
for (size_t i = 0; i < highResForwardMotions_.size(); ++i)
buildMotionMaps(highResForwardMotions_[i], highResBackwardMotions_[i], forwardMaps_[i], backwardMaps_[i]);
// initial estimation
const Size lowResSize = src[0].size();
const Size highResSize(lowResSize.width * scale_, lowResSize.height * scale_);
resize(src[baseIdx], highRes_, highResSize, 0, 0, INTER_CUBIC);
// iterations
diffTerm_.create(highResSize, highRes_.type());
a_.create(highResSize, highRes_.type());
b_.create(highResSize, highRes_.type());
c_.create(lowResSize, highRes_.type());
for (int i = 0; i < iterations_; ++i)
{
diffTerm_.setTo(Scalar::all(0));
for (size_t k = 0; k < src.size(); ++k)
{
// a = M * Ih
remap(highRes_, a_, backwardMaps_[k], noArray(), INTER_NEAREST);
// b = HM * Ih
filter_->apply(a_, b_);
// c = DHM * Ih
resize(b_, c_, lowResSize, 0, 0, INTER_NEAREST);
diffSign(src[k], c_, c_);
// a = Dt * diff
upscale(c_, a_, scale_);
// b = HtDt * diff
filter_->apply(a_, b_);
// a = MtHtDt * diff
remap(b_, a_, forwardMaps_[k], noArray(), INTER_NEAREST);
add(diffTerm_, a_, diffTerm_);
}
if (lambda_ > 0)
{
calcBtvRegularization(highRes_, regTerm_, btvKernelSize_, btvWeights_);
addWeighted(diffTerm_, 1.0, regTerm_, -lambda_, 0.0, diffTerm_);
}
addWeighted(highRes_, 1.0, diffTerm_, tau_, 0.0, highRes_);
}
Rect inner(btvKernelSize_, btvKernelSize_, highRes_.cols - 2 * btvKernelSize_, highRes_.rows - 2 * btvKernelSize_);
highRes_(inner).copyTo(dst);
}
void BTVL1_Base::collectGarbage()
{
filter_.release();
lowResForwardMotions_.clear();
lowResBackwardMotions_.clear();
highResForwardMotions_.clear();
highResBackwardMotions_.clear();
forwardMaps_.clear();
backwardMaps_.clear();
highRes_.release();
diffTerm_.release();
regTerm_.release();
a_.release();
b_.release();
c_.release();
}
////////////////////////////////////////////////////////////////////
class BTVL1 : public SuperResolution, private BTVL1_Base
{
public:
AlgorithmInfo* info() const;
BTVL1();
void collectGarbage();
protected:
void initImpl(Ptr<FrameSource>& frameSource);
void processImpl(Ptr<FrameSource>& frameSource, OutputArray output);
private:
int temporalAreaRadius_;
void readNextFrame(Ptr<FrameSource>& frameSource);
void processFrame(int idx);
Mat curFrame_;
Mat prevFrame_;
vector<Mat> frames_;
vector<Mat> forwardMotions_;
vector<Mat> backwardMotions_;
vector<Mat> outputs_;
int storePos_;
int procPos_;
int outPos_;
vector<Mat> srcFrames_;
vector<Mat> srcForwardMotions_;
vector<Mat> srcBackwardMotions_;
Mat finalOutput_;
};
CV_INIT_ALGORITHM(BTVL1, "SuperResolution.BTVL1",
obj.info()->addParam(obj, "scale", obj.scale_, false, 0, 0, "Scale factor.");
obj.info()->addParam(obj, "iterations", obj.iterations_, false, 0, 0, "Iteration count.");
obj.info()->addParam(obj, "tau", obj.tau_, false, 0, 0, "Asymptotic value of steepest descent method.");
obj.info()->addParam(obj, "lambda", obj.lambda_, false, 0, 0, "Weight parameter to balance data term and smoothness term.");
obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Parameter of spatial distribution in Bilateral-TV.");
obj.info()->addParam(obj, "btvKernelSize", obj.btvKernelSize_, false, 0, 0, "Kernel size of Bilateral-TV filter.");
obj.info()->addParam(obj, "blurKernelSize", obj.blurKernelSize_, false, 0, 0, "Gaussian blur kernel size.");
obj.info()->addParam(obj, "blurSigma", obj.blurSigma_, false, 0, 0, "Gaussian blur sigma.");
obj.info()->addParam(obj, "temporalAreaRadius", obj.temporalAreaRadius_, false, 0, 0, "Radius of the temporal search area.");
obj.info()->addParam<DenseOpticalFlowExt>(obj, "opticalFlow", obj.opticalFlow_, false, 0, 0, "Dense optical flow algorithm."));
BTVL1::BTVL1()
{
temporalAreaRadius_ = 4;
}
void BTVL1::collectGarbage()
{
curFrame_.release();
prevFrame_.release();
frames_.clear();
forwardMotions_.clear();
backwardMotions_.clear();
outputs_.clear();
srcFrames_.clear();
srcForwardMotions_.clear();
srcBackwardMotions_.clear();
finalOutput_.release();
SuperResolution::collectGarbage();
BTVL1_Base::collectGarbage();
}
void BTVL1::initImpl(Ptr<FrameSource>& frameSource)
{
const int cacheSize = 2 * temporalAreaRadius_ + 1;
frames_.resize(cacheSize);
forwardMotions_.resize(cacheSize);
backwardMotions_.resize(cacheSize);
outputs_.resize(cacheSize);
storePos_ = -1;
for (int t = -temporalAreaRadius_; t <= temporalAreaRadius_; ++t)
readNextFrame(frameSource);
for (int i = 0; i <= temporalAreaRadius_; ++i)
processFrame(i);
procPos_ = temporalAreaRadius_;
outPos_ = -1;
}
void BTVL1::processImpl(Ptr<FrameSource>& frameSource, OutputArray _output)
{
if (outPos_ >= storePos_)
{
_output.release();
return;
}
readNextFrame(frameSource);
if (procPos_ < storePos_)
{
++procPos_;
processFrame(procPos_);
}
++outPos_;
const Mat& curOutput = at(outPos_, outputs_);
if (_output.kind() < _InputArray::OPENGL_BUFFER)
curOutput.convertTo(_output, CV_8U);
else
{
curOutput.convertTo(finalOutput_, CV_8U);
arrCopy(finalOutput_, _output);
}
}
void BTVL1::readNextFrame(Ptr<FrameSource>& frameSource)
{
frameSource->nextFrame(curFrame_);
if (curFrame_.empty())
return;
++storePos_;
curFrame_.convertTo(at(storePos_, frames_), CV_32F);
if (storePos_ > 0)
{
opticalFlow_->calc(prevFrame_, curFrame_, at(storePos_ - 1, forwardMotions_));
opticalFlow_->calc(curFrame_, prevFrame_, at(storePos_, backwardMotions_));
}
curFrame_.copyTo(prevFrame_);
}
void BTVL1::processFrame(int idx)
{
const int startIdx = max(idx - temporalAreaRadius_, 0);
const int procIdx = idx;
const int endIdx = min(startIdx + 2 * temporalAreaRadius_, storePos_);
const int count = endIdx - startIdx + 1;
srcFrames_.resize(count);
srcForwardMotions_.resize(count);
srcBackwardMotions_.resize(count);
int baseIdx = -1;
for (int i = startIdx, k = 0; i <= endIdx; ++i, ++k)
{
if (i == procIdx)
baseIdx = k;
srcFrames_[k] = at(i, frames_);
if (i < endIdx)
srcForwardMotions_[k] = at(i, forwardMotions_);
if (i > startIdx)
srcBackwardMotions_[k] = at(i, backwardMotions_);
}
process(srcFrames_, at(idx, outputs_), srcForwardMotions_, srcBackwardMotions_, baseIdx);
}
}
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1()
{
return new BTVL1;
}

@@ -0,0 +1,580 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// S. Farsiu, D. Robinson, M. Elad, P. Milanfar. Fast and robust multiframe super resolution.
// Dennis Mitzel, Thomas Pock, Thomas Schoenemann, Daniel Cremers. Video Super Resolution using Duality Based TV-L1 Optical Flow.
#include "precomp.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
using namespace cv::superres;
using namespace cv::superres::detail;
#ifndef HAVE_CUDA
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_GPU()
{
CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<SuperResolution>();
}
#else // HAVE_CUDA
namespace btv_l1_device
{
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf backwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY);
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, cudaStream_t stream);
void loadBtvWeights(const float* weights, size_t count);
template <int cn> void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize);
}
namespace
{
void calcRelativeMotions(const vector<pair<GpuMat, GpuMat> >& forwardMotions, const vector<pair<GpuMat, GpuMat> >& backwardMotions,
vector<pair<GpuMat, GpuMat> >& relForwardMotions, vector<pair<GpuMat, GpuMat> >& relBackwardMotions,
int baseIdx, Size size)
{
const int count = static_cast<int>(forwardMotions.size());
relForwardMotions.resize(count);
relForwardMotions[baseIdx].first.create(size, CV_32FC1);
relForwardMotions[baseIdx].first.setTo(Scalar::all(0));
relForwardMotions[baseIdx].second.create(size, CV_32FC1);
relForwardMotions[baseIdx].second.setTo(Scalar::all(0));
relBackwardMotions.resize(count);
relBackwardMotions[baseIdx].first.create(size, CV_32FC1);
relBackwardMotions[baseIdx].first.setTo(Scalar::all(0));
relBackwardMotions[baseIdx].second.create(size, CV_32FC1);
relBackwardMotions[baseIdx].second.setTo(Scalar::all(0));
for (int i = baseIdx - 1; i >= 0; --i)
{
gpu::add(relForwardMotions[i + 1].first, forwardMotions[i].first, relForwardMotions[i].first);
gpu::add(relForwardMotions[i + 1].second, forwardMotions[i].second, relForwardMotions[i].second);
gpu::add(relBackwardMotions[i + 1].first, backwardMotions[i + 1].first, relBackwardMotions[i].first);
gpu::add(relBackwardMotions[i + 1].second, backwardMotions[i + 1].second, relBackwardMotions[i].second);
}
for (int i = baseIdx + 1; i < count; ++i)
{
gpu::add(relForwardMotions[i - 1].first, backwardMotions[i].first, relForwardMotions[i].first);
gpu::add(relForwardMotions[i - 1].second, backwardMotions[i].second, relForwardMotions[i].second);
gpu::add(relBackwardMotions[i - 1].first, forwardMotions[i - 1].first, relBackwardMotions[i].first);
gpu::add(relBackwardMotions[i - 1].second, forwardMotions[i - 1].second, relBackwardMotions[i].second);
}
}
void upscaleMotions(const vector<pair<GpuMat, GpuMat> >& lowResMotions, vector<pair<GpuMat, GpuMat> >& highResMotions, int scale)
{
highResMotions.resize(lowResMotions.size());
for (size_t i = 0; i < lowResMotions.size(); ++i)
{
gpu::resize(lowResMotions[i].first, highResMotions[i].first, Size(), scale, scale, INTER_CUBIC);
gpu::resize(lowResMotions[i].second, highResMotions[i].second, Size(), scale, scale, INTER_CUBIC);
gpu::multiply(highResMotions[i].first, Scalar::all(scale), highResMotions[i].first);
gpu::multiply(highResMotions[i].second, Scalar::all(scale), highResMotions[i].second);
}
}
void buildMotionMaps(const pair<GpuMat, GpuMat>& forwardMotion, const pair<GpuMat, GpuMat>& backwardMotion,
pair<GpuMat, GpuMat>& forwardMap, pair<GpuMat, GpuMat>& backwardMap)
{
forwardMap.first.create(forwardMotion.first.size(), CV_32FC1);
forwardMap.second.create(forwardMotion.first.size(), CV_32FC1);
backwardMap.first.create(forwardMotion.first.size(), CV_32FC1);
backwardMap.second.create(forwardMotion.first.size(), CV_32FC1);
btv_l1_device::buildMotionMaps(forwardMotion.first, forwardMotion.second,
backwardMotion.first, backwardMotion.second,
forwardMap.first, forwardMap.second,
backwardMap.first, backwardMap.second);
}
void upscale(const GpuMat& src, GpuMat& dst, int scale, Stream& stream)
{
typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
static const func_t funcs[] =
{
0, btv_l1_device::upscale<1>, 0, btv_l1_device::upscale<3>, btv_l1_device::upscale<4>
};
CV_Assert( src.channels() == 1 || src.channels() == 3 || src.channels() == 4 );
dst.create(src.rows * scale, src.cols * scale, src.type());
dst.setTo(Scalar::all(0));
const func_t func = funcs[src.channels()];
func(src, dst, scale, StreamAccessor::getStream(stream));
}
void diffSign(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
{
dst.create(src1.size(), src1.type());
btv_l1_device::diffSign(src1.reshape(1), src2.reshape(1), dst.reshape(1), StreamAccessor::getStream(stream));
}
void calcBtvWeights(int btvKernelSize, double alpha, vector<float>& btvWeights)
{
const size_t size = btvKernelSize * btvKernelSize;
btvWeights.resize(size);
const int ksize = (btvKernelSize - 1) / 2;
const float alpha_f = static_cast<float>(alpha);
for (int m = 0, ind = 0; m <= ksize; ++m)
{
for (int l = ksize; l + m >= 0; --l, ++ind)
btvWeights[ind] = pow(alpha_f, std::abs(m) + std::abs(l));
}
btv_l1_device::loadBtvWeights(&btvWeights[0], size);
}
void calcBtvRegularization(const GpuMat& src, GpuMat& dst, int btvKernelSize)
{
typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, int ksize);
static const func_t funcs[] =
{
0,
btv_l1_device::calcBtvRegularization<1>,
0,
btv_l1_device::calcBtvRegularization<3>,
btv_l1_device::calcBtvRegularization<4>
};
dst.create(src.size(), src.type());
dst.setTo(Scalar::all(0));
const int ksize = (btvKernelSize - 1) / 2;
funcs[src.channels()](src, dst, ksize);
}
class BTVL1_GPU_Base
{
public:
BTVL1_GPU_Base();
void process(const vector<GpuMat>& src, GpuMat& dst,
const vector<pair<GpuMat, GpuMat> >& forwardMotions, const vector<pair<GpuMat, GpuMat> >& backwardMotions,
int baseIdx);
void collectGarbage();
protected:
int scale_;
int iterations_;
double lambda_;
double tau_;
double alpha_;
int btvKernelSize_;
int blurKernelSize_;
double blurSigma_;
Ptr<DenseOpticalFlowExt> opticalFlow_;
private:
vector<Ptr<FilterEngine_GPU> > filters_;
int curBlurKernelSize_;
double curBlurSigma_;
int curSrcType_;
vector<float> btvWeights_;
int curBtvKernelSize_;
double curAlpha_;
vector<pair<GpuMat, GpuMat> > lowResForwardMotions_;
vector<pair<GpuMat, GpuMat> > lowResBackwardMotions_;
vector<pair<GpuMat, GpuMat> > highResForwardMotions_;
vector<pair<GpuMat, GpuMat> > highResBackwardMotions_;
vector<pair<GpuMat, GpuMat> > forwardMaps_;
vector<pair<GpuMat, GpuMat> > backwardMaps_;
GpuMat highRes_;
vector<Stream> streams_;
vector<GpuMat> diffTerms_;
vector<GpuMat> a_, b_, c_;
GpuMat regTerm_;
};
BTVL1_GPU_Base::BTVL1_GPU_Base()
{
scale_ = 4;
iterations_ = 180;
lambda_ = 0.03;
tau_ = 1.3;
alpha_ = 0.7;
btvKernelSize_ = 7;
blurKernelSize_ = 5;
blurSigma_ = 0.0;
opticalFlow_ = createOptFlow_Farneback_GPU();
curBlurKernelSize_ = -1;
curBlurSigma_ = -1.0;
curSrcType_ = -1;
curBtvKernelSize_ = -1;
curAlpha_ = -1.0;
}
void BTVL1_GPU_Base::process(const vector<GpuMat>& src, GpuMat& dst,
const vector<pair<GpuMat, GpuMat> >& forwardMotions, const vector<pair<GpuMat, GpuMat> >& backwardMotions,
int baseIdx)
{
CV_Assert( scale_ > 1 );
CV_Assert( iterations_ > 0 );
CV_Assert( tau_ > 0.0 );
CV_Assert( alpha_ > 0.0 );
CV_Assert( btvKernelSize_ > 0 && btvKernelSize_ <= 16 );
CV_Assert( blurKernelSize_ > 0 );
CV_Assert( blurSigma_ >= 0.0 );
// update blur filter and btv weights
if (filters_.size() != src.size() || blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
{
filters_.resize(src.size());
for (size_t i = 0; i < src.size(); ++i)
filters_[i] = createGaussianFilter_GPU(src[0].type(), Size(blurKernelSize_, blurKernelSize_), blurSigma_);
curBlurKernelSize_ = blurKernelSize_;
curBlurSigma_ = blurSigma_;
curSrcType_ = src[0].type();
}
if (btvWeights_.empty() || btvKernelSize_ != curBtvKernelSize_ || alpha_ != curAlpha_)
{
calcBtvWeights(btvKernelSize_, alpha_, btvWeights_);
curBtvKernelSize_ = btvKernelSize_;
curAlpha_ = alpha_;
}
// calc motions between input frames
calcRelativeMotions(forwardMotions, backwardMotions, lowResForwardMotions_, lowResBackwardMotions_, baseIdx, src[0].size());
upscaleMotions(lowResForwardMotions_, highResForwardMotions_, scale_);
upscaleMotions(lowResBackwardMotions_, highResBackwardMotions_, scale_);
forwardMaps_.resize(highResForwardMotions_.size());
backwardMaps_.resize(highResForwardMotions_.size());
for (size_t i = 0; i < highResForwardMotions_.size(); ++i)
buildMotionMaps(highResForwardMotions_[i], highResBackwardMotions_[i], forwardMaps_[i], backwardMaps_[i]);
// initial estimation
const Size lowResSize = src[0].size();
const Size highResSize(lowResSize.width * scale_, lowResSize.height * scale_);
gpu::resize(src[baseIdx], highRes_, highResSize, 0, 0, INTER_CUBIC);
// iterations
streams_.resize(src.size());
diffTerms_.resize(src.size());
a_.resize(src.size());
b_.resize(src.size());
c_.resize(src.size());
for (int i = 0; i < iterations_; ++i)
{
for (size_t k = 0; k < src.size(); ++k)
{
// a = M * Ih
gpu::remap(highRes_, a_[k], backwardMaps_[k].first, backwardMaps_[k].second, INTER_NEAREST, BORDER_REPLICATE, Scalar(), streams_[k]);
// b = HM * Ih
filters_[k]->apply(a_[k], b_[k], Rect(0,0,-1,-1), streams_[k]);
// c = DHM * Ih
gpu::resize(b_[k], c_[k], lowResSize, 0, 0, INTER_NEAREST, streams_[k]);
diffSign(src[k], c_[k], c_[k], streams_[k]);
// a = Dt * diff
upscale(c_[k], a_[k], scale_, streams_[k]);
// b = HtDt * diff
filters_[k]->apply(a_[k], b_[k], Rect(0,0,-1,-1), streams_[k]);
// diffTerm = MtHtDt * diff
gpu::remap(b_[k], diffTerms_[k], forwardMaps_[k].first, forwardMaps_[k].second, INTER_NEAREST, BORDER_REPLICATE, Scalar(), streams_[k]);
}
if (lambda_ > 0)
{
calcBtvRegularization(highRes_, regTerm_, btvKernelSize_);
gpu::addWeighted(highRes_, 1.0, regTerm_, -tau_ * lambda_, 0.0, highRes_);
}
for (size_t k = 0; k < src.size(); ++k)
{
streams_[k].waitForCompletion();
gpu::addWeighted(highRes_, 1.0, diffTerms_[k], tau_, 0.0, highRes_);
}
}
Rect inner(btvKernelSize_, btvKernelSize_, highRes_.cols - 2 * btvKernelSize_, highRes_.rows - 2 * btvKernelSize_);
highRes_(inner).copyTo(dst);
}
void BTVL1_GPU_Base::collectGarbage()
{
filters_.clear();
lowResForwardMotions_.clear();
lowResBackwardMotions_.clear();
highResForwardMotions_.clear();
highResBackwardMotions_.clear();
forwardMaps_.clear();
backwardMaps_.clear();
highRes_.release();
diffTerms_.clear();
a_.clear();
b_.clear();
c_.clear();
regTerm_.release();
}
////////////////////////////////////////////////////////////
class BTVL1_GPU : public SuperResolution, private BTVL1_GPU_Base
{
public:
AlgorithmInfo* info() const;
BTVL1_GPU();
void collectGarbage();
protected:
void initImpl(Ptr<FrameSource>& frameSource);
void processImpl(Ptr<FrameSource>& frameSource, OutputArray output);
private:
int temporalAreaRadius_;
void readNextFrame(Ptr<FrameSource>& frameSource);
void processFrame(int idx);
GpuMat curFrame_;
GpuMat prevFrame_;
vector<GpuMat> frames_;
vector<pair<GpuMat, GpuMat> > forwardMotions_;
vector<pair<GpuMat, GpuMat> > backwardMotions_;
vector<GpuMat> outputs_;
int storePos_;
int procPos_;
int outPos_;
vector<GpuMat> srcFrames_;
vector<pair<GpuMat, GpuMat> > srcForwardMotions_;
vector<pair<GpuMat, GpuMat> > srcBackwardMotions_;
GpuMat finalOutput_;
};
CV_INIT_ALGORITHM(BTVL1_GPU, "SuperResolution.BTVL1_GPU",
obj.info()->addParam(obj, "scale", obj.scale_, false, 0, 0, "Scale factor.");
obj.info()->addParam(obj, "iterations", obj.iterations_, false, 0, 0, "Iteration count.");
obj.info()->addParam(obj, "tau", obj.tau_, false, 0, 0, "Asymptotic value of steepest descent method.");
obj.info()->addParam(obj, "lambda", obj.lambda_, false, 0, 0, "Weight parameter to balance data term and smoothness term.");
obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Parameter of spatial distribution in Bilateral-TV.");
obj.info()->addParam(obj, "btvKernelSize", obj.btvKernelSize_, false, 0, 0, "Kernel size of Bilateral-TV filter.");
obj.info()->addParam(obj, "blurKernelSize", obj.blurKernelSize_, false, 0, 0, "Gaussian blur kernel size.");
obj.info()->addParam(obj, "blurSigma", obj.blurSigma_, false, 0, 0, "Gaussian blur sigma.");
obj.info()->addParam(obj, "temporalAreaRadius", obj.temporalAreaRadius_, false, 0, 0, "Radius of the temporal search area.");
obj.info()->addParam<DenseOpticalFlowExt>(obj, "opticalFlow", obj.opticalFlow_, false, 0, 0, "Dense optical flow algorithm."));
BTVL1_GPU::BTVL1_GPU()
{
temporalAreaRadius_ = 4;
}
void BTVL1_GPU::collectGarbage()
{
curFrame_.release();
prevFrame_.release();
frames_.clear();
forwardMotions_.clear();
backwardMotions_.clear();
outputs_.clear();
srcFrames_.clear();
srcForwardMotions_.clear();
srcBackwardMotions_.clear();
finalOutput_.release();
SuperResolution::collectGarbage();
BTVL1_GPU_Base::collectGarbage();
}
void BTVL1_GPU::initImpl(Ptr<FrameSource>& frameSource)
{
const int cacheSize = 2 * temporalAreaRadius_ + 1;
frames_.resize(cacheSize);
forwardMotions_.resize(cacheSize);
backwardMotions_.resize(cacheSize);
outputs_.resize(cacheSize);
storePos_ = -1;
for (int t = -temporalAreaRadius_; t <= temporalAreaRadius_; ++t)
readNextFrame(frameSource);
for (int i = 0; i <= temporalAreaRadius_; ++i)
processFrame(i);
procPos_ = temporalAreaRadius_;
outPos_ = -1;
}
void BTVL1_GPU::processImpl(Ptr<FrameSource>& frameSource, OutputArray _output)
{
if (outPos_ >= storePos_)
{
_output.release();
return;
}
readNextFrame(frameSource);
if (procPos_ < storePos_)
{
++procPos_;
processFrame(procPos_);
}
++outPos_;
const GpuMat& curOutput = at(outPos_, outputs_);
if (_output.kind() == _InputArray::GPU_MAT)
curOutput.convertTo(_output.getGpuMatRef(), CV_8U);
else
{
curOutput.convertTo(finalOutput_, CV_8U);
arrCopy(finalOutput_, _output);
}
}
void BTVL1_GPU::readNextFrame(Ptr<FrameSource>& frameSource)
{
frameSource->nextFrame(curFrame_);
if (curFrame_.empty())
return;
++storePos_;
curFrame_.convertTo(at(storePos_, frames_), CV_32F);
if (storePos_ > 0)
{
pair<GpuMat, GpuMat>& forwardMotion = at(storePos_ - 1, forwardMotions_);
pair<GpuMat, GpuMat>& backwardMotion = at(storePos_, backwardMotions_);
opticalFlow_->calc(prevFrame_, curFrame_, forwardMotion.first, forwardMotion.second);
opticalFlow_->calc(curFrame_, prevFrame_, backwardMotion.first, backwardMotion.second);
}
curFrame_.copyTo(prevFrame_);
}
void BTVL1_GPU::processFrame(int idx)
{
const int startIdx = max(idx - temporalAreaRadius_, 0);
const int procIdx = idx;
const int endIdx = min(startIdx + 2 * temporalAreaRadius_, storePos_);
const int count = endIdx - startIdx + 1;
srcFrames_.resize(count);
srcForwardMotions_.resize(count);
srcBackwardMotions_.resize(count);
int baseIdx = -1;
for (int i = startIdx, k = 0; i <= endIdx; ++i, ++k)
{
if (i == procIdx)
baseIdx = k;
srcFrames_[k] = at(i, frames_);
if (i < endIdx)
srcForwardMotions_[k] = at(i, forwardMotions_);
if (i > startIdx)
srcBackwardMotions_[k] = at(i, backwardMotions_);
}
process(srcFrames_, at(idx, outputs_), srcForwardMotions_, srcBackwardMotions_, baseIdx);
}
}
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_GPU()
{
return new BTVL1_GPU;
}
#endif // HAVE_CUDA

@@ -0,0 +1,234 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace btv_l1_device
{
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf backwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY);
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, cudaStream_t stream);
void loadBtvWeights(const float* weights, size_t count);
template <int cn> void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize);
}
namespace btv_l1_device
{
__global__ void buildMotionMapsKernel(const PtrStepSzf forwardMotionX, const PtrStepf forwardMotionY,
PtrStepf backwardMotionX, PtrStepf backwardMotionY,
PtrStepf forwardMapX, PtrStepf forwardMapY,
PtrStepf backwardMapX, PtrStepf backwardMapY)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= forwardMotionX.cols || y >= forwardMotionX.rows)
return;
const float fx = forwardMotionX(y, x);
const float fy = forwardMotionY(y, x);
const float bx = backwardMotionX(y, x);
const float by = backwardMotionY(y, x);
forwardMapX(y, x) = x + bx;
forwardMapY(y, x) = y + by;
backwardMapX(y, x) = x + fx;
backwardMapY(y, x) = y + fy;
}
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf backwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY)
{
const dim3 block(32, 8);
const dim3 grid(divUp(forwardMapX.cols, block.x), divUp(forwardMapX.rows, block.y));
buildMotionMapsKernel<<<grid, block>>>(forwardMotionX, forwardMotionY,
backwardMotionX, backwardMotionY,
forwardMapX, forwardMapY,
backwardMapX, backwardMapY);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T>
__global__ void upscaleKernel(const PtrStepSz<T> src, PtrStep<T> dst, const int scale)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= src.cols || y >= src.rows)
return;
dst(y * scale, x * scale) = src(y, x);
}
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream)
{
typedef typename TypeVec<float, cn>::vec_type src_t;
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
upscaleKernel<src_t><<<grid, block, 0, stream>>>((PtrStepSz<src_t>) src, (PtrStepSz<src_t>) dst, scale);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void upscale<1>(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
template void upscale<3>(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
template void upscale<4>(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
__device__ __forceinline__ float diffSign(float a, float b)
{
return a > b ? 1.0f : a < b ? -1.0f : 0.0f;
}
__device__ __forceinline__ float3 diffSign(const float3& a, const float3& b)
{
return make_float3(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f
);
}
__device__ __forceinline__ float4 diffSign(const float4& a, const float4& b)
{
return make_float4(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f,
0.0f
);
}
struct DiffSign : binary_function<float, float, float>
{
__device__ __forceinline__ float operator ()(float a, float b) const
{
return diffSign(a, b);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits<btv_l1_device::DiffSign> : DefaultTransformFunctorTraits<btv_l1_device::DiffSign>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
}}}
namespace btv_l1_device
{
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, cudaStream_t stream)
{
transform(src1, src2, dst, DiffSign(), WithOutMask(), stream);
}
__constant__ float c_btvRegWeights[16*16];
template <typename T>
__global__ void calcBtvRegularizationKernel(const PtrStepSz<T> src, PtrStep<T> dst, const int ksize)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + ksize;
const int y = blockIdx.y * blockDim.y + threadIdx.y + ksize;
if (y >= src.rows - ksize || x >= src.cols - ksize)
return;
const T srcVal = src(y, x);
T dstVal = VecTraits<T>::all(0);
for (int m = 0, count = 0; m <= ksize; ++m)
{
for (int l = ksize; l + m >= 0; --l, ++count)
dstVal = dstVal + c_btvRegWeights[count] * (diffSign(srcVal, src(y + m, x + l)) - diffSign(src(y - m, x - l), srcVal));
}
dst(y, x) = dstVal;
}
void loadBtvWeights(const float* weights, size_t count)
{
cudaSafeCall( cudaMemcpyToSymbol(c_btvRegWeights, weights, count * sizeof(float)) );
}
template <int cn>
void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize)
{
typedef typename TypeVec<float, cn>::vec_type src_t;
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
calcBtvRegularizationKernel<src_t><<<grid, block>>>((PtrStepSz<src_t>) src, (PtrStepSz<src_t>) dst, ksize);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template void calcBtvRegularization<1>(PtrStepSzb src, PtrStepSzb dst, int ksize);
template void calcBtvRegularization<3>(PtrStepSzb src, PtrStepSzb dst, int ksize);
template void calcBtvRegularization<4>(PtrStepSzb src, PtrStepSzb dst, int ksize);
}

@ -0,0 +1,255 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
using namespace cv::superres;
using namespace cv::superres::detail;
cv::superres::FrameSource::~FrameSource()
{
}
//////////////////////////////////////////////////////
// EmptyFrameSource
namespace
{
class EmptyFrameSource : public FrameSource
{
public:
void nextFrame(OutputArray frame);
void reset();
};
void EmptyFrameSource::nextFrame(OutputArray frame)
{
frame.release();
}
void EmptyFrameSource::reset()
{
}
}
Ptr<FrameSource> cv::superres::createFrameSource_Empty()
{
return new EmptyFrameSource;
}
//////////////////////////////////////////////////////
// VideoFrameSource & CameraFrameSource
#ifndef HAVE_OPENCV_HIGHGUI
Ptr<FrameSource> cv::superres::createFrameSource_Video(const string& fileName)
{
(void) fileName;
CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<FrameSource>();
}
Ptr<FrameSource> cv::superres::createFrameSource_Camera(int deviceId)
{
(void) deviceId;
CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<FrameSource>();
}
#else // HAVE_OPENCV_HIGHGUI
namespace
{
class CaptureFrameSource : public FrameSource
{
public:
void nextFrame(OutputArray frame);
protected:
VideoCapture vc_;
private:
Mat frame_;
};
void CaptureFrameSource::nextFrame(OutputArray _frame)
{
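// Grab directly into the caller's Mat when possible; otherwise capture into an intermediate
// Mat and let arrCopy() handle the conversion (e.g. upload to GpuMat or OpenGL objects).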
if (_frame.kind() == _InputArray::MAT)
{
vc_ >> _frame.getMatRef();
}
else
{
vc_ >> frame_;
arrCopy(frame_, _frame);
}
}
class VideoFrameSource : public CaptureFrameSource
{
public:
VideoFrameSource(const string& fileName);
void reset();
private:
string fileName_;
};
VideoFrameSource::VideoFrameSource(const string& fileName) : fileName_(fileName)
{
reset();
}
void VideoFrameSource::reset()
{
vc_.release();
vc_.open(fileName_);
CV_Assert( vc_.isOpened() );
}
class CameraFrameSource : public CaptureFrameSource
{
public:
CameraFrameSource(int deviceId);
void reset();
private:
int deviceId_;
};
CameraFrameSource::CameraFrameSource(int deviceId) : deviceId_(deviceId)
{
reset();
}
void CameraFrameSource::reset()
{
vc_.release();
vc_.open(deviceId_);
CV_Assert( vc_.isOpened() );
}
}
Ptr<FrameSource> cv::superres::createFrameSource_Video(const string& fileName)
{
return new VideoFrameSource(fileName);
}
Ptr<FrameSource> cv::superres::createFrameSource_Camera(int deviceId)
{
return new CameraFrameSource(deviceId);
}
#endif // HAVE_OPENCV_HIGHGUI
//////////////////////////////////////////////////////
// VideoFrameSource_GPU
#ifndef HAVE_OPENCV_GPU
Ptr<FrameSource> cv::superres::createFrameSource_Video_GPU(const string& fileName)
{
(void) fileName;
CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<FrameSource>();
}
#else // HAVE_OPENCV_GPU
namespace
{
class VideoFrameSource_GPU : public FrameSource
{
public:
VideoFrameSource_GPU(const string& fileName);
void nextFrame(OutputArray frame);
void reset();
private:
string fileName_;
VideoReader_GPU reader_;
GpuMat frame_;
};
VideoFrameSource_GPU::VideoFrameSource_GPU(const string& fileName) : fileName_(fileName)
{
reset();
}
void VideoFrameSource_GPU::nextFrame(OutputArray _frame)
{
if (_frame.kind() == _InputArray::GPU_MAT)
{
bool res = reader_.read(_frame.getGpuMatRef());
if (!res)
_frame.release();
}
else
{
bool res = reader_.read(frame_);
if (!res)
_frame.release();
else
arrCopy(frame_, _frame);
}
}
void VideoFrameSource_GPU::reset()
{
reader_.close();
reader_.open(fileName_);
CV_Assert( reader_.isOpened() );
}
}
Ptr<FrameSource> cv::superres::createFrameSource_Video_GPU(const string& fileName)
{
return new VideoFrameSource_GPU(fileName);
}
#endif // HAVE_OPENCV_GPU

@ -0,0 +1,273 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
Mat cv::superres::arrGetMat(InputArray arr, Mat& buf)
{
switch (arr.kind())
{
case _InputArray::GPU_MAT:
arr.getGpuMat().download(buf);
return buf;
case _InputArray::OPENGL_BUFFER:
arr.getOGlBuffer().copyTo(buf);
return buf;
case _InputArray::OPENGL_TEXTURE:
arr.getOGlTexture2D().copyTo(buf);
return buf;
default:
return arr.getMat();
}
}
GpuMat cv::superres::arrGetGpuMat(InputArray arr, GpuMat& buf)
{
switch (arr.kind())
{
case _InputArray::GPU_MAT:
return arr.getGpuMat();
case _InputArray::OPENGL_BUFFER:
arr.getOGlBuffer().copyTo(buf);
return buf;
case _InputArray::OPENGL_TEXTURE:
arr.getOGlTexture2D().copyTo(buf);
return buf;
default:
buf.upload(arr.getMat());
return buf;
}
}
namespace
{
void mat2mat(InputArray src, OutputArray dst)
{
src.getMat().copyTo(dst);
}
void arr2buf(InputArray src, OutputArray dst)
{
dst.getOGlBufferRef().copyFrom(src);
}
void arr2tex(InputArray src, OutputArray dst)
{
dst.getOGlTexture2D().copyFrom(src);
}
void mat2gpu(InputArray src, OutputArray dst)
{
dst.getGpuMatRef().upload(src.getMat());
}
void buf2arr(InputArray src, OutputArray dst)
{
src.getOGlBuffer().copyTo(dst);
}
void tex2arr(InputArray src, OutputArray dst)
{
src.getOGlTexture2D().copyTo(dst);
}
void gpu2mat(InputArray src, OutputArray dst)
{
GpuMat d = src.getGpuMat();
dst.create(d.size(), d.type());
Mat m = dst.getMat();
d.download(m);
}
void gpu2gpu(InputArray src, OutputArray dst)
{
src.getGpuMat().copyTo(dst.getGpuMatRef());
}
}
void cv::superres::arrCopy(InputArray src, OutputArray dst)
{
typedef void (*func_t)(InputArray src, OutputArray dst);
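// Dispatch table indexed by _InputArray kind (kind() >> KIND_SHIFT):
// 0 = NONE, 1..6 = Mat-like kinds, 7 = OPENGL_BUFFER, 8 = OPENGL_TEXTURE, 9 = GPU_MAT.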
static const func_t funcs[10][10] =
{
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
{0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
{0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
{0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
{0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
{0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, arr2tex, mat2gpu},
{0, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr},
{0, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr, tex2arr},
{0, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, arr2buf, arr2tex, gpu2gpu}
};
const int src_kind = src.kind() >> _InputArray::KIND_SHIFT;
const int dst_kind = dst.kind() >> _InputArray::KIND_SHIFT;
CV_DbgAssert( src_kind >= 0 && src_kind < 10 );
CV_DbgAssert( dst_kind >= 0 && dst_kind < 10 );
const func_t func = funcs[src_kind][dst_kind];
CV_DbgAssert( func != 0 );
func(src, dst);
}
namespace
{
void convertToCn(InputArray src, OutputArray dst, int cn)
{
CV_Assert( src.channels() == 1 || src.channels() == 3 || src.channels() == 4 );
CV_Assert( cn == 1 || cn == 3 || cn == 4 );
static const int codes[5][5] =
{
{-1, -1, -1, -1, -1},
{-1, -1, -1, COLOR_GRAY2BGR, COLOR_GRAY2BGRA},
{-1, -1, -1, -1, -1},
{-1, COLOR_BGR2GRAY, -1, -1, COLOR_BGR2BGRA},
{-1, COLOR_BGRA2GRAY, -1, COLOR_BGRA2BGR, -1},
};
const int code = codes[src.channels()][cn];
CV_DbgAssert( code >= 0 );
switch (src.kind())
{
case _InputArray::GPU_MAT:
#ifdef HAVE_OPENCV_GPU
gpu::cvtColor(src.getGpuMat(), dst.getGpuMatRef(), code, cn);
#else
CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
#endif
break;
default:
cvtColor(src, dst, code, cn);
break;
}
}
void convertToDepth(InputArray src, OutputArray dst, int depth)
{
CV_Assert( src.depth() <= CV_64F );
CV_Assert( depth == CV_8U || depth == CV_32F );
static const double maxVals[] =
{
numeric_limits<uchar>::max(),
numeric_limits<schar>::max(),
numeric_limits<ushort>::max(),
numeric_limits<short>::max(),
numeric_limits<int>::max(),
1.0,
1.0,
};
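// Rescale so that the nominal maximum of the source depth maps onto that of the target depth
// (e.g. CV_8U -> CV_32F divides by 255, CV_32F -> CV_8U multiplies by 255).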
const double scale = maxVals[depth] / maxVals[src.depth()];
switch (src.kind())
{
case _InputArray::GPU_MAT:
src.getGpuMat().convertTo(dst.getGpuMatRef(), depth, scale);
break;
default:
src.getMat().convertTo(dst, depth, scale);
break;
}
}
}
Mat cv::superres::convertToType(const Mat& src, int type, Mat& buf0, Mat& buf1)
{
if (src.type() == type)
return src;
const int depth = CV_MAT_DEPTH(type);
const int cn = CV_MAT_CN(type);
if (src.depth() == depth)
{
convertToCn(src, buf0, cn);
return buf0;
}
if (src.channels() == cn)
{
convertToDepth(src, buf1, depth);
return buf1;
}
convertToCn(src, buf0, cn);
convertToDepth(buf0, buf1, depth);
return buf1;
}
GpuMat cv::superres::convertToType(const GpuMat& src, int type, GpuMat& buf0, GpuMat& buf1)
{
if (src.type() == type)
return src;
const int depth = CV_MAT_DEPTH(type);
const int cn = CV_MAT_CN(type);
if (src.depth() == depth)
{
convertToCn(src, buf0, cn);
return buf0;
}
if (src.channels() == cn)
{
convertToDepth(src, buf1, depth);
return buf1;
}
convertToCn(src, buf0, cn);
convertToDepth(buf0, buf1, depth);
return buf1;
}

@ -0,0 +1,63 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_SUPERRES_INPUT_ARRAY_UTILITY_HPP__
#define __OPENCV_SUPERRES_INPUT_ARRAY_UTILITY_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/core/gpumat.hpp"
namespace cv
{
namespace superres
{
CV_EXPORTS Mat arrGetMat(InputArray arr, Mat& buf);
CV_EXPORTS gpu::GpuMat arrGetGpuMat(InputArray arr, gpu::GpuMat& buf);
CV_EXPORTS void arrCopy(InputArray src, OutputArray dst);
CV_EXPORTS Mat convertToType(const Mat& src, int type, Mat& buf0, Mat& buf1);
CV_EXPORTS gpu::GpuMat convertToType(const gpu::GpuMat& src, int type, gpu::GpuMat& buf0, gpu::GpuMat& buf1);
}
}
#endif // __OPENCV_SUPERRES_INPUT_ARRAY_UTILITY_HPP__

@ -0,0 +1,721 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
using namespace cv::superres;
using namespace cv::superres::detail;
///////////////////////////////////////////////////////////////////
// CpuOpticalFlow
namespace
{
class CpuOpticalFlow : public DenseOpticalFlowExt
{
public:
explicit CpuOpticalFlow(int work_type);
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();
protected:
virtual void impl(const Mat& input0, const Mat& input1, OutputArray dst) = 0;
private:
int work_type_;
Mat buf_[6];
Mat flow_;
Mat flows_[2];
};
CpuOpticalFlow::CpuOpticalFlow(int work_type) : work_type_(work_type)
{
}
void CpuOpticalFlow::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
Mat frame0 = arrGetMat(_frame0, buf_[0]);
Mat frame1 = arrGetMat(_frame1, buf_[1]);
CV_Assert( frame1.type() == frame0.type() );
CV_Assert( frame1.size() == frame0.size() );
Mat input0 = convertToType(frame0, work_type_, buf_[2], buf_[3]);
Mat input1 = convertToType(frame1, work_type_, buf_[4], buf_[5]);
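// Fast path: if a single packed flow is requested into a CPU-side container, write it directly;
// otherwise compute into an internal buffer and split/copy it into the requested outputs.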
if (!_flow2.needed() && _flow1.kind() < _InputArray::OPENGL_BUFFER)
{
impl(input0, input1, _flow1);
return;
}
impl(input0, input1, flow_);
if (!_flow2.needed())
{
arrCopy(flow_, _flow1);
}
else
{
split(flow_, flows_);
arrCopy(flows_[0], _flow1);
arrCopy(flows_[1], _flow2);
}
}
void CpuOpticalFlow::collectGarbage()
{
for (int i = 0; i < 6; ++i)
buf_[i].release();
flow_.release();
flows_[0].release();
flows_[1].release();
}
}
///////////////////////////////////////////////////////////////////
// Farneback
namespace
{
class Farneback : public CpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
Farneback();
protected:
void impl(const Mat& input0, const Mat& input1, OutputArray dst);
private:
double pyrScale_;
int numLevels_;
int winSize_;
int numIters_;
int polyN_;
double polySigma_;
int flags_;
};
CV_INIT_ALGORITHM(Farneback, "DenseOpticalFlowExt.Farneback",
obj.info()->addParam(obj, "pyrScale", obj.pyrScale_);
obj.info()->addParam(obj, "numLevels", obj.numLevels_);
obj.info()->addParam(obj, "winSize", obj.winSize_);
obj.info()->addParam(obj, "numIters", obj.numIters_);
obj.info()->addParam(obj, "polyN", obj.polyN_);
obj.info()->addParam(obj, "polySigma", obj.polySigma_);
obj.info()->addParam(obj, "flags", obj.flags_));
Farneback::Farneback() : CpuOpticalFlow(CV_8UC1)
{
pyrScale_ = 0.5;
numLevels_ = 5;
winSize_ = 13;
numIters_ = 10;
polyN_ = 5;
polySigma_ = 1.1;
flags_ = 0;
}
void Farneback::impl(const Mat& input0, const Mat& input1, OutputArray dst)
{
calcOpticalFlowFarneback(input0, input1, dst, pyrScale_, numLevels_, winSize_, numIters_, polyN_, polySigma_, flags_);
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback()
{
return new Farneback;
}
///////////////////////////////////////////////////////////////////
// Simple
namespace
{
class Simple : public CpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
Simple();
protected:
void impl(const Mat& input0, const Mat& input1, OutputArray dst);
private:
int layers_;
int averagingBlockSize_;
int maxFlow_;
double sigmaDist_;
double sigmaColor_;
int postProcessWindow_;
double sigmaDistFix_;
double sigmaColorFix_;
double occThr_;
int upscaleAveragingRadius_;
double upscaleSigmaDist_;
double upscaleSigmaColor_;
double speedUpThr_;
};
CV_INIT_ALGORITHM(Simple, "DenseOpticalFlowExt.Simple",
obj.info()->addParam(obj, "layers", obj.layers_);
obj.info()->addParam(obj, "averagingBlockSize", obj.averagingBlockSize_);
obj.info()->addParam(obj, "maxFlow", obj.maxFlow_);
obj.info()->addParam(obj, "sigmaDist", obj.sigmaDist_);
obj.info()->addParam(obj, "sigmaColor", obj.sigmaColor_);
obj.info()->addParam(obj, "postProcessWindow", obj.postProcessWindow_);
obj.info()->addParam(obj, "sigmaDistFix", obj.sigmaDistFix_);
obj.info()->addParam(obj, "sigmaColorFix", obj.sigmaColorFix_);
obj.info()->addParam(obj, "occThr", obj.occThr_);
obj.info()->addParam(obj, "upscaleAveragingRadius", obj.upscaleAveragingRadius_);
obj.info()->addParam(obj, "upscaleSigmaDist", obj.upscaleSigmaDist_);
obj.info()->addParam(obj, "upscaleSigmaColor", obj.upscaleSigmaColor_);
obj.info()->addParam(obj, "speedUpThr", obj.speedUpThr_));
Simple::Simple() : CpuOpticalFlow(CV_8UC3)
{
layers_ = 3;
averagingBlockSize_ = 2;
maxFlow_ = 4;
sigmaDist_ = 4.1;
sigmaColor_ = 25.5;
postProcessWindow_ = 18;
sigmaDistFix_ = 55.0;
sigmaColorFix_ = 25.5;
occThr_ = 0.35;
upscaleAveragingRadius_ = 18;
upscaleSigmaDist_ = 55.0;
upscaleSigmaColor_ = 25.5;
speedUpThr_ = 10;
}
void Simple::impl(const Mat& _input0, const Mat& _input1, OutputArray dst)
{
Mat input0 = _input0;
Mat input1 = _input1;
calcOpticalFlowSF(input0, input1, dst.getMatRef(),
layers_,
averagingBlockSize_,
maxFlow_,
sigmaDist_,
sigmaColor_,
postProcessWindow_,
sigmaDistFix_,
sigmaColorFix_,
occThr_,
upscaleAveragingRadius_,
upscaleSigmaDist_,
upscaleSigmaColor_,
speedUpThr_);
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Simple()
{
return new Simple;
}
///////////////////////////////////////////////////////////////////
// DualTVL1
namespace
{
class DualTVL1 : public CpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
DualTVL1();
void collectGarbage();
protected:
void impl(const Mat& input0, const Mat& input1, OutputArray dst);
private:
double tau_;
double lambda_;
double theta_;
int nscales_;
int warps_;
double epsilon_;
int iterations_;
bool useInitialFlow_;
Ptr<DenseOpticalFlow> alg_;
};
CV_INIT_ALGORITHM(DualTVL1, "DenseOpticalFlowExt.DualTVL1",
obj.info()->addParam(obj, "tau", obj.tau_);
obj.info()->addParam(obj, "lambda", obj.lambda_);
obj.info()->addParam(obj, "theta", obj.theta_);
obj.info()->addParam(obj, "nscales", obj.nscales_);
obj.info()->addParam(obj, "warps", obj.warps_);
obj.info()->addParam(obj, "epsilon", obj.epsilon_);
obj.info()->addParam(obj, "iterations", obj.iterations_);
obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow_));
DualTVL1::DualTVL1() : CpuOpticalFlow(CV_8UC1)
{
alg_ = cv::createOptFlow_DualTVL1();
tau_ = alg_->getDouble("tau");
lambda_ = alg_->getDouble("lambda");
theta_ = alg_->getDouble("theta");
nscales_ = alg_->getInt("nscales");
warps_ = alg_->getInt("warps");
epsilon_ = alg_->getDouble("epsilon");
iterations_ = alg_->getInt("iterations");
useInitialFlow_ = alg_->getBool("useInitialFlow");
}
void DualTVL1::impl(const Mat& input0, const Mat& input1, OutputArray dst)
{
alg_->set("tau", tau_);
alg_->set("lambda", lambda_);
alg_->set("theta", theta_);
alg_->set("nscales", nscales_);
alg_->set("warps", warps_);
alg_->set("epsilon", epsilon_);
alg_->set("iterations", iterations_);
alg_->set("useInitialFlow", useInitialFlow_);
alg_->calc(input0, input1, dst);
}
void DualTVL1::collectGarbage()
{
alg_->collectGarbage();
CpuOpticalFlow::collectGarbage();
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1()
{
return new DualTVL1;
}
///////////////////////////////////////////////////////////////////
// GpuOpticalFlow
#ifndef HAVE_OPENCV_GPU
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_GPU()
{
CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_GPU()
{
CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_GPU()
{
CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_GPU()
{
CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
}
#else // HAVE_OPENCV_GPU
namespace
{
class GpuOpticalFlow : public DenseOpticalFlowExt
{
public:
explicit GpuOpticalFlow(int work_type);
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();
protected:
virtual void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) = 0;
private:
int work_type_;
GpuMat buf_[6];
GpuMat u_, v_, flow_;
};
GpuOpticalFlow::GpuOpticalFlow(int work_type) : work_type_(work_type)
{
}
void GpuOpticalFlow::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
GpuMat frame0 = arrGetGpuMat(_frame0, buf_[0]);
GpuMat frame1 = arrGetGpuMat(_frame1, buf_[1]);
CV_Assert( frame1.type() == frame0.type() );
CV_Assert( frame1.size() == frame0.size() );
GpuMat input0 = convertToType(frame0, work_type_, buf_[2], buf_[3]);
GpuMat input1 = convertToType(frame1, work_type_, buf_[4], buf_[5]);
if (_flow2.needed() && _flow1.kind() == _InputArray::GPU_MAT && _flow2.kind() == _InputArray::GPU_MAT)
{
impl(input0, input1, _flow1.getGpuMatRef(), _flow2.getGpuMatRef());
return;
}
impl(input0, input1, u_, v_);
if (_flow2.needed())
{
arrCopy(u_, _flow1);
arrCopy(v_, _flow2);
}
else
{
GpuMat src[] = {u_, v_};
merge(src, 2, flow_);
arrCopy(flow_, _flow1);
}
}
void GpuOpticalFlow::collectGarbage()
{
for (int i = 0; i < 6; ++i)
buf_[i].release();
u_.release();
v_.release();
flow_.release();
}
}
///////////////////////////////////////////////////////////////////
// Brox_GPU
namespace
{
class Brox_GPU : public GpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
Brox_GPU();
void collectGarbage();
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
private:
double alpha_;
double gamma_;
double scaleFactor_;
int innerIterations_;
int outerIterations_;
int solverIterations_;
BroxOpticalFlow alg_;
};
CV_INIT_ALGORITHM(Brox_GPU, "DenseOpticalFlowExt.Brox_GPU",
obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Flow smoothness");
obj.info()->addParam(obj, "gamma", obj.gamma_, false, 0, 0, "Gradient constancy importance");
obj.info()->addParam(obj, "scaleFactor", obj.scaleFactor_, false, 0, 0, "Pyramid scale factor");
obj.info()->addParam(obj, "innerIterations", obj.innerIterations_, false, 0, 0, "Number of lagged non-linearity iterations (inner loop)");
obj.info()->addParam(obj, "outerIterations", obj.outerIterations_, false, 0, 0, "Number of warping iterations (number of pyramid levels)");
obj.info()->addParam(obj, "solverIterations", obj.solverIterations_, false, 0, 0, "Number of linear system solver iterations"));
Brox_GPU::Brox_GPU() : GpuOpticalFlow(CV_32FC1), alg_(0.197f, 50.0f, 0.8f, 10, 77, 10)
{
alpha_ = alg_.alpha;
gamma_ = alg_.gamma;
scaleFactor_ = alg_.scale_factor;
innerIterations_ = alg_.inner_iterations;
outerIterations_ = alg_.outer_iterations;
solverIterations_ = alg_.solver_iterations;
}
void Brox_GPU::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_.alpha = static_cast<float>(alpha_);
alg_.gamma = static_cast<float>(gamma_);
alg_.scale_factor = static_cast<float>(scaleFactor_);
alg_.inner_iterations = innerIterations_;
alg_.outer_iterations = outerIterations_;
alg_.solver_iterations = solverIterations_;
alg_(input0, input1, dst1, dst2);
}
void Brox_GPU::collectGarbage()
{
alg_.buf.release();
GpuOpticalFlow::collectGarbage();
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_GPU()
{
return new Brox_GPU;
}
///////////////////////////////////////////////////////////////////
// PyrLK_GPU
namespace
{
class PyrLK_GPU : public GpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
PyrLK_GPU();
void collectGarbage();
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
private:
int winSize_;
int maxLevel_;
int iterations_;
PyrLKOpticalFlow alg_;
};
CV_INIT_ALGORITHM(PyrLK_GPU, "DenseOpticalFlowExt.PyrLK_GPU",
obj.info()->addParam(obj, "winSize", obj.winSize_);
obj.info()->addParam(obj, "maxLevel", obj.maxLevel_);
obj.info()->addParam(obj, "iterations", obj.iterations_));
PyrLK_GPU::PyrLK_GPU() : GpuOpticalFlow(CV_8UC1)
{
winSize_ = alg_.winSize.width;
maxLevel_ = alg_.maxLevel;
iterations_ = alg_.iters;
}
void PyrLK_GPU::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_.winSize.width = winSize_;
alg_.winSize.height = winSize_;
alg_.maxLevel = maxLevel_;
alg_.iters = iterations_;
alg_.dense(input0, input1, dst1, dst2);
}
void PyrLK_GPU::collectGarbage()
{
alg_.releaseMemory();
GpuOpticalFlow::collectGarbage();
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_GPU()
{
return new PyrLK_GPU;
}
///////////////////////////////////////////////////////////////////
// Farneback_GPU
namespace
{
class Farneback_GPU : public GpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
Farneback_GPU();
void collectGarbage();
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
private:
double pyrScale_;
int numLevels_;
int winSize_;
int numIters_;
int polyN_;
double polySigma_;
int flags_;
FarnebackOpticalFlow alg_;
};
CV_INIT_ALGORITHM(Farneback_GPU, "DenseOpticalFlowExt.Farneback_GPU",
obj.info()->addParam(obj, "pyrScale", obj.pyrScale_);
obj.info()->addParam(obj, "numLevels", obj.numLevels_);
obj.info()->addParam(obj, "winSize", obj.winSize_);
obj.info()->addParam(obj, "numIters", obj.numIters_);
obj.info()->addParam(obj, "polyN", obj.polyN_);
obj.info()->addParam(obj, "polySigma", obj.polySigma_);
obj.info()->addParam(obj, "flags", obj.flags_));
Farneback_GPU::Farneback_GPU() : GpuOpticalFlow(CV_8UC1)
{
pyrScale_ = alg_.pyrScale;
numLevels_ = alg_.numLevels;
winSize_ = alg_.winSize;
numIters_ = alg_.numIters;
polyN_ = alg_.polyN;
polySigma_ = alg_.polySigma;
flags_ = alg_.flags;
}
void Farneback_GPU::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_.pyrScale = pyrScale_;
alg_.numLevels = numLevels_;
alg_.winSize = winSize_;
alg_.numIters = numIters_;
alg_.polyN = polyN_;
alg_.polySigma = polySigma_;
alg_.flags = flags_;
alg_(input0, input1, dst1, dst2);
}
void Farneback_GPU::collectGarbage()
{
alg_.releaseMemory();
GpuOpticalFlow::collectGarbage();
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_GPU()
{
return new Farneback_GPU;
}
///////////////////////////////////////////////////////////////////
// DualTVL1_GPU
namespace
{
class DualTVL1_GPU : public GpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
DualTVL1_GPU();
void collectGarbage();
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
private:
double tau_;
double lambda_;
double theta_;
int nscales_;
int warps_;
double epsilon_;
int iterations_;
bool useInitialFlow_;
OpticalFlowDual_TVL1_GPU alg_;
};
CV_INIT_ALGORITHM(DualTVL1_GPU, "DenseOpticalFlowExt.DualTVL1_GPU",
obj.info()->addParam(obj, "tau", obj.tau_);
obj.info()->addParam(obj, "lambda", obj.lambda_);
obj.info()->addParam(obj, "theta", obj.theta_);
obj.info()->addParam(obj, "nscales", obj.nscales_);
obj.info()->addParam(obj, "warps", obj.warps_);
obj.info()->addParam(obj, "epsilon", obj.epsilon_);
obj.info()->addParam(obj, "iterations", obj.iterations_);
obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow_));
DualTVL1_GPU::DualTVL1_GPU() : GpuOpticalFlow(CV_8UC1)
{
tau_ = alg_.tau;
lambda_ = alg_.lambda;
theta_ = alg_.theta;
nscales_ = alg_.nscales;
warps_ = alg_.warps;
epsilon_ = alg_.epsilon;
iterations_ = alg_.iterations;
useInitialFlow_ = alg_.useInitialFlow;
}
void DualTVL1_GPU::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_.tau = tau_;
alg_.lambda = lambda_;
alg_.theta = theta_;
alg_.nscales = nscales_;
alg_.warps = warps_;
alg_.epsilon = epsilon_;
alg_.iterations = iterations_;
alg_.useInitialFlow = useInitialFlow_;
alg_(input0, input1, dst1, dst2);
}
void DualTVL1_GPU::collectGarbage()
{
alg_.collectGarbage();
GpuOpticalFlow::collectGarbage();
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_GPU()
{
return new DualTVL1_GPU;
}
#endif // HAVE_OPENCV_GPU

@ -0,0 +1,43 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"

@ -0,0 +1,78 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include <vector>
#include <limits>
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
#include "opencv2/opencv_modules.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/core/gpumat.hpp"
#include "opencv2/core/opengl_interop.hpp"
#include "opencv2/core/internal.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/tracking.hpp"
#ifdef HAVE_OPENCV_GPU
#include "opencv2/gpu/gpu.hpp"
#ifdef HAVE_CUDA
#include "opencv2/gpu/stream_accessor.hpp"
#endif
#endif
#ifdef HAVE_OPENCV_HIGHGUI
#include "opencv2/highgui/highgui.hpp"
#endif
#include "opencv2/superres/superres.hpp"
#include "opencv2/superres/optical_flow.hpp"
#include "input_array_utility.hpp"
#include "ring_buffer.hpp"
#endif /* __OPENCV_PRECOMP_H__ */

@ -0,0 +1,79 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __RING_BUFFER_HPP__
#define __RING_BUFFER_HPP__
#include "precomp.hpp"
namespace cv
{
namespace superres
{
namespace detail
{
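// Circular indexing helpers: out-of-range indices wrap around,
// e.g. at(-1, v) returns v.back() and at(v.size(), v) returns v.front().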
template <typename T, class A>
inline const T& at(int index, const std::vector<T, A>& items)
{
const int len = static_cast<int>(items.size());
if (index < 0)
index -= ((index - len + 1) / len) * len;
if (index >= len)
index %= len;
return items[index];
}
template <typename T, class A>
inline T& at(int index, std::vector<T, A>& items)
{
const int len = static_cast<int>(items.size());
if (index < 0)
index -= ((index - len + 1) / len) * len;
if (index >= len)
index %= len;
return items[index];
}
}
}
}
#endif // __RING_BUFFER_HPP__

@ -0,0 +1,85 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace std;
using namespace cv;
using namespace cv::superres;
bool cv::superres::initModule_superres()
{
return !createSuperResolution_BTVL1().empty();
}
cv::superres::SuperResolution::SuperResolution()
{
frameSource_ = createFrameSource_Empty();
firstCall_ = true;
}
void cv::superres::SuperResolution::setInput(const Ptr<FrameSource>& frameSource)
{
frameSource_ = frameSource;
firstCall_ = true;
}
void cv::superres::SuperResolution::nextFrame(OutputArray frame)
{
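// Initialization is deferred to the first nextFrame() call, presumably so that parameters
// changed after setInput() are still picked up by initImpl().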
if (firstCall_)
{
initImpl(frameSource_);
firstCall_ = false;
}
processImpl(frameSource_, frame);
}
void cv::superres::SuperResolution::reset()
{
frameSource_->reset();
firstCall_ = true;
}
void cv::superres::SuperResolution::collectGarbage()
{
}

@ -0,0 +1,3 @@
#include "test_precomp.hpp"
CV_TEST_MAIN("superres")

@ -0,0 +1 @@
#include "test_precomp.hpp"

@ -0,0 +1,23 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
#include "opencv2/opencv_modules.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/ts/ts.hpp"
#include "opencv2/superres/superres.hpp"
#include "input_array_utility.hpp"
#endif

@ -0,0 +1,236 @@
#include "test_precomp.hpp"
class AllignedFrameSource : public cv::superres::FrameSource
{
public:
AllignedFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale);
void nextFrame(cv::OutputArray frame);
void reset();
private:
cv::Ptr<cv::superres::FrameSource> base_;
cv::Mat origFrame_;
int scale_;
};
AllignedFrameSource::AllignedFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale) :
base_(base), scale_(scale)
{
CV_Assert( !base_.empty() );
}
void AllignedFrameSource::nextFrame(cv::OutputArray frame)
{
base_->nextFrame(origFrame_);
if (origFrame_.rows % scale_ == 0 && origFrame_.cols % scale_ == 0)
{
cv::superres::arrCopy(origFrame_, frame);
}
else
{
cv::Rect ROI(0, 0, (origFrame_.cols / scale_) * scale_, (origFrame_.rows / scale_) * scale_);
cv::superres::arrCopy(origFrame_(ROI), frame);
}
}
void AllignedFrameSource::reset()
{
base_->reset();
}
class DegradeFrameSource : public cv::superres::FrameSource
{
public:
DegradeFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale);
void nextFrame(cv::OutputArray frame);
void reset();
private:
cv::Ptr<cv::superres::FrameSource> base_;
cv::Mat origFrame_;
cv::Mat blurred_;
cv::Mat deg_;
double iscale_;
};
DegradeFrameSource::DegradeFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale) :
base_(base), iscale_(1.0 / scale)
{
CV_Assert( !base_.empty() );
}
void addGaussNoise(cv::Mat& image, double sigma)
{
cv::Mat noise(image.size(), CV_32FC(image.channels()));
cvtest::TS::ptr()->get_rng().fill(noise, cv::RNG::NORMAL, 0.0, sigma);
cv::addWeighted(image, 1.0, noise, 1.0, 0.0, image, image.depth());
}
void addSpikeNoise(cv::Mat& image, int frequency)
{
cv::Mat_<uchar> mask(image.size(), 0);
for (int y = 0; y < mask.rows; ++y)
{
for (int x = 0; x < mask.cols; ++x)
{
if (cvtest::TS::ptr()->get_rng().uniform(0, frequency) < 1)
mask(y, x) = 255;
}
}
image.setTo(cv::Scalar::all(255), mask);
}
void DegradeFrameSource::nextFrame(cv::OutputArray frame)
{
base_->nextFrame(origFrame_);
cv::GaussianBlur(origFrame_, blurred_, cv::Size(5, 5), 0);
cv::resize(blurred_, deg_, cv::Size(), iscale_, iscale_, cv::INTER_NEAREST);
addGaussNoise(deg_, 10.0);
addSpikeNoise(deg_, 500);
cv::superres::arrCopy(deg_, frame);
}
void DegradeFrameSource::reset()
{
base_->reset();
}
double MSSIM(const cv::Mat& i1, const cv::Mat& i2)
{
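// Standard SSIM stabilization constants for 8-bit data: C1 = (0.01*255)^2, C2 = (0.03*255)^2.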
const double C1 = 6.5025;
const double C2 = 58.5225;
const int depth = CV_32F;
cv::Mat I1, I2;
i1.convertTo(I1, depth);
i2.convertTo(I2, depth);
cv::Mat I2_2 = I2.mul(I2); // I2^2
cv::Mat I1_2 = I1.mul(I1); // I1^2
cv::Mat I1_I2 = I1.mul(I2); // I1 * I2
cv::Mat mu1, mu2;
cv::GaussianBlur(I1, mu1, cv::Size(11, 11), 1.5);
cv::GaussianBlur(I2, mu2, cv::Size(11, 11), 1.5);
cv::Mat mu1_2 = mu1.mul(mu1);
cv::Mat mu2_2 = mu2.mul(mu2);
cv::Mat mu1_mu2 = mu1.mul(mu2);
cv::Mat sigma1_2, sigma2_2, sigma12;
cv::GaussianBlur(I1_2, sigma1_2, cv::Size(11, 11), 1.5);
sigma1_2 -= mu1_2;
cv::GaussianBlur(I2_2, sigma2_2, cv::Size(11, 11), 1.5);
sigma2_2 -= mu2_2;
cv::GaussianBlur(I1_I2, sigma12, cv::Size(11, 11), 1.5);
sigma12 -= mu1_mu2;
cv::Mat t1, t2;
cv::Mat numerator;
cv::Mat denominator;
// t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
t1 = 2 * mu1_mu2 + C1;
t2 = 2 * sigma12 + C2;
numerator = t1.mul(t2);
// t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
t1 = mu1_2 + mu2_2 + C1;
t2 = sigma1_2 + sigma2_2 + C2;
denominator = t1.mul(t2);
// ssim_map = numerator./denominator;
cv::Mat ssim_map;
cv::divide(numerator, denominator, ssim_map);
// mssim = average of ssim map
cv::Scalar mssim = cv::mean(ssim_map);
if (i1.channels() == 1)
return mssim[0];
return (mssim[0] + mssim[1] + mssim[2]) / 3;
}
class SuperResolution : public testing::Test
{
public:
void RunTest(cv::Ptr<cv::superres::SuperResolution> superRes);
};
void SuperResolution::RunTest(cv::Ptr<cv::superres::SuperResolution> superRes)
{
const std::string inputVideoName = cvtest::TS::ptr()->get_data_path() + "car.avi";
const int scale = 2;
const int iterations = 100;
const int temporalAreaRadius = 2;
ASSERT_FALSE( superRes.empty() );
const int btvKernelSize = superRes->getInt("btvKernelSize");
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
cv::Ptr<cv::superres::FrameSource> goldSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale));
cv::Ptr<cv::superres::FrameSource> lowResSource(new DegradeFrameSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale), scale));
// skip first frame
cv::Mat frame;
lowResSource->nextFrame(frame);
goldSource->nextFrame(frame);
cv::Rect inner(btvKernelSize, btvKernelSize, frame.cols - 2 * btvKernelSize, frame.rows - 2 * btvKernelSize);
superRes->setInput(lowResSource);
double srAvgMSSIM = 0.0;
const int count = 10;
cv::Mat goldFrame, superResFrame;
for (int i = 0; i < count; ++i)
{
goldSource->nextFrame(goldFrame);
ASSERT_FALSE( goldFrame.empty() );
superRes->nextFrame(superResFrame);
ASSERT_FALSE( superResFrame.empty() );
const double srMSSIM = MSSIM(goldFrame(inner), superResFrame);
srAvgMSSIM += srMSSIM;
}
srAvgMSSIM /= count;
EXPECT_GE( srAvgMSSIM, 0.5 );
}
TEST_F(SuperResolution, BTVL1)
{
RunTest(cv::superres::createSuperResolution_BTVL1());
}
#if defined(HAVE_OPENCV_GPU) && defined(HAVE_CUDA)
TEST_F(SuperResolution, BTVL1_GPU)
{
RunTest(cv::superres::createSuperResolution_BTVL1_GPU());
}
#endif

@ -1,7 +1,7 @@
SET(OPENCV_GPU_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui
opencv_ml opencv_video opencv_objdetect opencv_features2d
opencv_calib3d opencv_legacy opencv_contrib opencv_gpu
opencv_nonfree)
opencv_nonfree opencv_superres)
ocv_check_dependencies(${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})

@ -0,0 +1,152 @@
#include <iostream>
#include <iomanip>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/superres/superres.hpp"
#include "opencv2/superres/optical_flow.hpp"
using namespace std;
using namespace cv;
using namespace cv::superres;
#define MEASURE_TIME(op) \
{ \
TickMeter tm; \
tm.start(); \
op; \
tm.stop(); \
cout << tm.getTimeSec() << " sec" << endl; \
}
static Ptr<DenseOpticalFlowExt> createOptFlow(const string& name, bool useGpu)
{
if (name == "farneback")
{
if (useGpu)
return createOptFlow_Farneback_GPU();
else
return createOptFlow_Farneback();
}
else if (name == "simple")
return createOptFlow_Simple();
else if (name == "tvl1")
{
if (useGpu)
return createOptFlow_DualTVL1_GPU();
else
return createOptFlow_DualTVL1();
}
else if (name == "brox")
return createOptFlow_Brox_GPU();
else if (name == "pyrlk")
return createOptFlow_PyrLK_GPU();
else
{
cerr << "Incorrect Optical Flow algorithm - " << name << endl;
exit(-1);
}
return Ptr<DenseOpticalFlowExt>();
}
int main(int argc, const char* argv[])
{
CommandLineParser cmd(argc, argv,
"{ v | video | | Input video }"
"{ o | output | | Output video }"
"{ s | scale | 4 | Scale factor }"
"{ i | iterations | 180 | Iteration count }"
"{ t | temporal | 4 | Radius of the temporal search area }"
"{ f | flow | farneback | Optical flow algorithm (farneback, simple, tvl1, brox, pyrlk) }"
"{ gpu | gpu | false | Use GPU }"
"{ h | help | false | Print help message }"
);
if (cmd.get<bool>("help"))
{
cout << "This sample demonstrates Super Resolution algorithms for video sequence" << endl;
cmd.printParams();
return 0;
}
const string inputVideoName = cmd.get<string>("video");
const string outputVideoName = cmd.get<string>("output");
const int scale = cmd.get<int>("scale");
const int iterations = cmd.get<int>("iterations");
const int temporalAreaRadius = cmd.get<int>("temporal");
const string optFlow = cmd.get<string>("flow");
const bool useGpu = cmd.get<bool>("gpu");
Ptr<SuperResolution> superRes;
if (useGpu)
superRes = createSuperResolution_BTVL1_GPU();
else
superRes = createSuperResolution_BTVL1();
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->set("opticalFlow", createOptFlow(optFlow, useGpu));
Ptr<FrameSource> frameSource;
if (useGpu)
{
// Try to use gpu Video Decoding
try
{
frameSource = createFrameSource_Video_GPU(inputVideoName);
Mat frame;
frameSource->nextFrame(frame);
}
catch (const cv::Exception&)
{
frameSource.release();
}
}
if (frameSource.empty())
frameSource = createFrameSource_Video(inputVideoName);
// skip first frame, it is usually corrupted
{
Mat frame;
frameSource->nextFrame(frame);
cout << "Input : " << inputVideoName << " " << frame.size() << endl;
cout << "Scale factor : " << scale << endl;
cout << "Iterations : " << iterations << endl;
cout << "Temporal radius : " << temporalAreaRadius << endl;
cout << "Optical Flow : " << optFlow << endl;
cout << "Mode : " << (useGpu ? "GPU" : "CPU") << endl;
}
superRes->setInput(frameSource);
VideoWriter writer;
for (int i = 0;; ++i)
{
cout << '[' << setw(3) << i << "] : ";
Mat result;
MEASURE_TIME(superRes->nextFrame(result));
if (result.empty())
break;
imshow("Super Resolution", result);
if (waitKey(1000) > 0)
break;
if (!outputVideoName.empty())
{
if (!writer.isOpened())
writer.open(outputVideoName, CV_FOURCC('X', 'V', 'I', 'D'), 25.0, result.size());
writer << result;
}
}
return 0;
}