commit e13e3be16e: 74 changed files with 2974 additions and 796 deletions
@ -0,0 +1,4 @@
downloads/
macosx/
linux/
windows/
@ -0,0 +1,100 @@
#
# This script downloads the ICV package.
#
# On return this will define:
#  OPENCV_ICV_PATH - path to the unpacked downloaded package
#

function(_icv_downloader)
  # Define actual ICV versions
  if(APPLE)
    set(OPENCV_ICV_PACKAGE_NAME "ippicv_macosx.tar.gz")
    set(OPENCV_ICV_PACKAGE_HASH "d489e447906de7808a9a9d7e3f225f7a")
    set(OPENCV_ICV_PLATFORM "macosx")
  elseif(UNIX AND NOT ANDROID)
    set(OPENCV_ICV_PACKAGE_NAME "ippicv_linux.tar.gz")
    set(OPENCV_ICV_PACKAGE_HASH "42798c6cd6348bd40e74c425dc23338a")
    set(OPENCV_ICV_PLATFORM "linux")
  elseif(WIN32 AND NOT ARM)
    set(OPENCV_ICV_PACKAGE_NAME "ippicv_windows.zip")
    set(OPENCV_ICV_PACKAGE_HASH "2715f39ae65dc09bae3648bffe538706")
    set(OPENCV_ICV_PLATFORM "windows")
  else()
    return() # Not supported
  endif()

  set(OPENCV_ICV_PATH "${CMAKE_CURRENT_LIST_DIR}/${OPENCV_ICV_PLATFORM}")

  if(DEFINED OPENCV_ICV_PACKAGE_DOWNLOADED
      AND OPENCV_ICV_PACKAGE_DOWNLOADED STREQUAL OPENCV_ICV_PACKAGE_HASH
      AND EXISTS ${OPENCV_ICV_PATH})
    # Package has been downloaded and checked by a previous build
    set(OPENCV_ICV_PATH "${OPENCV_ICV_PATH}" PARENT_SCOPE)
    return()
  else()
    if(EXISTS ${OPENCV_ICV_PATH})
      message(STATUS "ICV: Removing previously unpacked package: ${OPENCV_ICV_PATH}")
      file(REMOVE_RECURSE ${OPENCV_ICV_PATH})
    endif()
  endif()
  unset(OPENCV_ICV_PACKAGE_DOWNLOADED CACHE)

  set(OPENCV_ICV_PACKAGE_ARCHIVE "${CMAKE_CURRENT_LIST_DIR}/downloads/${OPENCV_ICV_PLATFORM}-${OPENCV_ICV_PACKAGE_HASH}/${OPENCV_ICV_PACKAGE_NAME}")
  get_filename_component(OPENCV_ICV_PACKAGE_ARCHIVE_DIR "${OPENCV_ICV_PACKAGE_ARCHIVE}" PATH)
  if(EXISTS "${OPENCV_ICV_PACKAGE_ARCHIVE}")
    file(MD5 "${OPENCV_ICV_PACKAGE_ARCHIVE}" archive_md5)
    if(NOT archive_md5 STREQUAL OPENCV_ICV_PACKAGE_HASH)
      message(WARNING "ICV: Local copy of the ICV package has an invalid MD5 hash: ${archive_md5} (expected: ${OPENCV_ICV_PACKAGE_HASH})")
      file(REMOVE "${OPENCV_ICV_PACKAGE_ARCHIVE}")
      file(REMOVE_RECURSE "${OPENCV_ICV_PACKAGE_ARCHIVE_DIR}")
    endif()
  endif()

  if(NOT EXISTS "${OPENCV_ICV_PACKAGE_ARCHIVE}")
    if(NOT DEFINED OPENCV_ICV_URL)
      if(NOT DEFINED ENV{OPENCV_ICV_URL})
        # TODO Specify default URL after ICV publishing
        message(STATUS "ICV: download URL is not specified, skipping download")
        return()
      endif()
      set(OPENCV_ICV_URL $ENV{OPENCV_ICV_URL})
    endif()

    file(MAKE_DIRECTORY ${OPENCV_ICV_PACKAGE_ARCHIVE_DIR})
    message(STATUS "ICV: Downloading ${OPENCV_ICV_PACKAGE_NAME}...")
    file(DOWNLOAD "${OPENCV_ICV_URL}/${OPENCV_ICV_PACKAGE_NAME}" "${OPENCV_ICV_PACKAGE_ARCHIVE}"
         TIMEOUT 600 STATUS __status
         EXPECTED_MD5 ${OPENCV_ICV_PACKAGE_HASH})
    if(NOT __status EQUAL 0)
      message(FATAL_ERROR "ICV: Failed to download ICV package: ${OPENCV_ICV_PACKAGE_NAME}. Status=${__status}")
    else()
      # Don't remove this check: the EXPECTED_MD5 parameter does not make the "file(DOWNLOAD)"
      # step fail on a wrong hash
      file(MD5 "${OPENCV_ICV_PACKAGE_ARCHIVE}" archive_md5)
      if(NOT archive_md5 STREQUAL OPENCV_ICV_PACKAGE_HASH)
        message(FATAL_ERROR "ICV: Downloaded copy of the ICV package has an invalid MD5 hash: ${archive_md5} (expected: ${OPENCV_ICV_PACKAGE_HASH})")
      endif()
    endif()
  endif()

  ocv_assert(EXISTS "${OPENCV_ICV_PACKAGE_ARCHIVE}")
  ocv_assert(NOT EXISTS "${OPENCV_ICV_PATH}")
  file(MAKE_DIRECTORY ${OPENCV_ICV_PATH})
  ocv_assert(EXISTS "${OPENCV_ICV_PATH}")

  message(STATUS "ICV: Unpacking ${OPENCV_ICV_PACKAGE_NAME} to ${OPENCV_ICV_PATH}...")
  execute_process(COMMAND ${CMAKE_COMMAND} -E tar xz "${OPENCV_ICV_PACKAGE_ARCHIVE}"
                  WORKING_DIRECTORY "${OPENCV_ICV_PATH}"
                  RESULT_VARIABLE __result)

  if(NOT __result EQUAL 0)
    message(FATAL_ERROR "ICV: Failed to unpack ICV package from ${OPENCV_ICV_PACKAGE_ARCHIVE} to ${OPENCV_ICV_PATH} with error ${__result}")
  endif()

  set(OPENCV_ICV_PACKAGE_DOWNLOADED "${OPENCV_ICV_PACKAGE_HASH}" CACHE INTERNAL "ICV package hash")

  message(STATUS "ICV: Package successfully downloaded")
  set(OPENCV_ICV_PATH "${OPENCV_ICV_PATH}" PARENT_SCOPE)
endfunction()

_icv_downloader()
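# Usage note (illustrative, not part of the script above): set the OPENCV_ICV_URL environment
# variable, or a CMake variable of the same name, to the base URL hosting the platform package
# before configuring. On success the parent scope receives OPENCV_ICV_PATH pointing at the
# unpacked package, and the verified hash is cached in OPENCV_ICV_PACKAGE_DOWNLOADED so the
# download is skipped on the next build.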
@ -0,0 +1,144 @@ |
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2014, Itseez, Inc, all rights reserved.
|
||||
|
||||
#include "perf_precomp.hpp" |
||||
#include "opencv2/ts/ocl_perf.hpp" |
||||
|
||||
using namespace cv; |
||||
using namespace perf; |
||||
using namespace cvtest::ocl; |
||||
using namespace std; |
||||
using namespace std::tr1; |
||||
|
||||
#define SURF_MATCH_CONFIDENCE 0.65f |
||||
#define ORB_MATCH_CONFIDENCE 0.3f |
||||
#define WORK_MEGAPIX 0.6 |
||||
|
||||
typedef TestBaseWithParam<string> stitch; |
||||
|
||||
#ifdef HAVE_OPENCV_NONFREE_TODO_FIND_WHY_SURF_IS_NOT_ABLE_TO_STITCH_PANOS |
||||
#define TEST_DETECTORS testing::Values("surf", "orb") |
||||
#else |
||||
#define TEST_DETECTORS testing::Values<string>("orb") |
||||
#endif |
||||
|
||||
OCL_PERF_TEST_P(stitch, a123, TEST_DETECTORS) |
||||
{ |
||||
UMat pano; |
||||
|
||||
vector<Mat> _imgs; |
||||
_imgs.push_back( imread( getDataPath("stitching/a1.png") ) ); |
||||
_imgs.push_back( imread( getDataPath("stitching/a2.png") ) ); |
||||
_imgs.push_back( imread( getDataPath("stitching/a3.png") ) ); |
||||
vector<UMat> imgs = ToUMat(_imgs); |
||||
|
||||
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb" |
||||
? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder()) |
||||
: Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder()); |
||||
|
||||
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb" |
||||
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE) |
||||
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE); |
||||
|
||||
declare.iterations(20); |
||||
|
||||
while(next()) |
||||
{ |
||||
Stitcher stitcher = Stitcher::createDefault(); |
||||
stitcher.setFeaturesFinder(featuresFinder); |
||||
stitcher.setFeaturesMatcher(featuresMatcher); |
||||
stitcher.setWarper(makePtr<SphericalWarper>()); |
||||
stitcher.setRegistrationResol(WORK_MEGAPIX); |
||||
|
||||
startTimer(); |
||||
stitcher.stitch(imgs, pano); |
||||
stopTimer(); |
||||
} |
||||
|
||||
EXPECT_NEAR(pano.size().width, 1182, 50); |
||||
EXPECT_NEAR(pano.size().height, 682, 30); |
||||
|
||||
SANITY_CHECK_NOTHING(); |
||||
} |
||||
|
||||
OCL_PERF_TEST_P(stitch, b12, TEST_DETECTORS) |
||||
{ |
||||
UMat pano; |
||||
|
||||
vector<Mat> imgs; |
||||
imgs.push_back( imread( getDataPath("stitching/b1.png") ) ); |
||||
imgs.push_back( imread( getDataPath("stitching/b2.png") ) ); |
||||
|
||||
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb" |
||||
? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder()) |
||||
: Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder()); |
||||
|
||||
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb" |
||||
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE) |
||||
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE); |
||||
|
||||
declare.iterations(20); |
||||
|
||||
while(next()) |
||||
{ |
||||
Stitcher stitcher = Stitcher::createDefault(); |
||||
stitcher.setFeaturesFinder(featuresFinder); |
||||
stitcher.setFeaturesMatcher(featuresMatcher); |
||||
stitcher.setWarper(makePtr<SphericalWarper>()); |
||||
stitcher.setRegistrationResol(WORK_MEGAPIX); |
||||
|
||||
startTimer(); |
||||
stitcher.stitch(imgs, pano); |
||||
stopTimer(); |
||||
} |
||||
|
||||
EXPECT_NEAR(pano.size().width, 1124, 50); |
||||
EXPECT_NEAR(pano.size().height, 644, 30); |
||||
|
||||
SANITY_CHECK_NOTHING(); |
||||
} |
||||
|
||||
OCL_PERF_TEST_P(stitch, boat, TEST_DETECTORS) |
||||
{ |
||||
UMat pano; |
||||
|
||||
vector<Mat> _imgs; |
||||
_imgs.push_back( imread( getDataPath("stitching/boat1.jpg") ) ); |
||||
_imgs.push_back( imread( getDataPath("stitching/boat2.jpg") ) ); |
||||
_imgs.push_back( imread( getDataPath("stitching/boat3.jpg") ) ); |
||||
_imgs.push_back( imread( getDataPath("stitching/boat4.jpg") ) ); |
||||
_imgs.push_back( imread( getDataPath("stitching/boat5.jpg") ) ); |
||||
_imgs.push_back( imread( getDataPath("stitching/boat6.jpg") ) ); |
||||
vector<UMat> imgs = ToUMat(_imgs); |
||||
|
||||
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb" |
||||
? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder()) |
||||
: Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder()); |
||||
|
||||
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb" |
||||
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE) |
||||
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE); |
||||
|
||||
declare.iterations(20); |
||||
|
||||
while(next()) |
||||
{ |
||||
Stitcher stitcher = Stitcher::createDefault(); |
||||
stitcher.setFeaturesFinder(featuresFinder); |
||||
stitcher.setFeaturesMatcher(featuresMatcher); |
||||
stitcher.setWarper(makePtr<SphericalWarper>()); |
||||
stitcher.setRegistrationResol(WORK_MEGAPIX); |
||||
|
||||
startTimer(); |
||||
stitcher.stitch(imgs, pano); |
||||
stopTimer(); |
||||
} |
||||
|
||||
EXPECT_NEAR(pano.size().width, 10789, 200); |
||||
EXPECT_NEAR(pano.size().height, 2663, 100); |
||||
|
||||
SANITY_CHECK_NOTHING(); |
||||
} |
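For reference, a minimal sketch (not part of this patch; file names are placeholders and error handling is omitted) of the pipeline these perf tests measure; passing UMat inputs is what lets Stitcher take the OpenCL (T-API) code paths exercised here:

#include <opencv2/opencv.hpp>

int main()
{
    // Load the inputs into UMats so that OpenCL-capable steps can run on the device.
    std::vector<cv::UMat> imgs(3);
    cv::imread("a1.png").copyTo(imgs[0]);   // placeholder file names
    cv::imread("a2.png").copyTo(imgs[1]);
    cv::imread("a3.png").copyTo(imgs[2]);

    // cv::ocl::setUseOpenCL(false);        // uncomment to force the CPU fallback for comparison

    cv::Stitcher stitcher = cv::Stitcher::createDefault();
    cv::UMat pano;
    cv::Stitcher::Status status = stitcher.stitch(imgs, pano);
    return status == cv::Stitcher::OK ? 0 : 1;
}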
@ -0,0 +1,282 @@ |
||||
// This file is part of OpenCV project. |
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory |
||||
// of this distribution and at http://opencv.org/license.html. |
||||
// |
||||
// Copyright (C) 2014, Itseez, Inc, all rights reserved. |
||||
|
||||
// |
||||
// Common preprocessor macros
||||
// |
||||
|
||||
// |
||||
// TODO: Move this common code into "header" file |
||||
// |
||||
|
||||
#ifndef NL // New Line: for preprocessor debugging |
||||
#define NL |
||||
#endif |
||||
|
||||
#define REF(x) x |
||||
#define __CAT(x, y) x##y |
||||
#define CAT(x, y) __CAT(x, y) |
||||
|
||||
//
// All matrices come with this description ("name" is the name of the matrix):
// * name_CN - number of channels (1,2,3,4)
// * name_DEPTH - numeric value of CV_MAT_DEPTH(type). See the CV_8U, CV_32S, etc. macros below.
//
// Currently we also pass these attributes (to reduce this macro block):
// * name_T - datatype (int, float, uchar4, float4)
// * name_T1 - datatype for one channel (int, float, uchar).
//             It is equal to the result of the "T1(name_T)" macro
// * name_TSIZE - CV_ELEM_SIZE(type).
//                We can't use sizeof(name_T) here, because sizeof() of a 3-component vector type
//                is padded to 4 components (e.g. sizeof(short3) is 8, not 6; sizeof(float3) is 16, not 12).
// * name_T1SIZE - CV_ELEM_SIZE1(type)
//
||||
|
||||
// |
||||
// Usage sample: |
||||
// |
||||
// #define workType TYPE(float, src_CN) |
||||
// #define convertToWorkType CONVERT_TO(workType) |
||||
// #define convertWorkTypeToDstType CONVERT(workType, dst_T) |
||||
// |
||||
// __kernel void kernelFn(DECLARE_MAT_ARG(src), DECLARE_MAT_ARG(dst)) |
||||
// { |
||||
// const int x = get_global_id(0); |
||||
// const int y = get_global_id(1); |
||||
// |
||||
// if (x < srcWidth && y < srcHeight) |
||||
// { |
||||
// int src_byteOffset = MAT_BYTE_OFFSET(src, x, y); |
||||
// int dst_byteOffset = MAT_BYTE_OFFSET(dst, x, y); |
||||
// workType value = convertToWorkType(LOAD_MAT_AT(src, src_byteOffset)); |
||||
// |
||||
// ... value processing ... |
||||
// |
||||
// STORE_MAT_AT(dst, dst_byteOffset, convertWorkTypeToDstType(value)); |
||||
// } |
||||
// } |
||||
// |
||||
|
||||
#define DECLARE_MAT_ARG(name) \ |
||||
__global uchar* restrict name ## Ptr, \ |
||||
int name ## StepBytes, \ |
||||
int name ## Offset, \ |
||||
int name ## Height, \ |
||||
int name ## Width NL |
||||
|
||||
#define MAT_BYTE_OFFSET(name, x, y) mad24((y)/* + name ## OffsetY*/, name ## StepBytes, ((x)/* + name ## OffsetX*/) * (int)(name ## _TSIZE) + name ## Offset) |
||||
#define MAT_RELATIVE_BYTE_OFFSET(name, x, y) mad24(y, name ## StepBytes, (x) * (int)(name ## _TSIZE)) |
||||
|
||||
#define __LOAD_MAT_AT(name, byteOffset) *((const __global name ## _T*)(name ## Ptr + (byteOffset))) |
||||
#define __vload_CN__(name_cn) vload ## name_cn |
||||
#define __vload_CN_(name_cn) __vload_CN__(name_cn) |
||||
#define __vload_CN(name) __vload_CN_(name ## _CN) |
||||
#define __LOAD_MAT_AT_vload(name, byteOffset) __vload_CN(name)(0, ((const __global name ## _T1*)(name ## Ptr + (byteOffset)))) |
||||
#define __LOAD_MAT_AT_1 __LOAD_MAT_AT |
||||
#define __LOAD_MAT_AT_2 __LOAD_MAT_AT |
||||
#define __LOAD_MAT_AT_3 __LOAD_MAT_AT_vload |
||||
#define __LOAD_MAT_AT_4 __LOAD_MAT_AT |
||||
#define __LOAD_MAT_AT_CN__(name_cn) __LOAD_MAT_AT_ ## name_cn |
||||
#define __LOAD_MAT_AT_CN_(name_cn) __LOAD_MAT_AT_CN__(name_cn) |
||||
#define __LOAD_MAT_AT_CN(name) __LOAD_MAT_AT_CN_(name ## _CN) |
||||
#define LOAD_MAT_AT(name, byteOffset) __LOAD_MAT_AT_CN(name)(name, byteOffset) |
||||
|
||||
#define __STORE_MAT_AT(name, byteOffset, v) *((__global name ## _T*)(name ## Ptr + (byteOffset))) = v |
||||
#define __vstore_CN__(name_cn) vstore ## name_cn |
||||
#define __vstore_CN_(name_cn) __vstore_CN__(name_cn) |
||||
#define __vstore_CN(name) __vstore_CN_(name ## _CN) |
||||
#define __STORE_MAT_AT_vstore(name, byteOffset, v) __vstore_CN(name)(v, 0, ((__global name ## _T1*)(name ## Ptr + (byteOffset)))) |
||||
#define __STORE_MAT_AT_1 __STORE_MAT_AT |
||||
#define __STORE_MAT_AT_2 __STORE_MAT_AT |
||||
#define __STORE_MAT_AT_3 __STORE_MAT_AT_vstore |
||||
#define __STORE_MAT_AT_4 __STORE_MAT_AT |
||||
#define __STORE_MAT_AT_CN__(name_cn) __STORE_MAT_AT_ ## name_cn |
||||
#define __STORE_MAT_AT_CN_(name_cn) __STORE_MAT_AT_CN__(name_cn) |
||||
#define __STORE_MAT_AT_CN(name) __STORE_MAT_AT_CN_(name ## _CN) |
||||
#define STORE_MAT_AT(name, byteOffset, v) __STORE_MAT_AT_CN(name)(name, byteOffset, v) |
||||
|
||||
#define T1_uchar uchar |
||||
#define T1_uchar2 uchar |
||||
#define T1_uchar3 uchar |
||||
#define T1_uchar4 uchar |
||||
#define T1_char char |
||||
#define T1_char2 char |
||||
#define T1_char3 char |
||||
#define T1_char4 char |
||||
#define T1_ushort ushort |
||||
#define T1_ushort2 ushort |
||||
#define T1_ushort3 ushort |
||||
#define T1_ushort4 ushort |
||||
#define T1_short short |
||||
#define T1_short2 short |
||||
#define T1_short3 short |
||||
#define T1_short4 short |
||||
#define T1_int int |
||||
#define T1_int2 int |
||||
#define T1_int3 int |
||||
#define T1_int4 int |
||||
#define T1_float float |
||||
#define T1_float2 float |
||||
#define T1_float3 float |
||||
#define T1_float4 float |
||||
#define T1_double double |
||||
#define T1_double2 double |
||||
#define T1_double3 double |
||||
#define T1_double4 double |
||||
#define T1(type) REF(CAT(T1_, REF(type))) |
||||
|
||||
#define uchar1 uchar |
||||
#define char1 char |
||||
#define short1 short |
||||
#define ushort1 ushort |
||||
#define int1 int |
||||
#define float1 float |
||||
#define double1 double |
||||
#define TYPE(type, cn) REF(CAT(REF(type), REF(cn))) |
||||
|
||||
#define __CONVERT_MODE_uchar_uchar __NO_CONVERT |
||||
#define __CONVERT_MODE_uchar_char __CONVERT_sat |
||||
#define __CONVERT_MODE_uchar_ushort __CONVERT |
||||
#define __CONVERT_MODE_uchar_short __CONVERT |
||||
#define __CONVERT_MODE_uchar_int __CONVERT |
||||
#define __CONVERT_MODE_uchar_float __CONVERT |
||||
#define __CONVERT_MODE_uchar_double __CONVERT |
||||
#define __CONVERT_MODE_char_uchar __CONVERT_sat |
||||
#define __CONVERT_MODE_char_char __NO_CONVERT |
||||
#define __CONVERT_MODE_char_ushort __CONVERT_sat |
||||
#define __CONVERT_MODE_char_short __CONVERT |
||||
#define __CONVERT_MODE_char_int __CONVERT |
||||
#define __CONVERT_MODE_char_float __CONVERT |
||||
#define __CONVERT_MODE_char_double __CONVERT |
||||
#define __CONVERT_MODE_ushort_uchar __CONVERT_sat |
||||
#define __CONVERT_MODE_ushort_char __CONVERT_sat |
||||
#define __CONVERT_MODE_ushort_ushort __NO_CONVERT |
||||
#define __CONVERT_MODE_ushort_short __CONVERT_sat |
||||
#define __CONVERT_MODE_ushort_int __CONVERT |
||||
#define __CONVERT_MODE_ushort_float __CONVERT |
||||
#define __CONVERT_MODE_ushort_double __CONVERT |
||||
#define __CONVERT_MODE_short_uchar __CONVERT_sat |
||||
#define __CONVERT_MODE_short_char __CONVERT_sat |
||||
#define __CONVERT_MODE_short_ushort __CONVERT_sat |
||||
#define __CONVERT_MODE_short_short __NO_CONVERT |
||||
#define __CONVERT_MODE_short_int __CONVERT |
||||
#define __CONVERT_MODE_short_float __CONVERT |
||||
#define __CONVERT_MODE_short_double __CONVERT |
||||
#define __CONVERT_MODE_int_uchar __CONVERT_sat |
||||
#define __CONVERT_MODE_int_char __CONVERT_sat |
||||
#define __CONVERT_MODE_int_ushort __CONVERT_sat |
||||
#define __CONVERT_MODE_int_short __CONVERT_sat |
||||
#define __CONVERT_MODE_int_int __NO_CONVERT |
||||
#define __CONVERT_MODE_int_float __CONVERT |
||||
#define __CONVERT_MODE_int_double __CONVERT |
||||
#define __CONVERT_MODE_float_uchar __CONVERT_sat_rte |
||||
#define __CONVERT_MODE_float_char __CONVERT_sat_rte |
||||
#define __CONVERT_MODE_float_ushort __CONVERT_sat_rte |
||||
#define __CONVERT_MODE_float_short __CONVERT_sat_rte |
||||
#define __CONVERT_MODE_float_int __CONVERT_rte |
||||
#define __CONVERT_MODE_float_float __NO_CONVERT |
||||
#define __CONVERT_MODE_float_double __CONVERT |
||||
#define __CONVERT_MODE_double_uchar __CONVERT_sat_rte |
||||
#define __CONVERT_MODE_double_char __CONVERT_sat_rte |
||||
#define __CONVERT_MODE_double_ushort __CONVERT_sat_rte |
||||
#define __CONVERT_MODE_double_short __CONVERT_sat_rte |
||||
#define __CONVERT_MODE_double_int __CONVERT_rte |
||||
#define __CONVERT_MODE_double_float __CONVERT |
||||
#define __CONVERT_MODE_double_double __NO_CONVERT |
||||
#define __CONVERT_MODE(srcType, dstType) CAT(__CONVERT_MODE_, CAT(REF(T1(srcType)), CAT(_, REF(T1(dstType))))) |
||||
|
||||
#define __ROUND_MODE__NO_CONVERT |
||||
#define __ROUND_MODE__CONVERT // nothing |
||||
#define __ROUND_MODE__CONVERT_rte _rte |
||||
#define __ROUND_MODE__CONVERT_sat _sat |
||||
#define __ROUND_MODE__CONVERT_sat_rte _sat_rte |
||||
#define ROUND_MODE(srcType, dstType) CAT(__ROUND_MODE_, __CONVERT_MODE(srcType, dstType)) |
||||
|
||||
#define __CONVERT_ROUND(dstType, roundMode) CAT(CAT(convert_, REF(dstType)), roundMode) |
||||
#define __NO_CONVERT(dstType) // nothing |
||||
#define __CONVERT(dstType) __CONVERT_ROUND(dstType,) |
||||
#define __CONVERT_rte(dstType) __CONVERT_ROUND(dstType,_rte) |
||||
#define __CONVERT_sat(dstType) __CONVERT_ROUND(dstType,_sat) |
||||
#define __CONVERT_sat_rte(dstType) __CONVERT_ROUND(dstType,_sat_rte) |
||||
#define CONVERT(srcType, dstType) REF(__CONVERT_MODE(srcType,dstType))(dstType) |
||||
#define CONVERT_TO(dstType) __CONVERT_ROUND(dstType,) |
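// Example expansions (illustrative): CONVERT(float, uchar4) selects
// __CONVERT_MODE_float_uchar == __CONVERT_sat_rte, so the call expands to
// convert_uchar4_sat_rte(...); CONVERT(int, int) selects __NO_CONVERT and emits
// nothing, leaving the value untouched; CONVERT_TO(float4) is simply convert_float4.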
||||
|
||||
// OpenCV depths |
||||
#define CV_8U 0 |
||||
#define CV_8S 1 |
||||
#define CV_16U 2 |
||||
#define CV_16S 3 |
||||
#define CV_32S 4 |
||||
#define CV_32F 5 |
||||
#define CV_64F 6 |
||||
|
||||
// |
||||
// End of common preprocessor macros
||||
// |
||||
|
||||
|
||||
|
||||
#if defined(DEFINE_feed) |
||||
|
||||
#define workType TYPE(weight_T1, src_CN) |
||||
#define convertSrcToWorkType CONVERT_TO(workType) |
||||
#define convertToDstType CONVERT_TO(dst_T) // sat_rte provides incompatible results with CPU path |
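// The feed kernel below accumulates the weighted source into the blender accumulators,
// one work-item per pixel: dst += convert(src * w) and dstWeight += w.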
||||
|
||||
__kernel void feed( |
||||
DECLARE_MAT_ARG(src), DECLARE_MAT_ARG(weight), |
||||
DECLARE_MAT_ARG(dst), DECLARE_MAT_ARG(dstWeight) |
||||
) |
||||
{ |
||||
const int x = get_global_id(0); |
||||
const int y = get_global_id(1); |
||||
|
||||
if (x < srcWidth && y < srcHeight) |
||||
{ |
||||
int src_byteOffset = MAT_BYTE_OFFSET(src, x, y); |
||||
int weight_byteOffset = MAT_BYTE_OFFSET(weight, x, y); |
||||
int dst_byteOffset = MAT_BYTE_OFFSET(dst, x, y); |
||||
int dstWeight_byteOffset = MAT_BYTE_OFFSET(dstWeight, x, y); |
||||
|
||||
weight_T w = LOAD_MAT_AT(weight, weight_byteOffset); |
||||
workType src_value = convertSrcToWorkType(LOAD_MAT_AT(src, src_byteOffset)); |
||||
STORE_MAT_AT(dst, dst_byteOffset, LOAD_MAT_AT(dst, dst_byteOffset) + convertToDstType(src_value * w)); |
||||
STORE_MAT_AT(dstWeight, dstWeight_byteOffset, LOAD_MAT_AT(dstWeight, dstWeight_byteOffset) + w); |
||||
} |
||||
} |
||||
|
||||
#endif |
||||
|
||||
#if defined(DEFINE_normalizeUsingWeightMap) |
||||
|
||||
#define workType TYPE(weight_T1, mat_CN) |
||||
#define convertSrcToWorkType CONVERT_TO(workType) |
||||
#define convertToDstType CONVERT_TO(mat_T) // sat_rte provides incompatible results with CPU path |
||||
|
||||
#if weight_DEPTH >= CV_32F |
||||
#define WEIGHT_EPS 1e-5f |
||||
#else |
||||
#define WEIGHT_EPS 0 |
||||
#endif |
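// The normalizeUsingWeightMap kernel below divides each accumulated pixel by its
// accumulated weight; WEIGHT_EPS guards against division by zero for floating-point
// weight maps (integer weight maps use 0).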
||||
|
||||
__kernel void normalizeUsingWeightMap( |
||||
DECLARE_MAT_ARG(mat), DECLARE_MAT_ARG(weight) |
||||
) |
||||
{ |
||||
const int x = get_global_id(0); |
||||
const int y = get_global_id(1); |
||||
|
||||
if (x < matWidth && y < matHeight) |
||||
{ |
||||
int mat_byteOffset = MAT_BYTE_OFFSET(mat, x, y); |
||||
int weight_byteOffset = MAT_BYTE_OFFSET(weight, x, y); |
||||
|
||||
weight_T w = LOAD_MAT_AT(weight, weight_byteOffset); |
||||
workType value = convertSrcToWorkType(LOAD_MAT_AT(mat, mat_byteOffset)); |
||||
value = value / (w + WEIGHT_EPS); |
||||
STORE_MAT_AT(mat, mat_byteOffset, convertToDstType(value)); |
||||
} |
||||
} |
||||
|
||||
#endif |
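As a host-side reference, a small C++ sketch (illustrative; it assumes a cv::Mat named m and mirrors, not replaces, the device macro) of the address computation MAT_BYTE_OFFSET performs:

#include <opencv2/core.hpp>

// Equivalent of MAT_BYTE_OFFSET(name, x, y) = mad24(y, StepBytes, x * TSIZE + Offset)
// for a cv::Mat that owns its data (Offset == 0): step is the row stride in bytes and
// elemSize() is CV_ELEM_SIZE(type), i.e. the per-pixel size including all channels.
static inline size_t matByteOffset(const cv::Mat& m, int x, int y)
{
    return (size_t)y * m.step + (size_t)x * m.elemSize();
}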
@ -1,187 +0,0 @@ |
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp" |
||||
#include "opencl_kernels.hpp" |
||||
|
||||
namespace cv { |
||||
namespace detail { |
||||
|
||||
/////////////////////////////////////////// PlaneWarperOcl ////////////////////////////////////////////
|
||||
|
||||
Rect PlaneWarperOcl::buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap) |
||||
{ |
||||
projector_.setCameraParams(K, R, T); |
||||
|
||||
Point dst_tl, dst_br; |
||||
detectResultRoi(src_size, dst_tl, dst_br); |
||||
|
||||
if (ocl::useOpenCL()) |
||||
{ |
||||
ocl::Kernel k("buildWarpPlaneMaps", ocl::stitching::warpers_oclsrc); |
||||
if (!k.empty()) |
||||
{ |
||||
Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1); |
||||
xmap.create(dsize, CV_32FC1); |
||||
ymap.create(dsize, CV_32FC1); |
||||
|
||||
Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv), t(1, 3, CV_32FC1, projector_.t); |
||||
UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), |
||||
uk_rinv = k_rinv.getUMat(ACCESS_READ), ut = t.getUMat(ACCESS_READ); |
||||
|
||||
k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap), |
||||
ocl::KernelArg::PtrReadOnly(uk_rinv), ocl::KernelArg::PtrReadOnly(ut), |
||||
dst_tl.x, dst_tl.y, projector_.scale); |
||||
|
||||
size_t globalsize[2] = { dsize.width, dsize.height }; |
||||
if (k.run(2, globalsize, NULL, true)) |
||||
return Rect(dst_tl, dst_br); |
||||
} |
||||
} |
||||
|
||||
return PlaneWarper::buildMaps(src_size, K, R, T, xmap, ymap); |
||||
} |
||||
|
||||
Point PlaneWarperOcl::warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode, OutputArray dst) |
||||
{ |
||||
UMat uxmap, uymap; |
||||
Rect dst_roi = buildMaps(src.size(), K, R, T, uxmap, uymap); |
||||
|
||||
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type()); |
||||
UMat udst = dst.getUMat(); |
||||
remap(src, udst, uxmap, uymap, interp_mode, border_mode); |
||||
|
||||
return dst_roi.tl(); |
||||
} |
||||
|
||||
/////////////////////////////////////////// SphericalWarperOcl ////////////////////////////////////////
|
||||
|
||||
Rect SphericalWarperOcl::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) |
||||
{ |
||||
projector_.setCameraParams(K, R); |
||||
|
||||
Point dst_tl, dst_br; |
||||
detectResultRoi(src_size, dst_tl, dst_br); |
||||
|
||||
if (ocl::useOpenCL()) |
||||
{ |
||||
ocl::Kernel k("buildWarpSphericalMaps", ocl::stitching::warpers_oclsrc); |
||||
if (!k.empty()) |
||||
{ |
||||
Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1); |
||||
xmap.create(dsize, CV_32FC1); |
||||
ymap.create(dsize, CV_32FC1); |
||||
|
||||
Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv); |
||||
UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ); |
||||
|
||||
k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap), |
||||
ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, projector_.scale); |
||||
|
||||
size_t globalsize[2] = { dsize.width, dsize.height }; |
||||
if (k.run(2, globalsize, NULL, true)) |
||||
return Rect(dst_tl, dst_br); |
||||
} |
||||
} |
||||
|
||||
return SphericalWarper::buildMaps(src_size, K, R, xmap, ymap); |
||||
} |
||||
|
||||
Point SphericalWarperOcl::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst) |
||||
{ |
||||
UMat uxmap, uymap; |
||||
Rect dst_roi = buildMaps(src.size(), K, R, uxmap, uymap); |
||||
|
||||
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type()); |
||||
UMat udst = dst.getUMat(); |
||||
remap(src, udst, uxmap, uymap, interp_mode, border_mode); |
||||
|
||||
return dst_roi.tl(); |
||||
} |
||||
|
||||
/////////////////////////////////////////// CylindricalWarperOcl ////////////////////////////////////////
|
||||
|
||||
Rect CylindricalWarperOcl::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) |
||||
{ |
||||
projector_.setCameraParams(K, R); |
||||
|
||||
Point dst_tl, dst_br; |
||||
detectResultRoi(src_size, dst_tl, dst_br); |
||||
|
||||
if (ocl::useOpenCL()) |
||||
{ |
||||
ocl::Kernel k("buildWarpCylindricalMaps", ocl::stitching::warpers_oclsrc); |
||||
if (!k.empty()) |
||||
{ |
||||
Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1); |
||||
xmap.create(dsize, CV_32FC1); |
||||
ymap.create(dsize, CV_32FC1); |
||||
|
||||
Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv); |
||||
UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ); |
||||
|
||||
k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap), |
||||
ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, projector_.scale); |
||||
|
||||
size_t globalsize[2] = { dsize.width, dsize.height }; |
||||
if (k.run(2, globalsize, NULL, true)) |
||||
return Rect(dst_tl, dst_br); |
||||
} |
||||
} |
||||
|
||||
return CylindricalWarper::buildMaps(src_size, K, R, xmap, ymap); |
||||
} |
||||
|
||||
Point CylindricalWarperOcl::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst) |
||||
{ |
||||
UMat uxmap, uymap; |
||||
Rect dst_roi = buildMaps(src.size(), K, R, uxmap, uymap); |
||||
|
||||
dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type()); |
||||
UMat udst = dst.getUMat(); |
||||
remap(src, udst, uxmap, uymap, interp_mode, border_mode); |
||||
|
||||
return dst_roi.tl(); |
||||
} |
||||
|
||||
} // namespace detail
|
||||
} // namespace cv
|
@ -0,0 +1,654 @@ |
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
//#include <math.h>
|
||||
|
||||
#include "precomp.hpp" |
||||
|
||||
namespace cv |
||||
{ |
||||
|
||||
/*!
|
||||
The class implements the following algorithm: |
||||
"Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction" |
||||
Z.Zivkovic, F. van der Heijden |
||||
Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006 |
||||
http://www.zoranz.net/Publications/zivkovicPRL2006.pdf
|
||||
*/ |
||||
|
||||
// default parameters of gaussian background detection algorithm
|
||||
static const int defaultHistory2 = 500; // Learning rate; alpha = 1/defaultHistory2
|
||||
static const int defaultNsamples = 7; // number of samples saved in memory
|
||||
static const float defaultDist2Threshold = 20.0f*20.0f;//threshold on distance from the sample
|
||||
|
||||
// additional parameters
|
||||
static const unsigned char defaultnShadowDetection2 = (unsigned char)127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
|
||||
static const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
|
||||
|
||||
class BackgroundSubtractorKNNImpl : public BackgroundSubtractorKNN |
||||
{ |
||||
public: |
||||
//! the default constructor
|
||||
BackgroundSubtractorKNNImpl() |
||||
{ |
||||
frameSize = Size(0,0); |
||||
frameType = 0; |
||||
nframes = 0; |
||||
history = defaultHistory2; |
||||
|
||||
//set parameters
|
||||
// N - the number of samples stored in memory per model
|
||||
nN = defaultNsamples; |
||||
|
||||
//kNN - k nearest neighbour - number of NN for detecting background - default K=[0.1*nN]
|
||||
nkNN=MAX(1,cvRound(0.1*nN*3+0.40)); |
||||
|
||||
//Tb - Threshold Tb*kernelwidth
|
||||
fTb = defaultDist2Threshold; |
||||
|
||||
// Shadow detection
|
||||
bShadowDetection = 1;//turn on
|
||||
nShadowDetection = defaultnShadowDetection2; |
||||
fTau = defaultfTau;// Tau - shadow threshold
|
||||
name_ = "BackgroundSubtractor.KNN"; |
||||
} |
||||
//! the full constructor that takes the length of the history,
|
||||
// the distance threshold and the shadow detection flag
|
||||
BackgroundSubtractorKNNImpl(int _history, float _dist2Threshold, bool _bShadowDetection=true) |
||||
{ |
||||
frameSize = Size(0,0); |
||||
frameType = 0; |
||||
|
||||
nframes = 0; |
||||
history = _history > 0 ? _history : defaultHistory2; |
||||
|
||||
//set parameters
|
||||
// N - the number of samples stored in memory per model
|
||||
nN = defaultNsamples; |
||||
//kNN - k nearest neighbour - number of NN for detecting background - default K=[0.1*nN]
|
||||
nkNN=MAX(1,cvRound(0.1*nN*3+0.40)); |
||||
|
||||
//Tb - Threshold Tb*kernelwidth
|
||||
fTb = _dist2Threshold>0? _dist2Threshold : defaultDist2Threshold; |
||||
|
||||
bShadowDetection = _bShadowDetection; |
||||
nShadowDetection = defaultnShadowDetection2; |
||||
fTau = defaultfTau; |
||||
name_ = "BackgroundSubtractor.KNN"; |
||||
} |
||||
//! the destructor
|
||||
~BackgroundSubtractorKNNImpl() {} |
||||
//! the update operator
|
||||
void apply(InputArray image, OutputArray fgmask, double learningRate=-1); |
||||
|
||||
//! computes a background image which is the mean of all background gaussians
|
||||
virtual void getBackgroundImage(OutputArray backgroundImage) const; |
||||
|
||||
//! re-initialization method
|
||||
void initialize(Size _frameSize, int _frameType) |
||||
{ |
||||
frameSize = _frameSize; |
||||
frameType = _frameType; |
||||
nframes = 0; |
||||
|
||||
int nchannels = CV_MAT_CN(frameType); |
||||
CV_Assert( nchannels <= CV_CN_MAX ); |
||||
|
||||
// Reserve memory for the model
|
||||
int size=frameSize.height*frameSize.width; |
||||
// For each pixel we store nN samples for each of the 3 (short/mid/long term) models;
// each sample holds the channel values + an include flag (nchannels+1 bytes)
||||
bgmodel.create( 1,(nN * 3) * (nchannels+1)* size,CV_8U); |
||||
|
||||
//index through the three circular lists
|
||||
aModelIndexShort.create(1,size,CV_8U); |
||||
aModelIndexMid.create(1,size,CV_8U); |
||||
aModelIndexLong.create(1,size,CV_8U); |
||||
//when to update next
|
||||
nNextShortUpdate.create(1,size,CV_8U); |
||||
nNextMidUpdate.create(1,size,CV_8U); |
||||
nNextLongUpdate.create(1,size,CV_8U); |
||||
|
||||
//Reset counters
|
||||
nShortCounter = 0; |
||||
nMidCounter = 0; |
||||
nLongCounter = 0; |
||||
|
||||
aModelIndexShort = Scalar::all(0);//random? //((m_nN)*rand())/(RAND_MAX+1);//0...m_nN-1
|
||||
aModelIndexMid = Scalar::all(0); |
||||
aModelIndexLong = Scalar::all(0); |
||||
nNextShortUpdate = Scalar::all(0); |
||||
nNextMidUpdate = Scalar::all(0); |
||||
nNextLongUpdate = Scalar::all(0); |
||||
} |
||||
|
||||
virtual AlgorithmInfo* info() const { return 0; } |
||||
|
||||
virtual int getHistory() const { return history; } |
||||
virtual void setHistory(int _nframes) { history = _nframes; } |
||||
|
||||
virtual int getNSamples() const { return nN; } |
||||
virtual void setNSamples(int _nN) { nN = _nN; }//needs reinitialization!
|
||||
|
||||
virtual int getkNNSamples() const { return nkNN; } |
||||
virtual void setkNNSamples(int _nkNN) { nkNN = _nkNN; } |
||||
|
||||
virtual double getDist2Threshold() const { return fTb; } |
||||
virtual void setDist2Threshold(double _dist2Threshold) { fTb = (float)_dist2Threshold; } |
||||
|
||||
virtual bool getDetectShadows() const { return bShadowDetection; } |
||||
virtual void setDetectShadows(bool detectshadows) { bShadowDetection = detectshadows; } |
||||
|
||||
virtual int getShadowValue() const { return nShadowDetection; } |
||||
virtual void setShadowValue(int value) { nShadowDetection = (uchar)value; } |
||||
|
||||
virtual double getShadowThreshold() const { return fTau; } |
||||
virtual void setShadowThreshold(double value) { fTau = (float)value; } |
||||
|
||||
virtual void write(FileStorage& fs) const |
||||
{ |
||||
fs << "name" << name_ |
||||
<< "history" << history |
||||
<< "nsamples" << nN |
||||
<< "nKNN" << nkNN |
||||
<< "dist2Threshold" << fTb |
||||
<< "detectShadows" << (int)bShadowDetection |
||||
<< "shadowValue" << (int)nShadowDetection |
||||
<< "shadowThreshold" << fTau; |
||||
} |
||||
|
||||
virtual void read(const FileNode& fn) |
||||
{ |
||||
CV_Assert( (String)fn["name"] == name_ ); |
||||
history = (int)fn["history"]; |
||||
nN = (int)fn["nsamples"]; |
||||
nkNN = (int)fn["nKNN"]; |
||||
fTb = (float)fn["dist2Threshold"]; |
||||
bShadowDetection = (int)fn["detectShadows"] != 0; |
||||
nShadowDetection = saturate_cast<uchar>((int)fn["shadowValue"]); |
||||
fTau = (float)fn["shadowThreshold"]; |
||||
} |
||||
|
||||
protected: |
||||
Size frameSize; |
||||
int frameType; |
||||
int nframes; |
||||
/////////////////////////
|
||||
//very important parameters - things you will change
|
||||
////////////////////////
|
||||
int history; |
||||
//alpha=1/history - speed of update - if the time interval you want to average over is T
|
||||
//set alpha=1/history. It is also useful at start to make T slowly increase
|
||||
//from 1 until the desired T
|
||||
float fTb; |
||||
//Tb - threshold on the squared distance from the sample used to decide if it is well described
|
||||
//by the background model or not. A typical value could be 2 sigma
|
||||
//and that is Tb=2*2*10*10 =400; where we take typical pixel level sigma=10
|
||||
|
||||
/////////////////////////
|
||||
//less important parameters - things you might change but be careful
|
||||
////////////////////////
|
||||
int nN;//total number of samples
|
||||
int nkNN;//number of NN for detecting background - default K=[0.1*nN]
|
||||
|
||||
//shadow detection parameters
|
||||
bool bShadowDetection;//default 1 - do shadow detection
|
||||
unsigned char nShadowDetection;//do shadow detection - insert this value as the detection result - 127 default value
|
||||
float fTau; |
||||
// Tau - shadow threshold. The shadow is detected if the pixel is a darker
|
||||
//version of the background. Tau is a threshold on how much darker the shadow can be.
|
||||
//Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
|
||||
//See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
|
||||
|
||||
//model data
|
||||
int nLongCounter;//circular counter
|
||||
int nMidCounter; |
||||
int nShortCounter; |
||||
Mat bgmodel; // model data pixel values
|
||||
Mat aModelIndexShort;// index into the models
|
||||
Mat aModelIndexMid; |
||||
Mat aModelIndexLong; |
||||
Mat nNextShortUpdate;//random update points per model
|
||||
Mat nNextMidUpdate; |
||||
Mat nNextLongUpdate; |
||||
|
||||
String name_; |
||||
}; |
||||
|
||||
//{ to do - parallelization ...
|
||||
//struct KNNInvoker....
|
||||
CV_INLINE void |
||||
_cvUpdatePixelBackgroundNP( long pixel,const uchar* data, int nchannels, int m_nN, |
||||
uchar* m_aModel, |
||||
uchar* m_nNextLongUpdate, |
||||
uchar* m_nNextMidUpdate, |
||||
uchar* m_nNextShortUpdate, |
||||
uchar* m_aModelIndexLong, |
||||
uchar* m_aModelIndexMid, |
||||
uchar* m_aModelIndexShort, |
||||
int m_nLongCounter, |
||||
int m_nMidCounter, |
||||
int m_nShortCounter, |
||||
int m_nLongUpdate, |
||||
int m_nMidUpdate, |
||||
int m_nShortUpdate, |
||||
uchar include |
||||
) |
||||
{ |
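// Summary (descriptive comment added for clarity): each pixel keeps three circular
// lists of m_nN samples (short/mid/long term). When a list's global counter matches
// the pixel's scheduled update slot, the current mid sample is copied into the long
// list, the current short sample into the mid list, and the new pixel value (with its
// include flag) into the short list; update slots are re-randomized each time a
// counter wraps around.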
||||
// hold the offset
|
||||
int ndata=1+nchannels; |
||||
long offsetLong = ndata * (pixel * m_nN * 3 + m_aModelIndexLong[pixel] + m_nN * 2); |
||||
long offsetMid = ndata * (pixel * m_nN * 3 + m_aModelIndexMid[pixel] + m_nN * 1); |
||||
long offsetShort = ndata * (pixel * m_nN * 3 + m_aModelIndexShort[pixel]); |
||||
|
||||
// Long update?
|
||||
if (m_nNextLongUpdate[pixel] == m_nLongCounter) |
||||
{ |
||||
// add the oldest pixel from Mid to the list of values (for each color)
|
||||
memcpy(&m_aModel[offsetLong],&m_aModel[offsetMid],ndata*sizeof(unsigned char)); |
||||
// increase the index
|
||||
m_aModelIndexLong[pixel] = (m_aModelIndexLong[pixel] >= (m_nN-1)) ? 0 : (m_aModelIndexLong[pixel] + 1); |
||||
}; |
||||
if (m_nLongCounter == (m_nLongUpdate-1)) |
||||
{ |
||||
//m_nNextLongUpdate[pixel] = (uchar)(((m_nLongUpdate)*(rand()-1))/RAND_MAX);//0,...m_nLongUpdate-1;
|
||||
m_nNextLongUpdate[pixel] = (uchar)( rand() % m_nLongUpdate );//0,...m_nLongUpdate-1;
|
||||
}; |
||||
|
||||
// Mid update?
|
||||
if (m_nNextMidUpdate[pixel] == m_nMidCounter) |
||||
{ |
||||
// add this pixel to the list of values (for each color)
|
||||
memcpy(&m_aModel[offsetMid],&m_aModel[offsetShort],ndata*sizeof(unsigned char)); |
||||
// increase the index
|
||||
m_aModelIndexMid[pixel] = (m_aModelIndexMid[pixel] >= (m_nN-1)) ? 0 : (m_aModelIndexMid[pixel] + 1); |
||||
}; |
||||
if (m_nMidCounter == (m_nMidUpdate-1)) |
||||
{ |
||||
m_nNextMidUpdate[pixel] = (uchar)( rand() % m_nMidUpdate ); |
||||
}; |
||||
|
||||
// Short update?
|
||||
if (m_nNextShortUpdate[pixel] == m_nShortCounter) |
||||
{ |
||||
// add this pixel to the list of values (for each color)
|
||||
memcpy(&m_aModel[offsetShort],data,ndata*sizeof(unsigned char)); |
||||
//set the include flag
|
||||
m_aModel[offsetShort+nchannels]=include; |
||||
// increase the index
|
||||
m_aModelIndexShort[pixel] = (m_aModelIndexShort[pixel] >= (m_nN-1)) ? 0 : (m_aModelIndexShort[pixel] + 1); |
||||
}; |
||||
if (m_nShortCounter == (m_nShortUpdate-1)) |
||||
{ |
||||
m_nNextShortUpdate[pixel] = (uchar)( rand() % m_nShortUpdate ); |
||||
}; |
||||
}; |
||||
|
||||
CV_INLINE int |
||||
_cvCheckPixelBackgroundNP(long pixel, |
||||
const uchar* data, int nchannels, |
||||
int m_nN, |
||||
uchar* m_aModel, |
||||
float m_fTb, |
||||
int m_nkNN, |
||||
float tau, |
||||
int m_nShadowDetection, |
||||
uchar& include) |
||||
{ |
||||
int Pbf = 0; // the total probability that this pixel is background
|
||||
int Pb = 0; //background model probability
|
||||
float dData[CV_CN_MAX]; |
||||
|
||||
//uchar& include=data[nchannels];
|
||||
include=0;//do we include this pixel into background model?
|
||||
|
||||
int ndata=nchannels+1; |
||||
long posPixel = pixel * ndata * m_nN * 3; |
||||
// float k;
|
||||
// now increase the probability for each pixel
|
||||
for (int n = 0; n < m_nN*3; n++) |
||||
{ |
||||
uchar* mean_m = &m_aModel[posPixel + n*ndata]; |
||||
|
||||
//calculate difference and distance
|
||||
float dist2; |
||||
|
||||
if( nchannels == 3 ) |
||||
{ |
||||
dData[0] = (float)mean_m[0] - data[0]; |
||||
dData[1] = (float)mean_m[1] - data[1]; |
||||
dData[2] = (float)mean_m[2] - data[2]; |
||||
dist2 = dData[0]*dData[0] + dData[1]*dData[1] + dData[2]*dData[2]; |
||||
} |
||||
else |
||||
{ |
||||
dist2 = 0.f; |
||||
for( int c = 0; c < nchannels; c++ ) |
||||
{ |
||||
dData[c] = (float)mean_m[c] - data[c]; |
||||
dist2 += dData[c]*dData[c]; |
||||
} |
||||
} |
||||
|
||||
if (dist2<m_fTb) |
||||
{ |
||||
Pbf++;//all
|
||||
//background only
|
||||
//if(m_aModel[subPosPixel + nchannels])//indicator
|
||||
if(mean_m[nchannels])//indicator
|
||||
{ |
||||
Pb++; |
||||
if (Pb >= m_nkNN)//Tb
|
||||
{ |
||||
include=1;//include
|
||||
return 1;//background ->exit
|
||||
}; |
||||
} |
||||
}; |
||||
}; |
||||
|
||||
//include?
|
||||
if (Pbf>=m_nkNN)//m_nTbf)
|
||||
{ |
||||
include=1; |
||||
} |
||||
|
||||
int Ps = 0; // the total probability that this pixel is background shadow
|
||||
// Detected as moving object, perform shadow detection
|
||||
if (m_nShadowDetection) |
||||
{ |
||||
for (int n = 0; n < m_nN*3; n++) |
||||
{ |
||||
//long subPosPixel = posPixel + n*ndata;
|
||||
uchar* mean_m = &m_aModel[posPixel + n*ndata]; |
||||
|
||||
if(mean_m[nchannels])//check only background
|
||||
{ |
||||
float numerator = 0.0f; |
||||
float denominator = 0.0f; |
||||
for( int c = 0; c < nchannels; c++ ) |
||||
{ |
||||
numerator += (float)data[c] * mean_m[c]; |
||||
denominator += (float)mean_m[c] * mean_m[c]; |
||||
} |
||||
|
||||
// no division by zero allowed
|
||||
if( denominator == 0 ) |
||||
return 0; |
||||
|
||||
// if tau < a < 1 then also check the color distortion
|
||||
if( numerator <= denominator && numerator >= tau*denominator ) |
||||
{ |
||||
float a = numerator / denominator; |
||||
float dist2a = 0.0f; |
||||
|
||||
for( int c = 0; c < nchannels; c++ ) |
||||
{ |
||||
float dD= a*mean_m[c] - data[c]; |
||||
dist2a += dD*dD; |
||||
} |
||||
|
||||
if (dist2a<m_fTb*a*a) |
||||
{ |
||||
Ps++; |
||||
if (Ps >= m_nkNN)//shadow
|
||||
return 2; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
} |
||||
return 0; |
||||
}; |
||||
|
||||
CV_INLINE void |
||||
icvUpdatePixelBackgroundNP(const Mat& _src, Mat& _dst, |
||||
Mat& _bgmodel, |
||||
Mat& _nNextLongUpdate, |
||||
Mat& _nNextMidUpdate, |
||||
Mat& _nNextShortUpdate, |
||||
Mat& _aModelIndexLong, |
||||
Mat& _aModelIndexMid, |
||||
Mat& _aModelIndexShort, |
||||
int& _nLongCounter, |
||||
int& _nMidCounter, |
||||
int& _nShortCounter, |
||||
int _nN, |
||||
float _fAlphaT, |
||||
float _fTb, |
||||
int _nkNN, |
||||
float _fTau, |
||||
int _bShadowDetection, |
||||
uchar nShadowDetection |
||||
) |
||||
{ |
||||
int size=_src.rows*_src.cols; |
||||
int nchannels = CV_MAT_CN(_src.type()); |
||||
const uchar* pDataCurrent=_src.ptr(0); |
||||
uchar* pDataOutput=_dst.ptr(0); |
||||
//model
|
||||
uchar* m_aModel=_bgmodel.ptr(0); |
||||
uchar* m_nNextLongUpdate=_nNextLongUpdate.ptr(0); |
||||
uchar* m_nNextMidUpdate=_nNextMidUpdate.ptr(0); |
||||
uchar* m_nNextShortUpdate=_nNextShortUpdate.ptr(0); |
||||
uchar* m_aModelIndexLong=_aModelIndexLong.ptr(0); |
||||
uchar* m_aModelIndexMid=_aModelIndexMid.ptr(0); |
||||
uchar* m_aModelIndexShort=_aModelIndexShort.ptr(0); |
||||
|
||||
//some constants
|
||||
int m_nN=_nN; |
||||
float m_fAlphaT=_fAlphaT; |
||||
float m_fTb=_fTb;//Tb - threshold on the distance
|
||||
float m_fTau=_fTau; |
||||
int m_nkNN=_nkNN; |
||||
int m_bShadowDetection=_bShadowDetection; |
||||
|
||||
//recalculate update rates - in case alpha is changed
|
||||
// calculate update parameters (using alpha)
|
||||
int Kshort,Kmid,Klong; |
||||
//approximate exponential learning curve
|
||||
Kshort=(int)(log(0.7)/log(1-m_fAlphaT))+1;//Kshort
|
||||
Kmid=(int)(log(0.4)/log(1-m_fAlphaT))-Kshort+1;//Kmid
|
||||
Klong=(int)(log(0.1)/log(1-m_fAlphaT))-Kshort-Kmid+1;//Klong
|
||||
|
||||
//refresh rates
|
||||
int m_nShortUpdate = (Kshort/m_nN)+1; |
||||
int m_nMidUpdate = (Kmid/m_nN)+1; |
||||
int m_nLongUpdate = (Klong/m_nN)+1; |
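// Illustrative numbers (not from the original source): with the defaults history=500
// (steady-state m_fAlphaT = 1/500 = 0.002) and m_nN=7, the formulas above give roughly
// Kshort~179, Kmid~279 and Klong~693 frames, i.e. a sample in the short/mid/long lists
// is replaced about every 26, 40 and 100 frames respectively.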
||||
|
||||
//int m_nShortUpdate = MAX((Kshort/m_nN),m_nN);
|
||||
//int m_nMidUpdate = MAX((Kmid/m_nN),m_nN);
|
||||
//int m_nLongUpdate = MAX((Klong/m_nN),m_nN);
|
||||
|
||||
//update counters for the refresh rate
|
||||
int m_nLongCounter=_nLongCounter; |
||||
int m_nMidCounter=_nMidCounter; |
||||
int m_nShortCounter=_nShortCounter; |
||||
|
||||
_nShortCounter++;//0,1,...,m_nShortUpdate-1
|
||||
_nMidCounter++; |
||||
_nLongCounter++; |
||||
if (_nShortCounter >= m_nShortUpdate) _nShortCounter = 0; |
||||
if (_nMidCounter >= m_nMidUpdate) _nMidCounter = 0; |
||||
if (_nLongCounter >= m_nLongUpdate) _nLongCounter = 0; |
||||
|
||||
//go through the image
|
||||
for (long i=0;i<size;i++) |
||||
{ |
||||
const uchar* data=pDataCurrent; |
||||
pDataCurrent=pDataCurrent+nchannels; |
||||
|
||||
//update model+ background subtract
|
||||
uchar include=0; |
||||
int result= _cvCheckPixelBackgroundNP(i, data, nchannels, |
||||
m_nN, m_aModel, m_fTb,m_nkNN, m_fTau,m_bShadowDetection,include); |
||||
|
||||
_cvUpdatePixelBackgroundNP(i,data,nchannels, |
||||
m_nN, m_aModel, |
||||
m_nNextLongUpdate, |
||||
m_nNextMidUpdate, |
||||
m_nNextShortUpdate, |
||||
m_aModelIndexLong, |
||||
m_aModelIndexMid, |
||||
m_aModelIndexShort, |
||||
m_nLongCounter, |
||||
m_nMidCounter, |
||||
m_nShortCounter, |
||||
m_nLongUpdate, |
||||
m_nMidUpdate, |
||||
m_nShortUpdate, |
||||
include |
||||
); |
||||
switch (result) |
||||
{ |
||||
case 0: |
||||
//foreground
|
||||
(* pDataOutput)=255; |
||||
break; |
||||
case 1: |
||||
//background
|
||||
(* pDataOutput)=0; |
||||
break; |
||||
case 2: |
||||
//shadow
|
||||
(* pDataOutput)=nShadowDetection; |
||||
break; |
||||
} |
||||
pDataOutput++; |
||||
} |
||||
}; |
||||
|
||||
|
||||
|
||||
void BackgroundSubtractorKNNImpl::apply(InputArray _image, OutputArray _fgmask, double learningRate) |
||||
{ |
||||
Mat image = _image.getMat(); |
||||
bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType; |
||||
|
||||
if( needToInitialize ) |
||||
initialize(image.size(), image.type()); |
||||
|
||||
_fgmask.create( image.size(), CV_8U ); |
||||
Mat fgmask = _fgmask.getMat(); |
||||
|
||||
++nframes; |
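// While the model is still warming up (small nframes) the next line uses an aggressive
// rate of 1/(2*nframes); afterwards it falls back to 1/history unless the caller passed
// an explicit non-negative learningRate.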
||||
learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./std::min( 2*nframes, history ); |
||||
CV_Assert(learningRate >= 0); |
||||
|
||||
//parallel_for_(Range(0, image.rows),
|
||||
// KNNInvoker(image, fgmask,
|
||||
icvUpdatePixelBackgroundNP(image, fgmask, |
||||
bgmodel, |
||||
nNextLongUpdate, |
||||
nNextMidUpdate, |
||||
nNextShortUpdate, |
||||
aModelIndexLong, |
||||
aModelIndexMid, |
||||
aModelIndexShort, |
||||
nLongCounter, |
||||
nMidCounter, |
||||
nShortCounter, |
||||
nN, |
||||
(float)learningRate, |
||||
fTb, |
||||
nkNN, |
||||
fTau, |
||||
bShadowDetection, |
||||
nShadowDetection |
||||
); |
||||
} |
||||
|
||||
void BackgroundSubtractorKNNImpl::getBackgroundImage(OutputArray backgroundImage) const |
||||
{ |
||||
int nchannels = CV_MAT_CN(frameType); |
||||
//CV_Assert( nchannels == 3 );
|
||||
Mat meanBackground(frameSize, CV_8UC3, Scalar::all(0)); |
||||
|
||||
int ndata=nchannels+1; |
||||
int modelstep=(ndata * nN * 3); |
||||
|
||||
const uchar* pbgmodel=bgmodel.ptr(0); |
||||
for(int row=0; row<meanBackground.rows; row++) |
||||
{ |
||||
for(int col=0; col<meanBackground.cols; col++) |
||||
{ |
||||
for (int n = 0; n < nN*3; n++) |
||||
{ |
||||
const uchar* mean_m = &pbgmodel[n*ndata]; |
||||
if (mean_m[nchannels]) |
||||
{ |
||||
meanBackground.at<Vec3b>(row, col) = Vec3b(mean_m); |
||||
break; |
||||
} |
||||
} |
||||
pbgmodel=pbgmodel+modelstep; |
||||
} |
||||
} |
||||
|
||||
switch(CV_MAT_CN(frameType)) |
||||
{ |
||||
case 1: |
||||
{ |
||||
std::vector<Mat> channels; |
||||
split(meanBackground, channels); |
||||
channels[0].copyTo(backgroundImage); |
||||
break; |
||||
} |
||||
case 3: |
||||
{ |
||||
meanBackground.copyTo(backgroundImage); |
||||
break; |
||||
} |
||||
default: |
||||
CV_Error(Error::StsUnsupportedFormat, ""); |
||||
} |
||||
} |
||||
|
||||
|
||||
Ptr<BackgroundSubtractorKNN> createBackgroundSubtractorKNN(int _history, double _threshold2, |
||||
bool _bShadowDetection) |
||||
{ |
||||
return makePtr<BackgroundSubtractorKNNImpl>(_history, (float)_threshold2, _bShadowDetection); |
||||
} |
||||
|
||||
} |
||||
|
||||
/* End of file. */ |
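A minimal usage sketch for the new KNN background subtractor (illustrative only; "input.avi" is a placeholder and error handling is omitted):

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap("input.avi");
    cv::Ptr<cv::BackgroundSubtractorKNN> knn =
        cv::createBackgroundSubtractorKNN(500 /*history*/, 400.0 /*dist2Threshold*/, true /*detectShadows*/);

    cv::Mat frame, fgmask;
    while (cap.read(frame))
    {
        knn->apply(frame, fgmask);   // 255 = foreground, 0 = background, 127 = shadow (default shadow value)
        cv::imshow("foreground mask", fgmask);
        if (cv::waitKey(30) >= 0)
            break;
    }
    return 0;
}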