From 4dff5b7c7206751368d0756bbd8c33b3e30e852d Mon Sep 17 00:00:00 2001 From: StevenPuttemans Date: Thu, 18 Jul 2013 16:18:06 +0200 Subject: [PATCH 001/139] Added bugfix #2795 - changing license header - fixed bug in QtKit when changing camera resolution --- modules/highgui/src/cap_qtkit.mm | 76 ++++++++++++++++++++------------ 1 file changed, 47 insertions(+), 29 deletions(-) diff --git a/modules/highgui/src/cap_qtkit.mm b/modules/highgui/src/cap_qtkit.mm index 207f01b525..e958ae90d0 100644 --- a/modules/highgui/src/cap_qtkit.mm +++ b/modules/highgui/src/cap_qtkit.mm @@ -1,32 +1,44 @@ -/* - * CvCapture.mm - * - * Created by Nicholas Butko on 11/3/09. - * Copyright 2009. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO - * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the contributor be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*//////////////////////////////////////////////////////////////////////////////////////// + #include "precomp.hpp" #include "opencv2/imgproc/imgproc.hpp" @@ -398,6 +410,9 @@ int CvCaptureCAM::startCaptureDevice(int cameraNum) { void CvCaptureCAM::setWidthHeight() { NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init]; + + [mCaptureSession stopRunning]; + NSDictionary* pixelBufferOptions = [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithDouble:1.0*width], (id)kCVPixelBufferWidthKey, [NSNumber numberWithDouble:1.0*height], (id)kCVPixelBufferHeightKey, @@ -406,6 +421,9 @@ void CvCaptureCAM::setWidthHeight() { nil]; [mCaptureDecompressedVideoOutput setPixelBufferAttributes:pixelBufferOptions]; + + [mCaptureSession startRunning]; + grabFrame(60); [localpool drain]; } From fd77a49e76d667182629d42b32adcb44f8632a35 Mon Sep 17 00:00:00 2001 From: peng xiao Date: Thu, 1 Aug 2013 13:06:33 +0800 Subject: [PATCH 002/139] Fix ocl compilation error when using Intel OpenCL SDK. 
--- modules/nonfree/src/opencl/surf.cl | 483 +++++++++++++++-------------- 1 file changed, 255 insertions(+), 228 deletions(-) diff --git a/modules/nonfree/src/opencl/surf.cl b/modules/nonfree/src/opencl/surf.cl index 140a4d746c..3dced5ea10 100644 --- a/modules/nonfree/src/opencl/surf.cl +++ b/modules/nonfree/src/opencl/surf.cl @@ -16,6 +16,7 @@ // // @Authors // Peng Xiao, pengxiao@multicorewareinc.com +// Sen Liu, swjtuls1987@126.com // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: @@ -43,9 +44,6 @@ // //M*/ -#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics : enable -#pragma OPENCL EXTENSION cl_khr_local_int32_base_atomics : enable - // specialized for non-image2d_t supported platform, intel HD4000, for example #ifdef DISABLE_IMAGE2D #define IMAGE_INT32 __global uint * @@ -105,7 +103,7 @@ __constant sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAM // for simple haar paatern float icvCalcHaarPatternSum_2( IMAGE_INT32 sumTex, - __constant float src[2][5], + __constant float2 *src, int oldSize, int newSize, int y, int x, @@ -116,21 +114,24 @@ float icvCalcHaarPatternSum_2( F d = 0; -#pragma unroll - for (int k = 0; k < 2; ++k) - { - int dx1 = convert_int_rte(ratio * src[k][0]); - int dy1 = convert_int_rte(ratio * src[k][1]); - int dx2 = convert_int_rte(ratio * src[k][2]); - int dy2 = convert_int_rte(ratio * src[k][3]); - - F t = 0; - t += read_sumTex( sumTex, sampler, (int2)(x + dx1, y + dy1), rows, cols, elemPerRow ); - t -= read_sumTex( sumTex, sampler, (int2)(x + dx1, y + dy2), rows, cols, elemPerRow ); - t -= read_sumTex( sumTex, sampler, (int2)(x + dx2, y + dy1), rows, cols, elemPerRow ); - t += read_sumTex( sumTex, sampler, (int2)(x + dx2, y + dy2), rows, cols, elemPerRow ); - d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1)); - } + int2 dx1 = convert_int2_rte(ratio * src[0]); + int2 dy1 = convert_int2_rte(ratio * src[1]); + int2 dx2 
= convert_int2_rte(ratio * src[2]); + int2 dy2 = convert_int2_rte(ratio * src[3]); + + F t = 0; + t += read_sumTex( sumTex, sampler, (int2)(x + dx1.x, y + dy1.x), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.x, y + dy2.x), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy1.x), rows, cols, elemPerRow ); + t += read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy2.x), rows, cols, elemPerRow ); + d += t * src[4].x / ((dx2.x - dx1.x) * (dy2.x - dy1.x)); + + t = 0; + t += read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy1.y), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy2.y), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy1.y), rows, cols, elemPerRow ); + t += read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy2.y), rows, cols, elemPerRow ); + d += t * src[4].y / ((dx2.y - dx1.y) * (dy2.y - dy1.y)); return (float)d; } @@ -138,7 +139,7 @@ float icvCalcHaarPatternSum_2( // N = 3 float icvCalcHaarPatternSum_3( IMAGE_INT32 sumTex, - __constant float src[2][5], + __constant float4 *src, int oldSize, int newSize, int y, int x, @@ -149,21 +150,31 @@ float icvCalcHaarPatternSum_3( F d = 0; -#pragma unroll - for (int k = 0; k < 3; ++k) - { - int dx1 = convert_int_rte(ratio * src[k][0]); - int dy1 = convert_int_rte(ratio * src[k][1]); - int dx2 = convert_int_rte(ratio * src[k][2]); - int dy2 = convert_int_rte(ratio * src[k][3]); - - F t = 0; - t += read_sumTex( sumTex, sampler, (int2)(x + dx1, y + dy1), rows, cols, elemPerRow ); - t -= read_sumTex( sumTex, sampler, (int2)(x + dx1, y + dy2), rows, cols, elemPerRow ); - t -= read_sumTex( sumTex, sampler, (int2)(x + dx2, y + dy1), rows, cols, elemPerRow ); - t += read_sumTex( sumTex, sampler, (int2)(x + dx2, y + dy2), rows, cols, elemPerRow ); - d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1)); - } + int4 dx1 = convert_int4_rte(ratio * src[0]); + int4 dy1 = 
convert_int4_rte(ratio * src[1]); + int4 dx2 = convert_int4_rte(ratio * src[2]); + int4 dy2 = convert_int4_rte(ratio * src[3]); + + F t = 0; + t += read_sumTex( sumTex, sampler, (int2)(x + dx1.x, y + dy1.x), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.x, y + dy2.x), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy1.x), rows, cols, elemPerRow ); + t += read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy2.x), rows, cols, elemPerRow ); + d += t * src[4].x / ((dx2.x - dx1.x) * (dy2.x - dy1.x)); + + t = 0; + t += read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy1.y), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy2.y), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy1.y), rows, cols, elemPerRow ); + t += read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy2.y), rows, cols, elemPerRow ); + d += t * src[4].y / ((dx2.y - dx1.y) * (dy2.y - dy1.y)); + + t = 0; + t += read_sumTex( sumTex, sampler, (int2)(x + dx1.z, y + dy1.z), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.z, y + dy2.z), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.z, y + dy1.z), rows, cols, elemPerRow ); + t += read_sumTex( sumTex, sampler, (int2)(x + dx2.z, y + dy2.z), rows, cols, elemPerRow ); + d += t * src[4].z / ((dx2.z - dx1.z) * (dy2.z - dy1.z)); return (float)d; } @@ -171,7 +182,7 @@ float icvCalcHaarPatternSum_3( // N = 4 float icvCalcHaarPatternSum_4( IMAGE_INT32 sumTex, - __constant float src[2][5], + __constant float4 *src, int oldSize, int newSize, int y, int x, @@ -182,21 +193,38 @@ float icvCalcHaarPatternSum_4( F d = 0; -#pragma unroll - for (int k = 0; k < 4; ++k) - { - int dx1 = convert_int_rte(ratio * src[k][0]); - int dy1 = convert_int_rte(ratio * src[k][1]); - int dx2 = convert_int_rte(ratio * src[k][2]); - int dy2 = convert_int_rte(ratio * src[k][3]); - - F t = 0; 
- t += read_sumTex( sumTex, sampler, (int2)(x + dx1, y + dy1), rows, cols, elemPerRow ); - t -= read_sumTex( sumTex, sampler, (int2)(x + dx1, y + dy2), rows, cols, elemPerRow ); - t -= read_sumTex( sumTex, sampler, (int2)(x + dx2, y + dy1), rows, cols, elemPerRow ); - t += read_sumTex( sumTex, sampler, (int2)(x + dx2, y + dy2), rows, cols, elemPerRow ); - d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1)); - } + int4 dx1 = convert_int4_rte(ratio * src[0]); + int4 dy1 = convert_int4_rte(ratio * src[1]); + int4 dx2 = convert_int4_rte(ratio * src[2]); + int4 dy2 = convert_int4_rte(ratio * src[3]); + + F t = 0; + t += read_sumTex( sumTex, sampler, (int2)(x + dx1.x, y + dy1.x), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.x, y + dy2.x), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy1.x), rows, cols, elemPerRow ); + t += read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy2.x), rows, cols, elemPerRow ); + d += t * src[4].x / ((dx2.x - dx1.x) * (dy2.x - dy1.x)); + + t = 0; + t += read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy1.y), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy2.y), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy1.y), rows, cols, elemPerRow ); + t += read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy2.y), rows, cols, elemPerRow ); + d += t * src[4].y / ((dx2.y - dx1.y) * (dy2.y - dy1.y)); + + t = 0; + t += read_sumTex( sumTex, sampler, (int2)(x + dx1.z, y + dy1.z), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.z, y + dy2.z), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.z, y + dy1.z), rows, cols, elemPerRow ); + t += read_sumTex( sumTex, sampler, (int2)(x + dx2.z, y + dy2.z), rows, cols, elemPerRow ); + d += t * src[4].z / ((dx2.z - dx1.z) * (dy2.z - dy1.z)); + + t = 0; + t += read_sumTex( sumTex, sampler, (int2)(x + dx1.w, y + 
dy1.w), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.w, y + dy2.w), rows, cols, elemPerRow ); + t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.w, y + dy1.w), rows, cols, elemPerRow ); + t += read_sumTex( sumTex, sampler, (int2)(x + dx2.w, y + dy2.w), rows, cols, elemPerRow ); + d += t * src[4].w / ((dx2.w - dx1.w) * (dy2.w - dy1.w)); return (float)d; } @@ -204,9 +232,9 @@ float icvCalcHaarPatternSum_4( //////////////////////////////////////////////////////////////////////// // Hessian -__constant float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} }; -__constant float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} }; -__constant float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} }; +__constant float4 c_DX[5] = { (float4)(0, 3, 6, 0), (float4)(2, 2, 2, 0), (float4)(3, 6, 9, 0), (float4)(7, 7, 7, 0), (float4)(1, -2, 1, 0) }; +__constant float4 c_DY[5] = { (float4)(2, 2, 2, 0), (float4)(0, 3, 6, 0), (float4)(7, 7, 7, 0), (float4)(3, 6, 9, 0), (float4)(1, -2, 1, 0) }; +__constant float4 c_DXY[5] = { (float4)(1, 5, 1, 5), (float4)(1, 1, 5, 5), (float4)(4, 8, 4, 8), (float4)(4, 4, 8, 8), (float4)(1, -1, -1, 1) };// Use integral image to calculate haar wavelets. 
__inline int calcSize(int octave, int layer) { @@ -236,7 +264,7 @@ __kernel void icvCalcLayerDetAndTrace( int c_octave, int c_layer_rows, int sumTex_step - ) +) { det_step /= sizeof(*det); trace_step /= sizeof(*trace); @@ -300,7 +328,7 @@ bool within_check(IMAGE_INT32 maskSumTex, int sum_i, int sum_j, int size, int ro // Non-maximal suppression to further filtering the candidates from previous step __kernel - void icvFindMaximaInLayer_withmask( +void icvFindMaximaInLayer_withmask( __global const float * det, __global const float * trace, __global int4 * maxPosBuffer, @@ -318,7 +346,7 @@ __kernel float c_hessianThreshold, IMAGE_INT32 maskSumTex, int mask_step - ) +) { volatile __local float N9[768]; // threads.x * threads.y * 3 @@ -347,26 +375,26 @@ __kernel const int localLin = get_local_id(0) + get_local_id(1) * get_local_size(0) + zoff; N9[localLin - zoff] = det[det_step * - (c_layer_rows * (layer - 1) + min(max(i, 0), c_img_rows - 1)) // y - + min(max(j, 0), c_img_cols - 1)]; // x + (c_layer_rows * (layer - 1) + min(max(i, 0), c_img_rows - 1)) // y + + min(max(j, 0), c_img_cols - 1)]; // x N9[localLin ] = det[det_step * - (c_layer_rows * (layer ) + min(max(i, 0), c_img_rows - 1)) // y - + min(max(j, 0), c_img_cols - 1)]; // x + (c_layer_rows * (layer ) + min(max(i, 0), c_img_rows - 1)) // y + + min(max(j, 0), c_img_cols - 1)]; // x N9[localLin + zoff] = det[det_step * - (c_layer_rows * (layer + 1) + min(max(i, 0), c_img_rows - 1)) // y - + min(max(j, 0), c_img_cols - 1)]; // x + (c_layer_rows * (layer + 1) + min(max(i, 0), c_img_rows - 1)) // y + + min(max(j, 0), c_img_cols - 1)]; // x barrier(CLK_LOCAL_MEM_FENCE); if (i < c_layer_rows - margin - && j < c_layer_cols - margin - && get_local_id(0) > 0 - && get_local_id(0) < get_local_size(0) - 1 - && get_local_id(1) > 0 - && get_local_id(1) < get_local_size(1) - 1 // these are unnecessary conditions ported from CUDA - ) + && j < c_layer_cols - margin + && get_local_id(0) > 0 + && get_local_id(0) < 
get_local_size(0) - 1 + && get_local_id(1) > 0 + && get_local_id(1) < get_local_size(1) - 1 // these are unnecessary conditions ported from CUDA + ) { float val0 = N9[localLin]; @@ -382,34 +410,34 @@ __kernel { // Check to see if we have a max (in its 26 neighbours) const bool condmax = val0 > N9[localLin - 1 - get_local_size(0) - zoff] - && val0 > N9[localLin - get_local_size(0) - zoff] - && val0 > N9[localLin + 1 - get_local_size(0) - zoff] - && val0 > N9[localLin - 1 - zoff] - && val0 > N9[localLin - zoff] - && val0 > N9[localLin + 1 - zoff] - && val0 > N9[localLin - 1 + get_local_size(0) - zoff] - && val0 > N9[localLin + get_local_size(0) - zoff] - && val0 > N9[localLin + 1 + get_local_size(0) - zoff] - - && val0 > N9[localLin - 1 - get_local_size(0)] - && val0 > N9[localLin - get_local_size(0)] - && val0 > N9[localLin + 1 - get_local_size(0)] - && val0 > N9[localLin - 1 ] - && val0 > N9[localLin + 1 ] - && val0 > N9[localLin - 1 + get_local_size(0)] - && val0 > N9[localLin + get_local_size(0)] - && val0 > N9[localLin + 1 + get_local_size(0)] - - && val0 > N9[localLin - 1 - get_local_size(0) + zoff] - && val0 > N9[localLin - get_local_size(0) + zoff] - && val0 > N9[localLin + 1 - get_local_size(0) + zoff] - && val0 > N9[localLin - 1 + zoff] - && val0 > N9[localLin + zoff] - && val0 > N9[localLin + 1 + zoff] - && val0 > N9[localLin - 1 + get_local_size(0) + zoff] - && val0 > N9[localLin + get_local_size(0) + zoff] - && val0 > N9[localLin + 1 + get_local_size(0) + zoff] - ; + && val0 > N9[localLin - get_local_size(0) - zoff] + && val0 > N9[localLin + 1 - get_local_size(0) - zoff] + && val0 > N9[localLin - 1 - zoff] + && val0 > N9[localLin - zoff] + && val0 > N9[localLin + 1 - zoff] + && val0 > N9[localLin - 1 + get_local_size(0) - zoff] + && val0 > N9[localLin + get_local_size(0) - zoff] + && val0 > N9[localLin + 1 + get_local_size(0) - zoff] + + && val0 > N9[localLin - 1 - get_local_size(0)] + && val0 > N9[localLin - get_local_size(0)] + && val0 > N9[localLin + 
1 - get_local_size(0)] + && val0 > N9[localLin - 1 ] + && val0 > N9[localLin + 1 ] + && val0 > N9[localLin - 1 + get_local_size(0)] + && val0 > N9[localLin + get_local_size(0)] + && val0 > N9[localLin + 1 + get_local_size(0)] + + && val0 > N9[localLin - 1 - get_local_size(0) + zoff] + && val0 > N9[localLin - get_local_size(0) + zoff] + && val0 > N9[localLin + 1 - get_local_size(0) + zoff] + && val0 > N9[localLin - 1 + zoff] + && val0 > N9[localLin + zoff] + && val0 > N9[localLin + 1 + zoff] + && val0 > N9[localLin - 1 + get_local_size(0) + zoff] + && val0 > N9[localLin + get_local_size(0) + zoff] + && val0 > N9[localLin + 1 + get_local_size(0) + zoff] + ; if(condmax) { @@ -428,7 +456,7 @@ __kernel } __kernel - void icvFindMaximaInLayer( +void icvFindMaximaInLayer( __global float * det, __global float * trace, __global int4 * maxPosBuffer, @@ -444,7 +472,7 @@ __kernel int c_layer_cols, int c_max_candidates, float c_hessianThreshold - ) +) { volatile __local float N9[768]; // threads.x * threads.y * 3 @@ -483,12 +511,12 @@ __kernel barrier(CLK_LOCAL_MEM_FENCE); if (i < c_layer_rows - margin - && j < c_layer_cols - margin - && get_local_id(0) > 0 - && get_local_id(0) < get_local_size(0) - 1 - && get_local_id(1) > 0 - && get_local_id(1) < get_local_size(1) - 1 // these are unnecessary conditions ported from CUDA - ) + && j < c_layer_cols - margin + && get_local_id(0) > 0 + && get_local_id(0) < get_local_size(0) - 1 + && get_local_id(1) > 0 + && get_local_id(1) < get_local_size(1) - 1 // these are unnecessary conditions ported from CUDA + ) { float val0 = N9[localLin]; if (val0 > c_hessianThreshold) @@ -499,38 +527,38 @@ __kernel // Check to see if we have a max (in its 26 neighbours) const bool condmax = val0 > N9[localLin - 1 - get_local_size(0) - zoff] - && val0 > N9[localLin - get_local_size(0) - zoff] - && val0 > N9[localLin + 1 - get_local_size(0) - zoff] - && val0 > N9[localLin - 1 - zoff] - && val0 > N9[localLin - zoff] - && val0 > N9[localLin + 1 - zoff] - && 
val0 > N9[localLin - 1 + get_local_size(0) - zoff] - && val0 > N9[localLin + get_local_size(0) - zoff] - && val0 > N9[localLin + 1 + get_local_size(0) - zoff] - - && val0 > N9[localLin - 1 - get_local_size(0)] - && val0 > N9[localLin - get_local_size(0)] - && val0 > N9[localLin + 1 - get_local_size(0)] - && val0 > N9[localLin - 1 ] - && val0 > N9[localLin + 1 ] - && val0 > N9[localLin - 1 + get_local_size(0)] - && val0 > N9[localLin + get_local_size(0)] - && val0 > N9[localLin + 1 + get_local_size(0)] - - && val0 > N9[localLin - 1 - get_local_size(0) + zoff] - && val0 > N9[localLin - get_local_size(0) + zoff] - && val0 > N9[localLin + 1 - get_local_size(0) + zoff] - && val0 > N9[localLin - 1 + zoff] - && val0 > N9[localLin + zoff] - && val0 > N9[localLin + 1 + zoff] - && val0 > N9[localLin - 1 + get_local_size(0) + zoff] - && val0 > N9[localLin + get_local_size(0) + zoff] - && val0 > N9[localLin + 1 + get_local_size(0) + zoff] - ; + && val0 > N9[localLin - get_local_size(0) - zoff] + && val0 > N9[localLin + 1 - get_local_size(0) - zoff] + && val0 > N9[localLin - 1 - zoff] + && val0 > N9[localLin - zoff] + && val0 > N9[localLin + 1 - zoff] + && val0 > N9[localLin - 1 + get_local_size(0) - zoff] + && val0 > N9[localLin + get_local_size(0) - zoff] + && val0 > N9[localLin + 1 + get_local_size(0) - zoff] + + && val0 > N9[localLin - 1 - get_local_size(0)] + && val0 > N9[localLin - get_local_size(0)] + && val0 > N9[localLin + 1 - get_local_size(0)] + && val0 > N9[localLin - 1 ] + && val0 > N9[localLin + 1 ] + && val0 > N9[localLin - 1 + get_local_size(0)] + && val0 > N9[localLin + get_local_size(0)] + && val0 > N9[localLin + 1 + get_local_size(0)] + + && val0 > N9[localLin - 1 - get_local_size(0) + zoff] + && val0 > N9[localLin - get_local_size(0) + zoff] + && val0 > N9[localLin + 1 - get_local_size(0) + zoff] + && val0 > N9[localLin - 1 + zoff] + && val0 > N9[localLin + zoff] + && val0 > N9[localLin + 1 + zoff] + && val0 > N9[localLin - 1 + get_local_size(0) + zoff] + && 
val0 > N9[localLin + get_local_size(0) + zoff] + && val0 > N9[localLin + 1 + get_local_size(0) + zoff] + ; if(condmax) { - int ind = atomic_inc(maxCounter); + int ind = atomic_inc(maxCounter); if (ind < c_max_candidates) { @@ -544,30 +572,30 @@ __kernel } // solve 3x3 linear system Ax=b for floating point input -inline bool solve3x3_float(volatile __local const float A[3][3], volatile __local const float b[3], volatile __local float x[3]) +inline bool solve3x3_float(volatile __local const float4 *A, volatile __local const float *b, volatile __local float *x) { - float det = A[0][0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) - - A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) - + A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]); + float det = A[0].x * (A[1].y * A[2].z - A[1].z * A[2].y) + - A[0].y * (A[1].x * A[2].z - A[1].z * A[2].x) + + A[0].z * (A[1].x * A[2].y - A[1].y * A[2].x); if (det != 0) { F invdet = 1.0 / det; x[0] = invdet * - (b[0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) - - A[0][1] * (b[1] * A[2][2] - A[1][2] * b[2] ) + - A[0][2] * (b[1] * A[2][1] - A[1][1] * b[2] )); + (b[0] * (A[1].y * A[2].z - A[1].z * A[2].y) - + A[0].y * (b[1] * A[2].z - A[1].z * b[2] ) + + A[0].z * (b[1] * A[2].y - A[1].y * b[2] )); x[1] = invdet * - (A[0][0] * (b[1] * A[2][2] - A[1][2] * b[2] ) - - b[0] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) + - A[0][2] * (A[1][0] * b[2] - b[1] * A[2][0])); + (A[0].x * (b[1] * A[2].z - A[1].z * b[2] ) - + b[0] * (A[1].x * A[2].z - A[1].z * A[2].x) + + A[0].z * (A[1].x * b[2] - b[1] * A[2].x)); x[2] = invdet * - (A[0][0] * (A[1][1] * b[2] - b[1] * A[2][1]) - - A[0][1] * (A[1][0] * b[2] - b[1] * A[2][0]) + - b[0] * (A[1][0] * A[2][1] - A[1][1] * A[2][0])); + (A[0].x * (A[1].y * b[2] - b[1] * A[2].y) - + A[0].y * (A[1].x * b[2] - b[1] * A[2].x) + + b[0] * (A[1].x * A[2].y - A[1].y * A[2].x)); return true; } @@ -586,7 +614,7 @@ inline bool solve3x3_float(volatile __local const float A[3][3], volatile __loc 
//////////////////////////////////////////////////////////////////////// // INTERPOLATION __kernel - void icvInterpolateKeypoint( +void icvInterpolateKeypoint( __global const float * det, __global const int4 * maxPosBuffer, __global float * keypoints, @@ -598,7 +626,7 @@ __kernel int c_octave, int c_layer_rows, int c_max_features - ) +) { det_step /= sizeof(*det); keypoints_step /= sizeof(*keypoints); @@ -632,26 +660,26 @@ __kernel //ds dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]); - volatile __local float H[3][3]; + volatile __local float4 H[3]; //dxx - H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2]; + H[0].x = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2]; //dxy - H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]); + H[0].y= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]); //dxs - H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]); + H[0].z= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]); //dyx = dxy - H[1][0] = H[0][1]; + H[1].x = H[0].y; //dyy - H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1]; + H[1].y = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1]; //dys - H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]); + H[1].z= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]); //dsx = dxs - H[2][0] = H[0][2]; + H[2].x = H[0].z; //dsy = dys - H[2][1] = H[1][2]; + H[2].y = H[1].z; //dss - H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1]; + H[2].z = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1]; volatile __local float x[3]; @@ -689,7 +717,7 @@ __kernel if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size) { // Get a new feature index. 
- int ind = atomic_inc(featureCounter); + int ind = atomic_inc(featureCounter); if (ind < c_max_features) { @@ -716,31 +744,32 @@ __kernel __constant float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6}; __constant float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0}; __constant float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, - 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, - 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, - 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, - 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, - 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, - 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, - 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, - 0.01164754293859005f, 
0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, - 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, - 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, - 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, - 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, - 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, - 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, - 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, - 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, - 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, - 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, - 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, - 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, - 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, - 0.001707611023448408f, 0.001455130288377404f}; - -__constant float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}}; -__constant float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}}; + 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, + 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, + 
0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, + 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, + 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, + 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, + 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, + 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, + 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, + 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, + 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, + 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, + 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, + 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, + 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, + 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, + 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, + 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, + 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, + 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 
0.002003900473937392f, 0.001707611023448408f, + 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, + 0.001707611023448408f, 0.001455130288377404f + }; + +__constant float2 c_NX[5] = { (float2)(0, 2), (float2)(0, 0), (float2)(2, 4), (float2)(4, 4), (float2)(-1, 1) }; +__constant float2 c_NY[5] = { (float2)(0, 0), (float2)(0, 2), (float2)(4, 4), (float2)(2, 4), (float2)(1, -1) }; void reduce_32_sum(volatile __local float * data, volatile float* partial_reduction, int tid) { @@ -759,14 +788,14 @@ void reduce_32_sum(volatile __local float * data, volatile float* partial_reduc if (tid < 8) { #endif - data[tid] = *partial_reduction = op(partial_reduction, data[tid + 8 ]); + data[tid] = *partial_reduction = op(partial_reduction, data[tid + 8]); #if WAVE_SIZE < 8 } barrier(CLK_LOCAL_MEM_FENCE); if (tid < 4) { #endif - data[tid] = *partial_reduction = op(partial_reduction, data[tid + 4 ]); + data[tid] = *partial_reduction = op(partial_reduction, data[tid + 4]); #if WAVE_SIZE < 4 } barrier(CLK_LOCAL_MEM_FENCE); @@ -787,14 +816,14 @@ void reduce_32_sum(volatile __local float * data, volatile float* partial_reduc } __kernel - void icvCalcOrientation( +void icvCalcOrientation( IMAGE_INT32 sumTex, __global float * keypoints, int keypoints_step, int c_img_rows, int c_img_cols, int sum_step - ) +) { keypoints_step /= sizeof(*keypoints); sum_step /= sizeof(uint); @@ -838,7 +867,7 @@ __kernel const int y = convert_int_rte(featureY[get_group_id(0)] + c_aptY[tid] * s - margin); if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size && - x >= 0 && x < (c_img_cols + 1) - grad_wav_size) + x >= 0 && x < (c_img_cols + 1) - grad_wav_size) { X = c_aptW[tid] * icvCalcHaarPatternSum_2(sumTex, c_NX, 4, grad_wav_size, y, x, c_img_rows, c_img_cols, sum_step); Y = c_aptW[tid] * icvCalcHaarPatternSum_2(sumTex, c_NY, 4, grad_wav_size, y, x, c_img_rows, c_img_cols, sum_step); @@ -934,11 +963,11 @@ __kernel __kernel - void icvSetUpright( +void 
icvSetUpright( __global float * keypoints, int keypoints_step, int nFeatures - ) +) { keypoints_step /= sizeof(*keypoints); __global float* featureDir = keypoints + ANGLE_ROW * keypoints_step; @@ -988,7 +1017,7 @@ inline uchar readerGet( IMAGE_INT8 src, const float centerX, const float centerY, const float win_offset, const float cos_dir, const float sin_dir, int i, int j, int rows, int cols, int elemPerRow - ) +) { float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir; float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir; @@ -999,7 +1028,7 @@ inline float linearFilter( IMAGE_INT8 src, const float centerX, const float centerY, const float win_offset, const float cos_dir, const float sin_dir, float y, float x, int rows, int cols, int elemPerRow - ) +) { x -= 0.5f; y -= 0.5f; @@ -1028,9 +1057,9 @@ inline float linearFilter( void calc_dx_dy( IMAGE_INT8 imgTex, - volatile __local float s_dx_bin[25], - volatile __local float s_dy_bin[25], - volatile __local float s_PATCH[6][6], + volatile __local float *s_dx_bin, + volatile __local float *s_dy_bin, + volatile __local float *s_PATCH, __global const float* featureX, __global const float* featureY, __global const float* featureSize, @@ -1038,7 +1067,7 @@ void calc_dx_dy( int rows, int cols, int elemPerRow - ) +) { const float centerX = featureX[get_group_id(0)]; const float centerY = featureY[get_group_id(0)]; @@ -1048,6 +1077,7 @@ void calc_dx_dy( { descriptor_dir = 0.0f; } + descriptor_dir *= (float)(CV_PI_F / 180.0f); /* The sampling intervals and wavelet sized for selecting an orientation @@ -1074,7 +1104,7 @@ void calc_dx_dy( const float icoo = ((float)yIndex / (PATCH_SZ + 1)) * win_size; const float jcoo = ((float)xIndex / (PATCH_SZ + 1)) * win_size; - s_PATCH[get_local_id(1)][get_local_id(0)] = linearFilter(imgTex, centerX, centerY, win_offset, cos_dir, sin_dir, icoo, jcoo, rows, cols, elemPerRow); + s_PATCH[get_local_id(1) * 6 + get_local_id(0)] = 
linearFilter(imgTex, centerX, centerY, win_offset, cos_dir, sin_dir, icoo, jcoo, rows, cols, elemPerRow); barrier(CLK_LOCAL_MEM_FENCE); @@ -1085,17 +1115,17 @@ void calc_dx_dy( const float dw = c_DW[yIndex * PATCH_SZ + xIndex]; const float vx = ( - s_PATCH[get_local_id(1) ][get_local_id(0) + 1] - - s_PATCH[get_local_id(1) ][get_local_id(0) ] + - s_PATCH[get_local_id(1) + 1][get_local_id(0) + 1] - - s_PATCH[get_local_id(1) + 1][get_local_id(0) ]) - * dw; + s_PATCH[ get_local_id(1) * 6 + get_local_id(0) + 1] - + s_PATCH[ get_local_id(1) * 6 + get_local_id(0) ] + + s_PATCH[(get_local_id(1) + 1) * 6 + get_local_id(0) + 1] - + s_PATCH[(get_local_id(1) + 1) * 6 + get_local_id(0) ]) + * dw; const float vy = ( - s_PATCH[get_local_id(1) + 1][get_local_id(0) ] - - s_PATCH[get_local_id(1) ][get_local_id(0) ] + - s_PATCH[get_local_id(1) + 1][get_local_id(0) + 1] - - s_PATCH[get_local_id(1) ][get_local_id(0) + 1]) - * dw; + s_PATCH[(get_local_id(1) + 1) * 6 + get_local_id(0) ] - + s_PATCH[ get_local_id(1) * 6 + get_local_id(0) ] + + s_PATCH[(get_local_id(1) + 1) * 6 + get_local_id(0) + 1] - + s_PATCH[ get_local_id(1) * 6 + get_local_id(0) + 1]) + * dw; s_dx_bin[tid] = vx; s_dy_bin[tid] = vy; } @@ -1106,7 +1136,7 @@ void reduce_sum25( volatile __local float* sdata3, volatile __local float* sdata4, int tid - ) +) { #ifndef WAVE_SIZE #define WAVE_SIZE 1 @@ -1125,11 +1155,8 @@ void reduce_sum25( { #endif sdata1[tid] += sdata1[tid + 8]; - sdata2[tid] += sdata2[tid + 8]; - sdata3[tid] += sdata3[tid + 8]; - sdata4[tid] += sdata4[tid + 8]; #if WAVE_SIZE < 8 } @@ -1166,7 +1193,7 @@ void reduce_sum25( } __kernel - void compute_descriptors64( +void compute_descriptors64( IMAGE_INT8 imgTex, __global float * descriptors, __global const float * keypoints, @@ -1175,7 +1202,7 @@ __kernel int rows, int cols, int img_step - ) +) { descriptors_step /= sizeof(float); keypoints_step /= sizeof(float); @@ -1189,7 +1216,7 @@ __kernel volatile __local float sdy[25]; volatile __local float sdxabs[25]; 
volatile __local float sdyabs[25]; - volatile __local float s_PATCH[6][6]; + volatile __local float s_PATCH[6*6]; calc_dx_dy(imgTex, sdx, sdy, s_PATCH, featureX, featureY, featureSize, featureDir, rows, cols, img_step); barrier(CLK_LOCAL_MEM_FENCE); @@ -1203,7 +1230,7 @@ __kernel } barrier(CLK_LOCAL_MEM_FENCE); - reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid); + reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid); barrier(CLK_LOCAL_MEM_FENCE); if (tid < 25) @@ -1221,7 +1248,7 @@ __kernel } } __kernel - void compute_descriptors128( +void compute_descriptors128( IMAGE_INT8 imgTex, __global float * descriptors, __global float * keypoints, @@ -1230,7 +1257,7 @@ __kernel int rows, int cols, int img_step - ) +) { descriptors_step /= sizeof(*descriptors); keypoints_step /= sizeof(*keypoints); @@ -1249,7 +1276,7 @@ __kernel volatile __local float sd2[25]; volatile __local float sdabs1[25]; volatile __local float sdabs2[25]; - volatile __local float s_PATCH[6][6]; + volatile __local float s_PATCH[6*6]; calc_dx_dy(imgTex, sdx, sdy, s_PATCH, featureX, featureY, featureSize, featureDir, rows, cols, img_step); barrier(CLK_LOCAL_MEM_FENCE); @@ -1275,7 +1302,7 @@ __kernel } barrier(CLK_LOCAL_MEM_FENCE); - reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); + reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); barrier(CLK_LOCAL_MEM_FENCE); __global float* descriptors_block = descriptors + descriptors_step * get_group_id(0) + (get_group_id(1) << 3); @@ -1306,8 +1333,7 @@ __kernel } } barrier(CLK_LOCAL_MEM_FENCE); - - reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); + reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); barrier(CLK_LOCAL_MEM_FENCE); if (tid < 25) @@ -1322,11 +1348,13 @@ __kernel } } } + void reduce_sum128(volatile __local float* smem, int tid) { #ifndef WAVE_SIZE #define WAVE_SIZE 1 #endif + if (tid < 64) { smem[tid] += smem[tid + 64]; @@ -1374,6 +1402,8 @@ void reduce_sum128(volatile __local float* smem, int tid) smem[tid] += smem[tid + 1]; } } + + void reduce_sum64(volatile __local float* smem, int 
tid) { #ifndef WAVE_SIZE @@ -1421,7 +1451,7 @@ void reduce_sum64(volatile __local float* smem, int tid) } __kernel - void normalize_descriptors128(__global float * descriptors, int descriptors_step) +void normalize_descriptors128(__global float * descriptors, int descriptors_step) { descriptors_step /= sizeof(*descriptors); // no need for thread ID @@ -1436,8 +1466,6 @@ __kernel reduce_sum128(sqDesc, get_local_id(0)); barrier(CLK_LOCAL_MEM_FENCE); - - // compute length (square root) volatile __local float len; if (get_local_id(0) == 0) @@ -1450,7 +1478,7 @@ __kernel descriptor_base[get_local_id(0)] = lookup / len; } __kernel - void normalize_descriptors64(__global float * descriptors, int descriptors_step) +void normalize_descriptors64(__global float * descriptors, int descriptors_step) { descriptors_step /= sizeof(*descriptors); // no need for thread ID @@ -1462,7 +1490,6 @@ __kernel sqDesc[get_local_id(0)] = lookup * lookup; barrier(CLK_LOCAL_MEM_FENCE); - reduce_sum64(sqDesc, get_local_id(0)); barrier(CLK_LOCAL_MEM_FENCE); From c1ad6a9ffbf567757935eeb7bd865cf5105f199a Mon Sep 17 00:00:00 2001 From: kdrobnyh Date: Thu, 1 Aug 2013 15:15:36 +0400 Subject: [PATCH 003/139] Add IPP support in cvtColor function --- modules/imgproc/src/color.cpp | 452 ++++++++++++++++++++++++++++++++++ 1 file changed, 452 insertions(+) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 41ca2db9c0..f2c32f5de6 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -93,6 +93,13 @@ #include #include +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) +#define MAX_IPP8u 255 +#define MAX_IPP16u 65535 +#define MAX_IPP32f 1.0 +static IppStatus sts = ippInit(); +#endif + namespace cv { @@ -190,6 +197,286 @@ void CvtColorLoop(const Mat& src, Mat& dst, const Cvt& cvt) parallel_for_(Range(0, src.rows), CvtColorLoop_Invoker(src, dst, cvt), src.total()/(double)(1<<16) ); } +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) +#define 
CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(func, src, dst ) \ + if( depth == CV_8U ) { \ + CV_DEF_IPP_PARALLEL_FOR(func##_8u, src, dst); } \ + else if( depth == CV_16U ) { \ + CV_DEF_IPP_PARALLEL_FOR(func##_16u, src, dst); } \ + else { CV_DEF_IPP_PARALLEL_FOR(func##_32f, src, dst); } + +#define CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(func, src, dst ) \ + if( depth == CV_8U ) { \ + CV_DEF_IPP_PARALLEL_FOR(func##_8u, src, dst); } \ + else if( depth == CV_16U ){ \ + CV_DEF_IPP_PARALLEL_FOR(func##_16u, src, dst); } + +#define CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(func, src, dst ) \ + Mat temp; Mat &source = src; \ + if( src.data == dst.data ) { \ + src.copyTo(temp); source = temp; } \ + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(func, source, dst) + +#define CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(func, src, dst ) \ + Mat temp; Mat &source = src; \ + if( src.data == dst.data ) { \ + src.copyTo(temp); source = temp; } \ + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(func, source, dst) + +#define CV_DEF_IPP_PARALLEL_FOR(Functor, src, dst) \ + bool ok; \ + Functor invoker(src, dst, &ok); \ + parallel_for_(Range(0, src.rows), invoker, src.total()/(double)(1<<16) ); \ + if( ok ) { return; } + +#define CV_DEF_IPP_COLORLOOP_BODY(mode, type, funcbody) \ +class IPPColorLoop_Invoker_##mode##_##type## : public ParallelLoopBody { \ +public: IPPColorLoop_Invoker_##mode##_##type##(const Mat& _src, Mat& _dst, bool *_ok) : \ + ParallelLoopBody(), src(_src), dst(_dst), ok(_ok) \ +{ *ok = true; } \ + virtual void operator()(const Range& range) const \ +{ funcbody(type) } \ +private: const Mat& src; \ + Mat& dst; bool *ok; \ +const IPPColorLoop_Invoker_##mode##_##type##& operator= (const IPPColorLoop_Invoker_##mode##_##type##&); }; + +#define CV_DEF_IPP_FUNCPROC(func) \ + if( func < 0 ) { *ok = false; return; } + +#define CV_DEF_IPP_SWAP_CHANNELS_C3C4(order1, order2, order3, order4, type) \ + int order[4] = { order1, order2, order3, order4 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( 
(const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) + +#define CV_DEF_IPP_SWAP_CHANNELS_C3C4_0123(type) \ + CV_DEF_IPP_SWAP_CHANNELS_C3C4(0, 1, 2, 3, type) + +#define CV_DEF_IPP_SWAP_CHANNELS_C3C4_2103(type) \ + CV_DEF_IPP_SWAP_CHANNELS_C3C4(2, 1, 0, 3, type) + +#define CV_DEF_IPP_COPY_AC4C3(type) \ + CV_DEF_IPP_FUNCPROC(ippiCopy_##type##_AC4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ))) + +#define CV_DEF_IPP_SWAP_CHANNELS_C4C3(order1, order2, order3, type) \ + int order[3] = { order1, order2, order3 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) + +#define CV_DEF_IPP_SWAP_CHANNELS_C4C3_210(type) \ + CV_DEF_IPP_SWAP_CHANNELS_C4C3(2, 1, 0, type) + +#define CV_DEF_IPP_SWAP_CHANNELS_C3(order1, order2, order3, type) \ + int order[3] = { order1, order2, order3 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) + +#define CV_DEF_IPP_SWAP_CHANNELS_C3_210(type) \ + CV_DEF_IPP_SWAP_CHANNELS_C3(2, 1, 0, type) + +#define CV_DEF_IPP_SWAP_CHANNELS_C4(order1, order2, order3, type) \ + int order[3] = { order1, order2, order3 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_AC4R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) + +#define CV_DEF_IPP_SWAP_CHANNELS_C4_2103(type) \ + CV_DEF_IPP_SWAP_CHANNELS_C4(2, 1, 0, type) + +#define CV_DEF_IPP_BGR2GRAY(type) \ + 
Ipp32f coeff[3] = { 0.114f, 0.587f, 0.299f }; \ + CV_DEF_IPP_FUNCPROC(ippiColorToGray_##type##_C3C1R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), coeff )) + +#define CV_DEF_IPP_RGB2GRAY(type) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToGray_##type##_C3C1R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ))) + +#define CV_DEF_IPP_BGRA2GRAY(type) \ + Ipp32f coeff[3] = { 0.114f, 0.587f, 0.299f }; \ + CV_DEF_IPP_FUNCPROC(ippiColorToGray_##type##_AC4C1R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), coeff )) + +#define CV_DEF_IPP_RGBA2GRAY(type) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToGray_##type##_AC4C1R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ))) + +#define CV_DEF_IPP_Copy_P3C3(type) \ + Ipp##type *pointer = (Ipp##type *)src.ptr(range.start); \ + Ipp##type* srcarray[3] = { pointer, pointer, pointer }; \ + CV_DEF_IPP_FUNCPROC(ippiCopy_##type##_P3C3R( srcarray, (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ))) + +#define CV_DEF_IPP_GRAY2BGRA(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + Ipp##type *pointer = (Ipp##type *)src.ptr(range.start); \ + Ipp##type* srcarray[3] = { pointer, pointer, pointer }; \ + CV_DEF_IPP_FUNCPROC(ippiCopy_##type##_P3C3R( srcarray, (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ))) \ + int order[4] = { 0, 1, 2, 3 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type 
*)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) + +#define CV_DEF_IPP_BGR2XYZ(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + int order[3] = { 2, 1, 0 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToXYZ_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_BGRA2XYZ(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + int order[3] = { 2, 1, 0 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToXYZ_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_RGB2XYZ(type) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToXYZ_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_RGBA2XYZ(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + int order[3] = { 0, 1, 2 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToXYZ_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type 
*)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_XYZ2BGR(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + CV_DEF_IPP_FUNCPROC(ippiXYZToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ + int order[3] = { 2, 1, 0 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) + +#define CV_DEF_IPP_XYZ2BGRA_FAMILY(order1, order2, order3, type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + CV_DEF_IPP_FUNCPROC(ippiXYZToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ + int order[4] = { order1, order2, order3, 3 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) + +#define CV_DEF_IPP_XYZ2BGRA(type) \ + CV_DEF_IPP_XYZ2BGRA_FAMILY(2, 1, 0, type) + +#define CV_DEF_IPP_XYZ2RGB(type) \ + CV_DEF_IPP_FUNCPROC(ippiXYZToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_XYZ2RGBA(type) \ + CV_DEF_IPP_XYZ2BGRA_FAMILY(0, 1, 2, type) + +#define CV_DEF_IPP_BGR2HSV_FULL(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + int order[3] = { 2, 1, 0 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], 
(Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToHSV_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_BGRA2HSV_FULL(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + int order[3] = { 2, 1, 0 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToHSV_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_RGB2HSV_FULL(type) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToHSV_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_RGBA2HSV_FULL(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + int order[3] = { 0, 1, 2 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToHSV_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_BGR2HLS_FULL(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + int order[3] = { 2, 1, 0 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], 
(Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToHLS_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_BGRA2HLS_FULL(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + int order[3] = { 2, 1, 0 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToHLS_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_RGB2HLS_FULL(type) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToHLS_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_RGBA2HLS_FULL(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + int order[3] = { 0, 1, 2 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ + CV_DEF_IPP_FUNCPROC(ippiRGBToHLS_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_HSV2BGR_FULL(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + CV_DEF_IPP_FUNCPROC(ippiHSVToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, 
(int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ + int order[3] = { 2, 1, 0 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) + +#define CV_DEF_IPP_HSV2BGRA_FULL_FAMILY(order1, order2, order3, type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + CV_DEF_IPP_FUNCPROC(ippiHSVToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ + int order[4] = { order1, order2, order3, 3 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) + +#define CV_DEF_IPP_HSV2BGRA_FULL(type) \ + CV_DEF_IPP_HSV2BGRA_FULL_FAMILY(2, 1, 0, type) \ + +#define CV_DEF_IPP_HSV2RGB_FULL(type) \ + CV_DEF_IPP_FUNCPROC(ippiHSVToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_HSV2RGBA_FULL(type) \ + CV_DEF_IPP_HSV2BGRA_FULL_FAMILY(0, 1, 2, type) \ + +#define CV_DEF_IPP_HLS2BGR_FULL(type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + CV_DEF_IPP_FUNCPROC(ippiHLSToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ + int order[3] = { 2, 1, 0 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) + +#define 
CV_DEF_IPP_HLS2BGRA_FULL_FAMILY(order1, order2, order3, type) \ + Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ + CV_DEF_IPP_FUNCPROC(ippiHLSToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ + int order[4] = { order1, order2, order3, 3 }; \ + CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) + +#define CV_DEF_IPP_HLS2BGRA_FULL(type) \ + CV_DEF_IPP_HLS2BGRA_FULL_FAMILY(2, 1, 0, type) \ + +#define CV_DEF_IPP_HLS2RGB_FULL(type) \ + CV_DEF_IPP_FUNCPROC(ippiHLSToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) + +#define CV_DEF_IPP_HLS2RGBA_FULL(type) \ + CV_DEF_IPP_HLS2BGRA_FULL_FAMILY(0, 1, 2, type) \ + +#define CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(mode, funcbody) \ + CV_DEF_IPP_COLORLOOP_BODY(mode, 8u, funcbody) \ + CV_DEF_IPP_COLORLOOP_BODY(mode, 16u, funcbody) \ + CV_DEF_IPP_COLORLOOP_BODY(mode, 32f, funcbody) + +#define CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(mode, funcbody) \ + CV_DEF_IPP_COLORLOOP_BODY(mode, 8u, funcbody) \ + CV_DEF_IPP_COLORLOOP_BODY(mode, 16u, funcbody) + +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGR2BGRA, CV_DEF_IPP_SWAP_CHANNELS_C3C4_0123) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGRA2BGR, CV_DEF_IPP_COPY_AC4C3) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGR2RGBA, CV_DEF_IPP_SWAP_CHANNELS_C3C4_2103) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGBA2BGR, CV_DEF_IPP_SWAP_CHANNELS_C4C3_210) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGB2BGR, CV_DEF_IPP_SWAP_CHANNELS_C3_210) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGBA2BGRA, CV_DEF_IPP_SWAP_CHANNELS_C4_2103) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGR2GRAY, 
CV_DEF_IPP_BGR2GRAY) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGB2GRAY, CV_DEF_IPP_RGB2GRAY) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGRA2GRAY, CV_DEF_IPP_BGRA2GRAY) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGBA2GRAY, CV_DEF_IPP_RGBA2GRAY) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(GRAY2BGR, CV_DEF_IPP_Copy_P3C3) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(GRAY2BGRA, CV_DEF_IPP_GRAY2BGRA) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGR2XYZ, CV_DEF_IPP_BGR2XYZ) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGRA2XYZ, CV_DEF_IPP_BGRA2XYZ) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGB2XYZ, CV_DEF_IPP_RGB2XYZ) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGBA2XYZ, CV_DEF_IPP_RGBA2XYZ) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(XYZ2BGR, CV_DEF_IPP_XYZ2BGR) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(XYZ2BGRA, CV_DEF_IPP_XYZ2BGRA) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(XYZ2RGB, CV_DEF_IPP_XYZ2RGB) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(XYZ2RGBA, CV_DEF_IPP_XYZ2RGBA) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(BGR2HSV_FULL, CV_DEF_IPP_BGR2HSV_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(BGRA2HSV_FULL, CV_DEF_IPP_BGRA2HSV_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(RGB2HSV_FULL, CV_DEF_IPP_RGB2HSV_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(RGBA2HSV_FULL, CV_DEF_IPP_RGBA2HSV_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(BGR2HLS_FULL, CV_DEF_IPP_BGR2HLS_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(BGRA2HLS_FULL, CV_DEF_IPP_BGRA2HLS_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(RGB2HLS_FULL, CV_DEF_IPP_RGB2HLS_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(RGBA2HLS_FULL, CV_DEF_IPP_RGBA2HLS_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HSV2BGR_FULL, CV_DEF_IPP_HSV2BGR_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HSV2BGRA_FULL, CV_DEF_IPP_HSV2BGRA_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HSV2RGB_FULL, CV_DEF_IPP_HSV2RGB_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HSV2RGBA_FULL, CV_DEF_IPP_HSV2RGBA_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HLS2BGR_FULL, CV_DEF_IPP_HLS2BGR_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HLS2BGRA_FULL, CV_DEF_IPP_HLS2BGRA_FULL) 
+CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HLS2RGB_FULL, CV_DEF_IPP_HLS2RGB_FULL) +CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HLS2RGBA_FULL, CV_DEF_IPP_HLS2RGBA_FULL) +#endif + ////////////////// Various 3/4-channel to 3/4-channel RGB transformations ///////////////// template struct RGB2RGB @@ -3349,6 +3636,33 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create( sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if( code == CV_BGR2BGRA || code == CV_RGB2RGBA) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGR2BGRA, src, dst) + } + else if( code == CV_BGRA2BGR ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGRA2BGR, src, dst) + } + else if( code == CV_BGR2RGBA ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGR2RGBA, src, dst) + } + else if( code == CV_RGBA2BGR ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_RGBA2BGR, src, dst) + } + else if( code == CV_RGB2BGR ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_RGB2BGR, src, dst) + } + else if( code == CV_RGBA2BGRA ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_RGBA2BGRA, src, dst) + } +#endif if( depth == CV_8U ) { @@ -3402,6 +3716,25 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) CV_Assert( scn == 3 || scn == 4 ); _dst.create(sz, CV_MAKETYPE(depth, 1)); dst = _dst.getMat(); + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if( code == CV_BGR2GRAY ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGR2GRAY, src, dst) + } + else if( code == CV_RGB2GRAY ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_RGB2GRAY, src, dst) + } + else if( code == CV_BGRA2GRAY ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGRA2GRAY, src, dst) + } + else if( code == CV_RGBA2GRAY ) + { + 
CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_RGBA2GRAY, src, dst) + } +#endif bidx = code == CV_BGR2GRAY || code == CV_BGRA2GRAY ? 0 : 2; @@ -3431,6 +3764,17 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) CV_Assert( scn == 1 && (dcn == 3 || dcn == 4)); _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if( code == CV_GRAY2BGR ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_GRAY2BGR, src, dst) + } + else if( code == CV_GRAY2BGRA ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_GRAY2BGRA, src, dst) + } +#endif if( depth == CV_8U ) { @@ -3510,6 +3854,25 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if( code == CV_BGR2XYZ && scn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_BGR2XYZ, src, dst) + } + else if( code == CV_BGR2XYZ && scn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGRA2XYZ, src, dst) + } + else if( code == CV_RGB2XYZ && scn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_RGB2XYZ, src, dst) + } + else if( code == CV_RGB2XYZ && scn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_RGBA2XYZ, src, dst) + } +#endif if( depth == CV_8U ) CvtColorLoop(src, dst, RGB2XYZ_i(scn, bidx, 0)); @@ -3526,6 +3889,25 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if( code == CV_XYZ2BGR && dcn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_XYZ2BGR, src, dst) + } + else if( code == CV_XYZ2BGR && dcn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_XYZ2BGRA, src, 
dst) + } + else if( code == CV_XYZ2RGB && dcn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_XYZ2RGB, src, dst) + } + else if( code == CV_XYZ2RGB && dcn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_XYZ2RGBA, src, dst) + } +#endif if( depth == CV_8U ) CvtColorLoop(src, dst, XYZ2RGB_i(dcn, bidx, 0)); @@ -3546,6 +3928,41 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if( code == CV_BGR2HSV_FULL && scn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_BGR2HSV_FULL, src, dst) + } + else if( code == CV_BGR2HSV_FULL && scn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_BGRA2HSV_FULL, src, dst) + } + else if( code == CV_RGB2HSV_FULL && scn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_RGB2HSV_FULL, src, dst) + } + else if( code == CV_RGB2HSV_FULL && scn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_RGBA2HSV_FULL, src, dst) + } + else if( code == CV_BGR2HLS_FULL && scn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_BGR2HLS_FULL, src, dst) + } + else if( code == CV_BGR2HLS_FULL && scn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_BGRA2HLS_FULL, src, dst) + } + else if( code == CV_RGB2HLS_FULL && scn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_RGB2HLS_FULL, src, dst) + } + else if( code == CV_RGB2HLS_FULL && scn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_RGBA2HLS_FULL, src, dst) + } +#endif if( code == CV_BGR2HSV || code == CV_RGB2HSV || code == CV_BGR2HSV_FULL || code == CV_RGB2HSV_FULL ) @@ -3581,6 +3998,41 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); + +#if defined 
(HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if( code == CV_HSV2BGR_FULL && dcn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_HSV2BGR_FULL, src, dst) + } + else if( code == CV_HSV2BGR_FULL && dcn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_HSV2BGRA_FULL, src, dst) + } + else if( code == CV_HSV2RGB_FULL && dcn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_HSV2RGB_FULL, src, dst) + } + else if( code == CV_HSV2RGB_FULL && dcn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_HSV2RGBA_FULL, src, dst) + } + else if( code == CV_HLS2BGR_FULL && dcn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_HLS2BGR_FULL, src, dst) + } + else if( code == CV_HLS2BGR_FULL && dcn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_HLS2BGRA_FULL, src, dst) + } + else if( code == CV_HLS2RGB_FULL && dcn == 3 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_HLS2RGB_FULL, src, dst) + } + else if( code == CV_HLS2RGB_FULL && dcn == 4 ) + { + CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_HLS2RGBA_FULL, src, dst) + } +#endif if( code == CV_HSV2BGR || code == CV_HSV2RGB || code == CV_HSV2BGR_FULL || code == CV_HSV2RGB_FULL ) From d13ecd0d76910ce62def8fcd51ad2c29fd856820 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Fri, 2 Aug 2013 17:31:34 +0400 Subject: [PATCH 004/139] increase epsilon for GEMM & Convolve sanity tests for CUDA 5.5 --- modules/gpu/perf/perf_core.cpp | 2 +- modules/gpu/perf/perf_imgproc.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/gpu/perf/perf_core.cpp b/modules/gpu/perf/perf_core.cpp index 3042beadc2..e38196b994 100644 --- a/modules/gpu/perf/perf_core.cpp +++ b/modules/gpu/perf/perf_core.cpp @@ -1337,7 +1337,7 @@ PERF_TEST_P(Sz_Type_Flags, Core_GEMM, TEST_CYCLE() cv::gpu::gemm(d_src1, d_src2, 1.0, d_src3, 1.0, dst, flags); - GPU_SANITY_CHECK(dst, 1e-6); + 
GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE); } else { diff --git a/modules/gpu/perf/perf_imgproc.cpp b/modules/gpu/perf/perf_imgproc.cpp index c7fb2a17c0..d942bed612 100644 --- a/modules/gpu/perf/perf_imgproc.cpp +++ b/modules/gpu/perf/perf_imgproc.cpp @@ -884,7 +884,7 @@ PERF_TEST_P(Sz_KernelSz_Ccorr, ImgProc_Convolve, TEST_CYCLE() cv::gpu::convolve(d_image, d_templ, dst, ccorr, d_buf); - GPU_SANITY_CHECK(dst); + GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE); } else { From ff85575b28923f55e12aa61bc9668f84a3608682 Mon Sep 17 00:00:00 2001 From: Mathieu Barnachon Date: Mon, 5 Aug 2013 12:06:55 +1200 Subject: [PATCH 005/139] Adding read/write functions to PCA class. Update PCA test. --- modules/core/include/opencv2/core.hpp | 4 ++++ modules/core/src/matmul.cpp | 21 +++++++++++++++++++++ modules/core/test/test_mat.cpp | 26 ++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp index 9833315d56..c7f07ed459 100644 --- a/modules/core/include/opencv2/core.hpp +++ b/modules/core/include/opencv2/core.hpp @@ -670,6 +670,10 @@ public: //! reconstructs the original vector from the projection void backProject(InputArray vec, OutputArray result) const; + //! 
write and load PCA matrix + void write(FileStorage& fs ) const; + void read(const FileNode& fs); + Mat eigenvectors; //!< eigenvectors of the covariation matrix Mat eigenvalues; //!< eigenvalues of the covariation matrix Mat mean; //!< mean value subtracted before the projection and added after the back projection diff --git a/modules/core/src/matmul.cpp b/modules/core/src/matmul.cpp index 404c5b4341..b3aadebbd8 100644 --- a/modules/core/src/matmul.cpp +++ b/modules/core/src/matmul.cpp @@ -2896,6 +2896,27 @@ PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, int maxComp return *this; } +void PCA::write(FileStorage& fs ) const +{ + CV_Assert( fs.isOpened() ); + + fs << "name" << "PCA"; + fs << "vectors" << eigenvectors; + fs << "values" << eigenvalues; + fs << "mean" << mean; +} + +void PCA::read(const FileNode& fs) +{ + CV_Assert( !fs.empty() ); + String name = (String)fs["name"]; + CV_Assert( name == "PCA" ); + + cv::read(fs["vectors"], eigenvectors); + cv::read(fs["values"], eigenvalues); + cv::read(fs["mean"], mean); +} + template int computeCumulativeEnergy(const Mat& eigenvalues, double retainedVariance) { diff --git a/modules/core/test/test_mat.cpp b/modules/core/test/test_mat.cpp index 245347b8b2..6e3ec03dc6 100644 --- a/modules/core/test/test_mat.cpp +++ b/modules/core/test/test_mat.cpp @@ -510,6 +510,32 @@ protected: return; } #endif + // Test read and write + FileStorage fs( "PCA_store.yml", FileStorage::WRITE ); + rPCA.write( fs ); + fs.release(); + + PCA lPCA; + fs.open( "PCA_store.yml", FileStorage::READ ); + lPCA.read( fs.root() ); + err = norm( rPCA.eigenvectors, lPCA.eigenvectors, CV_RELATIVE_L2 ); + if( err > 0 ) + { + ts->printf( cvtest::TS::LOG, "bad accuracy of write/load functions (YML); err = %f\n", err ); + ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY ); + } + err = norm( rPCA.eigenvalues, lPCA.eigenvalues, CV_RELATIVE_L2 ); + if( err > 0 ) + { + ts->printf( cvtest::TS::LOG, "bad accuracy of write/load functions 
(YML); err = %f\n", err ); + ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY ); + } + err = norm( rPCA.mean, lPCA.mean, CV_RELATIVE_L2 ); + if( err > 0 ) + { + ts->printf( cvtest::TS::LOG, "bad accuracy of write/load functions (YML); err = %f\n", err ); + ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY ); + } } }; From 5dd598fc6d00bf66fe6151bb686515f1a866ce7d Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Tue, 6 Aug 2013 18:56:36 +0400 Subject: [PATCH 006/139] Fix building the iOS framework after I dropped the VERSION macro. This version uses CMake to generate Info.plist, which should be more reliable than the old approach. --- CMakeLists.txt | 3 ++- cmake/OpenCVGenInfoPlist.cmake | 4 ++++ platforms/ios/Info.plist.in | 4 ++-- platforms/ios/build_framework.py | 18 ++---------------- 4 files changed, 10 insertions(+), 19 deletions(-) create mode 100644 cmake/OpenCVGenInfoPlist.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 46881c4531..2ddbd84079 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -497,6 +497,8 @@ include(cmake/OpenCVGenAndroidMK.cmake) # Generate OpenCVСonfig.cmake and OpenCVConfig-version.cmake for cmake projects include(cmake/OpenCVGenConfig.cmake) +# Generate Info.plist for the IOS framework +include(cmake/OpenCVGenInfoPlist.cmake) # ---------------------------------------------------------------------------- # Summary: @@ -891,4 +893,3 @@ ocv_finalize_status() if("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}") message(WARNING "The source directory is the same as binary directory. 
\"make clean\" may damage the source tree") endif() - diff --git a/cmake/OpenCVGenInfoPlist.cmake b/cmake/OpenCVGenInfoPlist.cmake new file mode 100644 index 0000000000..97c674ceb7 --- /dev/null +++ b/cmake/OpenCVGenInfoPlist.cmake @@ -0,0 +1,4 @@ +if(IOS) + configure_file("${OpenCV_SOURCE_DIR}/platforms/ios/Info.plist.in" + "${CMAKE_BINARY_DIR}/ios/Info.plist") +endif() diff --git a/platforms/ios/Info.plist.in b/platforms/ios/Info.plist.in index 6bcfe862d0..b2a3baf524 100644 --- a/platforms/ios/Info.plist.in +++ b/platforms/ios/Info.plist.in @@ -7,9 +7,9 @@ CFBundleIdentifier org.opencv CFBundleVersion - ${VERSION} + ${OPENCV_LIBVERSION} CFBundleShortVersionString - ${VERSION} + ${OPENCV_LIBVERSION} CFBundleSignature ???? CFBundlePackageType diff --git a/platforms/ios/build_framework.py b/platforms/ios/build_framework.py index bc385bb1bb..23da296a41 100755 --- a/platforms/ios/build_framework.py +++ b/platforms/ios/build_framework.py @@ -71,15 +71,6 @@ def put_framework_together(srcroot, dstroot): os.makedirs(framework_dir) os.chdir(framework_dir) - # determine OpenCV version (without subminor part) - tdir0 = "../build/" + targetlist[0] - cfg = open(tdir0 + "/cvconfig.h", "rt") - for l in cfg.readlines(): - if l.startswith("#define VERSION"): - opencv_version = l[l.find("\"")+1:l.rfind(".")] - break - cfg.close() - # form the directory tree dstdir = "Versions/A" os.makedirs(dstdir + "/Resources") @@ -91,13 +82,8 @@ def put_framework_together(srcroot, dstroot): wlist = " ".join(["../build/" + t + "/lib/Release/libopencv_world.a" for t in targetlist]) os.system("lipo -create " + wlist + " -o " + dstdir + "/opencv2") - # form Info.plist - srcfile = open(srcroot + "/platforms/ios/Info.plist.in", "rt") - dstfile = open(dstdir + "/Resources/Info.plist", "wt") - for l in srcfile.readlines(): - dstfile.write(l.replace("${VERSION}", opencv_version)) - srcfile.close() - dstfile.close() + # copy Info.plist + shutil.copyfile("../build/ios/Info.plist", dstdir + 
"/Resources/Info.plist") # make symbolic links os.symlink("A", "Versions/Current") From 5aaed77cbc98eb23e3df297fb31295a3808ceee5 Mon Sep 17 00:00:00 2001 From: peng xiao Date: Wed, 7 Aug 2013 14:36:24 +0800 Subject: [PATCH 007/139] Fix a bug of retinaParameters. When colorMode is set to 0 via Retina::_init method, _retinaParameters's colorMode variable is not correctly assigned. --- modules/contrib/src/retina.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/contrib/src/retina.cpp b/modules/contrib/src/retina.cpp index 1464896871..6f8de96412 100644 --- a/modules/contrib/src/retina.cpp +++ b/modules/contrib/src/retina.cpp @@ -357,6 +357,7 @@ void Retina::_init(const cv::Size inputSz, const bool colorMode, RETINA_COLORSAM delete _retinaFilter; _retinaFilter = new RetinaFilter(inputSz.height, inputSz.width, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght); + _retinaParameters.OPLandIplParvo.colorMode = colorMode; // prepare the default parameter XML file with default setup setup(_retinaParameters); From d9f71b762fc6bc37b35c542cdeae472e236c7f94 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Thu, 8 Aug 2013 12:41:48 +0400 Subject: [PATCH 008/139] Deleted almost all of the precomp.cpp files. Looks like we don't actually use them; when creating precompiled headers with Visual C++, a dummy .cpp is created at build time. 
--- modules/calib3d/perf/perf_precomp.cpp | 1 - modules/calib3d/src/precomp.cpp | 44 ---------------------- modules/calib3d/test/test_precomp.cpp | 1 - modules/contrib/src/precomp.cpp | 44 ---------------------- modules/contrib/test/test_precomp.cpp | 1 - modules/core/perf/perf_precomp.cpp | 1 - modules/core/src/precomp.cpp | 45 ----------------------- modules/core/test/test_precomp.cpp | 1 - modules/features2d/perf/perf_precomp.cpp | 1 - modules/features2d/src/precomp.cpp | 44 ---------------------- modules/features2d/test/test_precomp.cpp | 1 - modules/flann/src/precomp.cpp | 1 - modules/flann/test/test_precomp.cpp | 1 - modules/gpu/perf/perf_precomp.cpp | 43 ---------------------- modules/gpu/test/test_precomp.cpp | 43 ---------------------- modules/highgui/CMakeLists.txt | 1 - modules/highgui/perf/perf_precomp.cpp | 1 - modules/highgui/src/precomp.cpp | 43 ---------------------- modules/highgui/test/test_precomp.cpp | 1 - modules/imgproc/perf/perf_precomp.cpp | 1 - modules/imgproc/src/precomp.cpp | 44 ---------------------- modules/imgproc/test/test_precomp.cpp | 1 - modules/legacy/src/precomp.cpp | 42 --------------------- modules/legacy/test/test_precomp.cpp | 1 - modules/ml/src/precomp.cpp | 45 ----------------------- modules/ml/test/test_precomp.cpp | 1 - modules/nonfree/perf/perf_precomp.cpp | 1 - modules/nonfree/src/precomp.cpp | 45 ----------------------- modules/nonfree/test/test_precomp.cpp | 1 - modules/objdetect/perf/perf_precomp.cpp | 1 - modules/objdetect/src/precomp.cpp | 44 ---------------------- modules/objdetect/test/test_precomp.cpp | 1 - modules/ocl/src/precomp.cpp | 47 ------------------------ modules/ocl/test/precomp.cpp | 44 ---------------------- modules/photo/perf/perf_precomp.cpp | 1 - modules/photo/src/precomp.cpp | 44 ---------------------- modules/photo/test/test_precomp.cpp | 1 - modules/stitching/perf/perf_precomp.cpp | 1 - modules/stitching/src/precomp.cpp | 43 ---------------------- modules/stitching/test/test_precomp.cpp | 1 - 
modules/superres/perf/perf_precomp.cpp | 43 ---------------------- modules/superres/src/precomp.cpp | 43 ---------------------- modules/superres/test/test_precomp.cpp | 43 ---------------------- modules/ts/src/precomp.cpp | 1 - modules/video/perf/perf_precomp.cpp | 1 - modules/video/src/precomp.cpp | 44 ---------------------- modules/video/test/test_precomp.cpp | 1 - modules/videostab/src/precomp.cpp | 43 ---------------------- modules/world/src/precomp.cpp | 0 49 files changed, 947 deletions(-) delete mode 100644 modules/calib3d/perf/perf_precomp.cpp delete mode 100644 modules/calib3d/src/precomp.cpp delete mode 100644 modules/calib3d/test/test_precomp.cpp delete mode 100644 modules/contrib/src/precomp.cpp delete mode 100644 modules/contrib/test/test_precomp.cpp delete mode 100644 modules/core/perf/perf_precomp.cpp delete mode 100644 modules/core/src/precomp.cpp delete mode 100644 modules/core/test/test_precomp.cpp delete mode 100644 modules/features2d/perf/perf_precomp.cpp delete mode 100644 modules/features2d/src/precomp.cpp delete mode 100644 modules/features2d/test/test_precomp.cpp delete mode 100644 modules/flann/src/precomp.cpp delete mode 100644 modules/flann/test/test_precomp.cpp delete mode 100644 modules/gpu/perf/perf_precomp.cpp delete mode 100644 modules/gpu/test/test_precomp.cpp delete mode 100644 modules/highgui/perf/perf_precomp.cpp delete mode 100644 modules/highgui/src/precomp.cpp delete mode 100644 modules/highgui/test/test_precomp.cpp delete mode 100644 modules/imgproc/perf/perf_precomp.cpp delete mode 100644 modules/imgproc/src/precomp.cpp delete mode 100644 modules/imgproc/test/test_precomp.cpp delete mode 100644 modules/legacy/src/precomp.cpp delete mode 100644 modules/legacy/test/test_precomp.cpp delete mode 100644 modules/ml/src/precomp.cpp delete mode 100644 modules/ml/test/test_precomp.cpp delete mode 100644 modules/nonfree/perf/perf_precomp.cpp delete mode 100644 modules/nonfree/src/precomp.cpp delete mode 100644 
modules/nonfree/test/test_precomp.cpp delete mode 100644 modules/objdetect/perf/perf_precomp.cpp delete mode 100644 modules/objdetect/src/precomp.cpp delete mode 100644 modules/objdetect/test/test_precomp.cpp delete mode 100644 modules/ocl/src/precomp.cpp delete mode 100644 modules/ocl/test/precomp.cpp delete mode 100644 modules/photo/perf/perf_precomp.cpp delete mode 100644 modules/photo/src/precomp.cpp delete mode 100644 modules/photo/test/test_precomp.cpp delete mode 100644 modules/stitching/perf/perf_precomp.cpp delete mode 100644 modules/stitching/src/precomp.cpp delete mode 100644 modules/stitching/test/test_precomp.cpp delete mode 100644 modules/superres/perf/perf_precomp.cpp delete mode 100644 modules/superres/src/precomp.cpp delete mode 100644 modules/superres/test/test_precomp.cpp delete mode 100644 modules/ts/src/precomp.cpp delete mode 100644 modules/video/perf/perf_precomp.cpp delete mode 100644 modules/video/src/precomp.cpp delete mode 100644 modules/video/test/test_precomp.cpp delete mode 100644 modules/videostab/src/precomp.cpp delete mode 100644 modules/world/src/precomp.cpp diff --git a/modules/calib3d/perf/perf_precomp.cpp b/modules/calib3d/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/calib3d/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/calib3d/src/precomp.cpp b/modules/calib3d/src/precomp.cpp deleted file mode 100644 index 3e0ec42de9..0000000000 --- a/modules/calib3d/src/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. 
-// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. 
*/ diff --git a/modules/calib3d/test/test_precomp.cpp b/modules/calib3d/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/calib3d/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/contrib/src/precomp.cpp b/modules/contrib/src/precomp.cpp deleted file mode 100644 index 3e0ec42de9..0000000000 --- a/modules/contrib/src/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. */ diff --git a/modules/contrib/test/test_precomp.cpp b/modules/contrib/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/contrib/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/core/perf/perf_precomp.cpp b/modules/core/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/core/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/core/src/precomp.cpp b/modules/core/src/precomp.cpp deleted file mode 100644 index e540cc5e8a..0000000000 --- a/modules/core/src/precomp.cpp +++ /dev/null @@ -1,45 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. 
-// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. 
*/ diff --git a/modules/core/test/test_precomp.cpp b/modules/core/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/core/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/features2d/perf/perf_precomp.cpp b/modules/features2d/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/features2d/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/features2d/src/precomp.cpp b/modules/features2d/src/precomp.cpp deleted file mode 100644 index 3e0ec42de9..0000000000 --- a/modules/features2d/src/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. */ diff --git a/modules/features2d/test/test_precomp.cpp b/modules/features2d/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/features2d/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/flann/src/precomp.cpp b/modules/flann/src/precomp.cpp deleted file mode 100644 index c149df18f9..0000000000 --- a/modules/flann/src/precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "precomp.hpp" diff --git a/modules/flann/test/test_precomp.cpp b/modules/flann/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/flann/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/gpu/perf/perf_precomp.cpp b/modules/gpu/perf/perf_precomp.cpp deleted file mode 100644 index 81f16e8f14..0000000000 --- a/modules/gpu/perf/perf_precomp.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
-// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "perf_precomp.hpp" diff --git a/modules/gpu/test/test_precomp.cpp b/modules/gpu/test/test_precomp.cpp deleted file mode 100644 index 0fb6521809..0000000000 --- a/modules/gpu/test/test_precomp.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "test_precomp.hpp" diff --git a/modules/highgui/CMakeLists.txt b/modules/highgui/CMakeLists.txt index cabee23f65..3d7667b65f 100644 --- a/modules/highgui/CMakeLists.txt +++ b/modules/highgui/CMakeLists.txt @@ -58,7 +58,6 @@ set(highgui_srcs src/cap_images.cpp src/cap_ffmpeg.cpp src/loadsave.cpp - src/precomp.cpp src/utils.cpp src/window.cpp ) diff --git a/modules/highgui/perf/perf_precomp.cpp b/modules/highgui/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/highgui/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/highgui/src/precomp.cpp b/modules/highgui/src/precomp.cpp deleted file mode 100644 index d6f6e18f74..0000000000 --- a/modules/highgui/src/precomp.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. 
-// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "precomp.hpp" - diff --git a/modules/highgui/test/test_precomp.cpp b/modules/highgui/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/highgui/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/imgproc/perf/perf_precomp.cpp b/modules/imgproc/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/imgproc/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/imgproc/src/precomp.cpp b/modules/imgproc/src/precomp.cpp deleted file mode 100644 index 3e0ec42de9..0000000000 --- a/modules/imgproc/src/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. */ diff --git a/modules/imgproc/test/test_precomp.cpp b/modules/imgproc/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/imgproc/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/legacy/src/precomp.cpp b/modules/legacy/src/precomp.cpp deleted file mode 100644 index a9477b85ba..0000000000 --- a/modules/legacy/src/precomp.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. 
-// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "precomp.hpp" diff --git a/modules/legacy/test/test_precomp.cpp b/modules/legacy/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/legacy/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/ml/src/precomp.cpp b/modules/ml/src/precomp.cpp deleted file mode 100644 index e540cc5e8a..0000000000 --- a/modules/ml/src/precomp.cpp +++ /dev/null @@ -1,45 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. */ diff --git a/modules/ml/test/test_precomp.cpp b/modules/ml/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/ml/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/nonfree/perf/perf_precomp.cpp b/modules/nonfree/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/nonfree/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/nonfree/src/precomp.cpp b/modules/nonfree/src/precomp.cpp deleted file mode 100644 index 730edbb63d..0000000000 --- a/modules/nonfree/src/precomp.cpp +++ /dev/null @@ -1,45 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. 
-// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. 
*/ diff --git a/modules/nonfree/test/test_precomp.cpp b/modules/nonfree/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/nonfree/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/objdetect/perf/perf_precomp.cpp b/modules/objdetect/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/objdetect/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/objdetect/src/precomp.cpp b/modules/objdetect/src/precomp.cpp deleted file mode 100644 index 3e0ec42de9..0000000000 --- a/modules/objdetect/src/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. */ diff --git a/modules/objdetect/test/test_precomp.cpp b/modules/objdetect/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/objdetect/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/ocl/src/precomp.cpp b/modules/ocl/src/precomp.cpp deleted file mode 100644 index 766138c577..0000000000 --- a/modules/ocl/src/precomp.cpp +++ /dev/null @@ -1,47 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved. -// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. -// Third party copyrights are property of their respective owners. 
-// -// @Authors -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other oclMaterials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" -//CriticalSection cs; -/* End of file. */ diff --git a/modules/ocl/test/precomp.cpp b/modules/ocl/test/precomp.cpp deleted file mode 100644 index 7d287004ee..0000000000 --- a/modules/ocl/test/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
-// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "precomp.hpp" - - diff --git a/modules/photo/perf/perf_precomp.cpp b/modules/photo/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/photo/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/photo/src/precomp.cpp b/modules/photo/src/precomp.cpp deleted file mode 100644 index 3e0ec42de9..0000000000 --- a/modules/photo/src/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. */ diff --git a/modules/photo/test/test_precomp.cpp b/modules/photo/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/photo/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/stitching/perf/perf_precomp.cpp b/modules/stitching/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/stitching/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/stitching/src/precomp.cpp b/modules/stitching/src/precomp.cpp deleted file mode 100644 index 390dbfbc6b..0000000000 --- a/modules/stitching/src/precomp.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. 
-// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "precomp.hpp" \ No newline at end of file diff --git a/modules/stitching/test/test_precomp.cpp b/modules/stitching/test/test_precomp.cpp deleted file mode 100644 index 14a070e817..0000000000 --- a/modules/stitching/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" \ No newline at end of file diff --git a/modules/superres/perf/perf_precomp.cpp b/modules/superres/perf/perf_precomp.cpp deleted file mode 100644 index 81f16e8f14..0000000000 --- a/modules/superres/perf/perf_precomp.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "perf_precomp.hpp" diff --git a/modules/superres/src/precomp.cpp b/modules/superres/src/precomp.cpp deleted file mode 100644 index 3c01a2596d..0000000000 --- a/modules/superres/src/precomp.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. 
-// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" diff --git a/modules/superres/test/test_precomp.cpp b/modules/superres/test/test_precomp.cpp deleted file mode 100644 index 0fb6521809..0000000000 --- a/modules/superres/test/test_precomp.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. 
-// -//M*/ - -#include "test_precomp.hpp" diff --git a/modules/ts/src/precomp.cpp b/modules/ts/src/precomp.cpp deleted file mode 100644 index c149df18f9..0000000000 --- a/modules/ts/src/precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "precomp.hpp" diff --git a/modules/video/perf/perf_precomp.cpp b/modules/video/perf/perf_precomp.cpp deleted file mode 100644 index 8552ac3d42..0000000000 --- a/modules/video/perf/perf_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "perf_precomp.hpp" diff --git a/modules/video/src/precomp.cpp b/modules/video/src/precomp.cpp deleted file mode 100644 index 3e0ec42de9..0000000000 --- a/modules/video/src/precomp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// Intel License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of Intel Corporation may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" - -/* End of file. */ diff --git a/modules/video/test/test_precomp.cpp b/modules/video/test/test_precomp.cpp deleted file mode 100644 index 5956e13e3e..0000000000 --- a/modules/video/test/test_precomp.cpp +++ /dev/null @@ -1 +0,0 @@ -#include "test_precomp.hpp" diff --git a/modules/videostab/src/precomp.cpp b/modules/videostab/src/precomp.cpp deleted file mode 100644 index 111385282e..0000000000 --- a/modules/videostab/src/precomp.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. 
-// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "precomp.hpp" diff --git a/modules/world/src/precomp.cpp b/modules/world/src/precomp.cpp deleted file mode 100644 index e69de29bb2..0000000000 From 6d7f29a03a77fe5ef77bd3662195418d88746959 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Thu, 8 Aug 2013 16:23:20 +0400 Subject: [PATCH 009/139] Made it so that a dummy .cpp is actually created when compiling with VC++. 
--- cmake/OpenCVModule.cmake | 18 +++++++++++++++--- cmake/OpenCVPCHSupport.cmake | 13 +++---------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake index 0ac39e4c1f..44e43fc2b2 100644 --- a/cmake/OpenCVModule.cmake +++ b/cmake/OpenCVModule.cmake @@ -470,8 +470,16 @@ endmacro() # ocv_create_module() # ocv_create_module(SKIP_LINK) macro(ocv_create_module) + # The condition we ought to be testing here is whether ocv_add_precompiled_headers will + # be called at some point in the future. We can't look into the future, though, + # so this will have to do. + if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/src/precomp.hpp") + get_native_precompiled_header(${the_module} precomp.hpp) + endif() + add_library(${the_module} ${OPENCV_MODULE_TYPE} ${OPENCV_MODULE_${the_module}_HEADERS} ${OPENCV_MODULE_${the_module}_SOURCES} - "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp") + "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/cvconfig.h" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp" + ${${the_module}_pch}) if(NOT "${ARGN}" STREQUAL "SKIP_LINK") target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS} ${OPENCV_MODULE_${the_module}_DEPS_EXT} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${ARGN}) @@ -637,7 +645,9 @@ function(ocv_add_perf_tests) set(OPENCV_PERF_${the_module}_SOURCES ${perf_srcs} ${perf_hdrs}) endif() - add_executable(${the_target} ${OPENCV_PERF_${the_module}_SOURCES}) + get_native_precompiled_header(${the_target} test_precomp.hpp) + + add_executable(${the_target} ${OPENCV_PERF_${the_module}_SOURCES} ${${the_target}_pch}) target_link_libraries(${the_target} ${OPENCV_MODULE_${the_module}_DEPS} ${perf_deps} ${OPENCV_LINKER_LIBS}) add_dependencies(opencv_perf_tests ${the_target}) @@ -685,7 +695,9 @@ function(ocv_add_accuracy_tests) set(OPENCV_TEST_${the_module}_SOURCES ${test_srcs} ${test_hdrs}) endif() - add_executable(${the_target} 
${OPENCV_TEST_${the_module}_SOURCES}) + get_native_precompiled_header(${the_target} test_precomp.hpp) + + add_executable(${the_target} ${OPENCV_TEST_${the_module}_SOURCES} ${${the_target}_pch}) target_link_libraries(${the_target} ${OPENCV_MODULE_${the_module}_DEPS} ${test_deps} ${OPENCV_LINKER_LIBS}) add_dependencies(opencv_tests ${the_target}) diff --git a/cmake/OpenCVPCHSupport.cmake b/cmake/OpenCVPCHSupport.cmake index cfc4bfa724..9b849ebd79 100644 --- a/cmake/OpenCVPCHSupport.cmake +++ b/cmake/OpenCVPCHSupport.cmake @@ -272,12 +272,9 @@ ENDMACRO(ADD_PRECOMPILED_HEADER) MACRO(GET_NATIVE_PRECOMPILED_HEADER _targetName _input) if(CMAKE_GENERATOR MATCHES "^Visual.*$") - SET(_dummy_str "#include \"${_input}\"\n" -"// This is required to suppress LNK4221. Very annoying.\n" -"void *g_${_targetName}Dummy = 0\;\n") + set(_dummy_str "#include \"${_input}\"\n") - # Use of cxx extension for generated files (as Qt does) - SET(${_targetName}_pch ${CMAKE_CURRENT_BINARY_DIR}/${_targetName}_pch.cxx) + set(${_targetName}_pch ${CMAKE_CURRENT_BINARY_DIR}/${_targetName}_pch.cpp) if(EXISTS ${${_targetName}_pch}) # Check if contents is the same, if not rewrite # todo @@ -337,11 +334,7 @@ ENDMACRO(ADD_NATIVE_PRECOMPILED_HEADER) macro(ocv_add_precompiled_header_to_target the_target pch_header) if(PCHSupport_FOUND AND ENABLE_PRECOMPILED_HEADERS AND EXISTS "${pch_header}") - if(CMAKE_GENERATOR MATCHES Visual) - string(REGEX REPLACE "hpp$" "cpp" ${the_target}_pch "${pch_header}") - add_native_precompiled_header(${the_target} ${pch_header}) - unset(${the_target}_pch) - elseif(CMAKE_GENERATOR MATCHES Xcode) + if(CMAKE_GENERATOR MATCHES "^Visual" OR CMAKE_GENERATOR MATCHES Xcode) add_native_precompiled_header(${the_target} ${pch_header}) elseif(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_GENERATOR MATCHES "Makefiles|Ninja") add_precompiled_header(${the_target} ${pch_header}) From 4dc63273f0112d03ba6e663e6420d9e251960927 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Thu, 8 Aug 2013 18:18:54 
+0400 Subject: [PATCH 010/139] Renamed ocl tests' precomp files according to convention. Also, added the missing include guard to perf_precomp.hpp. This should fix the build. --- modules/ocl/perf/main.cpp | 2 +- modules/ocl/perf/perf_arithm.cpp | 2 +- modules/ocl/perf/perf_blend.cpp | 2 +- modules/ocl/perf/perf_brute_force_matcher.cpp | 2 +- modules/ocl/perf/perf_calib3d.cpp | 2 +- modules/ocl/perf/perf_canny.cpp | 2 +- modules/ocl/perf/perf_color.cpp | 2 +- modules/ocl/perf/perf_fft.cpp | 2 +- modules/ocl/perf/perf_filters.cpp | 2 +- modules/ocl/perf/perf_gemm.cpp | 2 +- modules/ocl/perf/perf_gftt.cpp | 2 +- modules/ocl/perf/perf_haar.cpp | 2 +- modules/ocl/perf/perf_hog.cpp | 2 +- modules/ocl/perf/perf_imgproc.cpp | 2 +- modules/ocl/perf/perf_match_template.cpp | 2 +- modules/ocl/perf/perf_matrix_operation.cpp | 2 +- modules/ocl/perf/perf_moments.cpp | 2 +- modules/ocl/perf/perf_norm.cpp | 2 +- modules/ocl/perf/perf_opticalflow.cpp | 2 +- modules/ocl/perf/{precomp.cpp => perf_precomp.cpp} | 2 +- modules/ocl/perf/{precomp.hpp => perf_precomp.hpp} | 11 ++++++++--- modules/ocl/perf/perf_pyramid.cpp | 2 +- modules/ocl/perf/perf_split_merge.cpp | 2 +- modules/ocl/test/main.cpp | 2 +- modules/ocl/test/test_arithm.cpp | 2 +- modules/ocl/test/test_blend.cpp | 2 +- modules/ocl/test/test_brute_force_matcher.cpp | 2 +- modules/ocl/test/test_calib3d.cpp | 2 +- modules/ocl/test/test_canny.cpp | 2 +- modules/ocl/test/test_color.cpp | 2 +- modules/ocl/test/test_fft.cpp | 2 +- modules/ocl/test/test_filters.cpp | 2 +- modules/ocl/test/test_gemm.cpp | 2 +- modules/ocl/test/test_imgproc.cpp | 2 +- modules/ocl/test/test_kmeans.cpp | 2 +- modules/ocl/test/test_match_template.cpp | 2 +- modules/ocl/test/test_matrix_operation.cpp | 2 +- modules/ocl/test/test_moments.cpp | 2 +- modules/ocl/test/test_objdetect.cpp | 2 +- modules/ocl/test/test_optflow.cpp | 2 +- modules/ocl/test/{precomp.hpp => test_precomp.hpp} | 0 modules/ocl/test/test_pyramids.cpp | 2 +- modules/ocl/test/test_sort.cpp 
| 2 +- modules/ocl/test/test_split_merge.cpp | 2 +- modules/ocl/test/utility.cpp | 2 +- 45 files changed, 51 insertions(+), 46 deletions(-) rename modules/ocl/perf/{precomp.cpp => perf_precomp.cpp} (99%) rename modules/ocl/perf/{precomp.hpp => perf_precomp.hpp} (98%) rename modules/ocl/test/{precomp.hpp => test_precomp.hpp} (100%) diff --git a/modules/ocl/perf/main.cpp b/modules/ocl/perf/main.cpp index bd2a4ec4b6..0875903413 100644 --- a/modules/ocl/perf/main.cpp +++ b/modules/ocl/perf/main.cpp @@ -40,7 +40,7 @@ // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" int main(int argc, const char *argv[]) { diff --git a/modules/ocl/perf/perf_arithm.cpp b/modules/ocl/perf/perf_arithm.cpp index 3ef0634e70..29ff0d802a 100644 --- a/modules/ocl/perf/perf_arithm.cpp +++ b/modules/ocl/perf/perf_arithm.cpp @@ -44,7 +44,7 @@ // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// Lut //////////////////////// PERFTEST(lut) { diff --git a/modules/ocl/perf/perf_blend.cpp b/modules/ocl/perf/perf_blend.cpp index 8ebb6482ba..22139779d4 100644 --- a/modules/ocl/perf/perf_blend.cpp +++ b/modules/ocl/perf/perf_blend.cpp @@ -44,7 +44,7 @@ // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// blend //////////////////////// template void blendLinearGold(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &weights1, const cv::Mat &weights2, cv::Mat &result_gold) diff --git a/modules/ocl/perf/perf_brute_force_matcher.cpp b/modules/ocl/perf/perf_brute_force_matcher.cpp index 406b46a324..c481766035 100644 --- a/modules/ocl/perf/perf_brute_force_matcher.cpp +++ b/modules/ocl/perf/perf_brute_force_matcher.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. 
// //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" //////////////////// BruteForceMatch ///////////////// PERFTEST(BruteForceMatcher) diff --git a/modules/ocl/perf/perf_calib3d.cpp b/modules/ocl/perf/perf_calib3d.cpp index f998ddf0f3..e0622aa56b 100644 --- a/modules/ocl/perf/perf_calib3d.cpp +++ b/modules/ocl/perf/perf_calib3d.cpp @@ -44,7 +44,7 @@ // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// StereoMatchBM //////////////////////// PERFTEST(StereoMatchBM) { diff --git a/modules/ocl/perf/perf_canny.cpp b/modules/ocl/perf/perf_canny.cpp index cb23d7ad28..7ea0ce652a 100644 --- a/modules/ocl/perf/perf_canny.cpp +++ b/modules/ocl/perf/perf_canny.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// Canny //////////////////////// PERFTEST(Canny) diff --git a/modules/ocl/perf/perf_color.cpp b/modules/ocl/perf/perf_color.cpp index daf1cfdc9c..4bd5fa5d44 100644 --- a/modules/ocl/perf/perf_color.cpp +++ b/modules/ocl/perf/perf_color.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// cvtColor//////////////////////// PERFTEST(cvtColor) diff --git a/modules/ocl/perf/perf_fft.cpp b/modules/ocl/perf/perf_fft.cpp index 6e0be3f19d..7073eb691d 100644 --- a/modules/ocl/perf/perf_fft.cpp +++ b/modules/ocl/perf/perf_fft.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. 
// //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// dft //////////////////////// PERFTEST(dft) diff --git a/modules/ocl/perf/perf_filters.cpp b/modules/ocl/perf/perf_filters.cpp index e988ce09d6..be288b444b 100644 --- a/modules/ocl/perf/perf_filters.cpp +++ b/modules/ocl/perf/perf_filters.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// Blur//////////////////////// PERFTEST(Blur) diff --git a/modules/ocl/perf/perf_gemm.cpp b/modules/ocl/perf/perf_gemm.cpp index f197c5f5a0..abaeda363c 100644 --- a/modules/ocl/perf/perf_gemm.cpp +++ b/modules/ocl/perf/perf_gemm.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// gemm //////////////////////// PERFTEST(gemm) diff --git a/modules/ocl/perf/perf_gftt.cpp b/modules/ocl/perf/perf_gftt.cpp index bca6f398a2..9e809e4435 100644 --- a/modules/ocl/perf/perf_gftt.cpp +++ b/modules/ocl/perf/perf_gftt.cpp @@ -44,7 +44,7 @@ //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// GoodFeaturesToTrack //////////////////////// PERFTEST(GoodFeaturesToTrack) diff --git a/modules/ocl/perf/perf_haar.cpp b/modules/ocl/perf/perf_haar.cpp index 72f01dc935..26bed5ebd9 100644 --- a/modules/ocl/perf/perf_haar.cpp +++ b/modules/ocl/perf/perf_haar.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// Haar //////////////////////// namespace cv diff --git a/modules/ocl/perf/perf_hog.cpp b/modules/ocl/perf/perf_hog.cpp index 7daa61396c..0f05581295 100644 --- a/modules/ocl/perf/perf_hog.cpp +++ b/modules/ocl/perf/perf_hog.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. 
// //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// HOG//////////////////////// diff --git a/modules/ocl/perf/perf_imgproc.cpp b/modules/ocl/perf/perf_imgproc.cpp index ade5019147..582853d05b 100644 --- a/modules/ocl/perf/perf_imgproc.cpp +++ b/modules/ocl/perf/perf_imgproc.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// equalizeHist //////////////////////// PERFTEST(equalizeHist) diff --git a/modules/ocl/perf/perf_match_template.cpp b/modules/ocl/perf/perf_match_template.cpp index 5da15aaf64..8aafc98968 100644 --- a/modules/ocl/perf/perf_match_template.cpp +++ b/modules/ocl/perf/perf_match_template.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" /////////// matchTemplate //////////////////////// //void InitMatchTemplate() diff --git a/modules/ocl/perf/perf_matrix_operation.cpp b/modules/ocl/perf/perf_matrix_operation.cpp index b724cdbe64..bb407c9afe 100644 --- a/modules/ocl/perf/perf_matrix_operation.cpp +++ b/modules/ocl/perf/perf_matrix_operation.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// ConvertTo//////////////////////// PERFTEST(ConvertTo) diff --git a/modules/ocl/perf/perf_moments.cpp b/modules/ocl/perf/perf_moments.cpp index 7fa3948dec..a1515b879b 100644 --- a/modules/ocl/perf/perf_moments.cpp +++ b/modules/ocl/perf/perf_moments.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. 
// //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// Moments //////////////////////// PERFTEST(Moments) { diff --git a/modules/ocl/perf/perf_norm.cpp b/modules/ocl/perf/perf_norm.cpp index 1d986c8e49..fec8d73791 100644 --- a/modules/ocl/perf/perf_norm.cpp +++ b/modules/ocl/perf/perf_norm.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// norm//////////////////////// PERFTEST(norm) diff --git a/modules/ocl/perf/perf_opticalflow.cpp b/modules/ocl/perf/perf_opticalflow.cpp index 10763b5b0f..a6724c8123 100644 --- a/modules/ocl/perf/perf_opticalflow.cpp +++ b/modules/ocl/perf/perf_opticalflow.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// PyrLKOpticalFlow //////////////////////// PERFTEST(PyrLKOpticalFlow) diff --git a/modules/ocl/perf/precomp.cpp b/modules/ocl/perf/perf_precomp.cpp similarity index 99% rename from modules/ocl/perf/precomp.cpp rename to modules/ocl/perf/perf_precomp.cpp index dd3b5e4ea1..2a49eb20e8 100644 --- a/modules/ocl/perf/precomp.cpp +++ b/modules/ocl/perf/perf_precomp.cpp @@ -40,7 +40,7 @@ // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" #if GTEST_OS_WINDOWS #ifndef NOMINMAX #define NOMINMAX diff --git a/modules/ocl/perf/precomp.hpp b/modules/ocl/perf/perf_precomp.hpp similarity index 98% rename from modules/ocl/perf/precomp.hpp rename to modules/ocl/perf/perf_precomp.hpp index 97e3d7e5c6..2df1b2c8f3 100644 --- a/modules/ocl/perf/precomp.hpp +++ b/modules/ocl/perf/perf_precomp.hpp @@ -40,6 +40,9 @@ // //M*/ +#ifndef __OPENCV_PERF_PRECOMP_HPP__ +#define __OPENCV_PERF_PRECOMP_HPP__ + #include #include #include @@ -99,7 +102,7 @@ int EeceptDoubleEQ(T1 expected, T1 actual) testing::internal::Double lhs(expected); testing::internal::Double rhs(actual); - if 
(lhs.AlmostEquals(rhs)) + if (lhs.AlmostEquals(rhs)) { return 1; } @@ -352,7 +355,7 @@ public: if(accurate_diff_ <= eps) is_accurate_ = 1; else - is_accurate_ = 0; + is_accurate_ = 0; } std::stringstream &getCurSubtestDescription() @@ -369,7 +372,7 @@ private: speedup_full_faster_count_(0), speedup_full_slower_count_(0), speedup_full_equal_count_(0), is_list_mode_(false), num_iters_(10), cpu_num_iters_(2), gpu_warmup_iters_(1), cur_iter_idx_(0), cur_warmup_idx_(0), - record_(0), recordname_("performance"), itname_changed_(true), + record_(0), recordname_("performance"), itname_changed_(true), is_accurate_(-1), accurate_diff_(0.) { cpu_times_.reserve(num_iters_); @@ -506,3 +509,5 @@ struct name##_test: Runnable { \ #define WARMUP_OFF \ ocl::finish();\ } TestSystem::instance().warmupComplete() + +#endif diff --git a/modules/ocl/perf/perf_pyramid.cpp b/modules/ocl/perf/perf_pyramid.cpp index 3b96251e5d..b98f531b5e 100644 --- a/modules/ocl/perf/perf_pyramid.cpp +++ b/modules/ocl/perf/perf_pyramid.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// pyrDown ////////////////////// PERFTEST(pyrDown) diff --git a/modules/ocl/perf/perf_split_merge.cpp b/modules/ocl/perf/perf_split_merge.cpp index 0fafd14aba..cbe817faff 100644 --- a/modules/ocl/perf/perf_split_merge.cpp +++ b/modules/ocl/perf/perf_split_merge.cpp @@ -43,7 +43,7 @@ // the use of this software, even if advised of the possibility of such damage. 
// //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" ///////////// Merge//////////////////////// PERFTEST(Merge) diff --git a/modules/ocl/test/main.cpp b/modules/ocl/test/main.cpp index 1250691a1f..594c196a59 100644 --- a/modules/ocl/test/main.cpp +++ b/modules/ocl/test/main.cpp @@ -39,7 +39,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/test_arithm.cpp b/modules/ocl/test/test_arithm.cpp index d12cef0b22..149c172efa 100644 --- a/modules/ocl/test/test_arithm.cpp +++ b/modules/ocl/test/test_arithm.cpp @@ -54,7 +54,7 @@ //#define PRINT_TIME -#include "precomp.hpp" +#include "test_precomp.hpp" #include #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/test_blend.cpp b/modules/ocl/test/test_blend.cpp index fa1aea1727..9341981609 100644 --- a/modules/ocl/test/test_blend.cpp +++ b/modules/ocl/test/test_blend.cpp @@ -42,7 +42,7 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #include using namespace cv; diff --git a/modules/ocl/test/test_brute_force_matcher.cpp b/modules/ocl/test/test_brute_force_matcher.cpp index 11fdbb3598..7d8fc368f8 100644 --- a/modules/ocl/test/test_brute_force_matcher.cpp +++ b/modules/ocl/test/test_brute_force_matcher.cpp @@ -43,7 +43,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL namespace { diff --git a/modules/ocl/test/test_calib3d.cpp b/modules/ocl/test/test_calib3d.cpp index 950f19d3c0..7e5c4a4196 100644 --- a/modules/ocl/test/test_calib3d.cpp +++ b/modules/ocl/test/test_calib3d.cpp @@ -43,7 +43,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #include #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/test_canny.cpp b/modules/ocl/test/test_canny.cpp index b378b2281b..6c6ea4eddb 100644 --- a/modules/ocl/test/test_canny.cpp +++ b/modules/ocl/test/test_canny.cpp @@ -43,7 +43,7 @@ // //M*/ -#include "precomp.hpp" +#include 
"test_precomp.hpp" #ifdef HAVE_OPENCL //////////////////////////////////////////////////////// diff --git a/modules/ocl/test/test_color.cpp b/modules/ocl/test/test_color.cpp index 9748104de6..a0293fc454 100644 --- a/modules/ocl/test/test_color.cpp +++ b/modules/ocl/test/test_color.cpp @@ -43,7 +43,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL //#define MAT_DEBUG diff --git a/modules/ocl/test/test_fft.cpp b/modules/ocl/test/test_fft.cpp index fda55ddcc2..08e70e2f96 100644 --- a/modules/ocl/test/test_fft.cpp +++ b/modules/ocl/test/test_fft.cpp @@ -43,7 +43,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" using namespace std; #ifdef HAVE_CLAMDFFT //////////////////////////////////////////////////////////////////////////// diff --git a/modules/ocl/test/test_filters.cpp b/modules/ocl/test/test_filters.cpp index cfd57413e1..ec46a5cd6a 100644 --- a/modules/ocl/test/test_filters.cpp +++ b/modules/ocl/test/test_filters.cpp @@ -48,7 +48,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/test_gemm.cpp b/modules/ocl/test/test_gemm.cpp index 5548456568..00d428770f 100644 --- a/modules/ocl/test/test_gemm.cpp +++ b/modules/ocl/test/test_gemm.cpp @@ -43,7 +43,7 @@ //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" using namespace std; #ifdef HAVE_CLAMDBLAS //////////////////////////////////////////////////////////////////////////// diff --git a/modules/ocl/test/test_imgproc.cpp b/modules/ocl/test/test_imgproc.cpp index 3228b6c0cf..225925c03e 100644 --- a/modules/ocl/test/test_imgproc.cpp +++ b/modules/ocl/test/test_imgproc.cpp @@ -51,7 +51,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/test_kmeans.cpp b/modules/ocl/test/test_kmeans.cpp index ebade3bbc4..a3e472bdf1 100644 --- a/modules/ocl/test/test_kmeans.cpp +++ b/modules/ocl/test/test_kmeans.cpp @@ -44,7 +44,7 @@ // //M*/ -#include 
"precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/test_match_template.cpp b/modules/ocl/test/test_match_template.cpp index a393abdeb3..551c9ff12a 100644 --- a/modules/ocl/test/test_match_template.cpp +++ b/modules/ocl/test/test_match_template.cpp @@ -43,7 +43,7 @@ //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL //////////////////////////////////////////////////////////////////////////////// diff --git a/modules/ocl/test/test_matrix_operation.cpp b/modules/ocl/test/test_matrix_operation.cpp index 92d810818b..e8b5022324 100644 --- a/modules/ocl/test/test_matrix_operation.cpp +++ b/modules/ocl/test/test_matrix_operation.cpp @@ -44,7 +44,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/test_moments.cpp b/modules/ocl/test/test_moments.cpp index e3ab1fa3ce..65034acfe3 100644 --- a/modules/ocl/test/test_moments.cpp +++ b/modules/ocl/test/test_moments.cpp @@ -1,4 +1,4 @@ -#include "precomp.hpp" +#include "test_precomp.hpp" #include #include "opencv2/imgproc/imgproc_c.h" diff --git a/modules/ocl/test/test_objdetect.cpp b/modules/ocl/test/test_objdetect.cpp index e9b571e602..d75d99198b 100644 --- a/modules/ocl/test/test_objdetect.cpp +++ b/modules/ocl/test/test_objdetect.cpp @@ -43,7 +43,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #include "opencv2/core/core.hpp" #include "opencv2/objdetect/objdetect.hpp" diff --git a/modules/ocl/test/test_optflow.cpp b/modules/ocl/test/test_optflow.cpp index 941ade129e..4693d46ddf 100644 --- a/modules/ocl/test/test_optflow.cpp +++ b/modules/ocl/test/test_optflow.cpp @@ -43,7 +43,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #include #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/precomp.hpp b/modules/ocl/test/test_precomp.hpp similarity index 100% rename from modules/ocl/test/precomp.hpp rename to modules/ocl/test/test_precomp.hpp diff --git 
a/modules/ocl/test/test_pyramids.cpp b/modules/ocl/test/test_pyramids.cpp index 1bd188dea6..b7bc752d67 100644 --- a/modules/ocl/test/test_pyramids.cpp +++ b/modules/ocl/test/test_pyramids.cpp @@ -44,7 +44,7 @@ //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #include #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/test_sort.cpp b/modules/ocl/test/test_sort.cpp index f39f78e9bf..83326a5426 100644 --- a/modules/ocl/test/test_sort.cpp +++ b/modules/ocl/test/test_sort.cpp @@ -44,7 +44,7 @@ //M*/ #include #include -#include "precomp.hpp" +#include "test_precomp.hpp" using namespace std; using namespace cvtest; diff --git a/modules/ocl/test/test_split_merge.cpp b/modules/ocl/test/test_split_merge.cpp index 854ce309c2..9663f5321c 100644 --- a/modules/ocl/test/test_split_merge.cpp +++ b/modules/ocl/test/test_split_merge.cpp @@ -44,7 +44,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL diff --git a/modules/ocl/test/utility.cpp b/modules/ocl/test/utility.cpp index 27f9cec079..440a89d4a0 100644 --- a/modules/ocl/test/utility.cpp +++ b/modules/ocl/test/utility.cpp @@ -39,7 +39,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #define VARNAME(A) #A using namespace std; using namespace cv; From 2765f3f7ada0ff6eff0b7b850509079df5cafeb5 Mon Sep 17 00:00:00 2001 From: Dmitry Retinskiy Date: Fri, 9 Aug 2013 10:32:34 +0400 Subject: [PATCH 011/139] Minor language corrections in the tutorial document --- .../camera_calibration/camera_calibration.rst | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst b/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst index 6637e2590c..0e2c764930 100644 --- a/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst +++ b/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst @@ -3,42 +3,42 @@ Camera calibration With OpenCV ****************************** 
-Cameras have been around for a long-long time. However, with the introduction of the cheap *pinhole* cameras in the late 20th century, they became a common occurrence in our everyday life. Unfortunately, this cheapness comes with its price: significant distortion. Luckily, these are constants and with a calibration and some remapping we can correct this. Furthermore, with calibration you may also determinate the relation between the camera's natural units (pixels) and the real world units (for example millimeters). +Cameras have been around for a long-long time. However, with the introduction of the cheap *pinhole* cameras in the late 20th century, they became a common occurrence in our everyday life. Unfortunately, this cheapness comes with its price: significant distortion. Luckily, these are constants and with a calibration and some remapping we can correct this. Furthermore, with calibration you may also determine the relation between the camera's natural units (pixels) and the real world units (for example millimeters). Theory ====== -For the distortion OpenCV takes into account the radial and tangential factors. For the radial one uses the following formula: +For the distortion OpenCV takes into account the radial and tangential factors. For the radial factor one uses the following formula: .. math:: x_{corrected} = x( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6) \\ y_{corrected} = y( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6) -So for an old pixel point at :math:`(x,y)` coordinate in the input image, for a corrected output image its position will be :math:`(x_{corrected} y_{corrected})` . The presence of the radial distortion manifests in form of the "barrel" or "fish-eye" effect. +So for an old pixel point at :math:`(x,y)` coordinates in the input image, its position on the corrected output image will be :math:`(x_{corrected} y_{corrected})`. The presence of the radial distortion manifests in form of the "barrel" or "fish-eye" effect. 
-Tangential distortion occurs because the image taking lenses are not perfectly parallel to the imaging plane. Correcting this is made via the formulas: +Tangential distortion occurs because the image taking lenses are not perfectly parallel to the imaging plane. It can be corrected via the formulas: .. math:: x_{corrected} = x + [ 2p_1xy + p_2(r^2+2x^2)] \\ y_{corrected} = y + [ p_1(r^2+ 2y^2)+ 2p_2xy] -So we have five distortion parameters, which in OpenCV are organized in a 5 column one row matrix: +So we have five distortion parameters which in OpenCV are presented as one row matrix with 5 columns: .. math:: Distortion_{coefficients}=(k_1 \hspace{10pt} k_2 \hspace{10pt} p_1 \hspace{10pt} p_2 \hspace{10pt} k_3) -Now for the unit conversion, we use the following formula: +Now for the unit conversion we use the following formula: .. math:: \left [ \begin{matrix} x \\ y \\ w \end{matrix} \right ] = \left [ \begin{matrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{matrix} \right ] \left [ \begin{matrix} X \\ Y \\ Z \end{matrix} \right ] -Here the presence of the :math:`w` is cause we use a homography coordinate system (and :math:`w=Z`). The unknown parameters are :math:`f_x` and :math:`f_y` (camera focal lengths) and :math:`(c_x, c_y)` what are the optical centers expressed in pixels coordinates. If for both axes a common focal length is used with a given :math:`a` aspect ratio (usually 1), then :math:`f_y=f_x*a` and in the upper formula we will have a single :math:`f` focal length. The matrix containing these four parameters is referred to as the *camera matrix*. While the distortion coefficients are the same regardless of the camera resolutions used, these should be scaled along with the current resolution from the calibrated resolution. +Here the presence of :math:`w` is explained by the use of homography coordinate system (and :math:`w=Z`). 
The unknown parameters are :math:`f_x` and :math:`f_y` (camera focal lengths) and :math:`(c_x, c_y)` which are the optical centers expressed in pixels coordinates. If for both axes a common focal length is used with a given :math:`a` aspect ratio (usually 1), then :math:`f_y=f_x*a` and in the upper formula we will have a single focal length :math:`f`. The matrix containing these four parameters is referred to as the *camera matrix*. While the distortion coefficients are the same regardless of the camera resolutions used, these should be scaled along with the current resolution from the calibrated resolution. -The process of determining these two matrices is the calibration. Calculating these parameters is done by some basic geometrical equations. The equations used depend on the calibrating objects used. Currently OpenCV supports three types of object for calibration: +The process of determining these two matrices is the calibration. Calculation of these parameters is done through basic geometrical equations. The equations used depend on the chosen calibrating objects. Currently OpenCV supports three types of objects for calibration: .. container:: enumeratevisibleitemswithsquare @@ -46,7 +46,7 @@ The process of determining these two matrices is the calibration. Calculating th + Symmetrical circle pattern + Asymmetrical circle pattern -Basically, you need to take snapshots of these patterns with your camera and let OpenCV find them. Each found pattern equals in a new equation. To solve the equation you need at least a predetermined number of pattern snapshots to form a well-posed equation system. This number is higher for the chessboard pattern and less for the circle ones. For example, in theory the chessboard one requires at least two. However, in practice we have a good amount of noise present in our input images, so for good results you will probably want at least 10 good snapshots of the input pattern in different position. 
+Basically, you need to take snapshots of these patterns with your camera and let OpenCV find them. Each found pattern results in a new equation. To solve the equation you need at least a predetermined number of pattern snapshots to form a well-posed equation system. This number is higher for the chessboard pattern and less for the circle ones. For example, in theory the chessboard pattern requires at least two snapshots. However, in practice we have a good amount of noise present in our input images, so for good results you will probably need at least 10 good snapshots of the input pattern in different positions. Goal ==== @@ -55,19 +55,19 @@ The sample application will: .. container:: enumeratevisibleitemswithsquare - + Determinate the distortion matrix - + Determinate the camera matrix - + Input from Camera, Video and Image file list - + Configuration from XML/YAML file + + Determine the distortion matrix + + Determine the camera matrix + + Take input from Camera, Video and Image file list + + Read configuration from XML/YAML file + Save the results into XML/YAML file + Calculate re-projection error Source code =========== -You may also find the source code in the :file:`samples/cpp/tutorial_code/calib3d/camera_calibration/` folder of the OpenCV source library or :download:`download it from here <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp>`. The program has a single argument. The name of its configuration file. If none given it will try to open the one named "default.xml". :download:`Here's a sample configuration file <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml>` in XML format. In the configuration file you may choose to use as input a camera, a video file or an image list. If you opt for the later one, you need to create a configuration file where you enumerate the images to use. 
Here's :download:`an example of this <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml>`. The important part to remember is that the images needs to be specified using the absolute path or the relative one from your applications working directory. You may find all this in the beforehand mentioned directory. +You may also find the source code in the :file:`samples/cpp/tutorial_code/calib3d/camera_calibration/` folder of the OpenCV source library or :download:`download it from here <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp>`. The program has a single argument: the name of its configuration file. If none is given then it will try to open the one named "default.xml". :download:`Here's a sample configuration file <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml>` in XML format. In the configuration file you may choose to use camera as an input, a video file or an image list. If you opt for the last one, you will need to create a configuration file where you enumerate the images to use. Here's :download:`an example of this <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml>`. The important part to remember is that the images need to be specified using the absolute path or the relative one from your application's working directory. You may find all this in the samples directory mentioned above. -The application starts up with reading the settings from the configuration file. Although, this is an important part of it, it has nothing to do with the subject of this tutorial: *camera calibration*. Therefore, I've chosen to do not post here the code part for that. The technical background on how to do this you can find in the :ref:`fileInputOutputXMLYAML` tutorial. +The application starts up with reading the settings from the configuration file. 
Although, this is an important part of it, it has nothing to do with the subject of this tutorial: *camera calibration*. Therefore, I've chosen not to post the code for that part here. Technical background on how to do this you can find in the :ref:`fileInputOutputXMLYAML` tutorial. Explanation =========== @@ -93,9 +93,9 @@ Explanation return -1; } - For this I've used simple OpenCV class input operation. After reading the file I've an additional post-process function that checks for the validity of the input. Only if all of them are good will be the *goodInput* variable true. + For this I've used simple OpenCV class input operation. After reading the file I've an additional post-processing function that checks validity of the input. Only if all inputs are good then *goodInput* variable will be true. -#. **Get next input, if it fails or we have enough of them calibrate**. After this we have a big loop where we do the following operations: get the next image from the image list, camera or video file. If this fails or we have enough images we run the calibration process. In case of image we step out of the loop and otherwise the remaining frames will be undistorted (if the option is set) via changing from *DETECTION* mode to *CALIBRATED* one. +#. **Get next input, if it fails or we have enough of them - calibrate**. After this we have a big loop where we do the following operations: get the next image from the image list, camera or video file. If this fails or we have enough images then we run the calibration process. In case of image we step out of the loop and otherwise the remaining frames will be undistorted (if the option is set) via changing from *DETECTION* mode to the *CALIBRATED* one. .. code-block:: cpp @@ -125,7 +125,7 @@ Explanation For some cameras we may need to flip the input image. Here we do this too. -#. **Find the pattern in the current input**. 
The formation of the equations I mentioned above consists of finding the major patterns in the input: in case of the chessboard this is their corners of the squares and for the circles, well, the circles itself. The position of these will form the result and is collected into the *pointBuf* vector. +#. **Find the pattern in the current input**. The formation of the equations I mentioned above aims to finding major patterns in the input: in case of the chessboard this are corners of the squares and for the circles, well, the circles themselves. The position of these will form the result which will be written into the *pointBuf* vector. .. code-block:: cpp @@ -146,9 +146,9 @@ Explanation break; } - Depending on the type of the input pattern you use either the :calib3d:`findChessboardCorners ` or the :calib3d:`findCirclesGrid ` function. For both of them you pass on the current image, the size of the board and you'll get back the positions of the patterns. Furthermore, they return a boolean variable that states if in the input we could find or not the pattern (we only need to take into account images where this is true!). + Depending on the type of the input pattern you use either the :calib3d:`findChessboardCorners ` or the :calib3d:`findCirclesGrid ` function. For both of them you pass the current image and the size of the board and you'll get the positions of the patterns. Furthermore, they return a boolean variable which states if the pattern was found in the input (we only need to take into account those images where this is true!). - Then again in case of cameras we only take camera images after an input delay time passed. This is in order to allow for the user to move the chessboard around and as getting different images. Same images mean same equations, and same equations at the calibration will form an ill-posed problem, so the calibration will fail. For square images the position of the corners are only approximate. 
We may improve this by calling the :feature2d:`cornerSubPix ` function. This way will get a better calibration result. After this we add a valid inputs result to the *imagePoints* vector to collect all of the equations into a single container. Finally, for visualization feedback purposes we will draw the found points on the input image with the :calib3d:`findChessboardCorners ` function. + Then again in case of cameras we only take camera images when an input delay time is passed. This is done in order to allow user moving the chessboard around and getting different images. Similar images result in similar equations, and similar equations at the calibration step will form an ill-posed problem, so the calibration will fail. For square images the positions of the corners are only approximate. We may improve this by calling the :feature2d:`cornerSubPix ` function. It will produce better calibration result. After this we add a valid inputs result to the *imagePoints* vector to collect all of the equations into a single container. Finally, for visualization feedback purposes we will draw the found points on the input image using :calib3d:`findChessboardCorners ` function. .. code-block:: cpp @@ -175,7 +175,7 @@ Explanation drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found ); } -#. **Show state and result for the user, plus command line control of the application**. The showing part consists of a text output on the live feed, and for video or camera input to show the "capturing" frame we simply bitwise negate the input image. +#. **Show state and result to the user, plus command line control of the application**. This part shows text output on the image. .. 
code-block:: cpp @@ -199,7 +199,7 @@ Explanation if( blinkOutput ) bitwise_not(view, view); - If we only ran the calibration and got the camera matrix plus the distortion coefficients we may just as correct the image with the :imgproc_geometric:`undistort ` function: + If we ran calibration and got camera's matrix with the distortion coefficients we may want to correct the image using :imgproc_geometric:`undistort ` function: .. code-block:: cpp @@ -212,7 +212,7 @@ Explanation //------------------------------ Show image and check for input commands ------------------- imshow("Image View", view); - Then we wait for an input key and if this is *u* we toggle the distortion removal, if it is *g* we start all over the detection process (or simply start it), and finally for the *ESC* key quit the application: + Then we wait for an input key and if this is *u* we toggle the distortion removal, if it is *g* we start again the detection process, and finally for the *ESC* key we quit the application: .. code-block:: cpp @@ -229,7 +229,7 @@ Explanation imagePoints.clear(); } -#. **Show the distortion removal for the images too**. When you work with an image list it is not possible to remove the distortion inside the loop. Therefore, you must append this after the loop. Taking advantage of this now I'll expand the :imgproc_geometric:`undistort ` function, which is in fact first a call of the :imgproc_geometric:`initUndistortRectifyMap ` to find out the transformation matrices and then doing the transformation with the :imgproc_geometric:`remap ` function. Because, after a successful calibration the map calculation needs to be done only once, by using this expanded form you may speed up your application: +#. **Show the distortion removal for the images too**. When you work with an image list it is not possible to remove the distortion inside the loop. Therefore, you must do this after the loop. 
Taking advantage of this now I'll expand the :imgproc_geometric:`undistort ` function, which is in fact first calls :imgproc_geometric:`initUndistortRectifyMap ` to find transformation matrices and then performs transformation using :imgproc_geometric:`remap ` function. Because, after successful calibration map calculation needs to be done only once, by using this expanded form you may speed up your application: .. code-block:: cpp @@ -256,7 +256,7 @@ Explanation The calibration and save ======================== -Because the calibration needs to be only once per camera it makes sense to save them after a successful calibration. This way later on you can just load these values into your program. Due to this we first make the calibration, and if it succeeds we save the result into an OpenCV style XML or YAML file, depending on the extension you give in the configuration file. +Because the calibration needs to be done only once per camera, it makes sense to save it after a successful calibration. This way later on you can just load these values into your program. Due to this we first make the calibration, and if it succeeds we save the result into an OpenCV style XML or YAML file, depending on the extension you give in the configuration file. Therefore in the first function we just split up these two processes. Because we want to save many of the calibration variables we'll create these variables here and pass on both of them to the calibration and saving function. Again, I'll not show the saving part as that has little in common with the calibration. Explore the source file in order to find out how and what: @@ -280,7 +280,7 @@ Therefore in the first function we just split up these two processes. Because we return ok; } -We do the calibration with the help of the :calib3d:`calibrateCamera ` function. This has the following parameters: +We do the calibration with the help of the :calib3d:`calibrateCamera ` function. It has the following parameters: .. 
container:: enumeratevisibleitemswithsquare @@ -318,11 +318,11 @@ We do the calibration with the help of the :calib3d:`calibrateCamera ` or the :calib3d:`findCirclesGrid ` function returned. We just need to pass it on. + + The image points. This is a vector of *Point2f* vector which for each input image contains coordinates of the important points (corners for chessboard and centers of the circles for the circle pattern). We have already collected this from :calib3d:`findChessboardCorners ` or :calib3d:`findCirclesGrid ` function. We just need to pass it on. + The size of the image acquired from the camera, video file or the images. - + The camera matrix. If we used the fix aspect ratio option we need to set the :math:`f_x` to zero: + + The camera matrix. If we used the fixed aspect ratio option we need to set the :math:`f_x` to zero: .. code-block:: cpp @@ -336,16 +336,16 @@ We do the calibration with the help of the :calib3d:`calibrateCamera ` to first transform the object point to image point. Then we calculate the absolute norm between what we got with our transformation and the corner/circle finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculate for all the calibration images. + + The function returns the average re-projection error. This number gives a good estimation of precision of the found parameters. This should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices we may calculate the error for one view by using the :calib3d:`projectPoints ` to first transform the object point to image point. Then we calculate the absolute norm between what we got with our transformation and the corner/circle finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculated for all the calibration images. .. 
code-block:: cpp @@ -378,25 +378,25 @@ We do the calibration with the help of the :calib3d:`calibrateCamera ` that has a size of 9 X 6. I've used an AXIS IP camera to create a couple of snapshots of the board and saved it into a VID5 directory. I've put this inside the :file:`images/CameraCalibraation` folder of my working directory and created the following :file:`VID5.XML` file that describes which images to use: +Let there be :download:`this input chessboard pattern <../../../pattern.png>` which has a size of 9 X 6. I've used an AXIS IP camera to create a couple of snapshots of the board and saved it into VID5 directory. I've put this inside the :file:`images/CameraCalibration` folder of my working directory and created the following :file:`VID5.XML` file that describes which images to use: .. code-block:: xml - images/CameraCalibraation/VID5/xx1.jpg - images/CameraCalibraation/VID5/xx2.jpg - images/CameraCalibraation/VID5/xx3.jpg - images/CameraCalibraation/VID5/xx4.jpg - images/CameraCalibraation/VID5/xx5.jpg - images/CameraCalibraation/VID5/xx6.jpg - images/CameraCalibraation/VID5/xx7.jpg - images/CameraCalibraation/VID5/xx8.jpg + images/CameraCalibration/VID5/xx1.jpg + images/CameraCalibration/VID5/xx2.jpg + images/CameraCalibration/VID5/xx3.jpg + images/CameraCalibration/VID5/xx4.jpg + images/CameraCalibration/VID5/xx5.jpg + images/CameraCalibration/VID5/xx6.jpg + images/CameraCalibration/VID5/xx7.jpg + images/CameraCalibration/VID5/xx8.jpg -Then specified the :file:`images/CameraCalibraation/VID5/VID5.XML` as input in the configuration file. Here's a chessboard pattern found during the runtime of the application: +Then passed :file:`images/CameraCalibration/VID5/VID5.XML` as an input in the configuration file. Here's a chessboard pattern found during the runtime of the application: .. 
image:: images/fileListImage.jpg :alt: A found chessboard @@ -433,7 +433,7 @@ In both cases in the specified output XML/YAML file you'll find the camera and d -4.1802327176423804e-001 5.0715244063187526e-001 0. 0. -5.7843597214487474e-001 -Add these values as constants to your program, call the :imgproc_geometric:`initUndistortRectifyMap ` and the :imgproc_geometric:`remap ` function to remove distortion and enjoy distortion free inputs with cheap and low quality cameras. +Add these values as constants to your program, call the :imgproc_geometric:`initUndistortRectifyMap ` and the :imgproc_geometric:`remap ` function to remove distortion and enjoy distortion free inputs for cheap and low quality cameras. You may observe a runtime instance of this on the `YouTube here `_. From fd056235d39655ad6100d686f7e3abe5681179ef Mon Sep 17 00:00:00 2001 From: StevenPuttemans Date: Mon, 12 Aug 2013 11:44:38 +0200 Subject: [PATCH 012/139] Added bugfix 3209 : anchor point (-1,1) changed to (-1,-1) --- modules/imgproc/doc/filtering.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/imgproc/doc/filtering.rst b/modules/imgproc/doc/filtering.rst index 6982b6f445..73df71e6bd 100755 --- a/modules/imgproc/doc/filtering.rst +++ b/modules/imgproc/doc/filtering.rst @@ -1446,7 +1446,7 @@ Applies a separable linear filter to an image. :param kernelY: Coefficients for filtering each column. - :param anchor: Anchor position within the kernel. The default value :math:`(-1, 1)` means that the anchor is at the kernel center. + :param anchor: Anchor position within the kernel. The default value :math:`(-1,-1)` means that the anchor is at the kernel center. :param delta: Value added to the filtered results before storing them. 
From 086db9d6db73956be2ee18b8a552a9179da6b6fd Mon Sep 17 00:00:00 2001 From: kocheganovvm Date: Sat, 10 Aug 2013 21:02:09 +0400 Subject: [PATCH 013/139] Introduce CV_COMP_CHISQR_ALT, an alternative method to calculate ChiSquare Histogram comparison There's some disagreement about the correct formula. has its supporters, however, for texture analysis, the newly introduced formula became standard. The commit enables both uses without breaking backward compatibility. First contributor of this commit was sperrholz. --- modules/imgproc/doc/histograms.rst | 11 ++++++++ .../imgproc/include/opencv2/imgproc/types_c.h | 3 ++- modules/imgproc/src/histogram.cpp | 26 ++++++++++++------- modules/imgproc/test/test_histograms.cpp | 9 ++++++- 4 files changed, 38 insertions(+), 11 deletions(-) diff --git a/modules/imgproc/doc/histograms.rst b/modules/imgproc/doc/histograms.rst index 1ec6a0177b..2240e04457 100644 --- a/modules/imgproc/doc/histograms.rst +++ b/modules/imgproc/doc/histograms.rst @@ -173,6 +173,8 @@ Compares two histograms. * **CV_COMP_CHISQR** Chi-Square + * **CV_COMP_CHISQR_ALT** Alternative Chi-Square + * **CV_COMP_INTERSECT** Intersection * **CV_COMP_BHATTACHARYYA** Bhattacharyya distance @@ -202,6 +204,14 @@ The functions ``compareHist`` compare two dense or two sparse histograms using t d(H_1,H_2) = \sum _I \frac{\left(H_1(I)-H_2(I)\right)^2}{H_1(I)} +* Alternative Chi-Square (``method=CV_COMP_CHISQR_ALT``) + + .. math:: + + d(H_1,H_2) = 2 * \sum _I \frac{\left(H_1(I)-H_2(I)\right)^2}{H_1(I)+H_2(I)} + + This alternative formula is regularly used for texture comparison. See e.g. [Puzicha1997]_. + * Intersection (``method=CV_COMP_INTERSECT``) .. math:: @@ -493,3 +503,4 @@ The function clears histogram bins that are below the specified threshold. .. [RubnerSept98] Y. Rubner. C. Tomasi, L.J. Guibas. *The Earth Mover’s Distance as a Metric for Image Retrieval*. Technical Report STAN-CS-TN-98-86, Department of Computer Science, Stanford University, September 1998. +.. 
[Puzicha1997] Puzicha, J., Hofmann, T., and Buhmann, J. *Non-parametric similarity measures for unsupervised texture segmentation and image retrieval.* In Proc. IEEE Conf. Computer Vision and Pattern Recognition, San Juan, Puerto Rico, pp. 267-272, 1997. diff --git a/modules/imgproc/include/opencv2/imgproc/types_c.h b/modules/imgproc/include/opencv2/imgproc/types_c.h index 2b1d0722e2..dd0d8b8a6e 100644 --- a/modules/imgproc/include/opencv2/imgproc/types_c.h +++ b/modules/imgproc/include/opencv2/imgproc/types_c.h @@ -508,7 +508,8 @@ enum CV_COMP_CHISQR =1, CV_COMP_INTERSECT =2, CV_COMP_BHATTACHARYYA =3, - CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA + CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA, + CV_COMP_CHISQR_ALT =4 }; /* Mask size for distance transform */ diff --git a/modules/imgproc/src/histogram.cpp b/modules/imgproc/src/histogram.cpp index 08252586a9..f0a78187d0 100644 --- a/modules/imgproc/src/histogram.cpp +++ b/modules/imgproc/src/histogram.cpp @@ -1990,12 +1990,12 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) const float* h2 = (const float*)it.planes[1].data; len = it.planes[0].rows*it.planes[0].cols*H1.channels(); - if( method == CV_COMP_CHISQR ) + if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT)) { for( j = 0; j < len; j++ ) { double a = h1[j] - h2[j]; - double b = h1[j]; + double b = (method == CV_COMP_CHISQR) ? 
h1[j] : h1[j] + h2[j]; if( fabs(b) > DBL_EPSILON ) result += a*a/b; } @@ -2034,7 +2034,9 @@ double cv::compareHist( InputArray _H1, InputArray _H2, int method ) CV_Error( CV_StsBadArg, "Unknown comparison method" ); } - if( method == CV_COMP_CORREL ) + if( method == CV_COMP_CHISQR_ALT ) + result *= 2; + else if( method == CV_COMP_CORREL ) { size_t total = H1.total(); double scale = 1./total; @@ -2063,13 +2065,13 @@ double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method ) CV_Assert( H1.size(i) == H2.size(i) ); const SparseMat *PH1 = &H1, *PH2 = &H2; - if( PH1->nzcount() > PH2->nzcount() && method != CV_COMP_CHISQR ) + if( PH1->nzcount() > PH2->nzcount() && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT) std::swap(PH1, PH2); SparseMatConstIterator it = PH1->begin(); int N1 = (int)PH1->nzcount(), N2 = (int)PH2->nzcount(); - if( method == CV_COMP_CHISQR ) + if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT) ) { for( i = 0; i < N1; i++, ++it ) { @@ -2077,7 +2079,7 @@ double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method ) const SparseMat::Node* node = it.node(); float v2 = PH2->value(node->idx, (size_t*)&node->hashval); double a = v1 - v2; - double b = v1; + double b = (method == CV_COMP_CHISQR) ? 
v1 : v1 + v2; if( fabs(b) > DBL_EPSILON ) result += a*a/b; } @@ -2146,6 +2148,9 @@ double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method ) else CV_Error( CV_StsBadArg, "Unknown comparison method" ); + if( method == CV_COMP_CHISQR_ALT ) + result *= 2; + return result; } @@ -2485,13 +2490,13 @@ cvCompareHist( const CvHistogram* hist1, CvSparseMatIterator iterator; CvSparseNode *node1, *node2; - if( mat1->heap->active_count > mat2->heap->active_count && method != CV_COMP_CHISQR ) + if( mat1->heap->active_count > mat2->heap->active_count && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT) { CvSparseMat* t; CV_SWAP( mat1, mat2, t ); } - if( method == CV_COMP_CHISQR ) + if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT) ) { for( node1 = cvInitSparseMatIterator( mat1, &iterator ); node1 != 0; node1 = cvGetNextSparseNode( &iterator )) @@ -2500,7 +2505,7 @@ cvCompareHist( const CvHistogram* hist1, uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), 0, 0, &node1->hashval ); double v2 = node2_data ? *(float*)node2_data : 0.f; double a = v1 - v2; - double b = v1; + double b = (method == CV_COMP_CHISQR) ? 
v1 : v1 + v2; if( fabs(b) > DBL_EPSILON ) result += a*a/b; } @@ -2590,6 +2595,9 @@ cvCompareHist( const CvHistogram* hist1, else CV_Error( CV_StsBadArg, "Unknown comparison method" ); + if( method == CV_COMP_CHISQR_ALT ) + result *= 2; + return result; } diff --git a/modules/imgproc/test/test_histograms.cpp b/modules/imgproc/test/test_histograms.cpp index ccdaa74f27..19ccc656b5 100644 --- a/modules/imgproc/test/test_histograms.cpp +++ b/modules/imgproc/test/test_histograms.cpp @@ -948,7 +948,7 @@ int CV_ThreshHistTest::validate_test_results( int /*test_case_idx*/ ) class CV_CompareHistTest : public CV_BaseHistTest { public: - enum { MAX_METHOD = 4 }; + enum { MAX_METHOD = 5 }; CV_CompareHistTest(); protected: @@ -1014,6 +1014,8 @@ int CV_CompareHistTest::validate_test_results( int /*test_case_idx*/ ) result0[CV_COMP_INTERSECT] += MIN(v0,v1); if( fabs(v0) > DBL_EPSILON ) result0[CV_COMP_CHISQR] += (v0 - v1)*(v0 - v1)/v0; + if( fabs(v0 + v1) > DBL_EPSILON ) + result0[CV_COMP_CHISQR_ALT] += (v0 - v1)*(v0 - v1)/(v0 + v1); s0 += v0; s1 += v1; sq0 += v0*v0; @@ -1039,6 +1041,8 @@ int CV_CompareHistTest::validate_test_results( int /*test_case_idx*/ ) result0[CV_COMP_INTERSECT] += MIN(v0,v1); if( fabs(v0) > DBL_EPSILON ) result0[CV_COMP_CHISQR] += (v0 - v1)*(v0 - v1)/v0; + if( fabs(v0 + v1) > DBL_EPSILON ) + result0[CV_COMP_CHISQR_ALT] += (v0 - v1)*(v0 - v1)/(v0 + v1); s0 += v0; sq0 += v0*v0; result0[CV_COMP_BHATTACHARYYA] += sqrt(v0*v1); @@ -1053,6 +1057,8 @@ int CV_CompareHistTest::validate_test_results( int /*test_case_idx*/ ) } } + result0[CV_COMP_CHISQR_ALT] *= 2; + t = (sq0 - s0*s0/total_size)*(sq1 - s1*s1/total_size); result0[CV_COMP_CORREL] = fabs(t) > DBL_EPSILON ? (result0[CV_COMP_CORREL] - s0*s1/total_size)/sqrt(t) : 1; @@ -1067,6 +1073,7 @@ int CV_CompareHistTest::validate_test_results( int /*test_case_idx*/ ) double v = result[i], v0 = result0[i]; const char* method_name = i == CV_COMP_CHISQR ? "Chi-Square" : + i == CV_COMP_CHISQR_ALT ? 
"Alternative Chi-Square" : i == CV_COMP_CORREL ? "Correlation" : i == CV_COMP_INTERSECT ? "Intersection" : i == CV_COMP_BHATTACHARYYA ? "Bhattacharyya" : "Unknown"; From d302222d8266c0c2b3f5cbe97617062e19174b82 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Mon, 12 Aug 2013 19:04:18 +0400 Subject: [PATCH 014/139] Don't put \r into OPENCV_REFMAN_TOC. There's no need, since configure_file writes its output using native line endings, anyway. --- doc/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt index 70f4809d22..63f3a451b3 100644 --- a/doc/CMakeLists.txt +++ b/doc/CMakeLists.txt @@ -49,7 +49,7 @@ if(BUILD_DOCS AND HAVE_SPHINX) set(toc_file "${OPENCV_MODULE_opencv_${mod}_LOCATION}/doc/${mod}.rst") if(EXISTS "${toc_file}") file(RELATIVE_PATH toc_file "${OpenCV_SOURCE_DIR}/modules" "${toc_file}") - set(OPENCV_REFMAN_TOC "${OPENCV_REFMAN_TOC} ${toc_file}\r\n") + set(OPENCV_REFMAN_TOC "${OPENCV_REFMAN_TOC} ${toc_file}\n") endif() endforeach() From 7fe840307485099948a312d5fe113556fd9f76e5 Mon Sep 17 00:00:00 2001 From: peng xiao Date: Tue, 13 Aug 2013 13:58:55 +0800 Subject: [PATCH 015/139] Let ocl::filter2D support kernel size >= 3. 
--- modules/ocl/src/filtering.cpp | 88 ++- modules/ocl/src/opencl/filtering_laplacian.cl | 546 +++++++----------- modules/ocl/test/test_filters.cpp | 37 +- 3 files changed, 288 insertions(+), 383 deletions(-) diff --git a/modules/ocl/src/filtering.cpp b/modules/ocl/src/filtering.cpp index f35a26e332..324bf83eb7 100644 --- a/modules/ocl/src/filtering.cpp +++ b/modules/ocl/src/filtering.cpp @@ -592,20 +592,21 @@ public: } static void GPUFilter2D(const oclMat &src, oclMat &dst, oclMat &mat_kernel, - Size &ksize, const Point anchor, const int borderType) + Size &ksize, const Point anchor, const int borderType) { CV_Assert(src.clCxt == dst.clCxt); CV_Assert((src.cols == dst.cols) && (src.rows == dst.rows)); CV_Assert((src.oclchannels() == dst.oclchannels())); - CV_Assert((borderType != 0)); CV_Assert(ksize.height > 0 && ksize.width > 0 && ((ksize.height & 1) == 1) && ((ksize.width & 1) == 1)); CV_Assert((anchor.x == -1 && anchor.y == -1) || (anchor.x == ksize.width >> 1 && anchor.y == ksize.height >> 1)); + CV_Assert(ksize.width == ksize.height); Context *clCxt = src.clCxt; - int cn = src.oclchannels(); - int depth = src.depth(); - string kernelName = "filter2D"; + int filterWidth = ksize.width; + bool ksize_3x3 = filterWidth == 3 && src.type() != CV_32FC4; // CV_32FC4 is not tuned up with filter2d_3x3 kernel + + string kernelName = ksize_3x3 ? "filter2D_3x3" : "filter2D"; size_t src_offset_x = (src.offset % src.step) / src.elemSize(); size_t src_offset_y = src.offset / src.step; @@ -613,44 +614,68 @@ static void GPUFilter2D(const oclMat &src, oclMat &dst, oclMat &mat_kernel, size_t dst_offset_x = (dst.offset % dst.step) / dst.elemSize(); size_t dst_offset_y = dst.offset / dst.step; - int vector_lengths[4][7] = {{4, 4, 4, 4, 4, 4, 4}, - {4, 4, 1, 1, 1, 1, 1}, - {1, 1, 1, 1, 1, 1, 1}, - {4, 4, 4, 4, 1, 1, 4} - }; + int paddingPixels = (int)(filterWidth/2)*2; + + size_t localThreads[3] = {ksize_3x3 ? 256 : 16, ksize_3x3 ? 
1 : 16, 1}; + size_t globalThreads[3] = {src.wholecols, src.wholerows, 1}; - int vector_length = vector_lengths[cn - 1][depth]; - int offset_cols = (dst_offset_x) & (vector_length - 1); - int cols = dst.cols + offset_cols; - int rows = divUp(dst.rows, vector_length); + int cn = src.oclchannels(); + int src_step = (int)(src.step/src.elemSize()); + int dst_step = (int)(dst.step/src.elemSize()); + + int localWidth = localThreads[0] + paddingPixels; + int localHeight = localThreads[1] + paddingPixels; - size_t localThreads[3] = {256, 1, 1}; - size_t globalThreads[3] = { divUp(cols, localThreads[0]) *localThreads[0], - divUp(rows, localThreads[1]) *localThreads[1], 1 - }; + size_t localMemSize = ksize_3x3 ? 260 * 6 * src.elemSize() : (localWidth * localHeight) * src.elemSize(); + + int vector_lengths[4][7] = {{4, 4, 4, 4, 4, 4, 4}, + {4, 4, 1, 1, 1, 1, 1}, + {1, 1, 1, 1, 1, 1, 1}, + {4, 4, 4, 4, 1, 1, 4} + }; + int cols = dst.cols + ((dst_offset_x) & (vector_lengths[cn - 1][src.depth()] - 1)); vector< pair > args; args.push_back(make_pair(sizeof(cl_mem), (void *)&src.data)); - args.push_back(make_pair(sizeof(cl_int), (void *)&src.step)); + args.push_back(make_pair(sizeof(cl_mem), (void *)&dst.data)); + args.push_back(make_pair(sizeof(cl_int), (void *)&src_step)); + args.push_back(make_pair(sizeof(cl_int), (void *)&dst_step)); + args.push_back(make_pair(sizeof(cl_mem), (void *)&mat_kernel.data)); + args.push_back(make_pair(localMemSize, (void *)NULL)); + args.push_back(make_pair(sizeof(cl_int), (void *)&src.wholerows)); + args.push_back(make_pair(sizeof(cl_int), (void *)&src.wholecols)); args.push_back(make_pair(sizeof(cl_int), (void *)&src_offset_x)); args.push_back(make_pair(sizeof(cl_int), (void *)&src_offset_y)); - args.push_back(make_pair(sizeof(cl_mem), (void *)&dst.data)); - args.push_back(make_pair(sizeof(cl_int), (void *)&dst.step)); args.push_back(make_pair(sizeof(cl_int), (void *)&dst_offset_x)); args.push_back(make_pair(sizeof(cl_int), (void 
*)&dst_offset_y)); - args.push_back(make_pair(sizeof(cl_mem), (void *)&mat_kernel.data)); args.push_back(make_pair(sizeof(cl_int), (void *)&src.cols)); args.push_back(make_pair(sizeof(cl_int), (void *)&src.rows)); args.push_back(make_pair(sizeof(cl_int), (void *)&cols)); - args.push_back(make_pair(sizeof(cl_int), (void *)&src.wholecols)); - args.push_back(make_pair(sizeof(cl_int), (void *)&src.wholerows)); - - const int buffer_size = 100; - char opt_buffer [buffer_size] = ""; - sprintf(opt_buffer, "-DANCHOR=%d -DANX=%d -DANY=%d", ksize.width, anchor.x, anchor.y); - - openCLExecuteKernel(clCxt, &filtering_laplacian, kernelName, globalThreads, localThreads, args, cn, depth, opt_buffer); + char btype[30]; + switch (borderType) + { + case 0: + sprintf(btype, "BORDER_CONSTANT"); + break; + case 1: + sprintf(btype, "BORDER_REPLICATE"); + break; + case 2: + sprintf(btype, "BORDER_REFLECT"); + break; + case 3: + CV_Error(CV_StsUnsupportedFormat, "BORDER_WRAP is not supported!"); + return; + case 4: + sprintf(btype, "BORDER_REFLECT_101"); + break; + } + int type = src.depth(); + char build_options[150]; + sprintf(build_options, "-D %s -D IMG_C_%d_%d -D CN=%d -D FILTER_SIZE=%d", btype, cn, type, cn, ksize.width); + openCLExecuteKernel(clCxt, &filtering_laplacian, kernelName, globalThreads, localThreads, args, -1, -1, build_options); } + Ptr cv::ocl::getLinearFilter_GPU(int srcType, int dstType, const Mat &kernel, const Size &ksize, Point anchor, int borderType) { @@ -659,8 +684,7 @@ Ptr cv::ocl::getLinearFilter_GPU(int srcType, int dstType, const CV_Assert((srcType == CV_8UC1 || srcType == CV_8UC3 || srcType == CV_8UC4 || srcType == CV_32FC1 || srcType == CV_32FC3 || srcType == CV_32FC4) && dstType == srcType); oclMat gpu_krnl; - int nDivisor; - normalizeKernel(kernel, gpu_krnl, CV_32S, &nDivisor, false); + normalizeKernel(kernel, gpu_krnl, CV_32FC1); normalizeAnchor(anchor, ksize); return Ptr(new LinearFilter_GPU(ksize, anchor, gpu_krnl, 
GPUFilter2D_callers[CV_MAT_CN(srcType)], diff --git a/modules/ocl/src/opencl/filtering_laplacian.cl b/modules/ocl/src/opencl/filtering_laplacian.cl index 8535eb1a54..5016b0b6aa 100644 --- a/modules/ocl/src/opencl/filtering_laplacian.cl +++ b/modules/ocl/src/opencl/filtering_laplacian.cl @@ -15,7 +15,9 @@ // Third party copyrights are property of their respective owners. // // @Authors +// Pang Erping, erping@multicorewareinc.com // Jia Haipeng, jiahaipeng95@gmail.com +// Peng Xiao, pengxiao@outlook.com // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: @@ -42,292 +44,229 @@ // the use of this software, even if advised of the possibility of such damage. // //M*/ -#define BORDER_REFLECT_101 +//#define BORDER_REFLECT_101 /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////Macro for border type//////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// #ifdef BORDER_REPLICATE + //BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh #define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (l_edge) : (i)) #define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (r_edge)-1 : (addr)) -#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (t_edge) :(i)) +#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (t_edge) : (i)) #define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (b_edge)-1 :(addr)) #endif #ifdef BORDER_REFLECT -//BORDER_REFLECT: fedcba|abcdefgh|hgfedcb -#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i)-1 : (i)) +#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? ((l_edge)<<1)-(i)-1 : (i)) #define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-1+((r_edge)<<1) : (addr)) -#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i)-1 : (i)) +#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? 
((t_edge)<<1)-(i)-1 : (i)) #define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-1+((b_edge)<<1) : (addr)) #endif #ifdef BORDER_REFLECT_101 //BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba -#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i) : (i)) +#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? ((l_edge)<<1)-(i) : (i)) #define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-2+((r_edge)<<1) : (addr)) -#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i) : (i)) +#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? ((t_edge)<<1)-(i) : (i)) #define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-2+((b_edge)<<1) : (addr)) #endif -#ifdef BORDER_WRAP -//BORDER_WRAP: cdefgh|abcdefgh|abcdefg -#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (i)+(r_edge) : (i)) -#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (i)-(r_edge) : (addr)) -#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (i)+(b_edge) : (i)) -#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (i)-(b_edge) : (addr)) +#ifdef IMG_C_1_0 +#define T_IMG uchar +#define T_IMGx4 uchar4 +#define T_IMG_C1 uchar +#define CONVERT_TYPE convert_uchar_sat +#define CONVERT_TYPEx4 convert_uchar4_sat +#endif +#ifdef IMG_C_4_0 +#define T_IMG uchar4 +#define T_IMGx4 uchar16 +#define T_IMG_C1 uchar +#define CONVERT_TYPE convert_uchar4_sat +#define CONVERT_TYPEx4 convert_uchar16_sat +#endif +#ifdef IMG_C_1_5 +#define T_IMG float +#define T_IMGx4 float4 +#define T_IMG_C1 float +#define CONVERT_TYPE convert_float +#define CONVERT_TYPEx4 convert_float4 +#endif +#ifdef IMG_C_4_5 +#define T_IMG float4 +#define T_IMGx4 float16 +#define T_IMG_C1 float +#define CONVERT_TYPE convert_float4 +#define CONVERT_TYPEx4 convert_float16 #endif -////////////////////////////////////////////////////////////////////////////////////////////////////// -/////////////////////////////Macro for define elements number per thread///////////////////////////// 
-//////////////////////////////////////////////////////////////////////////////////////////////////// -//#define ANCHOR 3 -//#define ANX 1 -//#define ANY 1 - -#define ROWS_PER_GROUP 4 -#define ROWS_PER_GROUP_BITS 2 -#define ROWS_FETCH (ROWS_PER_GROUP + ANY + ANY) //(ROWS_PER_GROUP + anY * 2) - -#define THREADS_PER_ROW 64 -#define THREADS_PER_ROW_BIT 6 +#ifndef CN +#define CN 1 +#endif -#define ELEMENTS_PER_THREAD 4 -#define ELEMENTS_PER_THREAD_BIT 2 +#if CN == 1 +#define T_SUM float +#define T_SUMx4 float4 +#define CONVERT_TYPE_SUM convert_float +#define CONVERT_TYPE_SUMx4 convert_float4 +#define SUM_ZERO (0.0f) +#define SUM_ZEROx4 (0.0f, 0.0f, 0.0f, 0.0f) +#define VLOAD4 vload4 +#define SX x +#define SY y +#define SZ z +#define SW w +#elif CN == 4 +#define T_SUM float4 +#define T_SUMx4 float16 +#define CONVERT_TYPE_SUM convert_float4 +#define CONVERT_TYPE_SUMx4 convert_float16 +#define SUM_ZERO (0.0f, 0.0f, 0.0f, 0.0f) +#define SUM_ZEROx4 (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f) +#define VLOAD4 vload16 +#define SX s0123 +#define SY s4567 +#define SZ s89ab +#define SW scdef +#endif -#define LOCAL_MEM_STEP 260 //divup((get_local_size(0) + anX * 2), 4) * 4 +#ifndef FILTER_SIZE +#define FILTER_SIZE 3 +#endif -/////////////////////////////////////////////////////////////////////////////////////////////////// -/////////////////////////////////////////8uC1//////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////////////////////////// -__kernel void filter2D_C1_D0(__global uchar *src, int src_step, int src_offset_x, int src_offset_y, - __global uchar *dst, int dst_step, int dst_offset_x, int dst_offset_y, - __constant int *mat_kernel __attribute__((max_constant_size (16384))), - int cols,int rows, int operate_cols, int wholecols, int wholerows) +#define LOCAL_GROUP_SIZE 16 + +#define LOCAL_WIDTH ((FILTER_SIZE/2)*2 + LOCAL_GROUP_SIZE) 
+#define LOCAL_HEIGHT ((FILTER_SIZE/2)*2 + LOCAL_GROUP_SIZE) + +#define FILTER_RADIUS (FILTER_SIZE >> 1) + +__kernel void filter2D( + __global T_IMG *src, + __global T_IMG *dst, + int src_step, + int dst_step, + __constant float *mat_kernel, + __local T_IMG *local_data, + int wholerows, + int wholecols, + int src_offset_x, + int src_offset_y, + int dst_offset_x, + int dst_offset_y, + int cols, + int rows, + int operate_cols +) { - int gX = get_global_id(0); - int gY = get_global_id(1); - - int lX = get_local_id(0); - - int groupX_size = get_local_size(0); - int groupX_id = get_group_id(0); - -#define dst_align (dst_offset_x & 3) - int cols_start_index_group = src_offset_x - dst_align + groupX_size * groupX_id - ANX; - int rows_start_index = src_offset_y + (gY << ROWS_PER_GROUP_BITS) - ANY; - - __local uchar local_data[LOCAL_MEM_STEP * ROWS_FETCH]; - if((gY << 2) < rows) + int groupStartCol = get_group_id(0) * get_local_size(0); + int groupStartRow = get_group_id(1) * get_local_size(1); + + int localCol = get_local_id(0); + int localRow = get_local_id(1); + int globalCol = groupStartCol + localCol; + int globalRow = groupStartRow + localRow; + const int src_offset = mad24(src_offset_y, src_step, src_offset_x); + const int dst_offset = mad24(dst_offset_y, dst_step, dst_offset_x); +#ifdef BORDER_CONSTANT + for(int i = localRow; i < LOCAL_HEIGHT; i += get_local_size(1)) { - for(int i = 0; i < ROWS_FETCH; ++i) + int curRow = groupStartRow + i; + for(int j = localCol; j < LOCAL_WIDTH; j += get_local_size(0)) { - if((rows_start_index - src_offset_y) + i < rows + ANY) + int curCol = groupStartCol + j; + if(curRow < FILTER_RADIUS - src_offset_y || (curRow - FILTER_RADIUS) >= wholerows - src_offset_y|| + curCol < FILTER_RADIUS - src_offset_x || (curCol - FILTER_RADIUS) >= wholecols - src_offset_x) { -#ifdef BORDER_CONSTANT - int selected_row = rows_start_index + i; - int selected_cols = cols_start_index_group + lX; - - uchar data = *(src + selected_row * src_step + 
selected_cols); - int con = selected_row >=0 && selected_row < wholerows && selected_cols >=0 && selected_cols < wholecols; - data = con ? data : 0; - local_data[i * LOCAL_MEM_STEP + lX ] =data; - - if(lX < (ANX << 1)) - { - selected_cols = cols_start_index_group + lX + groupX_size; - - data = *(src + selected_row * src_step + selected_cols); - con = selected_row >=0 && selected_row < wholerows && selected_cols >=0 && selected_cols < wholecols; - data = con ? data : 0; - local_data[i * LOCAL_MEM_STEP + lX + groupX_size] =data; - } -#else - int selected_row = ADDR_H(rows_start_index + i, 0, wholerows); - selected_row = ADDR_B(rows_start_index + i, wholerows, selected_row); - - int selected_cols = ADDR_L(cols_start_index_group + lX, 0, wholecols); - selected_cols = ADDR_R(cols_start_index_group + lX, wholecols, selected_cols); - - uchar data = *(src + selected_row * src_step + selected_cols); - - local_data[i * LOCAL_MEM_STEP + lX ] =data; - - if(lX < (ANX << 1)) - { - selected_cols = cols_start_index_group + lX + groupX_size; - selected_cols = ADDR_R(selected_cols, wholecols, selected_cols); - - data = *(src + selected_row * src_step + selected_cols); - local_data[i * LOCAL_MEM_STEP + lX + groupX_size] =data; - } -#endif + local_data[(i) * LOCAL_WIDTH + j] = 0; + } + else + { + local_data[(i) * LOCAL_WIDTH + j] = src[(curRow - FILTER_RADIUS) * src_step + curCol - FILTER_RADIUS + src_offset]; } } } - barrier(CLK_LOCAL_MEM_FENCE); - - int process_col = groupX_size * groupX_id + ((lX % THREADS_PER_ROW) << 2); - if(((gY << 2) < rows) && (process_col < operate_cols)) +#else + for(int i = localRow; i < LOCAL_HEIGHT; i += get_local_size(1)) { - int dst_cols_start = dst_offset_x; - int dst_cols_end = dst_offset_x + cols; - int dst_cols_index = (dst_offset_x + process_col) & 0xfffffffc; - - int dst_rows_end = dst_offset_y + rows; - int dst_rows_index = dst_offset_y + (gY << ROWS_PER_GROUP_BITS) + (lX >> THREADS_PER_ROW_BIT); + int curRow = groupStartRow + i; - uchar4 
dst_data = *((__global uchar4 *)(dst + dst_rows_index * dst_step + dst_cols_index)); + curRow = ADDR_H(curRow, FILTER_RADIUS - src_offset_y, wholerows - src_offset_y); - int4 sum = (int4)(0); - uchar4 data; + curRow = ADDR_B(curRow - FILTER_RADIUS, wholerows - src_offset_y, curRow - FILTER_RADIUS); - for(int i = 0; i < ANCHOR; i++) + for(int j = localCol; j < LOCAL_WIDTH; j += get_local_size(0)) { -#pragma unroll - for(int j = 0; j < ANCHOR; j++) + int curCol = groupStartCol + j; + curCol = ADDR_L(curCol, FILTER_RADIUS - src_offset_x, wholecols - src_offset_x); + curCol = ADDR_R(curCol - FILTER_RADIUS, wholecols - src_offset_x, curCol - FILTER_RADIUS); + if(curRow < wholerows && curCol < wholecols) { - if(dst_rows_index < dst_rows_end) - { - int local_row = (lX >> THREADS_PER_ROW_BIT) + i; - int local_cols = ((lX % THREADS_PER_ROW) << ELEMENTS_PER_THREAD_BIT) + j; - - data = vload4(0, local_data+local_row * LOCAL_MEM_STEP + local_cols); - sum = sum + (mat_kernel[i * ANCHOR + j] * convert_int4_sat(data)); - } + local_data[(i) * LOCAL_WIDTH + j] = src[(curRow) * src_step + curCol + src_offset]; } } - - if(dst_rows_index < dst_rows_end) - { - sum.x = ((dst_cols_index + 0 >= dst_cols_start) && (dst_cols_index + 0 < dst_cols_end)) ? sum.x : dst_data.x; - sum.y = ((dst_cols_index + 1 >= dst_cols_start) && (dst_cols_index + 1 < dst_cols_end)) ? sum.y : dst_data.y; - sum.z = ((dst_cols_index + 2 >= dst_cols_start) && (dst_cols_index + 2 < dst_cols_end)) ? sum.z : dst_data.z; - sum.w = ((dst_cols_index + 3 >= dst_cols_start) && (dst_cols_index + 3 < dst_cols_end)) ? 
sum.w : dst_data.w; - *((__global uchar4 *)(dst + dst_rows_index * dst_step + dst_cols_index)) = convert_uchar4_sat(sum); - } } -} -/////////////////////////////////////////////////////////////////////////////////////////////////// -/////////////////////////////////////////32FC1//////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////////////////////////// -__kernel void filter2D_C1_D5(__global float *src, int src_step, int src_offset_x, int src_offset_y, - __global float *dst, int dst_step, int dst_offset_x, int dst_offset_y, - __constant int *mat_kernel __attribute__((max_constant_size (16384))), - int cols,int rows, int operate_cols, int wholecols, int wholerows) -{ - int gX = get_global_id(0); - int gY = get_global_id(1); - - int lX = get_local_id(0); - - int groupX_size = get_local_size(0); - int groupX_id = get_group_id(0); - -#define dst_align (dst_offset_x & 3) - int cols_start_index_group = src_offset_x - dst_align + groupX_size * groupX_id - ANX; - int rows_start_index = src_offset_y + (gY << ROWS_PER_GROUP_BITS) - ANY; - - __local float local_data[LOCAL_MEM_STEP * ROWS_FETCH]; - if(((gY << 2) < rows)) +#endif + barrier(CLK_LOCAL_MEM_FENCE); + if(globalRow < rows && globalCol < cols) { - for(int i = 0; i < ROWS_FETCH; ++i) + T_SUM sum = (T_SUM)SUM_ZERO; + int filterIdx = 0; + for(int i = 0; i < FILTER_SIZE; i++) { - if((rows_start_index - src_offset_y) + i < rows + ANY) - { -#ifdef BORDER_CONSTANT - int selected_row = rows_start_index + i; - int selected_cols = cols_start_index_group + lX; - - float data = *((__global float *)((__global char *)src + selected_row * src_step + (selected_cols << 2))); - int con = selected_row >=0 && selected_row < wholerows && selected_cols >=0 && selected_cols < wholecols; - data = con ? 
data : 0; - local_data[i * LOCAL_MEM_STEP + lX ] =data; + int offset = (i + localRow) * LOCAL_WIDTH; - if(lX < (ANX << 1)) - { - selected_cols = cols_start_index_group + lX + groupX_size; - - data = *((__global float *)((__global char *)src + selected_row * src_step + (selected_cols << 2))); - con = selected_row >=0 && selected_row < wholerows && selected_cols >=0 && selected_cols < wholecols; - data = con ? data : 0; - local_data[i * LOCAL_MEM_STEP + lX + groupX_size] =data; - } -#else - int selected_row = ADDR_H(rows_start_index + i, 0, wholerows); - selected_row = ADDR_B(rows_start_index + i, wholerows, selected_row); - - int selected_cols = ADDR_L(cols_start_index_group + lX, 0, wholecols); - selected_cols = ADDR_R(cols_start_index_group + lX, wholecols, selected_cols); - - float data = *((__global float *)((__global char *)src + selected_row * src_step + (selected_cols << 2))); - local_data[i * LOCAL_MEM_STEP + lX] =data; - - if(lX < (ANX << 1)) - { - selected_cols = cols_start_index_group + lX + groupX_size; - selected_cols = ADDR_R(selected_cols, wholecols, selected_cols); - - data = *((__global float *)((__global char *)src + selected_row * src_step + (selected_cols << 2))); - local_data[i * LOCAL_MEM_STEP + lX + groupX_size] =data; - } -#endif + for(int j = 0; j < FILTER_SIZE; j++) + { + sum += CONVERT_TYPE_SUM(local_data[offset + j + localCol]) * mat_kernel[filterIdx++]; } } + dst[(globalRow)*dst_step + (globalCol) + dst_offset] = CONVERT_TYPE(sum); } - barrier(CLK_LOCAL_MEM_FENCE); - - int process_col = groupX_size * groupX_id + ((lX % THREADS_PER_ROW) << 2); - if(((gY << 2) < rows) && (process_col < operate_cols)) - { - int dst_cols_start = dst_offset_x; - int dst_cols_end = dst_offset_x + cols; - int dst_cols_index = (dst_offset_x + process_col) & 0xfffffffc; - - int dst_rows_end = dst_offset_y + rows; - int dst_rows_index = dst_offset_y + (gY << ROWS_PER_GROUP_BITS) + (lX >> THREADS_PER_ROW_BIT); +} - float4 dst_data = *((__global float4*)((__global 
char *)dst + dst_rows_index * dst_step + (dst_cols_index << 2))); +/// following is specific for 3x3 kernels - float4 sum = (float4)(0); - float4 data; +////////////////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////Macro for define elements number per thread///////////////////////////// +//////////////////////////////////////////////////////////////////////////////////////////////////// +#define ANX 1 +#define ANY 1 - for(int i = 0; i < ANCHOR; i++) - { -#pragma unroll - for(int j = 0; j < ANCHOR; j++) - { - if(dst_rows_index < dst_rows_end) - { - int local_row = (lX >> THREADS_PER_ROW_BIT) + i; - int local_cols = ((lX % THREADS_PER_ROW) << ELEMENTS_PER_THREAD_BIT) + j; +#define ROWS_PER_GROUP 4 +#define ROWS_PER_GROUP_BITS 2 +#define ROWS_FETCH (ROWS_PER_GROUP + ANY + ANY) //(ROWS_PER_GROUP + anY * 2) - data = vload4(0, local_data+local_row * LOCAL_MEM_STEP + local_cols); - sum = sum + ((float)(mat_kernel[i * ANCHOR + j]) * data); - } - } - } +#define THREADS_PER_ROW 64 +#define THREADS_PER_ROW_BIT 6 - if(dst_rows_index < dst_rows_end) - { - sum.x = ((dst_cols_index + 0 >= dst_cols_start) && (dst_cols_index + 0 < dst_cols_end)) ? sum.x : dst_data.x; - sum.y = ((dst_cols_index + 1 >= dst_cols_start) && (dst_cols_index + 1 < dst_cols_end)) ? sum.y : dst_data.y; - sum.z = ((dst_cols_index + 2 >= dst_cols_start) && (dst_cols_index + 2 < dst_cols_end)) ? sum.z : dst_data.z; - sum.w = ((dst_cols_index + 3 >= dst_cols_start) && (dst_cols_index + 3 < dst_cols_end)) ? 
sum.w : dst_data.w; +#define ELEMENTS_PER_THREAD 4 +#define ELEMENTS_PER_THREAD_BIT 2 - *((__global float4 *)((__global char *)dst + dst_rows_index * dst_step + (dst_cols_index << 2))) = sum; - } - } -} +#define LOCAL_MEM_STEP 260 //divup((get_local_size(0) + anX * 2), 4) * 4 /////////////////////////////////////////////////////////////////////////////////////////////////// -/////////////////////////////////////////8uC4//////////////////////////////////////////////////////// +/////////////////////////////////////////8uC1//////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// -__kernel void filter2D_C4_D0(__global uchar4 *src, int src_step, int src_offset_x, int src_offset_y, - __global uchar4 *dst, int dst_step, int dst_offset_x, int dst_offset_y, - __constant int *mat_kernel __attribute__((max_constant_size (16384))), - int cols,int rows, int operate_cols, int wholecols, int wholerows) +__kernel void filter2D_3x3( + __global T_IMG *src, + __global T_IMG *dst, + int src_step, + int dst_step, + __constant float *mat_kernel, + __local T_IMG *local_data, + int wholerows, + int wholecols, + int src_offset_x, + int src_offset_y, + int dst_offset_x, + int dst_offset_y, + int cols, + int rows, + int operate_cols +) { int gX = get_global_id(0); int gY = get_global_id(1); @@ -341,9 +280,7 @@ __kernel void filter2D_C4_D0(__global uchar4 *src, int src_step, int src_offset_ int cols_start_index_group = src_offset_x - dst_align + groupX_size * groupX_id - ANX; int rows_start_index = src_offset_y + (gY << ROWS_PER_GROUP_BITS) - ANY; - __local uchar4 local_data[LOCAL_MEM_STEP * ROWS_FETCH]; - - if(((gY << 2) < rows)) + if((gY << 2) < rows) { for(int i = 0; i < ROWS_FETCH; ++i) { @@ -353,19 +290,19 @@ __kernel void filter2D_C4_D0(__global uchar4 *src, int src_step, int src_offset_ int selected_row = rows_start_index + i; int selected_cols = cols_start_index_group + lX; - uchar4 
data = *((__global uchar4*)((__global char*)src + selected_row * src_step + (selected_cols << 2))); - int con = selected_row >=0 && selected_row < wholerows && selected_cols >=0 && selected_cols < wholecols; + T_IMG data = src[mad24(selected_row, src_step, selected_cols)]; + int con = selected_row >= 0 && selected_row < wholerows && selected_cols >= 0 && selected_cols < wholecols; data = con ? data : 0; - local_data[i * LOCAL_MEM_STEP + lX ] =data; + local_data[mad24(i, LOCAL_MEM_STEP, lX)] = data; if(lX < (ANX << 1)) { selected_cols = cols_start_index_group + lX + groupX_size; - data = *((__global uchar4*)((__global char*)src + selected_row * src_step + (selected_cols << 2))); - con = selected_row >=0 && selected_row < wholerows && selected_cols >=0 && selected_cols < wholecols; + data = src[mad24(selected_row, src_step, selected_cols)]; + con = selected_row >= 0 && selected_row < wholerows && selected_cols >= 0 && selected_cols < wholecols; data = con ? data : 0; - local_data[i * LOCAL_MEM_STEP + lX + groupX_size] =data; + local_data[mad24(i, LOCAL_MEM_STEP, lX) + groupX_size] = data; } #else int selected_row = ADDR_H(rows_start_index + i, 0, wholerows); @@ -374,17 +311,17 @@ __kernel void filter2D_C4_D0(__global uchar4 *src, int src_step, int src_offset_ int selected_cols = ADDR_L(cols_start_index_group + lX, 0, wholecols); selected_cols = ADDR_R(cols_start_index_group + lX, wholecols, selected_cols); - uchar4 data = *((__global uchar4*)((__global char*)src + selected_row * src_step + (selected_cols << 2))); + T_IMG data = src[mad24(selected_row, src_step, selected_cols)]; - local_data[i * LOCAL_MEM_STEP + lX] =data; + local_data[mad24(i, LOCAL_MEM_STEP, lX)] = data; if(lX < (ANX << 1)) { selected_cols = cols_start_index_group + lX + groupX_size; selected_cols = ADDR_R(selected_cols, wholecols, selected_cols); - data = *((__global uchar4*)((__global char*)src + selected_row * src_step + (selected_cols << 2))); - local_data[i * LOCAL_MEM_STEP + lX + groupX_size] 
=data; + data = src[mad24(selected_row, src_step, selected_cols)]; + local_data[mad24(i, LOCAL_MEM_STEP, lX) + groupX_size] = data; } #endif } @@ -401,131 +338,40 @@ __kernel void filter2D_C4_D0(__global uchar4 *src, int src_step, int src_offset_ int dst_rows_end = dst_offset_y + rows; int dst_rows_index = dst_offset_y + (gY << ROWS_PER_GROUP_BITS) + (lX >> THREADS_PER_ROW_BIT); + dst = dst + mad24(dst_rows_index, dst_step, dst_cols_index); - uchar16 dst_data; - dst_data = *((__global uchar16*)((__global char *)dst + dst_rows_index * dst_step + (dst_cols_index << 2))); + T_IMGx4 dst_data = *(__global T_IMGx4 *)dst; - int16 sum = (int16)(0); - uchar16 data; + T_SUMx4 sum = (T_SUMx4)SUM_ZEROx4; + T_IMGx4 data; - for(int i = 0; i < ANCHOR; i++) + for(int i = 0; i < FILTER_SIZE; i++) { #pragma unroll - for(int j = 0; j < ANCHOR; j++) + for(int j = 0; j < FILTER_SIZE; j++) { if(dst_rows_index < dst_rows_end) { int local_row = (lX >> THREADS_PER_ROW_BIT) + i; int local_cols = ((lX % THREADS_PER_ROW) << ELEMENTS_PER_THREAD_BIT) + j; - data = vload16(0, (__local uchar *)(local_data+local_row * LOCAL_MEM_STEP + local_cols)); - sum = sum + (mat_kernel[i * ANCHOR + j] * convert_int16_sat(data)); + data = VLOAD4(0, (__local T_IMG_C1 *)(local_data + local_row * LOCAL_MEM_STEP + local_cols)); + sum = sum + (mat_kernel[i * FILTER_SIZE + j] * CONVERT_TYPE_SUMx4(data)); } } } - if(dst_rows_index < dst_rows_end) { - uchar16 sum1 = convert_uchar16_sat(sum); - sum1.s0123 = ((dst_cols_index + 0 >= dst_cols_start) && (dst_cols_index + 0 < dst_cols_end))? - sum1.s0123 : dst_data.s0123; - sum1.s4567 = ((dst_cols_index + 1 >= dst_cols_start) && (dst_cols_index + 1 < dst_cols_end))? - sum1.s4567 : dst_data.s4567; - sum1.s89ab = ((dst_cols_index + 2 >= dst_cols_start) && (dst_cols_index + 2 < dst_cols_end))? - sum1.s89ab : dst_data.s89ab; - sum1.scdef = ((dst_cols_index + 3 >= dst_cols_start) && (dst_cols_index + 3 < dst_cols_end))? 
- sum1.scdef : dst_data.scdef; - - *((__global uchar16*)((__global char *)dst + dst_rows_index * dst_step + (dst_cols_index << 2))) = sum1; + T_IMGx4 tmp_dst = CONVERT_TYPEx4(sum); + tmp_dst.SX = ((dst_cols_index + 0 >= dst_cols_start) && (dst_cols_index + 0 < dst_cols_end)) ? + tmp_dst.SX : dst_data.SX; + tmp_dst.SY = ((dst_cols_index + 1 >= dst_cols_start) && (dst_cols_index + 1 < dst_cols_end)) ? + tmp_dst.SY : dst_data.SY; + tmp_dst.SZ = ((dst_cols_index + 2 >= dst_cols_start) && (dst_cols_index + 2 < dst_cols_end)) ? + tmp_dst.SZ : dst_data.SZ; + tmp_dst.SW = ((dst_cols_index + 3 >= dst_cols_start) && (dst_cols_index + 3 < dst_cols_end)) ? + tmp_dst.SW : dst_data.SW; + *(__global T_IMGx4 *)dst = tmp_dst; } } } -/////////////////////////////////////////////////////////////////////////////////////////////////// -/////////////////////////////////////////32FC4//////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////////////////////////// -#define ROWS_FETCH_C4 (1 + ANY + ANY) //(ROWS_PER_GROUP + anY * 2) -#define LOCAL_MEM_STEP_C4 260 //divup((get_local_size(0) + anX * 2), 4) * 4) -__kernel void filter2D_C4_D5(__global float4 *src, int src_step, int src_offset_x, int src_offset_y, - __global float4 *dst, int dst_step, int dst_offset_x, int dst_offset_y, - __constant int *mat_kernel __attribute__((max_constant_size (16384))), - int cols,int rows, int operate_cols, int wholecols, int wholerows) -{ - int gX = get_global_id(0); - int gY = get_global_id(1); - - int lX = get_local_id(0); - - int groupX_size = get_local_size(0); - int groupX_id = get_group_id(0); - - int cols_start_index_group = src_offset_x + groupX_size * groupX_id - ANX; - int rows_start_index = src_offset_y + gY - ANY; - - __local float4 local_data[LOCAL_MEM_STEP_C4 * ROWS_FETCH_C4]; - if((gY < rows) && (gX < (operate_cols + ANX + ANX))) - { - for(int i = 0; i < ROWS_FETCH_C4; ++i) - { - if((rows_start_index - 
src_offset_y) + i < rows + ANY) - { -#ifdef BORDER_CONSTANT - int selected_row = rows_start_index + i; - int selected_cols = cols_start_index_group + lX; - - float4 data = *((__global float4*)((__global char*)src + selected_row * src_step + (selected_cols << 4))); - int con = selected_row >=0 && selected_row < wholerows && selected_cols >=0 && selected_cols < wholecols; - data = con ? data : 0; - local_data[i * LOCAL_MEM_STEP + lX ] =data; - - if(lX < (ANX << 1)) - { - selected_cols = cols_start_index_group + lX + groupX_size; - - data = *((__global float4*)((__global char*)src + selected_row * src_step + (selected_cols << 4))); - con = selected_row >=0 && selected_row < wholerows && selected_cols >=0 && selected_cols < wholecols; - data = con ? data : 0; - local_data[i * LOCAL_MEM_STEP + lX + groupX_size] =data; - } -#else - int selected_row = ADDR_H(rows_start_index + i, 0, wholerows); - selected_row = ADDR_B(rows_start_index + i, wholerows, selected_row); - - int selected_cols = ADDR_L(cols_start_index_group + lX, 0, wholecols); - selected_cols = ADDR_R(cols_start_index_group + lX, wholecols, selected_cols); - - float4 data = *((__global float4*)((__global char*)src + selected_row * src_step + (selected_cols << 4))); - local_data[i * LOCAL_MEM_STEP_C4 + lX] =data; - - if(lX < (ANX << 1)) - { - selected_cols = cols_start_index_group + lX + groupX_size; - selected_cols = ADDR_R(selected_cols, wholecols, selected_cols); - - data = *((__global float4*)((__global char*)src + selected_row * src_step + (selected_cols << 4))); - local_data[i * LOCAL_MEM_STEP_C4 + lX + groupX_size] =data; - } -#endif - } - } - } - barrier(CLK_LOCAL_MEM_FENCE); - - if((gY < rows) && (gX < operate_cols)) - { - int dst_cols_index = dst_offset_x + gX; - int dst_rows_index = dst_offset_y + gY; - - float4 sum = (float4)(0); - - for(int i = 0; i < ANCHOR; i++) - { - for(int j = 0; j < ANCHOR; j++) - { - int local_cols = lX + j; - sum = sum + ((float)mat_kernel[i * ANCHOR + j] * local_data[i * 
LOCAL_MEM_STEP_C4 + local_cols]); - } - } - - *((__global float4*)((__global char *)dst + dst_rows_index * dst_step + (dst_cols_index << 4))) = sum; - } -} diff --git a/modules/ocl/test/test_filters.cpp b/modules/ocl/test/test_filters.cpp index ec46a5cd6a..9a1264f8a4 100644 --- a/modules/ocl/test/test_filters.cpp +++ b/modules/ocl/test/test_filters.cpp @@ -324,6 +324,35 @@ TEST_P(GaussianBlur, Mat) +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Filter2D +struct Filter2D : FilterTestBase +{ + int type; + cv::Size ksize; + int bordertype; + Point anchor; + virtual void SetUp() + { + type = GET_PARAM(0); + ksize = GET_PARAM(1); + bordertype = GET_PARAM(3); + Init(type); + anchor = Point(-1,-1); + } +}; + +TEST_P(Filter2D, Mat) +{ + cv::Mat kernel = randomMat(cv::Size(ksize.width, ksize.height), CV_32FC1, 0.0, 1.0); + for(int j = 0; j < LOOP_TIMES; j++) + { + random_roi(); + cv::filter2D(mat1_roi, dst_roi, -1, kernel, anchor, 0.0, bordertype); + cv::ocl::filter2D(gmat1, gdst, -1, kernel, anchor, bordertype); + Near(1); + } +} INSTANTIATE_TEST_CASE_P(Filter, Blur, Combine( Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC4), Values(cv::Size(3, 3), cv::Size(5, 5), cv::Size(7, 7)), @@ -331,7 +360,7 @@ INSTANTIATE_TEST_CASE_P(Filter, Blur, Combine( Values((MatType)cv::BORDER_CONSTANT, (MatType)cv::BORDER_REPLICATE, (MatType)cv::BORDER_REFLECT, (MatType)cv::BORDER_REFLECT_101))); -INSTANTIATE_TEST_CASE_P(Filters, Laplacian, Combine( +INSTANTIATE_TEST_CASE_P(Filter, Laplacian, Combine( Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4), Values(Size(3, 3)), Values(Size(0, 0)), //not use @@ -365,4 +394,10 @@ INSTANTIATE_TEST_CASE_P(Filter, GaussianBlur, Combine( +INSTANTIATE_TEST_CASE_P(Filter, Filter2D, testing::Combine( + Values(CV_8UC1, CV_32FC1, CV_32FC4), + Values(Size(3, 3), Size(15, 15), Size(25, 25)), + Values(Size(0, 0)), //not use + Values((MatType)cv::BORDER_CONSTANT, 
(MatType)cv::BORDER_REFLECT101, (MatType)cv::BORDER_REPLICATE, (MatType)cv::BORDER_REFLECT))); + #endif // HAVE_OPENCL From d4e098f40179dd3ddbe134bdfffb503b0d5dddae Mon Sep 17 00:00:00 2001 From: Andrey Pavlenko Date: Tue, 13 Aug 2013 12:04:36 +0400 Subject: [PATCH 016/139] enabling of VideoCapture(String) [fixing issue #3207] - enable auto-wrap of VideoCapture; - minor refactoring of generated code templates. --- modules/java/generator/gen_java.py | 104 ++++- .../java/generator/src/cpp/VideoCapture.cpp | 435 ------------------ .../src/java/highgui+VideoCapture.java | 240 ---------- 3 files changed, 87 insertions(+), 692 deletions(-) delete mode 100644 modules/java/generator/src/cpp/VideoCapture.cpp delete mode 100644 modules/java/generator/src/java/highgui+VideoCapture.java diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index 1da5cb68c6..1e084c70e3 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -12,7 +12,7 @@ class_ignore_list = ( #core "FileNode", "FileStorage", "KDTree", #highgui - "VideoWriter", "VideoCapture", + "VideoWriter", ) const_ignore_list = ( @@ -512,6 +512,54 @@ JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Core_n_1getTextSize "resizeWindow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' }, }, # Highgui + 'VideoCapture' : + { + "getSupportedPreviewSizes" : + { + 'j_code' : +""" + public java.util.List getSupportedPreviewSizes() + { + String[] sizes_str = getSupportedPreviewSizes_0(nativeObj).split(","); + java.util.List sizes = new java.util.LinkedList(); + + for (String str : sizes_str) { + String[] wh = str.split("x"); + sizes.add(new org.opencv.core.Size(Double.parseDouble(wh[0]), Double.parseDouble(wh[1]))); + } + + return sizes; + } + +""", + 'jn_code' : +"""\n private static native String getSupportedPreviewSizes_0(long nativeObj);\n""", + 'cpp_code' : +""" +JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_getSupportedPreviewSizes_10 + 
(JNIEnv *env, jclass, jlong self); + +JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_getSupportedPreviewSizes_10 + (JNIEnv *env, jclass, jlong self) +{ + static const char method_name[] = "highgui::VideoCapture_getSupportedPreviewSizes_10()"; + try { + LOGD(%s, method_name); + VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL + union {double prop; const char* name;} u; + u.prop = me->get(CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING); + return env->NewStringUTF(u.name); + } catch(const std::exception &e) { + throwJavaException(env, &e, method_name); + } catch (...) { + throwJavaException(env, 0, method_name); + } + return env->NewStringUTF(""); +} + +""", + }, # getSupportedPreviewSizes + }, # VideoCapture } # { class : { func : {arg_name : ctype} } } @@ -878,21 +926,48 @@ public class %(jc)s { self.add_func(decl) self.cpp_code = StringIO() - self.cpp_code.write(""" + self.cpp_code.write(Template(""" // // This file is auto-generated, please don't edit! // -#define LOG_TAG "org.opencv.%(m)s" +#define LOG_TAG "org.opencv.$m" #include "common.h" -#include "opencv2/%(m)s/%(m)s.hpp" + +#include "opencv2/opencv_modules.hpp" +#ifdef HAVE_OPENCV_$M +#include "opencv2/$m/$m.hpp" using namespace cv; +/// throw java exception +static void throwJavaException(JNIEnv *env, const std::exception *e, const char *method) { + std::string what = "unknown exception"; + jclass je = 0; + + if(e) { + std::string exception_type = "std::exception"; + + if(dynamic_cast(e)) { + exception_type = "cv::Exception"; + je = env->FindClass("org/opencv/core/CvException"); + } + + what = exception_type + ": " + e->what(); + } + + if(!je) je = env->FindClass("java/lang/Exception"); + env->ThrowNew(je, what.c_str()); + + LOGE("%s caught %s", method, what.c_str()); + (void)method; // avoid "unused" warning +} + + extern "C" { -""" % {'m' : module} ) +""").substitute( m = module, M = module.upper() ) ) # generate code for the classes for name in self.classes.keys(): @@ -907,7 
+982,7 @@ extern "C" { java_code = Template(java_code).substitute(imports = imports) self.save("%s/%s+%s.java" % (output_path, module, self.classes[name].jname), java_code) - self.cpp_code.write( '\n} // extern "C"\n' ) + self.cpp_code.write( '\n} // extern "C"\n\n#endif // HAVE_OPENCV_%s\n' % module.upper() ) self.save(output_path+"/"+module+".cpp", self.cpp_code.getvalue()) # report @@ -1266,23 +1341,18 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname ($argst); JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname ($args) { + static const char method_name[] = "$module::$fname()"; try { - LOGD("$module::$fname()"); + LOGD("%s", method_name); $prologue $retval$cvname( $cvargs ); $epilogue$ret - } catch(cv::Exception e) { - LOGD("$module::$fname() catched cv::Exception: %s", e.what()); - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - $default + } catch(const std::exception &e) { + throwJavaException(env, &e, method_name); } catch (...) 
{ - LOGD("$module::$fname() catched unknown exception (...)"); - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {$module::$fname()}"); - $default + throwJavaException(env, 0, method_name); } + $default } diff --git a/modules/java/generator/src/cpp/VideoCapture.cpp b/modules/java/generator/src/cpp/VideoCapture.cpp deleted file mode 100644 index 5b9266660f..0000000000 --- a/modules/java/generator/src/cpp/VideoCapture.cpp +++ /dev/null @@ -1,435 +0,0 @@ -#define LOG_TAG "org.opencv.highgui.VideoCapture" -#include "common.h" - -#include "opencv2/opencv_modules.hpp" -#ifdef HAVE_OPENCV_HIGHGUI - -#include "opencv2/highgui/highgui_c.h" -#include "opencv2/highgui/highgui.hpp" -using namespace cv; - - -extern "C" { - -// -// VideoCapture::VideoCapture() -// - -JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_n_1VideoCapture__ - (JNIEnv* env, jclass); - -JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_n_1VideoCapture__ - (JNIEnv* env, jclass) -{ - try { - LOGD("highgui::VideoCapture_n_1VideoCapture__()"); - - VideoCapture* _retval_ = new VideoCapture( ); - - return (jlong) _retval_; - } catch(cv::Exception e) { - LOGD("highgui::VideoCapture_n_1VideoCapture__() catched cv::Exception: %s", e.what()); - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; - } catch (...) 
{ - LOGD("highgui::VideoCapture_n_1VideoCapture__() catched unknown exception (...)"); - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1VideoCapture__()}"); - return 0; - } -} - - -// -// VideoCapture::VideoCapture(int device) -// - -JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_n_1VideoCapture__I - (JNIEnv* env, jclass, jint device); - -JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_n_1VideoCapture__I - (JNIEnv* env, jclass, jint device) -{ - try { - LOGD("highgui::VideoCapture_n_1VideoCapture__I()"); - - VideoCapture* _retval_ = new VideoCapture( device ); - - return (jlong) _retval_; - } catch(cv::Exception e) { - LOGD("highgui::VideoCapture_n_1VideoCapture__I() catched cv::Exception: %s", e.what()); - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; - } catch (...) { - LOGD("highgui::VideoCapture_n_1VideoCapture__I() catched unknown exception (...)"); - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1VideoCapture__I()}"); - return 0; - } -} - - - -// -// double VideoCapture::get(int propId) -// - -JNIEXPORT jdouble JNICALL Java_org_opencv_highgui_VideoCapture_n_1get - (JNIEnv* env, jclass, jlong self, jint propId); - -JNIEXPORT jdouble JNICALL Java_org_opencv_highgui_VideoCapture_n_1get - (JNIEnv* env, jclass, jlong self, jint propId) -{ - try { - LOGD("highgui::VideoCapture_n_1get()"); - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - double _retval_ = me->get( propId ); - - return _retval_; - } catch(cv::Exception e) { - LOGD("highgui::VideoCapture_n_1get() catched cv::Exception: %s", e.what()); - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; 
- } catch (...) { - LOGD("highgui::VideoCapture_n_1get() catched unknown exception (...)"); - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1get()}"); - return 0; - } -} - - - -// -// bool VideoCapture::grab() -// - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1grab - (JNIEnv* env, jclass, jlong self); - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1grab - (JNIEnv* env, jclass, jlong self) -{ - try { - LOGD("highgui::VideoCapture_n_1grab()"); - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - bool _retval_ = me->grab( ); - - return _retval_; - } catch(cv::Exception e) { - LOGD("highgui::VideoCapture_n_1grab() catched cv::Exception: %s", e.what()); - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; - } catch (...) { - LOGD("highgui::VideoCapture_n_1grab() catched unknown exception (...)"); - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1grab()}"); - return 0; - } -} - - - -// -// bool VideoCapture::isOpened() -// - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1isOpened - (JNIEnv* env, jclass, jlong self); - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1isOpened - (JNIEnv* env, jclass, jlong self) -{ - try { - LOGD("highgui::VideoCapture_n_1isOpened()"); - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - bool _retval_ = me->isOpened( ); - - return _retval_; - } catch(cv::Exception e) { - LOGD("highgui::VideoCapture_n_1isOpened() catched cv::Exception: %s", e.what()); - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; - } catch (...) 
{ - LOGD("highgui::VideoCapture_n_1isOpened() catched unknown exception (...)"); - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1isOpened()}"); - return 0; - } -} - - -// -// bool VideoCapture::open(int device) -// - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1open__JI - (JNIEnv* env, jclass, jlong self, jint device); - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1open__JI - (JNIEnv* env, jclass, jlong self, jint device) -{ - try { - LOGD("highgui::VideoCapture_n_1open__JI()"); - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - bool _retval_ = me->open( device ); - - return _retval_; - } catch(cv::Exception e) { - LOGD("highgui::VideoCapture_n_1open__JI() catched cv::Exception: %s", e.what()); - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; - } catch (...) 
{ - LOGD("highgui::VideoCapture_n_1open__JI() catched unknown exception (...)"); - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1open__JI()}"); - return 0; - } -} - - - -// -// bool VideoCapture::read(Mat image) -// - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1read - (JNIEnv* env, jclass, jlong self, jlong image_nativeObj); - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1read - (JNIEnv* env, jclass, jlong self, jlong image_nativeObj) -{ - try { - LOGD("highgui::VideoCapture_n_1read()"); - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - Mat& image = *((Mat*)image_nativeObj); - bool _retval_ = me->read( image ); - - return _retval_; - } catch(cv::Exception e) { - LOGD("highgui::VideoCapture_n_1read() catched cv::Exception: %s", e.what()); - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; - } catch (...) 
{ - LOGD("highgui::VideoCapture_n_1read() catched unknown exception (...)"); - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1read()}"); - return 0; - } -} - - - -// -// void VideoCapture::release() -// - -JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_n_1release - (JNIEnv* env, jclass, jlong self); - -JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_n_1release - (JNIEnv* env, jclass, jlong self) -{ - try { - - LOGD("highgui::VideoCapture_n_1release()"); - - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - me->release( ); - - return; - } catch(cv::Exception e) { - - LOGD("highgui::VideoCapture_n_1release() catched cv::Exception: %s", e.what()); - - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return; - } catch (...) { - - LOGD("highgui::VideoCapture_n_1release() catched unknown exception (...)"); - - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1release()}"); - return; - } -} - - - -// -// bool VideoCapture::retrieve(Mat image, int channel = 0) -// - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJI - (JNIEnv* env, jclass, jlong self, jlong image_nativeObj, jint channel); - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJI - (JNIEnv* env, jclass, jlong self, jlong image_nativeObj, jint channel) -{ - try { - - LOGD("highgui::VideoCapture_n_1retrieve__JJI()"); - - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - Mat& image = *((Mat*)image_nativeObj); - bool _retval_ = me->retrieve( image, channel ); - - return _retval_; - } catch(cv::Exception e) { - - LOGD("highgui::VideoCapture_n_1retrieve__JJI() catched cv::Exception: %s", e.what()); - - jclass je = 
env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; - } catch (...) { - - LOGD("highgui::VideoCapture_n_1retrieve__JJI() catched unknown exception (...)"); - - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1retrieve__JJI()}"); - return 0; - } -} - - - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJ - (JNIEnv* env, jclass, jlong self, jlong image_nativeObj); - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJ - (JNIEnv* env, jclass, jlong self, jlong image_nativeObj) -{ - try { - - LOGD("highgui::VideoCapture_n_1retrieve__JJ()"); - - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - Mat& image = *((Mat*)image_nativeObj); - bool _retval_ = me->retrieve( image ); - - return _retval_; - } catch(cv::Exception e) { - - LOGD("highgui::VideoCapture_n_1retrieve__JJ() catched cv::Exception: %s", e.what()); - - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; - } catch (...) 
{ - - LOGD("highgui::VideoCapture_n_1retrieve__JJ() catched unknown exception (...)"); - - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1retrieve__JJ()}"); - return 0; - } -} - - - -// -// bool VideoCapture::set(int propId, double value) -// - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1set - (JNIEnv* env, jclass, jlong self, jint propId, jdouble value); - -JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1set - (JNIEnv* env, jclass, jlong self, jint propId, jdouble value) -{ - try { - - LOGD("highgui::VideoCapture_n_1set()"); - - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - bool _retval_ = me->set( propId, value ); - - return _retval_; - } catch(cv::Exception e) { - - LOGD("highgui::VideoCapture_n_1set() catched cv::Exception: %s", e.what()); - - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return 0; - } catch (...) 
{ - - LOGD("highgui::VideoCapture_n_1set() catched unknown exception (...)"); - - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1set()}"); - return 0; - } -} - -JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_n_1getSupportedPreviewSizes - (JNIEnv *env, jclass, jlong self); - -JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_n_1getSupportedPreviewSizes - (JNIEnv *env, jclass, jlong self) -{ - try { - - LOGD("highgui::VideoCapture_n_1set()"); - - VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL - union {double prop; const char* name;} u; - u.prop = me->get(CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING); - return env->NewStringUTF(u.name); - } catch(cv::Exception e) { - - LOGD("highgui::VideoCapture_n_1getSupportedPreviewSizes() catched cv::Exception: %s", e.what()); - - jclass je = env->FindClass("org/opencv/core/CvException"); - if(!je) je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, e.what()); - return env->NewStringUTF(""); - } catch (...) 
{ - - LOGD("highgui::VideoCapture_n_1getSupportedPreviewSizes() catched unknown exception (...)"); - - jclass je = env->FindClass("java/lang/Exception"); - env->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1getSupportedPreviewSizes()}"); - return env->NewStringUTF(""); - } -} - - - -// -// native support for java finalize() -// static void VideoCapture::n_delete( __int64 self ) -// - -JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_n_1delete - (JNIEnv*, jclass, jlong self); - -JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_n_1delete - (JNIEnv*, jclass, jlong self) -{ - delete (VideoCapture*) self; -} - -} // extern "C" - -#endif // HAVE_OPENCV_HIGHGUI \ No newline at end of file diff --git a/modules/java/generator/src/java/highgui+VideoCapture.java b/modules/java/generator/src/java/highgui+VideoCapture.java deleted file mode 100644 index 6f3b03540d..0000000000 --- a/modules/java/generator/src/java/highgui+VideoCapture.java +++ /dev/null @@ -1,240 +0,0 @@ -package org.opencv.highgui; - -import java.util.List; -import java.util.LinkedList; - -import org.opencv.core.Mat; -import org.opencv.core.Size; - -// C++: class VideoCapture -//javadoc: VideoCapture -public class VideoCapture { - - protected final long nativeObj; - - protected VideoCapture(long addr) { - nativeObj = addr; - } - - // - // C++: VideoCapture::VideoCapture() - // - - // javadoc: VideoCapture::VideoCapture() - public VideoCapture() - { - - nativeObj = n_VideoCapture(); - - return; - } - - // - // C++: VideoCapture::VideoCapture(int device) - // - - // javadoc: VideoCapture::VideoCapture(device) - public VideoCapture(int device) - { - - nativeObj = n_VideoCapture(device); - - return; - } - - // - // C++: double VideoCapture::get(int propId) - // - -/** - * Returns the specified "VideoCapture" property. - * - * Note: When querying a property that is not supported by the backend used by - * the "VideoCapture" class, value 0 is returned. 
- * - * @param propId property identifier; it can be one of the following: - * * CV_CAP_PROP_FRAME_WIDTH width of the frames in the video stream. - * * CV_CAP_PROP_FRAME_HEIGHT height of the frames in the video stream. - * - * @see org.opencv.highgui.VideoCapture.get - */ - public double get(int propId) - { - - double retVal = n_get(nativeObj, propId); - - return retVal; - } - - public List getSupportedPreviewSizes() - { - String[] sizes_str = n_getSupportedPreviewSizes(nativeObj).split(","); - List sizes = new LinkedList(); - - for (String str : sizes_str) { - String[] wh = str.split("x"); - sizes.add(new Size(Double.parseDouble(wh[0]), Double.parseDouble(wh[1]))); - } - - return sizes; - } - - // - // C++: bool VideoCapture::grab() - // - - // javadoc: VideoCapture::grab() - public boolean grab() - { - - boolean retVal = n_grab(nativeObj); - - return retVal; - } - - // - // C++: bool VideoCapture::isOpened() - // - - // javadoc: VideoCapture::isOpened() - public boolean isOpened() - { - - boolean retVal = n_isOpened(nativeObj); - - return retVal; - } - - // - // C++: bool VideoCapture::open(int device) - // - - // javadoc: VideoCapture::open(device) - public boolean open(int device) - { - - boolean retVal = n_open(nativeObj, device); - - return retVal; - } - - // - // C++: bool VideoCapture::read(Mat image) - // - - // javadoc: VideoCapture::read(image) - public boolean read(Mat image) - { - - boolean retVal = n_read(nativeObj, image.nativeObj); - - return retVal; - } - - // - // C++: void VideoCapture::release() - // - - // javadoc: VideoCapture::release() - public void release() - { - - n_release(nativeObj); - - return; - } - - // - // C++: bool VideoCapture::retrieve(Mat image, int channel = 0) - // - - // javadoc: VideoCapture::retrieve(image, channel) - public boolean retrieve(Mat image, int channel) - { - - boolean retVal = n_retrieve(nativeObj, image.nativeObj, channel); - - return retVal; - } - - // javadoc: VideoCapture::retrieve(image) - public boolean 
retrieve(Mat image) - { - - boolean retVal = n_retrieve(nativeObj, image.nativeObj); - - return retVal; - } - - // - // C++: bool VideoCapture::set(int propId, double value) - // - -/** - * Sets a property in the "VideoCapture". - * - * @param propId property identifier; it can be one of the following: - * * CV_CAP_PROP_FRAME_WIDTH width of the frames in the video stream. - * * CV_CAP_PROP_FRAME_HEIGHT height of the frames in the video stream. - * @param value value of the property. - * - * @see org.opencv.highgui.VideoCapture.set - */ - public boolean set(int propId, double value) - { - - boolean retVal = n_set(nativeObj, propId, value); - - return retVal; - } - - @Override - protected void finalize() throws Throwable { - n_delete(nativeObj); - super.finalize(); - } - - // C++: VideoCapture::VideoCapture() - private static native long n_VideoCapture(); - - // C++: VideoCapture::VideoCapture(string filename) - private static native long n_VideoCapture(java.lang.String filename); - - // C++: VideoCapture::VideoCapture(int device) - private static native long n_VideoCapture(int device); - - // C++: double VideoCapture::get(int propId) - private static native double n_get(long nativeObj, int propId); - - // C++: bool VideoCapture::grab() - private static native boolean n_grab(long nativeObj); - - // C++: bool VideoCapture::isOpened() - private static native boolean n_isOpened(long nativeObj); - - // C++: bool VideoCapture::open(string filename) - private static native boolean n_open(long nativeObj, java.lang.String filename); - - // C++: bool VideoCapture::open(int device) - private static native boolean n_open(long nativeObj, int device); - - // C++: bool VideoCapture::read(Mat image) - private static native boolean n_read(long nativeObj, long image_nativeObj); - - // C++: void VideoCapture::release() - private static native void n_release(long nativeObj); - - // C++: bool VideoCapture::retrieve(Mat image, int channel = 0) - private static native boolean 
n_retrieve(long nativeObj, long image_nativeObj, int channel); - - private static native boolean n_retrieve(long nativeObj, long image_nativeObj); - - // C++: bool VideoCapture::set(int propId, double value) - private static native boolean n_set(long nativeObj, int propId, double value); - - private static native String n_getSupportedPreviewSizes(long nativeObj); - - // native support for java finalize() - private static native void n_delete(long nativeObj); - -} From 29eefe52bb6b927cf6cd2983d429964a4bbef4eb Mon Sep 17 00:00:00 2001 From: peng xiao Date: Tue, 13 Aug 2013 16:27:12 +0800 Subject: [PATCH 017/139] Add OpenCL accelerated implementation for Retina. --- modules/bioinspired/CMakeLists.txt | 2 +- .../include/opencv2/bioinspired/retina.hpp | 3 +- .../bioinspired/src/opencl/retina_kernel.cl | 753 ++++++++ modules/bioinspired/src/precomp.hpp | 6 + modules/bioinspired/src/retina_ocl.cpp | 1651 +++++++++++++++++ modules/bioinspired/src/retina_ocl.hpp | 633 +++++++ modules/bioinspired/test/test_retina_ocl.cpp | 139 ++ 7 files changed, 3185 insertions(+), 2 deletions(-) create mode 100644 modules/bioinspired/src/opencl/retina_kernel.cl create mode 100644 modules/bioinspired/src/retina_ocl.cpp create mode 100644 modules/bioinspired/src/retina_ocl.hpp create mode 100644 modules/bioinspired/test/test_retina_ocl.cpp diff --git a/modules/bioinspired/CMakeLists.txt b/modules/bioinspired/CMakeLists.txt index a27ad73d06..b0f152cfcf 100644 --- a/modules/bioinspired/CMakeLists.txt +++ b/modules/bioinspired/CMakeLists.txt @@ -1,2 +1,2 @@ set(the_description "Biologically inspired algorithms") -ocv_define_module(bioinspired opencv_core OPTIONAL opencv_highgui) +ocv_define_module(bioinspired opencv_core OPTIONAL opencv_highgui opencv_ocl) diff --git a/modules/bioinspired/include/opencv2/bioinspired/retina.hpp b/modules/bioinspired/include/opencv2/bioinspired/retina.hpp index 4b4c19f90a..b4fda7038e 100644 --- a/modules/bioinspired/include/opencv2/bioinspired/retina.hpp +++ 
b/modules/bioinspired/include/opencv2/bioinspired/retina.hpp @@ -304,7 +304,8 @@ public: CV_EXPORTS Ptr createRetina(Size inputSize); CV_EXPORTS Ptr createRetina(Size inputSize, const bool colorMode, int colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0); - +CV_EXPORTS Ptr createRetina_OCL(Size inputSize); +CV_EXPORTS Ptr createRetina_OCL(Size inputSize, const bool colorMode, int colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0); } } #endif /* __OPENCV_BIOINSPIRED_RETINA_HPP__ */ diff --git a/modules/bioinspired/src/opencl/retina_kernel.cl b/modules/bioinspired/src/opencl/retina_kernel.cl new file mode 100644 index 0000000000..ed5f8319db --- /dev/null +++ b/modules/bioinspired/src/opencl/retina_kernel.cl @@ -0,0 +1,753 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// @Authors +// Peng Xiao, pengxiao@multicorewareinc.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other oclMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +///////////////////////////////////////////////////////// +//******************************************************* +// basicretinafilter +//////////////// _spatiotemporalLPfilter //////////////// +//_horizontalCausalFilter_addInput +kernel void horizontalCausalFilter_addInput( + global const float * input, + global float * output, + const int cols, + const int rows, + const int elements_per_row, + const int in_offset, + const int out_offset, + const float _tau, + const float _a +) +{ + int gid = get_global_id(0); + if(gid >= rows) + { + return; + } + + global const float * iptr = + input + mad24(gid, elements_per_row, in_offset / 4); + global float * optr = + output + mad24(gid, elements_per_row, out_offset / 4); + + float res; + float4 in_v4, out_v4, res_v4 = (float4)(0); + //vectorize to increase throughput + for(int i = 0; i < cols / 4; ++i, iptr += 4, optr += 4) + { + in_v4 = vload4(0, iptr); + out_v4 = vload4(0, optr); + + res_v4.x = in_v4.x + _tau * out_v4.x + _a * res_v4.w; + res_v4.y = in_v4.y + _tau * out_v4.y + _a * res_v4.x; + res_v4.z = in_v4.z + _tau * out_v4.z + _a * res_v4.y; + res_v4.w = in_v4.w + _tau * out_v4.w + _a * res_v4.z; + + vstore4(res_v4, 0, optr); + } + res = res_v4.w; + // there may be left some + for(int i = 0; i < cols % 4; ++i, ++iptr, ++optr) + { + res = *iptr + _tau * *optr + _a * res; + *optr = res; + } +} + +//_horizontalAnticausalFilter +kernel void horizontalAnticausalFilter( + global float * output, + const int cols, + const int rows, + const int elements_per_row, + const int out_offset, + const float _a +) +{ + int gid = get_global_id(0); + if(gid >= rows) + { + return; + } + + global float * optr = output + + mad24(gid + 1, elements_per_row, - 1 + out_offset / 4); + + float4 result = (float4)(0), out_v4; + // we assume elements_per_row is multple of 4 + for(int i = 0; i < elements_per_row / 4; ++i, optr -= 4) + { + // shift left, `offset` is type `size_t` so it cannot be negative + out_v4 = vload4(0, optr - 
3); + + result.w = out_v4.w + _a * result.x; + result.z = out_v4.z + _a * result.w; + result.y = out_v4.y + _a * result.z; + result.x = out_v4.x + _a * result.y; + + vstore4(result, 0, optr - 3); + } +} + +//_verticalCausalFilter +kernel void verticalCausalFilter( + global float * output, + const int cols, + const int rows, + const int elements_per_row, + const int out_offset, + const float _a +) +{ + int gid = get_global_id(0); + if(gid >= cols) + { + return; + } + + global float * optr = output + gid + out_offset / 4; + float result = 0; + for(int i = 0; i < rows; ++i, optr += elements_per_row) + { + result = *optr + _a * result; + *optr = result; + } +} + +//_verticalCausalFilter +kernel void verticalAnticausalFilter_multGain( + global float * output, + const int cols, + const int rows, + const int elements_per_row, + const int out_offset, + const float _a, + const float _gain +) +{ + int gid = get_global_id(0); + if(gid >= cols) + { + return; + } + + global float * optr = output + (rows - 1) * elements_per_row + gid + out_offset / 4; + float result = 0; + for(int i = 0; i < rows; ++i, optr -= elements_per_row) + { + result = *optr + _a * result; + *optr = _gain * result; + } +} +// +// end of _spatiotemporalLPfilter +///////////////////////////////////////////////////////////////////// + +//////////////// horizontalAnticausalFilter_Irregular //////////////// +kernel void horizontalAnticausalFilter_Irregular( + global float * output, + global float * buffer, + const int cols, + const int rows, + const int elements_per_row, + const int out_offset, + const int buffer_offset +) +{ + int gid = get_global_id(0); + if(gid >= rows) + { + return; + } + + global float * optr = + output + mad24(rows - gid, elements_per_row, -1 + out_offset / 4); + global float * bptr = + buffer + mad24(rows - gid, elements_per_row, -1 + buffer_offset / 4); + + float4 buf_v4, out_v4, res_v4 = (float4)(0); + + for(int i = 0; i < elements_per_row / 4; ++i, optr -= 4, bptr -= 4) + { + buf_v4 
= vload4(0, bptr - 3); + out_v4 = vload4(0, optr - 3); + + res_v4.w = out_v4.w + buf_v4.w * res_v4.x; + res_v4.z = out_v4.z + buf_v4.z * res_v4.w; + res_v4.y = out_v4.y + buf_v4.y * res_v4.z; + res_v4.x = out_v4.x + buf_v4.x * res_v4.y; + + vstore4(res_v4, 0, optr - 3); + } +} + +//////////////// verticalCausalFilter_Irregular //////////////// +kernel void verticalCausalFilter_Irregular( + global float * output, + global float * buffer, + const int cols, + const int rows, + const int elements_per_row, + const int out_offset, + const int buffer_offset +) +{ + int gid = get_global_id(0); + if(gid >= cols) + { + return; + } + + global float * optr = output + gid + out_offset / 4; + global float * bptr = buffer + gid + buffer_offset / 4; + float result = 0; + for(int i = 0; i < rows; ++i, optr += elements_per_row, bptr += elements_per_row) + { + result = *optr + *bptr * result; + *optr = result; + } +} + +//////////////// _adaptiveHorizontalCausalFilter_addInput //////////////// +kernel void adaptiveHorizontalCausalFilter_addInput( + global const float * input, + global const float * gradient, + global float * output, + const int cols, + const int rows, + const int elements_per_row, + const int in_offset, + const int grad_offset, + const int out_offset +) +{ + int gid = get_global_id(0); + if(gid >= rows) + { + return; + } + + global const float * iptr = + input + mad24(gid, elements_per_row, in_offset / 4); + global const float * gptr = + gradient + mad24(gid, elements_per_row, grad_offset / 4); + global float * optr = + output + mad24(gid, elements_per_row, out_offset / 4); + + float4 in_v4, grad_v4, out_v4, res_v4 = (float4)(0); + for(int i = 0; i < cols / 4; ++i, iptr += 4, gptr += 4, optr += 4) + { + in_v4 = vload4(0, iptr); + grad_v4 = vload4(0, gptr); + + res_v4.x = in_v4.x + grad_v4.x * res_v4.w; + res_v4.y = in_v4.y + grad_v4.y * res_v4.x; + res_v4.z = in_v4.z + grad_v4.z * res_v4.y; + res_v4.w = in_v4.w + grad_v4.w * res_v4.z; + + vstore4(res_v4, 0, optr); + 
} + for(int i = 0; i < cols % 4; ++i, ++iptr, ++gptr, ++optr) + { + res_v4.w = *iptr + *gptr * res_v4.w; + *optr = res_v4.w; + } +} + +//////////////// _adaptiveVerticalAnticausalFilter_multGain //////////////// +kernel void adaptiveVerticalAnticausalFilter_multGain( + global const float * gradient, + global float * output, + const int cols, + const int rows, + const int elements_per_row, + const int grad_offset, + const int out_offset, + const float gain +) +{ + int gid = get_global_id(0); + if(gid >= cols) + { + return; + } + + int start_idx = mad24(rows - 1, elements_per_row, gid); + + global const float * gptr = gradient + start_idx + grad_offset / 4; + global float * optr = output + start_idx + out_offset / 4; + + float result = 0; + for(int i = 0; i < rows; ++i, gptr -= elements_per_row, optr -= elements_per_row) + { + result = *optr + *gptr * result; + *optr = gain * result; + } +} + +//////////////// _localLuminanceAdaptation //////////////// +// FIXME: +// This kernel seems to have precision problem on GPU +kernel void localLuminanceAdaptation( + global const float * luma, + global const float * input, + global float * output, + const int cols, + const int rows, + const int elements_per_row, + const float _localLuminanceAddon, + const float _localLuminanceFactor, + const float _maxInputValue +) +{ + int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + int offset = mad24(gidy, elements_per_row, gidx); + + float X0 = luma[offset] * _localLuminanceFactor + _localLuminanceAddon; + float input_val = input[offset]; + // output of the following line may be different between GPU and CPU + output[offset] = (_maxInputValue + X0) * input_val / (input_val + X0 + 0.00000000001f); +} +// end of basicretinafilter +//******************************************************* +///////////////////////////////////////////////////////// + + + +///////////////////////////////////////////////////////// 
+//****************************************************** +// magno +// TODO: this kernel has too many buffer accesses, better to make it +// vector read/write for fetch efficiency +kernel void amacrineCellsComputing( + global const float * opl_on, + global const float * opl_off, + global float * prev_in_on, + global float * prev_in_off, + global float * out_on, + global float * out_off, + const int cols, + const int rows, + const int elements_per_row, + const float coeff +) +{ + int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + + int offset = mad24(gidy, elements_per_row, gidx); + opl_on += offset; + opl_off += offset; + prev_in_on += offset; + prev_in_off += offset; + out_on += offset; + out_off += offset; + + float magnoXonPixelResult = coeff * (*out_on + *opl_on - *prev_in_on); + *out_on = fmax(magnoXonPixelResult, 0); + float magnoXoffPixelResult = coeff * (*out_off + *opl_off - *prev_in_off); + *out_off = fmax(magnoXoffPixelResult, 0); + + *prev_in_on = *opl_on; + *prev_in_off = *opl_off; +} + +///////////////////////////////////////////////////////// +//****************************************************** +// parvo +// TODO: this kernel has too many buffer accesses, needs optimization +kernel void OPL_OnOffWaysComputing( + global float4 * photo_out, + global float4 * horiz_out, + global float4 * bipol_on, + global float4 * bipol_off, + global float4 * parvo_on, + global float4 * parvo_off, + const int cols, + const int rows, + const int elements_per_row +) +{ + int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx * 4 >= cols || gidy >= rows) + { + return; + } + // we assume elements_per_row must be multiples of 4 + int offset = mad24(gidy, elements_per_row >> 2, gidx); + photo_out += offset; + horiz_out += offset; + bipol_on += offset; + bipol_off += offset; + parvo_on += offset; + parvo_off += offset; + + float4 diff = *photo_out - *horiz_out; + float4 isPositive;// = 
convert_float4(diff > (float4)(0.0f, 0.0f, 0.0f, 0.0f)); + isPositive.x = diff.x > 0.0f; + isPositive.y = diff.y > 0.0f; + isPositive.z = diff.z > 0.0f; + isPositive.w = diff.w > 0.0f; + float4 res_on = isPositive * diff; + float4 res_off = (isPositive - (float4)(1.0f)) * diff; + + *bipol_on = res_on; + *parvo_on = res_on; + + *bipol_off = res_off; + *parvo_off = res_off; +} + +///////////////////////////////////////////////////////// +//****************************************************** +// retinacolor +inline int bayerSampleOffset(int step, int rows, int x, int y) +{ + return mad24(y, step, x) + + ((y % 2) + (x % 2)) * rows * step; +} + + +/////// colorMultiplexing ////// +kernel void runColorMultiplexingBayer( + global const float * input, + global float * output, + const int cols, + const int rows, + const int elements_per_row +) +{ + int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + + int offset = mad24(gidy, elements_per_row, gidx); + output[offset] = input[bayerSampleOffset(elements_per_row, rows, gidx, gidy)]; +} + +kernel void runColorDemultiplexingBayer( + global const float * input, + global float * output, + const int cols, + const int rows, + const int elements_per_row +) +{ + int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + + int offset = mad24(gidy, elements_per_row, gidx); + output[bayerSampleOffset(elements_per_row, rows, gidx, gidy)] = input[offset]; +} + +kernel void demultiplexAssign( + global const float * input, + global float * output, + const int cols, + const int rows, + const int elements_per_row +) +{ + int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + + int offset = bayerSampleOffset(elements_per_row, rows, gidx, gidy); + output[offset] = input[offset]; +} + + +//// normalizeGrayOutputCentredSigmoide +kernel void normalizeGrayOutputCentredSigmoide( + global 
const float * input, + global float * output, + const int cols, + const int rows, + const int elements_per_row, + const float meanval, + const float X0 +) + +{ + int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + int offset = mad24(gidy, elements_per_row, gidx); + + float input_val = input[offset]; + output[offset] = meanval + + (meanval + X0) * (input_val - meanval) / (fabs(input_val - meanval) + X0); +} + +//// normalize by photoreceptors density +kernel void normalizePhotoDensity( + global const float * chroma, + global const float * colorDensity, + global const float * multiplex, + global float * luma, + global float * demultiplex, + const int cols, + const int rows, + const int elements_per_row, + const float pG +) +{ + const int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + const int offset = mad24(gidy, elements_per_row, gidx); + int index = offset; + + float Cr = chroma[index] * colorDensity[index]; + index += elements_per_row * rows; + float Cg = chroma[index] * colorDensity[index]; + index += elements_per_row * rows; + float Cb = chroma[index] * colorDensity[index]; + + const float luma_res = (Cr + Cg + Cb) * pG; + luma[offset] = luma_res; + demultiplex[bayerSampleOffset(elements_per_row, rows, gidx, gidy)] = + multiplex[offset] - luma_res; +} + + + +//////// computeGradient /////// +// TODO: +// this function maybe accelerated by image2d_t or lds +kernel void computeGradient( + global const float * luma, + global float * gradient, + const int cols, + const int rows, + const int elements_per_row +) +{ + int gidx = get_global_id(0) + 2, gidy = get_global_id(1) + 2; + if(gidx >= cols - 2 || gidy >= rows - 2) + { + return; + } + int offset = mad24(gidy, elements_per_row, gidx); + luma += offset; + + // horizontal and vertical local gradients + const float v_grad = fabs(luma[elements_per_row] - luma[- elements_per_row]); + const float h_grad = 
fabs(luma[1] - luma[-1]); + + // neighborhood horizontal and vertical gradients + const float cur_val = luma[0]; + const float v_grad_p = fabs(cur_val - luma[- 2 * elements_per_row]); + const float h_grad_p = fabs(cur_val - luma[- 2]); + const float v_grad_n = fabs(cur_val - luma[2 * elements_per_row]); + const float h_grad_n = fabs(cur_val - luma[2]); + + const float horiz_grad = 0.5f * h_grad + 0.25f * (h_grad_p + h_grad_n); + const float verti_grad = 0.5f * v_grad + 0.25f * (v_grad_p + v_grad_n); + const bool is_vertical_greater = horiz_grad < verti_grad; + + gradient[offset + elements_per_row * rows] = is_vertical_greater ? 0.06f : 0.57f; + gradient[offset ] = is_vertical_greater ? 0.57f : 0.06f; +} + + +/////// substractResidual /////// +kernel void substractResidual( + global float * input, + const int cols, + const int rows, + const int elements_per_row, + const float pR, + const float pG, + const float pB +) +{ + const int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + int indices [3] = + { + mad24(gidy, elements_per_row, gidx), + mad24(gidy + rows, elements_per_row, gidx), + mad24(gidy + 2 * rows, elements_per_row, gidx) + }; + float vals[3] = {input[indices[0]], input[indices[1]], input[indices[2]]}; + float residu = pR * vals[0] + pG * vals[1] + pB * vals[2]; + + input[indices[0]] = vals[0] - residu; + input[indices[1]] = vals[1] - residu; + input[indices[2]] = vals[2] - residu; +} + +///// clipRGBOutput_0_maxInputValue ///// +kernel void clipRGBOutput_0_maxInputValue( + global float * input, + const int cols, + const int rows, + const int elements_per_row, + const float maxVal +) +{ + const int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + const int offset = mad24(gidy, elements_per_row, gidx); + float val = input[offset]; + val = clamp(val, 0.0f, maxVal); + input[offset] = val; +} + +//// normalizeGrayOutputNearZeroCentreredSigmoide //// 
+kernel void normalizeGrayOutputNearZeroCentreredSigmoide( + global float * input, + global float * output, + const int cols, + const int rows, + const int elements_per_row, + const float maxVal, + const float X0cube +) +{ + const int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + const int offset = mad24(gidy, elements_per_row, gidx); + float currentCubeLuminance = input[offset]; + currentCubeLuminance = currentCubeLuminance * currentCubeLuminance * currentCubeLuminance; + output[offset] = currentCubeLuminance * X0cube / (X0cube + currentCubeLuminance); +} + +//// centerReductImageLuminance //// +kernel void centerReductImageLuminance( + global float * input, + const int cols, + const int rows, + const int elements_per_row, + const float mean, + const float std_dev +) +{ + const int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + const int offset = mad24(gidy, elements_per_row, gidx); + + float val = input[offset]; + input[offset] = (val - mean) / std_dev; +} + +//// inverseValue //// +kernel void inverseValue( + global float * input, + const int cols, + const int rows, + const int elements_per_row +) +{ + const int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + const int offset = mad24(gidy, elements_per_row, gidx); + input[offset] = 1.f / input[offset]; +} + +#define CV_PI 3.1415926535897932384626433832795 + +//// _processRetinaParvoMagnoMapping //// +kernel void processRetinaParvoMagnoMapping( + global float * parvo, + global float * magno, + global float * output, + const int cols, + const int rows, + const int halfCols, + const int halfRows, + const int elements_per_row, + const float minDistance +) +{ + const int gidx = get_global_id(0), gidy = get_global_id(1); + if(gidx >= cols || gidy >= rows) + { + return; + } + const int offset = mad24(gidy, elements_per_row, gidx); + + float 
distanceToCenter = + sqrt(((float)(gidy - halfRows) * (gidy - halfRows) + (gidx - halfCols) * (gidx - halfCols))); + + float a = distanceToCenter < minDistance ? + (0.5f + 0.5f * (float)cos(CV_PI * distanceToCenter / minDistance)) : 0; + float b = 1.f - a; + + output[offset] = parvo[offset] * a + magno[offset] * b; +} diff --git a/modules/bioinspired/src/precomp.hpp b/modules/bioinspired/src/precomp.hpp index 066e15b445..541b970325 100644 --- a/modules/bioinspired/src/precomp.hpp +++ b/modules/bioinspired/src/precomp.hpp @@ -43,11 +43,17 @@ #ifndef __OPENCV_PRECOMP_H__ #define __OPENCV_PRECOMP_H__ +#include "opencv2/opencv_modules.hpp" #include "opencv2/bioinspired.hpp" #include "opencv2/core/utility.hpp" #include "opencv2/core/private.hpp" #include + +#ifdef HAVE_OPENCV_OCL + #include "opencv2/ocl/private/util.hpp" +#endif + namespace cv { diff --git a/modules/bioinspired/src/retina_ocl.cpp b/modules/bioinspired/src/retina_ocl.cpp new file mode 100644 index 0000000000..ba98da2d71 --- /dev/null +++ b/modules/bioinspired/src/retina_ocl.cpp @@ -0,0 +1,1651 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// @Authors +// Peng Xiao, pengxiao@multicorewareinc.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" +#include "retina_ocl.hpp" +#include +#include + +#ifdef HAVE_OPENCV_OCL + +#define NOT_IMPLEMENTED CV_Error(cv::Error::StsNotImplemented, "Not implemented") + +namespace cv +{ +namespace ocl +{ +//OpenCL kernel file string pointer +extern const char * retina_kernel; +} +} + +namespace cv +{ +namespace bioinspired +{ +namespace ocl +{ +using namespace cv::ocl; + +class RetinaOCLImpl : public Retina +{ +public: + RetinaOCLImpl(Size getInputSize); + RetinaOCLImpl(Size getInputSize, const bool colorMode, int colorSamplingMethod = RETINA_COLOR_BAYER, const bool useRetinaLogSampling = false, const double reductionFactor = 1.0, const double samplingStrenght = 10.0); + virtual ~RetinaOCLImpl(); + + Size getInputSize(); + Size getOutputSize(); + + void setup(String retinaParameterFile = "", const bool applyDefaultSetupOnFailure = true); + void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure = true); + void setup(RetinaParameters newParameters); + + RetinaOCLImpl::RetinaParameters getParameters(); + + const String printSetup(); + virtual void write( String fs ) const; + virtual void write( FileStorage& fs ) const; + + void setupOPLandIPLParvoChannel(const bool colorMode = true, const bool normaliseOutput = true, const float photoreceptorsLocalAdaptationSensitivity = 0.7, const float photoreceptorsTemporalConstant = 0.5, const float photoreceptorsSpatialConstant = 0.53, const float horizontalCellsGain = 0, const float HcellsTemporalConstant = 1, const float HcellsSpatialConstant = 7, const float ganglionCellsSensitivity = 0.7); + void setupIPLMagnoChannel(const bool normaliseOutput = true, const float parasolCells_beta = 0, const float parasolCells_tau = 0, const float parasolCells_k = 7, const float amacrinCellsTemporalCutFrequency = 1.2, const float V0CompressionParameter = 0.95, const float localAdaptintegration_tau = 0, const float localAdaptintegration_k = 7); + + void run(InputArray inputImage); + void 
getParvo(OutputArray retinaOutput_parvo); + void getMagno(OutputArray retinaOutput_magno); + + void setColorSaturation(const bool saturateColors = true, const float colorSaturationValue = 4.0); + void clearBuffers(); + void activateMovingContoursProcessing(const bool activate); + void activateContoursProcessing(const bool activate); + + // unimplemented interfaces: + void applyFastToneMapping(InputArray /*inputImage*/, OutputArray /*outputToneMappedImage*/) { NOT_IMPLEMENTED; } + void getParvoRAW(OutputArray /*retinaOutput_parvo*/) { NOT_IMPLEMENTED; } + void getMagnoRAW(OutputArray /*retinaOutput_magno*/) { NOT_IMPLEMENTED; } + const Mat getMagnoRAW() const { NOT_IMPLEMENTED; return Mat(); } + const Mat getParvoRAW() const { NOT_IMPLEMENTED; return Mat(); } + +protected: + RetinaParameters _retinaParameters; + cv::ocl::oclMat _inputBuffer; + RetinaFilter* _retinaFilter; + bool convertToColorPlanes(const cv::ocl::oclMat& input, cv::ocl::oclMat &output); + void convertToInterleaved(const cv::ocl::oclMat& input, bool colorMode, cv::ocl::oclMat &output); + void _init(const Size getInputSize, const bool colorMode, int colorSamplingMethod = RETINA_COLOR_BAYER, const bool useRetinaLogSampling = false, const double reductionFactor = 1.0, const double samplingStrenght = 10.0); +}; + +RetinaOCLImpl::RetinaOCLImpl(const cv::Size inputSz) +{ + _retinaFilter = 0; + _init(inputSz, true, RETINA_COLOR_BAYER, false); +} + +RetinaOCLImpl::RetinaOCLImpl(const cv::Size inputSz, const bool colorMode, int colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght) +{ + _retinaFilter = 0; + _init(inputSz, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght); +}; + +RetinaOCLImpl::~RetinaOCLImpl() +{ + if (_retinaFilter) + { + delete _retinaFilter; + } +} + +/** +* retreive retina input buffer size +*/ +Size RetinaOCLImpl::getInputSize() +{ + return cv::Size(_retinaFilter->getInputNBcolumns(), 
_retinaFilter->getInputNBrows()); +} + +/** +* retreive retina output buffer size +*/ +Size RetinaOCLImpl::getOutputSize() +{ + return cv::Size(_retinaFilter->getOutputNBcolumns(), _retinaFilter->getOutputNBrows()); +} + + +void RetinaOCLImpl::setColorSaturation(const bool saturateColors, const float colorSaturationValue) +{ + _retinaFilter->setColorSaturation(saturateColors, colorSaturationValue); +} + +struct RetinaOCLImpl::RetinaParameters RetinaOCLImpl::getParameters() +{ + return _retinaParameters; +} + + +void RetinaOCLImpl::setup(String retinaParameterFile, const bool applyDefaultSetupOnFailure) +{ + try + { + // opening retinaParameterFile in read mode + cv::FileStorage fs(retinaParameterFile, cv::FileStorage::READ); + setup(fs, applyDefaultSetupOnFailure); + } + catch(Exception &e) + { + std::cout << "RetinaOCLImpl::setup: wrong/unappropriate xml parameter file : error report :`n=>" << e.what() << std::endl; + if (applyDefaultSetupOnFailure) + { + std::cout << "RetinaOCLImpl::setup: resetting retina with default parameters" << std::endl; + setupOPLandIPLParvoChannel(); + setupIPLMagnoChannel(); + } + else + { + std::cout << "=> keeping current parameters" << std::endl; + } + } +} + +void RetinaOCLImpl::setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure) +{ + try + { + // read parameters file if it exists or apply default setup if asked for + if (!fs.isOpened()) + { + std::cout << "RetinaOCLImpl::setup: provided parameters file could not be open... skeeping configuration" << std::endl; + return; + // implicit else case : retinaParameterFile could be open (it exists at least) + } + // OPL and Parvo init first... 
update at the same time the parameters structure and the retina core + cv::FileNode rootFn = fs.root(), currFn = rootFn["OPLandIPLparvo"]; + currFn["colorMode"] >> _retinaParameters.OPLandIplParvo.colorMode; + currFn["normaliseOutput"] >> _retinaParameters.OPLandIplParvo.normaliseOutput; + currFn["photoreceptorsLocalAdaptationSensitivity"] >> _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity; + currFn["photoreceptorsTemporalConstant"] >> _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant; + currFn["photoreceptorsSpatialConstant"] >> _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant; + currFn["horizontalCellsGain"] >> _retinaParameters.OPLandIplParvo.horizontalCellsGain; + currFn["hcellsTemporalConstant"] >> _retinaParameters.OPLandIplParvo.hcellsTemporalConstant; + currFn["hcellsSpatialConstant"] >> _retinaParameters.OPLandIplParvo.hcellsSpatialConstant; + currFn["ganglionCellsSensitivity"] >> _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity; + setupOPLandIPLParvoChannel(_retinaParameters.OPLandIplParvo.colorMode, _retinaParameters.OPLandIplParvo.normaliseOutput, _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity, _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant, _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant, _retinaParameters.OPLandIplParvo.horizontalCellsGain, _retinaParameters.OPLandIplParvo.hcellsTemporalConstant, _retinaParameters.OPLandIplParvo.hcellsSpatialConstant, _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity); + + // init retina IPL magno setup... 
update at the same time the parameters structure and the retina core + currFn = rootFn["IPLmagno"]; + currFn["normaliseOutput"] >> _retinaParameters.IplMagno.normaliseOutput; + currFn["parasolCells_beta"] >> _retinaParameters.IplMagno.parasolCells_beta; + currFn["parasolCells_tau"] >> _retinaParameters.IplMagno.parasolCells_tau; + currFn["parasolCells_k"] >> _retinaParameters.IplMagno.parasolCells_k; + currFn["amacrinCellsTemporalCutFrequency"] >> _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency; + currFn["V0CompressionParameter"] >> _retinaParameters.IplMagno.V0CompressionParameter; + currFn["localAdaptintegration_tau"] >> _retinaParameters.IplMagno.localAdaptintegration_tau; + currFn["localAdaptintegration_k"] >> _retinaParameters.IplMagno.localAdaptintegration_k; + + setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency, _retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k); + + } + catch(Exception &e) + { + std::cout << "RetinaOCLImpl::setup: resetting retina with default parameters" << std::endl; + if (applyDefaultSetupOnFailure) + { + setupOPLandIPLParvoChannel(); + setupIPLMagnoChannel(); + } + std::cout << "RetinaOCLImpl::setup: wrong/unappropriate xml parameter file : error report :`n=>" << e.what() << std::endl; + std::cout << "=> keeping current parameters" << std::endl; + } +} + +void RetinaOCLImpl::setup(cv::bioinspired::Retina::RetinaParameters newConfiguration) +{ + // simply copy structures + memcpy(&_retinaParameters, &newConfiguration, sizeof(cv::bioinspired::Retina::RetinaParameters)); + // apply setup + setupOPLandIPLParvoChannel(_retinaParameters.OPLandIplParvo.colorMode, _retinaParameters.OPLandIplParvo.normaliseOutput, 
_retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity, _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant, _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant, _retinaParameters.OPLandIplParvo.horizontalCellsGain, _retinaParameters.OPLandIplParvo.hcellsTemporalConstant, _retinaParameters.OPLandIplParvo.hcellsSpatialConstant, _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity); + setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency, _retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k); +} + +const String RetinaOCLImpl::printSetup() +{ + std::stringstream outmessage; + + // displaying OPL and IPL parvo setup + outmessage << "Current Retina instance setup :" + << "\nOPLandIPLparvo" << "{" + << "\n==> colorMode : " << _retinaParameters.OPLandIplParvo.colorMode + << "\n==> normalizeParvoOutput :" << _retinaParameters.OPLandIplParvo.normaliseOutput + << "\n==> photoreceptorsLocalAdaptationSensitivity : " << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity + << "\n==> photoreceptorsTemporalConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant + << "\n==> photoreceptorsSpatialConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant + << "\n==> horizontalCellsGain : " << _retinaParameters.OPLandIplParvo.horizontalCellsGain + << "\n==> hcellsTemporalConstant : " << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant + << "\n==> hcellsSpatialConstant : " << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant + << "\n==> parvoGanglionCellsSensitivity : " << _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity + << "}\n"; + + // displaying 
IPL magno setup + outmessage << "Current Retina instance setup :" + << "\nIPLmagno" << "{" + << "\n==> normaliseOutput : " << _retinaParameters.IplMagno.normaliseOutput + << "\n==> parasolCells_beta : " << _retinaParameters.IplMagno.parasolCells_beta + << "\n==> parasolCells_tau : " << _retinaParameters.IplMagno.parasolCells_tau + << "\n==> parasolCells_k : " << _retinaParameters.IplMagno.parasolCells_k + << "\n==> amacrinCellsTemporalCutFrequency : " << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency + << "\n==> V0CompressionParameter : " << _retinaParameters.IplMagno.V0CompressionParameter + << "\n==> localAdaptintegration_tau : " << _retinaParameters.IplMagno.localAdaptintegration_tau + << "\n==> localAdaptintegration_k : " << _retinaParameters.IplMagno.localAdaptintegration_k + << "}"; + return outmessage.str().c_str(); +} + +void RetinaOCLImpl::write( String fs ) const +{ + FileStorage parametersSaveFile(fs, cv::FileStorage::WRITE ); + write(parametersSaveFile); +} + +void RetinaOCLImpl::write( FileStorage& fs ) const +{ + if (!fs.isOpened()) + { + return; // basic error case + } + fs << "OPLandIPLparvo" << "{"; + fs << "colorMode" << _retinaParameters.OPLandIplParvo.colorMode; + fs << "normaliseOutput" << _retinaParameters.OPLandIplParvo.normaliseOutput; + fs << "photoreceptorsLocalAdaptationSensitivity" << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity; + fs << "photoreceptorsTemporalConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant; + fs << "photoreceptorsSpatialConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant; + fs << "horizontalCellsGain" << _retinaParameters.OPLandIplParvo.horizontalCellsGain; + fs << "hcellsTemporalConstant" << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant; + fs << "hcellsSpatialConstant" << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant; + fs << "ganglionCellsSensitivity" << 
_retinaParameters.OPLandIplParvo.ganglionCellsSensitivity; + fs << "}"; + fs << "IPLmagno" << "{"; + fs << "normaliseOutput" << _retinaParameters.IplMagno.normaliseOutput; + fs << "parasolCells_beta" << _retinaParameters.IplMagno.parasolCells_beta; + fs << "parasolCells_tau" << _retinaParameters.IplMagno.parasolCells_tau; + fs << "parasolCells_k" << _retinaParameters.IplMagno.parasolCells_k; + fs << "amacrinCellsTemporalCutFrequency" << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency; + fs << "V0CompressionParameter" << _retinaParameters.IplMagno.V0CompressionParameter; + fs << "localAdaptintegration_tau" << _retinaParameters.IplMagno.localAdaptintegration_tau; + fs << "localAdaptintegration_k" << _retinaParameters.IplMagno.localAdaptintegration_k; + fs << "}"; +} + +void RetinaOCLImpl::setupOPLandIPLParvoChannel(const bool colorMode, const bool normaliseOutput, const float photoreceptorsLocalAdaptationSensitivity, const float photoreceptorsTemporalConstant, const float photoreceptorsSpatialConstant, const float horizontalCellsGain, const float HcellsTemporalConstant, const float HcellsSpatialConstant, const float ganglionCellsSensitivity) +{ + // retina core parameters setup + _retinaFilter->setColorMode(colorMode); + _retinaFilter->setPhotoreceptorsLocalAdaptationSensitivity(photoreceptorsLocalAdaptationSensitivity); + _retinaFilter->setOPLandParvoParameters(0, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, HcellsTemporalConstant, HcellsSpatialConstant, ganglionCellsSensitivity); + _retinaFilter->setParvoGanglionCellsLocalAdaptationSensitivity(ganglionCellsSensitivity); + _retinaFilter->activateNormalizeParvoOutput_0_maxOutputValue(normaliseOutput); + + // update parameters struture + + _retinaParameters.OPLandIplParvo.colorMode = colorMode; + _retinaParameters.OPLandIplParvo.normaliseOutput = normaliseOutput; + _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity = 
photoreceptorsLocalAdaptationSensitivity; + _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant = photoreceptorsTemporalConstant; + _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant = photoreceptorsSpatialConstant; + _retinaParameters.OPLandIplParvo.horizontalCellsGain = horizontalCellsGain; + _retinaParameters.OPLandIplParvo.hcellsTemporalConstant = HcellsTemporalConstant; + _retinaParameters.OPLandIplParvo.hcellsSpatialConstant = HcellsSpatialConstant; + _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity = ganglionCellsSensitivity; +} + +void RetinaOCLImpl::setupIPLMagnoChannel(const bool normaliseOutput, const float parasolCells_beta, const float parasolCells_tau, const float parasolCells_k, const float amacrinCellsTemporalCutFrequency, const float V0CompressionParameter, const float localAdaptintegration_tau, const float localAdaptintegration_k) +{ + + _retinaFilter->setMagnoCoefficientsTable(parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k); + _retinaFilter->activateNormalizeMagnoOutput_0_maxOutputValue(normaliseOutput); + + // update parameters struture + _retinaParameters.IplMagno.normaliseOutput = normaliseOutput; + _retinaParameters.IplMagno.parasolCells_beta = parasolCells_beta; + _retinaParameters.IplMagno.parasolCells_tau = parasolCells_tau; + _retinaParameters.IplMagno.parasolCells_k = parasolCells_k; + _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency = amacrinCellsTemporalCutFrequency; + _retinaParameters.IplMagno.V0CompressionParameter = V0CompressionParameter; + _retinaParameters.IplMagno.localAdaptintegration_tau = localAdaptintegration_tau; + _retinaParameters.IplMagno.localAdaptintegration_k = localAdaptintegration_k; +} + +void RetinaOCLImpl::run(const InputArray input) +{ + oclMat &inputMatToConvert = getOclMatRef(input); + bool colorMode = convertToColorPlanes(inputMatToConvert, 
_inputBuffer); + // first convert input image to the compatible format : std::valarray + // process the retina + if (!_retinaFilter->runFilter(_inputBuffer, colorMode, false, _retinaParameters.OPLandIplParvo.colorMode && colorMode, false)) + { + throw cv::Exception(-1, "Retina cannot be applied, wrong input buffer size", "RetinaOCLImpl::run", "Retina.h", 0); + } +} + +void RetinaOCLImpl::getParvo(OutputArray output) +{ + oclMat &retinaOutput_parvo = getOclMatRef(output); + if (_retinaFilter->getColorMode()) + { + // reallocate output buffer (if necessary) + convertToInterleaved(_retinaFilter->getColorOutput(), true, retinaOutput_parvo); + } + else + { + // reallocate output buffer (if necessary) + convertToInterleaved(_retinaFilter->getContours(), false, retinaOutput_parvo); + } + //retinaOutput_parvo/=255.0; +} +void RetinaOCLImpl::getMagno(OutputArray output) +{ + oclMat &retinaOutput_magno = getOclMatRef(output); + // reallocate output buffer (if necessary) + convertToInterleaved(_retinaFilter->getMovingContours(), false, retinaOutput_magno); + //retinaOutput_magno/=255.0; +} +// private method called by constructirs +void RetinaOCLImpl::_init(const cv::Size inputSz, const bool colorMode, int colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght) +{ + // basic error check + if (inputSz.height*inputSz.width <= 0) + { + throw cv::Exception(-1, "Bad retina size setup : size height and with must be superior to zero", "RetinaOCLImpl::setup", "Retina.h", 0); + } + + // allocate the retina model + if (_retinaFilter) + { + delete _retinaFilter; + } + _retinaFilter = new RetinaFilter(inputSz.height, inputSz.width, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght); + + // prepare the default parameter XML file with default setup + setup(_retinaParameters); + + // init retina + _retinaFilter->clearAllBuffers(); +} + +bool RetinaOCLImpl::convertToColorPlanes(const oclMat& 
input, oclMat &output) +{ + oclMat convert_input; + input.convertTo(convert_input, CV_32F); + if(convert_input.channels() == 3 || convert_input.channels() == 4) + { + ocl::ensureSizeIsEnough(int(_retinaFilter->getInputNBrows() * 4), + int(_retinaFilter->getInputNBcolumns()), CV_32FC1, output); + oclMat channel_splits[4] = + { + output(Rect(Point(0, _retinaFilter->getInputNBrows() * 2), getInputSize())), + output(Rect(Point(0, _retinaFilter->getInputNBrows()), getInputSize())), + output(Rect(Point(0, 0), getInputSize())), + output(Rect(Point(0, _retinaFilter->getInputNBrows() * 3), getInputSize())) + }; + ocl::split(convert_input, channel_splits); + return true; + } + else if(convert_input.channels() == 1) + { + convert_input.copyTo(output); + return false; + } + else + { + CV_Error(-1, "Retina ocl only support 1, 3, 4 channel input"); + return false; + } +} +void RetinaOCLImpl::convertToInterleaved(const oclMat& input, bool colorMode, oclMat &output) +{ + input.convertTo(output, CV_8U); + if(colorMode) + { + int numOfSplits = input.rows / getInputSize().height; + std::vector channel_splits(numOfSplits); + for(int i = 0; i < static_cast(channel_splits.size()); i ++) + { + channel_splits[i] = + output(Rect(Point(0, _retinaFilter->getInputNBrows() * (numOfSplits - i - 1)), getInputSize())); + } + merge(channel_splits, output); + } + else + { + //... 
+ } +} + +void RetinaOCLImpl::clearBuffers() +{ + _retinaFilter->clearAllBuffers(); +} + +void RetinaOCLImpl::activateMovingContoursProcessing(const bool activate) +{ + _retinaFilter->activateMovingContoursProcessing(activate); +} + +void RetinaOCLImpl::activateContoursProcessing(const bool activate) +{ + _retinaFilter->activateContoursProcessing(activate); +} + +/////////////////////////////////////// +///////// BasicRetinaFilter /////////// +/////////////////////////////////////// +BasicRetinaFilter::BasicRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns, const unsigned int parametersListSize, const bool) + : _NBrows(NBrows), _NBcols(NBcolumns), + _filterOutput(NBrows, NBcolumns, CV_32FC1), + _localBuffer(NBrows, NBcolumns, CV_32FC1), + _filteringCoeficientsTable(3 * parametersListSize) +{ + _halfNBrows = _filterOutput.rows / 2; + _halfNBcolumns = _filterOutput.cols / 2; + + // set default values + _maxInputValue = 256.0; + + // reset all buffers + clearAllBuffers(); +} + +BasicRetinaFilter::~BasicRetinaFilter() +{ +} + +void BasicRetinaFilter::resize(const unsigned int NBrows, const unsigned int NBcolumns) +{ + // resizing buffers + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _filterOutput); + + // updating variables + _halfNBrows = _filterOutput.rows / 2; + _halfNBcolumns = _filterOutput.cols / 2; + + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _localBuffer); + // reset buffers + clearAllBuffers(); +} + +void BasicRetinaFilter::setLPfilterParameters(const float beta, const float tau, const float desired_k, const unsigned int filterIndex) +{ + float _beta = beta + tau; + float k = desired_k; + // check if the spatial constant is correct (avoid 0 value to avoid division by 0) + if (desired_k <= 0) + { + k = 0.001f; + std::cerr << "BasicRetinaFilter::spatial constant of the low pass filter must be superior to zero !!! 
correcting parameter setting to 0,001" << std::endl; + } + + float _alpha = k * k; + float _mu = 0.8f; + unsigned int tableOffset = filterIndex * 3; + if (k <= 0) + { + std::cerr << "BasicRetinaFilter::spatial filtering coefficient must be superior to zero, correcting value to 0.01" << std::endl; + _alpha = 0.0001f; + } + + float _temp = (1.0f + _beta) / (2.0f * _mu * _alpha); + float a = _filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f + _temp) * (1.0f + _temp) - 1.0f); + _filteringCoeficientsTable[1 + tableOffset] = (1.0f - a) * (1.0f - a) * (1.0f - a) * (1.0f - a) / (1.0f + _beta); + _filteringCoeficientsTable[2 + tableOffset] = tau; +} +const oclMat &BasicRetinaFilter::runFilter_LocalAdapdation(const oclMat &inputFrame, const oclMat &localLuminance) +{ + _localLuminanceAdaptation(inputFrame, localLuminance, _filterOutput); + return _filterOutput; +} + + +void BasicRetinaFilter::runFilter_LocalAdapdation(const oclMat &inputFrame, const oclMat &localLuminance, oclMat &outputFrame) +{ + _localLuminanceAdaptation(inputFrame, localLuminance, outputFrame); +} + +const oclMat &BasicRetinaFilter::runFilter_LocalAdapdation_autonomous(const oclMat &inputFrame) +{ + _spatiotemporalLPfilter(inputFrame, _filterOutput); + _localLuminanceAdaptation(inputFrame, _filterOutput, _filterOutput); + return _filterOutput; +} +void BasicRetinaFilter::runFilter_LocalAdapdation_autonomous(const oclMat &inputFrame, oclMat &outputFrame) +{ + _spatiotemporalLPfilter(inputFrame, _filterOutput); + _localLuminanceAdaptation(inputFrame, _filterOutput, outputFrame); +} + +void BasicRetinaFilter::_localLuminanceAdaptation(oclMat &inputOutputFrame, const oclMat &localLuminance) +{ + _localLuminanceAdaptation(inputOutputFrame, localLuminance, inputOutputFrame, false); +} + +void BasicRetinaFilter::_localLuminanceAdaptation(const oclMat &inputFrame, const oclMat &localLuminance, oclMat &outputFrame, const bool updateLuminanceMean) +{ + if (updateLuminanceMean) + { + float 
meanLuminance = saturate_cast(ocl::sum(inputFrame)[0]) / getNBpixels(); + updateCompressionParameter(meanLuminance); + } + int elements_per_row = static_cast(inputFrame.step / inputFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {_NBcols, _NBrows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &localLuminance.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &inputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &outputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBcols)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBrows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_float), &_localLuminanceAddon)); + args.push_back(std::make_pair(sizeof(cl_float), &_localLuminanceFactor)); + args.push_back(std::make_pair(sizeof(cl_float), &_maxInputValue)); + openCLExecuteKernel(ctx, &retina_kernel, "localLuminanceAdaptation", globalSize, localSize, args, -1, -1); +} + +const oclMat &BasicRetinaFilter::runFilter_LPfilter(const oclMat &inputFrame, const unsigned int filterIndex) +{ + _spatiotemporalLPfilter(inputFrame, _filterOutput, filterIndex); + return _filterOutput; +} +void BasicRetinaFilter::runFilter_LPfilter(const oclMat &inputFrame, oclMat &outputFrame, const unsigned int filterIndex) +{ + _spatiotemporalLPfilter(inputFrame, outputFrame, filterIndex); +} + +void BasicRetinaFilter::_spatiotemporalLPfilter(const oclMat &inputFrame, oclMat &LPfilterOutput, const unsigned int filterIndex) +{ + unsigned int coefTableOffset = filterIndex * 3; + + _a = _filteringCoeficientsTable[coefTableOffset]; + _gain = _filteringCoeficientsTable[1 + coefTableOffset]; + _tau = _filteringCoeficientsTable[2 + coefTableOffset]; + + _horizontalCausalFilter_addInput(inputFrame, LPfilterOutput); + _horizontalAnticausalFilter(LPfilterOutput); + _verticalCausalFilter(LPfilterOutput); + 
_verticalAnticausalFilter_multGain(LPfilterOutput); +} + +void BasicRetinaFilter::_horizontalCausalFilter_addInput(const oclMat &inputFrame, oclMat &outputFrame) +{ + int elements_per_row = static_cast(inputFrame.step / inputFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {_NBrows, 1, 1}; + size_t localSize[] = {256, 1, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &inputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &outputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBcols)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBrows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_int), &inputFrame.offset)); + args.push_back(std::make_pair(sizeof(cl_int), &inputFrame.offset)); + args.push_back(std::make_pair(sizeof(cl_float), &_tau)); + args.push_back(std::make_pair(sizeof(cl_float), &_a)); + openCLExecuteKernel(ctx, &retina_kernel, "horizontalCausalFilter_addInput", globalSize, localSize, args, -1, -1); +} + +void BasicRetinaFilter::_horizontalAnticausalFilter(oclMat &outputFrame) +{ + int elements_per_row = static_cast(outputFrame.step / outputFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {_NBrows, 1, 1}; + size_t localSize[] = {256, 1, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &outputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBcols)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBrows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.offset)); + args.push_back(std::make_pair(sizeof(cl_float), &_a)); + openCLExecuteKernel(ctx, &retina_kernel, "horizontalAnticausalFilter", globalSize, localSize, args, -1, -1); +} + +void BasicRetinaFilter::_verticalCausalFilter(oclMat &outputFrame) +{ + int elements_per_row = 
static_cast(outputFrame.step / outputFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {_NBcols, 1, 1}; + size_t localSize[] = {256, 1, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &outputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBcols)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBrows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.offset)); + args.push_back(std::make_pair(sizeof(cl_float), &_a)); + openCLExecuteKernel(ctx, &retina_kernel, "verticalCausalFilter", globalSize, localSize, args, -1, -1); +} + +void BasicRetinaFilter::_verticalAnticausalFilter_multGain(oclMat &outputFrame) +{ + int elements_per_row = static_cast(outputFrame.step / outputFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {_NBcols, 1, 1}; + size_t localSize[] = {256, 1, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &outputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBcols)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBrows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.offset)); + args.push_back(std::make_pair(sizeof(cl_float), &_a)); + args.push_back(std::make_pair(sizeof(cl_float), &_gain)); + openCLExecuteKernel(ctx, &retina_kernel, "verticalAnticausalFilter_multGain", globalSize, localSize, args, -1, -1); +} + +void BasicRetinaFilter::_horizontalAnticausalFilter_Irregular(oclMat &outputFrame, const oclMat &spatialConstantBuffer) +{ + int elements_per_row = static_cast(outputFrame.step / outputFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {outputFrame.rows, 1, 1}; + size_t localSize[] = {256, 1, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), 
&outputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &spatialConstantBuffer.data)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.offset)); + args.push_back(std::make_pair(sizeof(cl_int), &spatialConstantBuffer.offset)); + openCLExecuteKernel(ctx, &retina_kernel, "horizontalAnticausalFilter_Irregular", globalSize, localSize, args, -1, -1); +} + +// vertical anticausal filter +void BasicRetinaFilter::_verticalCausalFilter_Irregular(oclMat &outputFrame, const oclMat &spatialConstantBuffer) +{ + int elements_per_row = static_cast(outputFrame.step / outputFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {outputFrame.cols, 1, 1}; + size_t localSize[] = {256, 1, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &outputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &spatialConstantBuffer.data)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.offset)); + args.push_back(std::make_pair(sizeof(cl_int), &spatialConstantBuffer.offset)); + openCLExecuteKernel(ctx, &retina_kernel, "verticalCausalFilter_Irregular", globalSize, localSize, args, -1, -1); +} + +void cv::bioinspired::ocl::normalizeGrayOutput_0_maxOutputValue(oclMat &inputOutputBuffer, const float maxOutputValue) +{ + double min_val, max_val; + ocl::minMax(inputOutputBuffer, &min_val, &max_val); + float factor = maxOutputValue / static_cast(max_val - min_val); + float offset = - static_cast(min_val) * factor; + ocl::multiply(factor, inputOutputBuffer, inputOutputBuffer); + 
ocl::add(inputOutputBuffer, offset, inputOutputBuffer); +} + +void cv::bioinspired::ocl::normalizeGrayOutputCentredSigmoide(const float meanValue, const float sensitivity, oclMat &in, oclMat &out, const float maxValue) +{ + if (sensitivity == 1.0f) + { + std::cerr << "TemplateBuffer::TemplateBuffer::normalizeGrayOutputCentredSigmoide error: 2nd parameter (sensitivity) must not equal 0, copying original data..." << std::endl; + in.copyTo(out); + return; + } + + float X0 = maxValue / (sensitivity - 1.0f); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {in.cols, out.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + int elements_per_row = static_cast(out.step / out.elemSize()); + + args.push_back(std::make_pair(sizeof(cl_mem), &in.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &out.data)); + args.push_back(std::make_pair(sizeof(cl_int), &in.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &in.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_float), &meanValue)); + args.push_back(std::make_pair(sizeof(cl_float), &X0)); + openCLExecuteKernel(ctx, &retina_kernel, "normalizeGrayOutputCentredSigmoide", globalSize, localSize, args, -1, -1); +} + +void cv::bioinspired::ocl::normalizeGrayOutputNearZeroCentreredSigmoide(oclMat &inputPicture, oclMat &outputBuffer, const float sensitivity, const float maxOutputValue) +{ + float X0cube = sensitivity * sensitivity * sensitivity; + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {inputPicture.cols, inputPicture.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + int elements_per_row = static_cast(inputPicture.step / inputPicture.elemSize()); + args.push_back(std::make_pair(sizeof(cl_mem), &inputPicture.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &outputBuffer.data)); + args.push_back(std::make_pair(sizeof(cl_int), &inputPicture.cols)); + 
args.push_back(std::make_pair(sizeof(cl_int), &inputPicture.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_float), &maxOutputValue)); + args.push_back(std::make_pair(sizeof(cl_float), &X0cube)); + openCLExecuteKernel(ctx, &retina_kernel, "normalizeGrayOutputNearZeroCentreredSigmoide", globalSize, localSize, args, -1, -1); +} + +void cv::bioinspired::ocl::centerReductImageLuminance(oclMat &inputoutput) +{ + Scalar mean, stddev; + cv::meanStdDev((Mat)inputoutput, mean, stddev); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {inputoutput.cols, inputoutput.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + float f_mean = static_cast(mean[0]); + float f_stddev = static_cast(stddev[0]); + int elements_per_row = static_cast(inputoutput.step / inputoutput.elemSize()); + args.push_back(std::make_pair(sizeof(cl_mem), &inputoutput.data)); + args.push_back(std::make_pair(sizeof(cl_int), &inputoutput.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &inputoutput.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_float), &f_mean)); + args.push_back(std::make_pair(sizeof(cl_float), &f_stddev)); + openCLExecuteKernel(ctx, &retina_kernel, "centerReductImageLuminance", globalSize, localSize, args, -1, -1); +} + +/////////////////////////////////////// +///////// ParvoRetinaFilter /////////// +/////////////////////////////////////// +ParvoRetinaFilter::ParvoRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns) + : BasicRetinaFilter(NBrows, NBcolumns, 3), + _photoreceptorsOutput(NBrows, NBcolumns, CV_32FC1), + _horizontalCellsOutput(NBrows, NBcolumns, CV_32FC1), + _parvocellularOutputON(NBrows, NBcolumns, CV_32FC1), + _parvocellularOutputOFF(NBrows, NBcolumns, CV_32FC1), + _bipolarCellsOutputON(NBrows, NBcolumns, CV_32FC1), + _bipolarCellsOutputOFF(NBrows, NBcolumns, CV_32FC1), + 
_localAdaptationOFF(NBrows, NBcolumns, CV_32FC1) +{ + // link to the required local parent adaptation buffers + _localAdaptationON = _localBuffer; + _parvocellularOutputONminusOFF = _filterOutput; + + // init: set all the values to 0 + clearAllBuffers(); +} + +ParvoRetinaFilter::~ParvoRetinaFilter() +{ +} + +void ParvoRetinaFilter::clearAllBuffers() +{ + BasicRetinaFilter::clearAllBuffers(); + _photoreceptorsOutput = 0; + _horizontalCellsOutput = 0; + _parvocellularOutputON = 0; + _parvocellularOutputOFF = 0; + _bipolarCellsOutputON = 0; + _bipolarCellsOutputOFF = 0; + _localAdaptationOFF = 0; +} +void ParvoRetinaFilter::resize(const unsigned int NBrows, const unsigned int NBcolumns) +{ + BasicRetinaFilter::resize(NBrows, NBcolumns); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _photoreceptorsOutput); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _horizontalCellsOutput); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _parvocellularOutputON); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _parvocellularOutputOFF); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _bipolarCellsOutputON); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _bipolarCellsOutputOFF); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _localAdaptationOFF); + + // link to the required local parent adaptation buffers + _localAdaptationON = _localBuffer; + _parvocellularOutputONminusOFF = _filterOutput; + + // clean buffers + clearAllBuffers(); +} + +void ParvoRetinaFilter::setOPLandParvoFiltersParameters(const float beta1, const float tau1, const float k1, const float beta2, const float tau2, const float k2) +{ + // init photoreceptors low pass filter + setLPfilterParameters(beta1, tau1, k1); + // init horizontal cells low pass filter + setLPfilterParameters(beta2, tau2, k2, 1); + // init parasol ganglion cells low pass filter (default parameters) + setLPfilterParameters(0, tau1, k1, 2); + +} +const oclMat &ParvoRetinaFilter::runFilter(const oclMat &inputFrame, const bool 
useParvoOutput) +{ + _spatiotemporalLPfilter(inputFrame, _photoreceptorsOutput); + _spatiotemporalLPfilter(_photoreceptorsOutput, _horizontalCellsOutput, 1); + _OPL_OnOffWaysComputing(); + + if (useParvoOutput) + { + // local adaptation processes on ON and OFF ways + _spatiotemporalLPfilter(_bipolarCellsOutputON, _localAdaptationON, 2); + _localLuminanceAdaptation(_parvocellularOutputON, _localAdaptationON); + _spatiotemporalLPfilter(_bipolarCellsOutputOFF, _localAdaptationOFF, 2); + _localLuminanceAdaptation(_parvocellularOutputOFF, _localAdaptationOFF); + ocl::subtract(_parvocellularOutputON, _parvocellularOutputOFF, _parvocellularOutputONminusOFF); + } + + return _parvocellularOutputONminusOFF; +} +void ParvoRetinaFilter::_OPL_OnOffWaysComputing() +{ + int elements_per_row = static_cast(_photoreceptorsOutput.step / _photoreceptorsOutput.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {(_photoreceptorsOutput.cols + 3) / 4, _photoreceptorsOutput.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &_photoreceptorsOutput.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &_horizontalCellsOutput.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &_bipolarCellsOutputON.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &_bipolarCellsOutputOFF.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &_parvocellularOutputON.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &_parvocellularOutputOFF.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_photoreceptorsOutput.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &_photoreceptorsOutput.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + openCLExecuteKernel(ctx, &retina_kernel, "OPL_OnOffWaysComputing", globalSize, localSize, args, -1, -1); +} + +/////////////////////////////////////// +//////////// MagnoFilter ////////////// 
+/////////////////////////////////////// +MagnoRetinaFilter::MagnoRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns) + : BasicRetinaFilter(NBrows, NBcolumns, 2), + _previousInput_ON(NBrows, NBcolumns, CV_32FC1), + _previousInput_OFF(NBrows, NBcolumns, CV_32FC1), + _amacrinCellsTempOutput_ON(NBrows, NBcolumns, CV_32FC1), + _amacrinCellsTempOutput_OFF(NBrows, NBcolumns, CV_32FC1), + _magnoXOutputON(NBrows, NBcolumns, CV_32FC1), + _magnoXOutputOFF(NBrows, NBcolumns, CV_32FC1), + _localProcessBufferON(NBrows, NBcolumns, CV_32FC1), + _localProcessBufferOFF(NBrows, NBcolumns, CV_32FC1) +{ + _magnoYOutput = _filterOutput; + _magnoYsaturated = _localBuffer; + + clearAllBuffers(); +} + +MagnoRetinaFilter::~MagnoRetinaFilter() +{ +} +void MagnoRetinaFilter::clearAllBuffers() +{ + BasicRetinaFilter::clearAllBuffers(); + _previousInput_ON = 0; + _previousInput_OFF = 0; + _amacrinCellsTempOutput_ON = 0; + _amacrinCellsTempOutput_OFF = 0; + _magnoXOutputON = 0; + _magnoXOutputOFF = 0; + _localProcessBufferON = 0; + _localProcessBufferOFF = 0; + +} +void MagnoRetinaFilter::resize(const unsigned int NBrows, const unsigned int NBcolumns) +{ + BasicRetinaFilter::resize(NBrows, NBcolumns); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _previousInput_ON); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _previousInput_OFF); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _amacrinCellsTempOutput_ON); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _amacrinCellsTempOutput_OFF); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _magnoXOutputON); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _magnoXOutputOFF); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _localProcessBufferON); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _localProcessBufferOFF); + + // to be sure, relink buffers + _magnoYOutput = _filterOutput; + _magnoYsaturated = _localBuffer; + + // reset all buffers + clearAllBuffers(); +} + +void MagnoRetinaFilter::setCoefficientsTable(const 
float parasolCells_beta, const float parasolCells_tau, const float parasolCells_k, const float amacrinCellsTemporalCutFrequency, const float localAdaptIntegration_tau, const float localAdaptIntegration_k ) +{ + _temporalCoefficient = (float)std::exp(-1.0f / amacrinCellsTemporalCutFrequency); + // the first set of parameters is dedicated to the low pass filtering property of the ganglion cells + BasicRetinaFilter::setLPfilterParameters(parasolCells_beta, parasolCells_tau, parasolCells_k, 0); + // the second set of parameters is dedicated to the ganglion cells output intergartion for their local adaptation property + BasicRetinaFilter::setLPfilterParameters(0, localAdaptIntegration_tau, localAdaptIntegration_k, 1); +} + +void MagnoRetinaFilter::_amacrineCellsComputing( + const oclMat &OPL_ON, + const oclMat &OPL_OFF +) +{ + int elements_per_row = static_cast(OPL_ON.step / OPL_ON.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {OPL_ON.cols, OPL_ON.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &OPL_ON.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &OPL_OFF.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &_previousInput_ON.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &_previousInput_OFF.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &_amacrinCellsTempOutput_ON.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &_amacrinCellsTempOutput_OFF.data)); + args.push_back(std::make_pair(sizeof(cl_int), &OPL_ON.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &OPL_ON.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_float), &_temporalCoefficient)); + openCLExecuteKernel(ctx, &retina_kernel, "amacrineCellsComputing", globalSize, localSize, args, -1, -1); +} + +const oclMat &MagnoRetinaFilter::runFilter(const oclMat &OPL_ON, const oclMat &OPL_OFF) +{ + // Compute 
the high pass temporal filter + _amacrineCellsComputing(OPL_ON, OPL_OFF); + + // apply low pass filtering on ON and OFF ways after temporal high pass filtering + _spatiotemporalLPfilter(_amacrinCellsTempOutput_ON, _magnoXOutputON, 0); + _spatiotemporalLPfilter(_amacrinCellsTempOutput_OFF, _magnoXOutputOFF, 0); + + // local adaptation of the ganglion cells to the local contrast of the moving contours + _spatiotemporalLPfilter(_magnoXOutputON, _localProcessBufferON, 1); + _localLuminanceAdaptation(_magnoXOutputON, _localProcessBufferON); + + _spatiotemporalLPfilter(_magnoXOutputOFF, _localProcessBufferOFF, 1); + _localLuminanceAdaptation(_magnoXOutputOFF, _localProcessBufferOFF); + + _magnoYOutput = _magnoXOutputON + _magnoXOutputOFF; + + return _magnoYOutput; +} + +/////////////////////////////////////// +//////////// RetinaColor ////////////// +/////////////////////////////////////// + +// define an array of ROI headers of input x +#define MAKE_OCLMAT_SLICES(x, n) \ + oclMat x##_slices[n];\ + for(int _SLICE_INDEX_ = 0; _SLICE_INDEX_ < n; _SLICE_INDEX_ ++)\ + {\ + x##_slices[_SLICE_INDEX_] = x(getROI(_SLICE_INDEX_));\ + } + +static float _LMStoACr1Cr2[] = {1.0, 1.0, 0.0, 1.0, -1.0, 0.0, -0.5, -0.5, 1.0}; +static float _LMStoLab[] = {0.5774f, 0.5774f, 0.5774f, 0.4082f, 0.4082f, -0.8165f, 0.7071f, -0.7071f, 0.f}; + +RetinaColor::RetinaColor(const unsigned int NBrows, const unsigned int NBcolumns, const int samplingMethod) + : BasicRetinaFilter(NBrows, NBcolumns, 3), + _RGBmosaic(NBrows * 3, NBcolumns, CV_32FC1), + _tempMultiplexedFrame(NBrows, NBcolumns, CV_32FC1), + _demultiplexedTempBuffer(NBrows * 3, NBcolumns, CV_32FC1), + _demultiplexedColorFrame(NBrows * 3, NBcolumns, CV_32FC1), + _chrominance(NBrows * 3, NBcolumns, CV_32FC1), + _colorLocalDensity(NBrows * 3, NBcolumns, CV_32FC1), + _imageGradient(NBrows * 3, NBcolumns, CV_32FC1) +{ + // link to parent buffers (let's recycle !) 
+ _luminance = _filterOutput; + _multiplexedFrame = _localBuffer; + + _objectInit = false; + _samplingMethod = samplingMethod; + _saturateColors = false; + _colorSaturationValue = 4.0; + + // set default spatio-temporal filter parameters + setLPfilterParameters(0.0, 0.0, 1.5); + setLPfilterParameters(0.0, 0.0, 10.5, 1);// for the low pass filter dedicated to contours energy extraction (demultiplexing process) + setLPfilterParameters(0.f, 0.f, 0.9f, 2); + + // init default value on image Gradient + _imageGradient = 0.57f; + + // init color sampling map + _initColorSampling(); + + // flush all buffers + clearAllBuffers(); +} + +RetinaColor::~RetinaColor() +{ + +} + +void RetinaColor::clearAllBuffers() +{ + BasicRetinaFilter::clearAllBuffers(); + _tempMultiplexedFrame = 0.f; + _demultiplexedTempBuffer = 0.f; + + _demultiplexedColorFrame = 0.f; + _chrominance = 0.f; + _imageGradient = 0.57f; +} + +void RetinaColor::resize(const unsigned int NBrows, const unsigned int NBcolumns) +{ + BasicRetinaFilter::clearAllBuffers(); + ensureSizeIsEnough(NBrows, NBcolumns, CV_32FC1, _tempMultiplexedFrame); + ensureSizeIsEnough(NBrows * 2, NBcolumns, CV_32FC1, _imageGradient); + ensureSizeIsEnough(NBrows * 3, NBcolumns, CV_32FC1, _RGBmosaic); + ensureSizeIsEnough(NBrows * 3, NBcolumns, CV_32FC1, _demultiplexedTempBuffer); + ensureSizeIsEnough(NBrows * 3, NBcolumns, CV_32FC1, _demultiplexedColorFrame); + ensureSizeIsEnough(NBrows * 3, NBcolumns, CV_32FC1, _chrominance); + ensureSizeIsEnough(NBrows * 3, NBcolumns, CV_32FC1, _colorLocalDensity); + + // link to parent buffers (let's recycle !) 
+ _luminance = _filterOutput; + _multiplexedFrame = _localBuffer; + + // init color sampling map + _initColorSampling(); + + // clean buffers + clearAllBuffers(); +} + +static void inverseValue(oclMat &input) +{ + int elements_per_row = static_cast(input.step / input.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {input.cols, input.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &input.data)); + args.push_back(std::make_pair(sizeof(cl_int), &input.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &input.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + openCLExecuteKernel(ctx, &retina_kernel, "inverseValue", globalSize, localSize, args, -1, -1); +} + +void RetinaColor::_initColorSampling() +{ + CV_Assert(_samplingMethod == RETINA_COLOR_BAYER); + _pR = _pB = 0.25; + _pG = 0.5; + // filling the mosaic buffer: + _RGBmosaic = 0; + Mat tmp_mat(_NBrows * 3, _NBcols, CV_32FC1); + float * tmp_mat_ptr = tmp_mat.ptr(); + tmp_mat.setTo(0); + for (unsigned int index = 0 ; index < getNBpixels(); ++index) + { + tmp_mat_ptr[bayerSampleOffset(index)] = 1.0; + } + _RGBmosaic.upload(tmp_mat); + // computing photoreceptors local density + MAKE_OCLMAT_SLICES(_RGBmosaic, 3); + MAKE_OCLMAT_SLICES(_colorLocalDensity, 3); + + _spatiotemporalLPfilter(_RGBmosaic_slices[0], _colorLocalDensity_slices[0]); + _spatiotemporalLPfilter(_RGBmosaic_slices[1], _colorLocalDensity_slices[1]); + _spatiotemporalLPfilter(_RGBmosaic_slices[2], _colorLocalDensity_slices[2]); + + //_colorLocalDensity = oclMat(_colorLocalDensity.size(), _colorLocalDensity.type(), 1.f) / _colorLocalDensity; + inverseValue(_colorLocalDensity); + + _objectInit = true; +} + +static void demultiplex(const oclMat &input, oclMat &ouput) +{ + int elements_per_row = static_cast(input.step / input.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = 
{input.cols, input.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &input.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &ouput.data)); + args.push_back(std::make_pair(sizeof(cl_int), &input.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &input.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + openCLExecuteKernel(ctx, &retina_kernel, "runColorDemultiplexingBayer", globalSize, localSize, args, -1, -1); +} + +static void normalizePhotoDensity( + const oclMat &chroma, + const oclMat &colorDensity, + const oclMat &multiplex, + oclMat &ocl_luma, + oclMat &demultiplex, + const float pG +) +{ + int elements_per_row = static_cast(ocl_luma.step / ocl_luma.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {ocl_luma.cols, ocl_luma.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &chroma.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &colorDensity.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &multiplex.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &ocl_luma.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &demultiplex.data)); + args.push_back(std::make_pair(sizeof(cl_int), &ocl_luma.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &ocl_luma.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_float), &pG)); + openCLExecuteKernel(ctx, &retina_kernel, "normalizePhotoDensity", globalSize, localSize, args, -1, -1); +} + +static void substractResidual( + oclMat &colorDemultiplex, + float pR, + float pG, + float pB +) +{ + int elements_per_row = static_cast(colorDemultiplex.step / colorDemultiplex.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + int rows = colorDemultiplex.rows / 3, cols = colorDemultiplex.cols; + size_t globalSize[] = {cols, rows, 1}; + 
size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &colorDemultiplex.data)); + args.push_back(std::make_pair(sizeof(cl_int), &cols)); + args.push_back(std::make_pair(sizeof(cl_int), &rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_float), &pR)); + args.push_back(std::make_pair(sizeof(cl_float), &pG)); + args.push_back(std::make_pair(sizeof(cl_float), &pB)); + openCLExecuteKernel(ctx, &retina_kernel, "substractResidual", globalSize, localSize, args, -1, -1); +} + +static void demultiplexAssign(const oclMat& input, const oclMat& output) +{ + // only supports bayer + int elements_per_row = static_cast(input.step / input.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + int rows = input.rows / 3, cols = input.cols; + size_t globalSize[] = {cols, rows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &input.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &output.data)); + args.push_back(std::make_pair(sizeof(cl_int), &cols)); + args.push_back(std::make_pair(sizeof(cl_int), &rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + openCLExecuteKernel(ctx, &retina_kernel, "demultiplexAssign", globalSize, localSize, args, -1, -1); +} + +void RetinaColor::runColorDemultiplexing( + const oclMat &ocl_multiplexed_input, + const bool adaptiveFiltering, + const float maxInputValue +) +{ + MAKE_OCLMAT_SLICES(_demultiplexedTempBuffer, 3); + MAKE_OCLMAT_SLICES(_chrominance, 3); + MAKE_OCLMAT_SLICES(_RGBmosaic, 3); + MAKE_OCLMAT_SLICES(_demultiplexedColorFrame, 3); + MAKE_OCLMAT_SLICES(_colorLocalDensity, 3); + + _demultiplexedTempBuffer.setTo(0); + demultiplex(ocl_multiplexed_input, _demultiplexedTempBuffer); + + // interpolate the demultiplexed frame depending on the color sampling method + if (!adaptiveFiltering) + { + CV_Assert(adaptiveFiltering == false); + } + + 
_spatiotemporalLPfilter(_demultiplexedTempBuffer_slices[0], _chrominance_slices[0]); + _spatiotemporalLPfilter(_demultiplexedTempBuffer_slices[1], _chrominance_slices[1]); + _spatiotemporalLPfilter(_demultiplexedTempBuffer_slices[2], _chrominance_slices[2]); + + if (!adaptiveFiltering)// compute the gradient on the luminance + { + // TODO: implement me! + CV_Assert(adaptiveFiltering == false); + } + else + { + normalizePhotoDensity(_chrominance, _colorLocalDensity, ocl_multiplexed_input, _luminance, _demultiplexedTempBuffer, _pG); + // compute the gradient of the luminance + _computeGradient(_luminance, _imageGradient); + + _adaptiveSpatialLPfilter(_RGBmosaic_slices[0], _imageGradient, _chrominance_slices[0]); + _adaptiveSpatialLPfilter(_RGBmosaic_slices[1], _imageGradient, _chrominance_slices[1]); + _adaptiveSpatialLPfilter(_RGBmosaic_slices[2], _imageGradient, _chrominance_slices[2]); + + _adaptiveSpatialLPfilter(_demultiplexedTempBuffer_slices[0], _imageGradient, _demultiplexedColorFrame_slices[0]); + _adaptiveSpatialLPfilter(_demultiplexedTempBuffer_slices[1], _imageGradient, _demultiplexedColorFrame_slices[1]); + _adaptiveSpatialLPfilter(_demultiplexedTempBuffer_slices[2], _imageGradient, _demultiplexedColorFrame_slices[2]); + + _demultiplexedColorFrame /= _chrominance; // per element division + substractResidual(_demultiplexedColorFrame, _pR, _pG, _pB); + runColorMultiplexing(_demultiplexedColorFrame, _tempMultiplexedFrame); + + _demultiplexedTempBuffer.setTo(0); + _luminance = ocl_multiplexed_input - _tempMultiplexedFrame; + demultiplexAssign(_demultiplexedColorFrame, _demultiplexedTempBuffer); + + for(int i = 0; i < 3; i ++) + { + _spatiotemporalLPfilter(_demultiplexedTempBuffer_slices[i], _demultiplexedTempBuffer_slices[i]); + _demultiplexedColorFrame_slices[i] = _demultiplexedTempBuffer_slices[i] * _colorLocalDensity_slices[i] + _luminance; + } + } + // eliminate saturated colors by simple clipping values to the input range + 
clipRGBOutput_0_maxInputValue(_demultiplexedColorFrame, maxInputValue); + + if (_saturateColors) + { + ocl::normalizeGrayOutputCentredSigmoide(128, maxInputValue, _demultiplexedColorFrame, _demultiplexedColorFrame); + } +} +void RetinaColor::runColorMultiplexing(const oclMat &demultiplexedInputFrame, oclMat &multiplexedFrame) +{ + int elements_per_row = static_cast(multiplexedFrame.step / multiplexedFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {multiplexedFrame.cols, multiplexedFrame.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &demultiplexedInputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &multiplexedFrame.data)); + args.push_back(std::make_pair(sizeof(cl_int), &multiplexedFrame.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &multiplexedFrame.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + openCLExecuteKernel(ctx, &retina_kernel, "runColorMultiplexingBayer", globalSize, localSize, args, -1, -1); +} + +void RetinaColor::clipRGBOutput_0_maxInputValue(oclMat &inputOutputBuffer, const float maxInputValue) +{ + // the kernel is equivalent to: + //ocl::threshold(inputOutputBuffer, inputOutputBuffer, maxInputValue, maxInputValue, THRESH_TRUNC); + //ocl::threshold(inputOutputBuffer, inputOutputBuffer, 0, 0, THRESH_TOZERO); + int elements_per_row = static_cast(inputOutputBuffer.step / inputOutputBuffer.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {_NBcols, inputOutputBuffer.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &inputOutputBuffer.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBcols)); + args.push_back(std::make_pair(sizeof(cl_int), &inputOutputBuffer.rows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_float), 
&maxInputValue)); + openCLExecuteKernel(ctx, &retina_kernel, "clipRGBOutput_0_maxInputValue", globalSize, localSize, args, -1, -1); +} + +void RetinaColor::_adaptiveSpatialLPfilter(const oclMat &inputFrame, const oclMat &gradient, oclMat &outputFrame) +{ + /**********/ + _gain = (1 - 0.57f) * (1 - 0.57f) * (1 - 0.06f) * (1 - 0.06f); + + // launch the serie of 1D directional filters in order to compute the 2D low pass filter + // -> horizontal filters work with the first layer of imageGradient + _adaptiveHorizontalCausalFilter_addInput(inputFrame, gradient, outputFrame); + _horizontalAnticausalFilter_Irregular(outputFrame, gradient); + // -> horizontal filters work with the second layer of imageGradient + _verticalCausalFilter_Irregular(outputFrame, gradient(getROI(1))); + _adaptiveVerticalAnticausalFilter_multGain(gradient, outputFrame); +} + +void RetinaColor::_adaptiveHorizontalCausalFilter_addInput(const oclMat &inputFrame, const oclMat &gradient, oclMat &outputFrame) +{ + int elements_per_row = static_cast(inputFrame.step / inputFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {_NBrows, 1, 1}; + size_t localSize[] = {256, 1, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &inputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &gradient.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &outputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBcols)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBrows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_int), &inputFrame.offset)); + args.push_back(std::make_pair(sizeof(cl_int), &gradient.offset)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.offset)); + openCLExecuteKernel(ctx, &retina_kernel, "adaptiveHorizontalCausalFilter_addInput", globalSize, localSize, args, -1, -1); +} + +void 
RetinaColor::_adaptiveVerticalAnticausalFilter_multGain(const oclMat &gradient, oclMat &outputFrame) +{ + int elements_per_row = static_cast(outputFrame.step / outputFrame.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {_NBcols, 1, 1}; + size_t localSize[] = {256, 1, 1}; + + int gradOffset = gradient.offset + static_cast(gradient.step * _NBrows); + + args.push_back(std::make_pair(sizeof(cl_mem), &gradient.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &outputFrame.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBcols)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBrows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_int), &gradOffset)); + args.push_back(std::make_pair(sizeof(cl_int), &outputFrame.offset)); + args.push_back(std::make_pair(sizeof(cl_float), &_gain)); + openCLExecuteKernel(ctx, &retina_kernel, "adaptiveVerticalAnticausalFilter_multGain", globalSize, localSize, args, -1, -1); +} +void RetinaColor::_computeGradient(const oclMat &luminance, oclMat &gradient) +{ + int elements_per_row = static_cast(luminance.step / luminance.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {_NBcols, _NBrows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &luminance.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &gradient.data)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBcols)); + args.push_back(std::make_pair(sizeof(cl_int), &_NBrows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + openCLExecuteKernel(ctx, &retina_kernel, "computeGradient", globalSize, localSize, args, -1, -1); +} + +/////////////////////////////////////// +//////////// RetinaFilter ///////////// +/////////////////////////////////////// +RetinaFilter::RetinaFilter(const unsigned int sizeRows, const unsigned int 
sizeColumns, const bool colorMode, const int samplingMethod, const bool useRetinaLogSampling, const double, const double) + : + _photoreceptorsPrefilter(sizeRows, sizeColumns, 4), + _ParvoRetinaFilter(sizeRows, sizeColumns), + _MagnoRetinaFilter(sizeRows, sizeColumns), + _colorEngine(sizeRows, sizeColumns, samplingMethod) +{ + CV_Assert(!useRetinaLogSampling); + + // set default processing activities + _useParvoOutput = true; + _useMagnoOutput = true; + + _useColorMode = colorMode; + + // set default parameters + setGlobalParameters(); + + // stability controls values init + _setInitPeriodCount(); + _globalTemporalConstant = 25; + + // reset all buffers + clearAllBuffers(); +} + +RetinaFilter::~RetinaFilter() +{ +} + +void RetinaFilter::clearAllBuffers() +{ + _photoreceptorsPrefilter.clearAllBuffers(); + _ParvoRetinaFilter.clearAllBuffers(); + _MagnoRetinaFilter.clearAllBuffers(); + _colorEngine.clearAllBuffers(); + // stability controls value init + _setInitPeriodCount(); +} + +void RetinaFilter::resize(const unsigned int NBrows, const unsigned int NBcolumns) +{ + unsigned int rows = NBrows, cols = NBcolumns; + + // resize optionnal member and adjust other modules size if required + _photoreceptorsPrefilter.resize(rows, cols); + _ParvoRetinaFilter.resize(rows, cols); + _MagnoRetinaFilter.resize(rows, cols); + _colorEngine.resize(rows, cols); + + // clean buffers + clearAllBuffers(); + +} + +void RetinaFilter::_setInitPeriodCount() +{ + // find out the maximum temporal constant value and apply a security factor + // false value (obviously too long) but appropriate for simple use + _globalTemporalConstant = (unsigned int)(_ParvoRetinaFilter.getPhotoreceptorsTemporalConstant() + _ParvoRetinaFilter.getHcellsTemporalConstant() + _MagnoRetinaFilter.getTemporalConstant()); + // reset frame counter + _ellapsedFramesSinceLastReset = 0; +} + +void RetinaFilter::setGlobalParameters(const float OPLspatialResponse1, const float OPLtemporalresponse1, const float 
OPLassymetryGain, const float OPLspatialResponse2, const float OPLtemporalresponse2, const float LPfilterSpatialResponse, const float LPfilterGain, const float LPfilterTemporalresponse, const float MovingContoursExtractorCoefficient, const bool normalizeParvoOutput_0_maxOutputValue, const bool normalizeMagnoOutput_0_maxOutputValue, const float maxOutputValue, const float maxInputValue, const float meanValue) +{ + _normalizeParvoOutput_0_maxOutputValue = normalizeParvoOutput_0_maxOutputValue; + _normalizeMagnoOutput_0_maxOutputValue = normalizeMagnoOutput_0_maxOutputValue; + _maxOutputValue = maxOutputValue; + _photoreceptorsPrefilter.setV0CompressionParameter(0.9f, maxInputValue, meanValue); + _photoreceptorsPrefilter.setLPfilterParameters(0, 0, 10, 3); // keeps low pass filter with low cut frequency in memory (usefull for the tone mapping function) + _ParvoRetinaFilter.setOPLandParvoFiltersParameters(0, OPLtemporalresponse1, OPLspatialResponse1, OPLassymetryGain, OPLtemporalresponse2, OPLspatialResponse2); + _ParvoRetinaFilter.setV0CompressionParameter(0.9f, maxInputValue, meanValue); + _MagnoRetinaFilter.setCoefficientsTable(LPfilterGain, LPfilterTemporalresponse, LPfilterSpatialResponse, MovingContoursExtractorCoefficient, 0, 2.0f * LPfilterSpatialResponse); + _MagnoRetinaFilter.setV0CompressionParameter(0.7f, maxInputValue, meanValue); + + // stability controls value init + _setInitPeriodCount(); +} + +bool RetinaFilter::checkInput(const oclMat &input, const bool) +{ + BasicRetinaFilter *inputTarget = &_photoreceptorsPrefilter; + + bool test = (input.rows == static_cast(inputTarget->getNBrows()) + || input.rows == static_cast(inputTarget->getNBrows()) * 3 + || input.rows == static_cast(inputTarget->getNBrows()) * 4) + && input.cols == static_cast(inputTarget->getNBcolumns()); + if (!test) + { + std::cerr << "RetinaFilter::checkInput: input buffer does not match retina buffer size, conversion aborted" << std::endl; + return false; + } + + return true; +} + +// 
main function that runs the filter for a given input frame +bool RetinaFilter::runFilter(const oclMat &imageInput, const bool useAdaptiveFiltering, const bool processRetinaParvoMagnoMapping, const bool useColorMode, const bool inputIsColorMultiplexed) +{ + // preliminary check + bool processSuccess = true; + if (!checkInput(imageInput, useColorMode)) + { + return false; + } + + // run the color multiplexing if needed and compute each suub filter of the retina: + // -> local adaptation + // -> contours OPL extraction + // -> moving contours extraction + + // stability controls value update + ++_ellapsedFramesSinceLastReset; + + _useColorMode = useColorMode; + + oclMat selectedPhotoreceptorsLocalAdaptationInput = imageInput; + oclMat selectedPhotoreceptorsColorInput = imageInput; + + //********** Following is input data specific photoreceptors processing + if (useColorMode && (!inputIsColorMultiplexed)) // not multiplexed color input case + { + _colorEngine.runColorMultiplexing(selectedPhotoreceptorsColorInput); + selectedPhotoreceptorsLocalAdaptationInput = _colorEngine.getMultiplexedFrame(); + } + //********** Following is generic Retina processing + + // photoreceptors local adaptation + _photoreceptorsPrefilter.runFilter_LocalAdapdation(selectedPhotoreceptorsLocalAdaptationInput, _ParvoRetinaFilter.getHorizontalCellsOutput()); + + // run parvo filter + _ParvoRetinaFilter.runFilter(_photoreceptorsPrefilter.getOutput(), _useParvoOutput); + + if (_useParvoOutput) + { + _ParvoRetinaFilter.normalizeGrayOutputCentredSigmoide(); // models the saturation of the cells, usefull for visualisation of the ON-OFF Parvo Output, Bipolar cells outputs do not change !!! 
+ _ParvoRetinaFilter.centerReductImageLuminance(); // best for further spectrum analysis + + if (_normalizeParvoOutput_0_maxOutputValue) + { + _ParvoRetinaFilter.normalizeGrayOutput_0_maxOutputValue(_maxOutputValue); + } + } + + if (_useParvoOutput && _useMagnoOutput) + { + _MagnoRetinaFilter.runFilter(_ParvoRetinaFilter.getBipolarCellsON(), _ParvoRetinaFilter.getBipolarCellsOFF()); + if (_normalizeMagnoOutput_0_maxOutputValue) + { + _MagnoRetinaFilter.normalizeGrayOutput_0_maxOutputValue(_maxOutputValue); + } + _MagnoRetinaFilter.normalizeGrayOutputNearZeroCentreredSigmoide(); + } + + if (_useParvoOutput && _useMagnoOutput && processRetinaParvoMagnoMapping) + { + _processRetinaParvoMagnoMapping(); + if (_useColorMode) + { + _colorEngine.runColorDemultiplexing(_retinaParvoMagnoMappedFrame, useAdaptiveFiltering, _maxOutputValue); + } + return processSuccess; + } + + if (_useParvoOutput && _useColorMode) + { + _colorEngine.runColorDemultiplexing(_ParvoRetinaFilter.getOutput(), useAdaptiveFiltering, _maxOutputValue); + } + return processSuccess; +} + +const oclMat &RetinaFilter::getContours() +{ + if (_useColorMode) + { + return _colorEngine.getLuminance(); + } + else + { + return _ParvoRetinaFilter.getOutput(); + } +} +void RetinaFilter::_processRetinaParvoMagnoMapping() +{ + oclMat parvo = _ParvoRetinaFilter.getOutput(); + oclMat magno = _MagnoRetinaFilter.getOutput(); + + int halfRows = parvo.rows / 2; + int halfCols = parvo.cols / 2; + float minDistance = MIN(halfRows, halfCols) * 0.7f; + + int elements_per_row = static_cast(parvo.step / parvo.elemSize()); + + Context * ctx = Context::getContext(); + std::vector > args; + size_t globalSize[] = {parvo.cols, parvo.rows, 1}; + size_t localSize[] = {16, 16, 1}; + + args.push_back(std::make_pair(sizeof(cl_mem), &parvo.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &magno.data)); + args.push_back(std::make_pair(sizeof(cl_int), &parvo.cols)); + args.push_back(std::make_pair(sizeof(cl_int), &parvo.rows)); + 
args.push_back(std::make_pair(sizeof(cl_int), &halfCols)); + args.push_back(std::make_pair(sizeof(cl_int), &halfRows)); + args.push_back(std::make_pair(sizeof(cl_int), &elements_per_row)); + args.push_back(std::make_pair(sizeof(cl_float), &minDistance)); + openCLExecuteKernel(ctx, &retina_kernel, "processRetinaParvoMagnoMapping", globalSize, localSize, args, -1, -1); +} +} /* namespace ocl */ + +Ptr createRetina_OCL(Size getInputSize){ return new ocl::RetinaOCLImpl(getInputSize); } +Ptr createRetina_OCL(Size getInputSize, const bool colorMode, int colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght) +{ + return new ocl::RetinaOCLImpl(getInputSize, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght); +} + +} /* namespace bioinspired */ +} /* namespace cv */ + +#endif /* #ifdef HAVE_OPENCV_OCL */ diff --git a/modules/bioinspired/src/retina_ocl.hpp b/modules/bioinspired/src/retina_ocl.hpp new file mode 100644 index 0000000000..69a833cc42 --- /dev/null +++ b/modules/bioinspired/src/retina_ocl.hpp @@ -0,0 +1,633 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+//
+// @Authors
+// Peng Xiao, pengxiao@multicorewareinc.com
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors as is and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+// +//M*/ + +#ifndef __OCL_RETINA_HPP__ +#define __OCL_RETINA_HPP__ + +#include "precomp.hpp" + +#ifdef HAVE_OPENCV_OCL + +// please refer to c++ headers for API comments +namespace cv +{ +namespace bioinspired +{ +namespace ocl +{ +void normalizeGrayOutputCentredSigmoide(const float meanValue, const float sensitivity, cv::ocl::oclMat &in, cv::ocl::oclMat &out, const float maxValue = 255.f); +void normalizeGrayOutput_0_maxOutputValue(cv::ocl::oclMat &inputOutputBuffer, const float maxOutputValue = 255.0); +void normalizeGrayOutputNearZeroCentreredSigmoide(cv::ocl::oclMat &inputPicture, cv::ocl::oclMat &outputBuffer, const float sensitivity = 40, const float maxOutputValue = 255.0f); +void centerReductImageLuminance(cv::ocl::oclMat &inputOutputBuffer); + +class BasicRetinaFilter +{ +public: + BasicRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns, const unsigned int parametersListSize = 1, const bool useProgressiveFilter = false); + ~BasicRetinaFilter(); + inline void clearOutputBuffer() + { + _filterOutput = 0; + }; + inline void clearSecondaryBuffer() + { + _localBuffer = 0; + }; + inline void clearAllBuffers() + { + clearOutputBuffer(); + clearSecondaryBuffer(); + }; + void resize(const unsigned int NBrows, const unsigned int NBcolumns); + const cv::ocl::oclMat &runFilter_LPfilter(const cv::ocl::oclMat &inputFrame, const unsigned int filterIndex = 0); + void runFilter_LPfilter(const cv::ocl::oclMat &inputFrame, cv::ocl::oclMat &outputFrame, const unsigned int filterIndex = 0); + void runFilter_LPfilter_Autonomous(cv::ocl::oclMat &inputOutputFrame, const unsigned int filterIndex = 0); + const cv::ocl::oclMat &runFilter_LocalAdapdation(const cv::ocl::oclMat &inputOutputFrame, const cv::ocl::oclMat &localLuminance); + void runFilter_LocalAdapdation(const cv::ocl::oclMat &inputFrame, const cv::ocl::oclMat &localLuminance, cv::ocl::oclMat &outputFrame); + const cv::ocl::oclMat &runFilter_LocalAdapdation_autonomous(const cv::ocl::oclMat 
&inputFrame); + void runFilter_LocalAdapdation_autonomous(const cv::ocl::oclMat &inputFrame, cv::ocl::oclMat &outputFrame); + void setLPfilterParameters(const float beta, const float tau, const float k, const unsigned int filterIndex = 0); + inline void setV0CompressionParameter(const float v0, const float maxInputValue, const float) + { + _v0 = v0 * maxInputValue; + _localLuminanceFactor = v0; + _localLuminanceAddon = maxInputValue * (1.0f - v0); + _maxInputValue = maxInputValue; + }; + inline void setV0CompressionParameter(const float v0, const float meanLuminance) + { + this->setV0CompressionParameter(v0, _maxInputValue, meanLuminance); + }; + inline void setV0CompressionParameter(const float v0) + { + _v0 = v0 * _maxInputValue; + _localLuminanceFactor = v0; + _localLuminanceAddon = _maxInputValue * (1.0f - v0); + }; + inline void setV0CompressionParameterToneMapping(const float v0, const float maxInputValue, const float meanLuminance = 128.0f) + { + _v0 = v0 * maxInputValue; + _localLuminanceFactor = 1.0f; + _localLuminanceAddon = meanLuminance * _v0; + _maxInputValue = maxInputValue; + }; + inline void updateCompressionParameter(const float meanLuminance) + { + _localLuminanceFactor = 1; + _localLuminanceAddon = meanLuminance * _v0; + }; + inline float getV0CompressionParameter() + { + return _v0 / _maxInputValue; + }; + inline const cv::ocl::oclMat &getOutput() const + { + return _filterOutput; + }; + inline unsigned int getNBrows() + { + return _filterOutput.rows; + }; + inline unsigned int getNBcolumns() + { + return _filterOutput.cols; + }; + inline unsigned int getNBpixels() + { + return _filterOutput.size().area(); + }; + inline void normalizeGrayOutput_0_maxOutputValue(const float maxValue) + { + ocl::normalizeGrayOutput_0_maxOutputValue(_filterOutput, maxValue); + }; + inline void normalizeGrayOutputCentredSigmoide() + { + ocl::normalizeGrayOutputCentredSigmoide(0.0, 2.0, _filterOutput, _filterOutput); + }; + inline void centerReductImageLuminance() + 
{ + ocl::centerReductImageLuminance(_filterOutput); + }; + inline float getMaxInputValue() + { + return this->_maxInputValue; + }; + inline void setMaxInputValue(const float newMaxInputValue) + { + this->_maxInputValue = newMaxInputValue; + }; + +protected: + cv::ocl::oclMat _filterOutput; + cv::ocl::oclMat _localBuffer; + + int _NBrows; + int _NBcols; + unsigned int _halfNBrows; + unsigned int _halfNBcolumns; + + std::valarray _filteringCoeficientsTable; + float _v0; + float _maxInputValue; + float _meanInputValue; + float _localLuminanceFactor; + float _localLuminanceAddon; + + float _a; + float _tau; + float _gain; + + void _spatiotemporalLPfilter(const cv::ocl::oclMat &inputFrame, cv::ocl::oclMat &LPfilterOutput, const unsigned int coefTableOffset = 0); + float _squaringSpatiotemporalLPfilter(const cv::ocl::oclMat &inputFrame, cv::ocl::oclMat &outputFrame, const unsigned int filterIndex = 0); + void _spatiotemporalLPfilter_Irregular(const cv::ocl::oclMat &inputFrame, cv::ocl::oclMat &outputFrame, const unsigned int filterIndex = 0); + void _localSquaringSpatioTemporalLPfilter(const cv::ocl::oclMat &inputFrame, cv::ocl::oclMat &LPfilterOutput, const unsigned int *integrationAreas, const unsigned int filterIndex = 0); + void _localLuminanceAdaptation(const cv::ocl::oclMat &inputFrame, const cv::ocl::oclMat &localLuminance, cv::ocl::oclMat &outputFrame, const bool updateLuminanceMean = true); + void _localLuminanceAdaptation(cv::ocl::oclMat &inputOutputFrame, const cv::ocl::oclMat &localLuminance); + void _localLuminanceAdaptationPosNegValues(const cv::ocl::oclMat &inputFrame, const cv::ocl::oclMat &localLuminance, float *outputFrame); + void _horizontalCausalFilter_addInput(const cv::ocl::oclMat &inputFrame, cv::ocl::oclMat &outputFrame); + void _horizontalAnticausalFilter(cv::ocl::oclMat &outputFrame); + void _verticalCausalFilter(cv::ocl::oclMat &outputFrame); + void _horizontalAnticausalFilter_Irregular(cv::ocl::oclMat &outputFrame, const cv::ocl::oclMat 
&spatialConstantBuffer); + void _verticalCausalFilter_Irregular(cv::ocl::oclMat &outputFrame, const cv::ocl::oclMat &spatialConstantBuffer); + void _verticalAnticausalFilter_multGain(cv::ocl::oclMat &outputFrame); +}; + +class MagnoRetinaFilter: public BasicRetinaFilter +{ +public: + MagnoRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns); + virtual ~MagnoRetinaFilter(); + void clearAllBuffers(); + void resize(const unsigned int NBrows, const unsigned int NBcolumns); + void setCoefficientsTable(const float parasolCells_beta, const float parasolCells_tau, const float parasolCells_k, const float amacrinCellsTemporalCutFrequency, const float localAdaptIntegration_tau, const float localAdaptIntegration_k); + + const cv::ocl::oclMat &runFilter(const cv::ocl::oclMat &OPL_ON, const cv::ocl::oclMat &OPL_OFF); + + inline const cv::ocl::oclMat &getMagnoON() const + { + return _magnoXOutputON; + }; + inline const cv::ocl::oclMat &getMagnoOFF() const + { + return _magnoXOutputOFF; + }; + inline const cv::ocl::oclMat &getMagnoYsaturated() const + { + return _magnoYsaturated; + }; + inline void normalizeGrayOutputNearZeroCentreredSigmoide() + { + ocl::normalizeGrayOutputNearZeroCentreredSigmoide(_magnoYOutput, _magnoYsaturated); + }; + inline float getTemporalConstant() + { + return this->_filteringCoeficientsTable[2]; + }; +private: + cv::ocl::oclMat _previousInput_ON; + cv::ocl::oclMat _previousInput_OFF; + cv::ocl::oclMat _amacrinCellsTempOutput_ON; + cv::ocl::oclMat _amacrinCellsTempOutput_OFF; + cv::ocl::oclMat _magnoXOutputON; + cv::ocl::oclMat _magnoXOutputOFF; + cv::ocl::oclMat _localProcessBufferON; + cv::ocl::oclMat _localProcessBufferOFF; + cv::ocl::oclMat _magnoYOutput; + cv::ocl::oclMat _magnoYsaturated; + + float _temporalCoefficient; + void _amacrineCellsComputing(const cv::ocl::oclMat &OPL_ON, const cv::ocl::oclMat &OPL_OFF); +}; + +class ParvoRetinaFilter: public BasicRetinaFilter +{ +public: + ParvoRetinaFilter(const unsigned int NBrows = 
480, const unsigned int NBcolumns = 640); + virtual ~ParvoRetinaFilter(); + void resize(const unsigned int NBrows, const unsigned int NBcolumns); + void clearAllBuffers(); + void setOPLandParvoFiltersParameters(const float beta1, const float tau1, const float k1, const float beta2, const float tau2, const float k2); + + inline void setGanglionCellsLocalAdaptationLPfilterParameters(const float tau, const float k) + { + BasicRetinaFilter::setLPfilterParameters(0, tau, k, 2); + }; + const cv::ocl::oclMat &runFilter(const cv::ocl::oclMat &inputFrame, const bool useParvoOutput = true); + + inline const cv::ocl::oclMat &getPhotoreceptorsLPfilteringOutput() const + { + return _photoreceptorsOutput; + }; + + inline const cv::ocl::oclMat &getHorizontalCellsOutput() const + { + return _horizontalCellsOutput; + }; + + inline const cv::ocl::oclMat &getParvoON() const + { + return _parvocellularOutputON; + }; + + inline const cv::ocl::oclMat &getParvoOFF() const + { + return _parvocellularOutputOFF; + }; + + inline const cv::ocl::oclMat &getBipolarCellsON() const + { + return _bipolarCellsOutputON; + }; + + inline const cv::ocl::oclMat &getBipolarCellsOFF() const + { + return _bipolarCellsOutputOFF; + }; + + inline float getPhotoreceptorsTemporalConstant() + { + return this->_filteringCoeficientsTable[2]; + }; + + inline float getHcellsTemporalConstant() + { + return this->_filteringCoeficientsTable[5]; + }; +private: + cv::ocl::oclMat _photoreceptorsOutput; + cv::ocl::oclMat _horizontalCellsOutput; + cv::ocl::oclMat _parvocellularOutputON; + cv::ocl::oclMat _parvocellularOutputOFF; + cv::ocl::oclMat _bipolarCellsOutputON; + cv::ocl::oclMat _bipolarCellsOutputOFF; + cv::ocl::oclMat _localAdaptationOFF; + cv::ocl::oclMat _localAdaptationON; + cv::ocl::oclMat _parvocellularOutputONminusOFF; + void _OPL_OnOffWaysComputing(); +}; +class RetinaColor: public BasicRetinaFilter +{ +public: + RetinaColor(const unsigned int NBrows, const unsigned int NBcolumns, const int samplingMethod = 
RETINA_COLOR_DIAGONAL); + virtual ~RetinaColor(); + + void clearAllBuffers(); + void resize(const unsigned int NBrows, const unsigned int NBcolumns); + inline void runColorMultiplexing(const cv::ocl::oclMat &inputRGBFrame) + { + runColorMultiplexing(inputRGBFrame, _multiplexedFrame); + }; + void runColorMultiplexing(const cv::ocl::oclMat &demultiplexedInputFrame, cv::ocl::oclMat &multiplexedFrame); + void runColorDemultiplexing(const cv::ocl::oclMat &multiplexedColorFrame, const bool adaptiveFiltering = false, const float maxInputValue = 255.0); + + void setColorSaturation(const bool saturateColors = true, const float colorSaturationValue = 4.0) + { + _saturateColors = saturateColors; + _colorSaturationValue = colorSaturationValue; + }; + + void setChrominanceLPfilterParameters(const float beta, const float tau, const float k) + { + setLPfilterParameters(beta, tau, k); + }; + + bool applyKrauskopfLMS2Acr1cr2Transform(cv::ocl::oclMat &result); + bool applyLMS2LabTransform(cv::ocl::oclMat &result); + inline const cv::ocl::oclMat &getMultiplexedFrame() const + { + return _multiplexedFrame; + }; + + inline const cv::ocl::oclMat &getDemultiplexedColorFrame() const + { + return _demultiplexedColorFrame; + }; + + inline const cv::ocl::oclMat &getLuminance() const + { + return _luminance; + }; + inline const cv::ocl::oclMat &getChrominance() const + { + return _chrominance; + }; + void clipRGBOutput_0_maxInputValue(cv::ocl::oclMat &inputOutputBuffer, const float maxOutputValue = 255.0); + void normalizeRGBOutput_0_maxOutputValue(const float maxOutputValue = 255.0); + inline void setDemultiplexedColorFrame(const cv::ocl::oclMat &demultiplexedImage) + { + _demultiplexedColorFrame = demultiplexedImage; + }; +protected: + inline unsigned int bayerSampleOffset(unsigned int index) + { + return index + ((index / getNBcolumns()) % 2) * getNBpixels() + ((index % getNBcolumns()) % 2) * getNBpixels(); + } + inline Rect getROI(int idx) + { + return Rect(0, idx * _NBrows, _NBcols, 
_NBrows); + } + int _samplingMethod; + bool _saturateColors; + float _colorSaturationValue; + cv::ocl::oclMat _luminance; + cv::ocl::oclMat _multiplexedFrame; + cv::ocl::oclMat _RGBmosaic; + cv::ocl::oclMat _tempMultiplexedFrame; + cv::ocl::oclMat _demultiplexedTempBuffer; + cv::ocl::oclMat _demultiplexedColorFrame; + cv::ocl::oclMat _chrominance; + cv::ocl::oclMat _colorLocalDensity; + cv::ocl::oclMat _imageGradient; + + float _pR, _pG, _pB; + bool _objectInit; + + void _initColorSampling(); + void _adaptiveSpatialLPfilter(const cv::ocl::oclMat &inputFrame, const cv::ocl::oclMat &gradient, cv::ocl::oclMat &outputFrame); + void _adaptiveHorizontalCausalFilter_addInput(const cv::ocl::oclMat &inputFrame, const cv::ocl::oclMat &gradient, cv::ocl::oclMat &outputFrame); + void _adaptiveVerticalAnticausalFilter_multGain(const cv::ocl::oclMat &gradient, cv::ocl::oclMat &outputFrame); + void _computeGradient(const cv::ocl::oclMat &luminance, cv::ocl::oclMat &gradient); + void _normalizeOutputs_0_maxOutputValue(void); + void _applyImageColorSpaceConversion(const cv::ocl::oclMat &inputFrame, cv::ocl::oclMat &outputFrame, const float *transformTable); +}; +class RetinaFilter +{ +public: + RetinaFilter(const unsigned int sizeRows, const unsigned int sizeColumns, const bool colorMode = false, const int samplingMethod = RETINA_COLOR_BAYER, const bool useRetinaLogSampling = false, const double reductionFactor = 1.0, const double samplingStrenght = 10.0); + ~RetinaFilter(); + + void clearAllBuffers(); + void resize(const unsigned int NBrows, const unsigned int NBcolumns); + bool checkInput(const cv::ocl::oclMat &input, const bool colorMode); + bool runFilter(const cv::ocl::oclMat &imageInput, const bool useAdaptiveFiltering = true, const bool processRetinaParvoMagnoMapping = false, const bool useColorMode = false, const bool inputIsColorMultiplexed = false); + + void setGlobalParameters(const float OPLspatialResponse1 = 0.7, const float OPLtemporalresponse1 = 1, const float 
OPLassymetryGain = 0, const float OPLspatialResponse2 = 5, const float OPLtemporalresponse2 = 1, const float LPfilterSpatialResponse = 5, const float LPfilterGain = 0, const float LPfilterTemporalresponse = 0, const float MovingContoursExtractorCoefficient = 5, const bool normalizeParvoOutput_0_maxOutputValue = false, const bool normalizeMagnoOutput_0_maxOutputValue = false, const float maxOutputValue = 255.0, const float maxInputValue = 255.0, const float meanValue = 128.0); + + inline void setPhotoreceptorsLocalAdaptationSensitivity(const float V0CompressionParameter) + { + _photoreceptorsPrefilter.setV0CompressionParameter(1 - V0CompressionParameter); + _setInitPeriodCount(); + }; + + inline void setParvoGanglionCellsLocalAdaptationSensitivity(const float V0CompressionParameter) + { + _ParvoRetinaFilter.setV0CompressionParameter(V0CompressionParameter); + _setInitPeriodCount(); + }; + + inline void setGanglionCellsLocalAdaptationLPfilterParameters(const float spatialResponse, const float temporalResponse) + { + _ParvoRetinaFilter.setGanglionCellsLocalAdaptationLPfilterParameters(temporalResponse, spatialResponse); + _setInitPeriodCount(); + }; + + inline void setMagnoGanglionCellsLocalAdaptationSensitivity(const float V0CompressionParameter) + { + _MagnoRetinaFilter.setV0CompressionParameter(V0CompressionParameter); + _setInitPeriodCount(); + }; + + void setOPLandParvoParameters(const float beta1, const float tau1, const float k1, const float beta2, const float tau2, const float k2, const float V0CompressionParameter) + { + _ParvoRetinaFilter.setOPLandParvoFiltersParameters(beta1, tau1, k1, beta2, tau2, k2); + _ParvoRetinaFilter.setV0CompressionParameter(V0CompressionParameter); + _setInitPeriodCount(); + }; + + void setMagnoCoefficientsTable(const float parasolCells_beta, const float parasolCells_tau, const float parasolCells_k, const float amacrinCellsTemporalCutFrequency, const float V0CompressionParameter, const float localAdaptintegration_tau, const float 
localAdaptintegration_k) + { + _MagnoRetinaFilter.setCoefficientsTable(parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, localAdaptintegration_tau, localAdaptintegration_k); + _MagnoRetinaFilter.setV0CompressionParameter(V0CompressionParameter); + _setInitPeriodCount(); + }; + + inline void activateNormalizeParvoOutput_0_maxOutputValue(const bool normalizeParvoOutput_0_maxOutputValue) + { + _normalizeParvoOutput_0_maxOutputValue = normalizeParvoOutput_0_maxOutputValue; + }; + + inline void activateNormalizeMagnoOutput_0_maxOutputValue(const bool normalizeMagnoOutput_0_maxOutputValue) + { + _normalizeMagnoOutput_0_maxOutputValue = normalizeMagnoOutput_0_maxOutputValue; + }; + + inline void setMaxOutputValue(const float maxOutputValue) + { + _maxOutputValue = maxOutputValue; + }; + + void setColorMode(const bool desiredColorMode) + { + _useColorMode = desiredColorMode; + }; + inline void setColorSaturation(const bool saturateColors = true, const float colorSaturationValue = 4.0) + { + _colorEngine.setColorSaturation(saturateColors, colorSaturationValue); + }; + inline const cv::ocl::oclMat &getLocalAdaptation() const + { + return _photoreceptorsPrefilter.getOutput(); + }; + inline const cv::ocl::oclMat &getPhotoreceptors() const + { + return _ParvoRetinaFilter.getPhotoreceptorsLPfilteringOutput(); + }; + + inline const cv::ocl::oclMat &getHorizontalCells() const + { + return _ParvoRetinaFilter.getHorizontalCellsOutput(); + }; + inline bool areContoursProcessed() + { + return _useParvoOutput; + }; + bool getParvoFoveaResponse(cv::ocl::oclMat &parvoFovealResponse); + inline void activateContoursProcessing(const bool useParvoOutput) + { + _useParvoOutput = useParvoOutput; + }; + + const cv::ocl::oclMat &getContours(); + + inline const cv::ocl::oclMat &getContoursON() const + { + return _ParvoRetinaFilter.getParvoON(); + }; + + inline const cv::ocl::oclMat &getContoursOFF() const + { + return _ParvoRetinaFilter.getParvoOFF(); + }; + + 
inline bool areMovingContoursProcessed() + { + return _useMagnoOutput; + }; + + inline void activateMovingContoursProcessing(const bool useMagnoOutput) + { + _useMagnoOutput = useMagnoOutput; + }; + + inline const cv::ocl::oclMat &getMovingContours() const + { + return _MagnoRetinaFilter.getOutput(); + }; + + inline const cv::ocl::oclMat &getMovingContoursSaturated() const + { + return _MagnoRetinaFilter.getMagnoYsaturated(); + }; + + inline const cv::ocl::oclMat &getMovingContoursON() const + { + return _MagnoRetinaFilter.getMagnoON(); + }; + + inline const cv::ocl::oclMat &getMovingContoursOFF() const + { + return _MagnoRetinaFilter.getMagnoOFF(); + }; + + inline const cv::ocl::oclMat &getRetinaParvoMagnoMappedOutput() const + { + return _retinaParvoMagnoMappedFrame; + }; + + inline const cv::ocl::oclMat &getParvoContoursChannel() const + { + return _colorEngine.getLuminance(); + }; + + inline const cv::ocl::oclMat &getParvoChrominance() const + { + return _colorEngine.getChrominance(); + }; + inline const cv::ocl::oclMat &getColorOutput() const + { + return _colorEngine.getDemultiplexedColorFrame(); + }; + + inline bool isColorMode() + { + return _useColorMode; + }; + bool getColorMode() + { + return _useColorMode; + }; + + inline bool isInitTransitionDone() + { + if (_ellapsedFramesSinceLastReset < _globalTemporalConstant) + { + return false; + } + return true; + }; + inline float getRetinaSamplingBackProjection(const float projectedRadiusLength) + { + return projectedRadiusLength; + }; + + inline unsigned int getInputNBrows() + { + return _photoreceptorsPrefilter.getNBrows(); + }; + + inline unsigned int getInputNBcolumns() + { + return _photoreceptorsPrefilter.getNBcolumns(); + }; + + inline unsigned int getInputNBpixels() + { + return _photoreceptorsPrefilter.getNBpixels(); + }; + + inline unsigned int getOutputNBrows() + { + return _photoreceptorsPrefilter.getNBrows(); + }; + + inline unsigned int getOutputNBcolumns() + { + return 
_photoreceptorsPrefilter.getNBcolumns(); + }; + + inline unsigned int getOutputNBpixels() + { + return _photoreceptorsPrefilter.getNBpixels(); + }; +private: + bool _useParvoOutput; + bool _useMagnoOutput; + + unsigned int _ellapsedFramesSinceLastReset; + unsigned int _globalTemporalConstant; + + cv::ocl::oclMat _retinaParvoMagnoMappedFrame; + BasicRetinaFilter _photoreceptorsPrefilter; + ParvoRetinaFilter _ParvoRetinaFilter; + MagnoRetinaFilter _MagnoRetinaFilter; + RetinaColor _colorEngine; + + bool _useMinimalMemoryForToneMappingONLY; + bool _normalizeParvoOutput_0_maxOutputValue; + bool _normalizeMagnoOutput_0_maxOutputValue; + float _maxOutputValue; + bool _useColorMode; + + void _setInitPeriodCount(); + void _processRetinaParvoMagnoMapping(); + void _runGrayToneMapping(const cv::ocl::oclMat &grayImageInput, cv::ocl::oclMat &grayImageOutput , const float PhotoreceptorsCompression = 0.6, const float ganglionCellsCompression = 0.6); +}; + +} /* namespace ocl */ +} /* namespace bioinspired */ +} /* namespace cv */ + +#endif /* HAVE_OPENCV_OCL */ +#endif /* __OCL_RETINA_HPP__ */ diff --git a/modules/bioinspired/test/test_retina_ocl.cpp b/modules/bioinspired/test/test_retina_ocl.cpp new file mode 100644 index 0000000000..ea40bf6aa6 --- /dev/null +++ b/modules/bioinspired/test/test_retina_ocl.cpp @@ -0,0 +1,139 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// @Authors +// Peng Xiao, pengxiao@multicorewareinc.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other oclMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "test_precomp.hpp" +#include "opencv2/opencv_modules.hpp" +#include "opencv2/bioinspired.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/ocl.hpp" + +#if defined(HAVE_OPENCV_OCL) && defined(HAVE_OPENCL) + +#define RETINA_ITERATIONS 5 + +static double checkNear(const cv::Mat &m1, const cv::Mat &m2) +{ + return cv::norm(m1, m2, cv::NORM_INF); +} + +#define PARAM_TEST_CASE(name, ...) 
struct name : testing::TestWithParam< std::tr1::tuple< __VA_ARGS__ > > +#define GET_PARAM(k) std::tr1::get< k >(GetParam()) + + +PARAM_TEST_CASE(Retina_OCL, bool, int, bool, double, double) +{ + bool colorMode; + int colorSamplingMethod; + bool useLogSampling; + double reductionFactor; + double samplingStrength; + + std::vector infos; + + virtual void SetUp() + { + colorMode = GET_PARAM(0); + colorSamplingMethod = GET_PARAM(1); + useLogSampling = GET_PARAM(2); + reductionFactor = GET_PARAM(3); + samplingStrength = GET_PARAM(4); + + cv::ocl::getDevice(infos); + std::cout << "Device name:" << infos[0].DeviceName[0] << std::endl; + } +}; + +TEST_P(Retina_OCL, Accuracy) +{ + using namespace cv; + Mat input = imread(cvtest::TS::ptr()->get_data_path() + "shared/lena.png", colorMode); + CV_Assert(!input.empty()); + ocl::oclMat ocl_input(input); + + Ptr ocl_retina = bioinspired::createRetina_OCL( + input.size(), + colorMode, + colorSamplingMethod, + useLogSampling, + reductionFactor, + samplingStrength); + + Ptr gold_retina = bioinspired::createRetina( + input.size(), + colorMode, + colorSamplingMethod, + useLogSampling, + reductionFactor, + samplingStrength); + + Mat gold_parvo; + Mat gold_magno; + ocl::oclMat ocl_parvo; + ocl::oclMat ocl_magno; + + for(int i = 0; i < RETINA_ITERATIONS; i ++) + { + ocl_retina->run(ocl_input); + gold_retina->run(input); + + gold_retina->getParvo(gold_parvo); + gold_retina->getMagno(gold_magno); + + ocl_retina->getParvo(ocl_parvo); + ocl_retina->getMagno(ocl_magno); + + EXPECT_LE(checkNear(gold_parvo, (Mat)ocl_parvo), 1.0); + EXPECT_LE(checkNear(gold_magno, (Mat)ocl_magno), 1.0); + } +} + +INSTANTIATE_TEST_CASE_P(Contrib, Retina_OCL, testing::Combine( + testing::Values(false, true), + testing::Values((int)cv::bioinspired::RETINA_COLOR_BAYER), + testing::Values(false/*,true*/), + testing::Values(1.0, 0.5), + testing::Values(10.0, 5.0))); +#endif From 93f05fed35b40ae3fb215beb272eb90373b9fad3 Mon Sep 17 00:00:00 2001 From: Miroslav Kobetski 
Date: Tue, 13 Aug 2013 10:34:31 +0200 Subject: [PATCH 018/139] Fixed elif/elseif error that made highgui-windows fail on mac+cocoa. --- cmake/OpenCVFindLibsGUI.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/OpenCVFindLibsGUI.cmake b/cmake/OpenCVFindLibsGUI.cmake index 04c77d8ec5..270853aeec 100644 --- a/cmake/OpenCVFindLibsGUI.cmake +++ b/cmake/OpenCVFindLibsGUI.cmake @@ -70,7 +70,7 @@ endif(WITH_OPENGL) if(APPLE) if(WITH_CARBON) set(HAVE_CARBON YES) - elif(NOT IOS) + elseif(NOT IOS) set(HAVE_COCOA YES) endif() endif() From 2fb2ac88360f6f2c0dde2ea54e32d2d643b53360 Mon Sep 17 00:00:00 2001 From: peng xiao Date: Tue, 13 Aug 2013 17:17:07 +0800 Subject: [PATCH 019/139] Apply bugfix #3121 for OCL. --- modules/ocl/src/color.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ocl/src/color.cpp b/modules/ocl/src/color.cpp index e14bd27569..493dbc33c4 100644 --- a/modules/ocl/src/color.cpp +++ b/modules/ocl/src/color.cpp @@ -213,7 +213,7 @@ void cvtColor_caller(const oclMat &src, oclMat &dst, int code, int dcn) case CV_RGB2YUV: { CV_Assert(scn == 3 || scn == 4); - bidx = code == CV_BGR2YUV ? 0 : 2; + bidx = code == CV_RGB2YUV ? 0 : 2; dst.create(sz, CV_MAKETYPE(depth, 3)); RGB2YUV_caller(src, dst, bidx); break; @@ -222,7 +222,7 @@ void cvtColor_caller(const oclMat &src, oclMat &dst, int code, int dcn) case CV_YUV2RGB: { CV_Assert(scn == 3 || scn == 4); - bidx = code == CV_YUV2BGR ? 0 : 2; + bidx = code == CV_YUV2RGB ? 
0 : 2; dst.create(sz, CV_MAKETYPE(depth, 3)); YUV2RGB_caller(src, dst, bidx); break; From 4902db1c42a5ee268e661a9049eec306bae76b93 Mon Sep 17 00:00:00 2001 From: Andrey Pavlenko Date: Tue, 13 Aug 2013 13:18:01 +0400 Subject: [PATCH 020/139] also fixing JNI library name in JUnit tests --- modules/java/test/src/org/opencv/test/OpenCVTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/java/test/src/org/opencv/test/OpenCVTestCase.java b/modules/java/test/src/org/opencv/test/OpenCVTestCase.java index ac1bf863ce..a9b5041189 100644 --- a/modules/java/test/src/org/opencv/test/OpenCVTestCase.java +++ b/modules/java/test/src/org/opencv/test/OpenCVTestCase.java @@ -97,7 +97,7 @@ public class OpenCVTestCase extends TestCase { super.setUp(); try { - System.loadLibrary("opencv_java"); + System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } catch (SecurityException e) { System.out.println(e.toString()); System.exit(-1); From f3b1933a923a615a9f7bc6a5159182ca9cce25ba Mon Sep 17 00:00:00 2001 From: Andrey Pavlenko Date: Tue, 13 Aug 2013 15:51:37 +0400 Subject: [PATCH 021/139] LinkedList -> ArrayList --- modules/java/generator/gen_java.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index 1e084c70e3..254298d8b0 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -521,7 +521,7 @@ JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Core_n_1getTextSize public java.util.List getSupportedPreviewSizes() { String[] sizes_str = getSupportedPreviewSizes_0(nativeObj).split(","); - java.util.List sizes = new java.util.LinkedList(); + java.util.List sizes = new java.util.ArrayList(sizes_str.length); for (String str : sizes_str) { String[] wh = str.split("x"); From 89012e8f6bb4654e0b0fac5065be9be94132868d Mon Sep 17 00:00:00 2001 From: StevenPuttemans Date: Tue, 13 Aug 2013 14:12:57 +0200 Subject: [PATCH 022/139] Missing including of 
nonfree module to support the use of SURF detector/descriptor. --- doc/tutorials/features2d/feature_detection/feature_detection.rst | 1 + .../features2d/feature_flann_matcher/feature_flann_matcher.rst | 1 + .../features2d/feature_homography/feature_homography.rst | 1 + 3 files changed, 3 insertions(+) diff --git a/doc/tutorials/features2d/feature_detection/feature_detection.rst b/doc/tutorials/features2d/feature_detection/feature_detection.rst index 26798f8f6f..1051fe5dee 100644 --- a/doc/tutorials/features2d/feature_detection/feature_detection.rst +++ b/doc/tutorials/features2d/feature_detection/feature_detection.rst @@ -31,6 +31,7 @@ This tutorial code's is shown lines below. You can also download it from `here < #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" + #include "opencv2/nonfree/nonfree.hpp" using namespace cv; diff --git a/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst b/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst index 54d28890ab..3bf757fc00 100644 --- a/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst +++ b/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst @@ -28,6 +28,7 @@ This tutorial code's is shown lines below. You can also download it from `here < #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" + #include "opencv2/nonfree/nonfree.hpp" using namespace cv; diff --git a/doc/tutorials/features2d/feature_homography/feature_homography.rst b/doc/tutorials/features2d/feature_homography/feature_homography.rst index ad764ce9b7..eb06083b05 100644 --- a/doc/tutorials/features2d/feature_homography/feature_homography.rst +++ b/doc/tutorials/features2d/feature_homography/feature_homography.rst @@ -30,6 +30,7 @@ This tutorial code's is shown lines below. 
You can also download it from `here < #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/calib3d/calib3d.hpp" + #include "opencv2/nonfree/nonfree.hpp" using namespace cv; From 96c1df14f4724c9710e845f83b79d6b0f43dc2d6 Mon Sep 17 00:00:00 2001 From: abidrahmank Date: Tue, 13 Aug 2013 19:51:32 +0530 Subject: [PATCH 023/139] CLAHE Python bindings --- .../imgproc/include/opencv2/imgproc/imgproc.hpp | 16 ++++++++-------- modules/python/src2/cv2.cpp | 1 + 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/modules/imgproc/include/opencv2/imgproc/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc/imgproc.hpp index 223ee32770..f51bbaab77 100644 --- a/modules/imgproc/include/opencv2/imgproc/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc/imgproc.hpp @@ -759,20 +759,20 @@ CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int met //! normalizes the grayscale image brightness and contrast by normalizing its histogram CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst ); -class CV_EXPORTS CLAHE : public Algorithm +class CV_EXPORTS_W CLAHE : public Algorithm { public: - virtual void apply(InputArray src, OutputArray dst) = 0; + CV_WRAP virtual void apply(InputArray src, OutputArray dst) = 0; - virtual void setClipLimit(double clipLimit) = 0; - virtual double getClipLimit() const = 0; + CV_WRAP virtual void setClipLimit(double clipLimit) = 0; + CV_WRAP virtual double getClipLimit() const = 0; - virtual void setTilesGridSize(Size tileGridSize) = 0; - virtual Size getTilesGridSize() const = 0; + CV_WRAP virtual void setTilesGridSize(Size tileGridSize) = 0; + CV_WRAP virtual Size getTilesGridSize() const = 0; - virtual void collectGarbage() = 0; + CV_WRAP virtual void collectGarbage() = 0; }; -CV_EXPORTS Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); +CV_EXPORTS_W Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); CV_EXPORTS 
float EMD( InputArray signature1, InputArray signature2, int distType, InputArray cost=noArray(), diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp index 64c4ad9ff3..610d71863d 100644 --- a/modules/python/src2/cv2.cpp +++ b/modules/python/src2/cv2.cpp @@ -123,6 +123,7 @@ typedef Ptr Ptr_FeatureDetector; typedef Ptr Ptr_DescriptorExtractor; typedef Ptr Ptr_Feature2D; typedef Ptr Ptr_DescriptorMatcher; +typedef Ptr Ptr_CLAHE; typedef SimpleBlobDetector::Params SimpleBlobDetector_Params; From 124ede611b89e0b24ba7ae251cc6f6b29de94829 Mon Sep 17 00:00:00 2001 From: peng xiao Date: Wed, 14 Aug 2013 10:00:09 +0800 Subject: [PATCH 024/139] Update with apavlenko's suggestions. --- modules/ocl/include/opencv2/ocl/ocl.hpp | 2 +- modules/ocl/src/filtering.cpp | 15 +++++++++------ modules/ocl/src/opencl/filtering_laplacian.cl | 1 - 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/modules/ocl/include/opencv2/ocl/ocl.hpp b/modules/ocl/include/opencv2/ocl/ocl.hpp index 3c83129881..aa0283fbeb 100644 --- a/modules/ocl/include/opencv2/ocl/ocl.hpp +++ b/modules/ocl/include/opencv2/ocl/ocl.hpp @@ -691,7 +691,7 @@ namespace cv //! returns 2D filter with the specified kernel // supports CV_8UC1 and CV_8UC4 types CV_EXPORTS Ptr getLinearFilter_GPU(int srcType, int dstType, const Mat &kernel, const Size &ksize, - Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT); + const Point &anchor = Point(-1, -1), int borderType = BORDER_DEFAULT); //! 
returns the non-separable linear filter engine CV_EXPORTS Ptr createLinearFilter_GPU(int srcType, int dstType, const Mat &kernel, diff --git a/modules/ocl/src/filtering.cpp b/modules/ocl/src/filtering.cpp index 324bf83eb7..6e858d1d6b 100644 --- a/modules/ocl/src/filtering.cpp +++ b/modules/ocl/src/filtering.cpp @@ -572,7 +572,7 @@ void cv::ocl::morphologyEx(const oclMat &src, oclMat &dst, int op, const Mat &ke namespace { -typedef void (*GPUFilter2D_t)(const oclMat & , oclMat & , oclMat & , Size &, const Point, const int); +typedef void (*GPUFilter2D_t)(const oclMat & , oclMat & , const oclMat & , const Size &, const Point&, const int); class LinearFilter_GPU : public BaseFilter_GPU { @@ -591,8 +591,8 @@ public: }; } -static void GPUFilter2D(const oclMat &src, oclMat &dst, oclMat &mat_kernel, - Size &ksize, const Point anchor, const int borderType) +static void GPUFilter2D(const oclMat &src, oclMat &dst, const oclMat &mat_kernel, + const Size &ksize, const Point& anchor, const int borderType) { CV_Assert(src.clCxt == dst.clCxt); CV_Assert((src.cols == dst.cols) && @@ -614,7 +614,7 @@ static void GPUFilter2D(const oclMat &src, oclMat &dst, oclMat &mat_kernel, size_t dst_offset_x = (dst.offset % dst.step) / dst.elemSize(); size_t dst_offset_y = dst.offset / dst.step; - int paddingPixels = (int)(filterWidth/2)*2; + int paddingPixels = filterWidth & (-2); size_t localThreads[3] = {ksize_3x3 ? 256 : 16, ksize_3x3 ? 1 : 16, 1}; size_t globalThreads[3] = {src.wholecols, src.wholerows, 1}; @@ -626,6 +626,8 @@ static void GPUFilter2D(const oclMat &src, oclMat &dst, oclMat &mat_kernel, int localWidth = localThreads[0] + paddingPixels; int localHeight = localThreads[1] + paddingPixels; + // 260 = divup((localThreads[0] + filterWidth * 2), 4) * 4 + // 6 = (ROWS_PER_GROUP_WHICH_IS_4 + filterWidth * 2) size_t localMemSize = ksize_3x3 ? 
260 * 6 * src.elemSize() : (localWidth * localHeight) * src.elemSize(); int vector_lengths[4][7] = {{4, 4, 4, 4, 4, 4, 4}, @@ -677,15 +679,16 @@ static void GPUFilter2D(const oclMat &src, oclMat &dst, oclMat &mat_kernel, } Ptr cv::ocl::getLinearFilter_GPU(int srcType, int dstType, const Mat &kernel, const Size &ksize, - Point anchor, int borderType) + const Point &anchor, int borderType) { static const GPUFilter2D_t GPUFilter2D_callers[] = {0, GPUFilter2D, 0, GPUFilter2D, GPUFilter2D}; CV_Assert((srcType == CV_8UC1 || srcType == CV_8UC3 || srcType == CV_8UC4 || srcType == CV_32FC1 || srcType == CV_32FC3 || srcType == CV_32FC4) && dstType == srcType); oclMat gpu_krnl; + Point norm_archor = anchor; normalizeKernel(kernel, gpu_krnl, CV_32FC1); - normalizeAnchor(anchor, ksize); + normalizeAnchor(norm_archor, ksize); return Ptr(new LinearFilter_GPU(ksize, anchor, gpu_krnl, GPUFilter2D_callers[CV_MAT_CN(srcType)], borderType)); diff --git a/modules/ocl/src/opencl/filtering_laplacian.cl b/modules/ocl/src/opencl/filtering_laplacian.cl index 5016b0b6aa..f7430d5332 100644 --- a/modules/ocl/src/opencl/filtering_laplacian.cl +++ b/modules/ocl/src/opencl/filtering_laplacian.cl @@ -44,7 +44,6 @@ // the use of this software, even if advised of the possibility of such damage. 
// //M*/ -//#define BORDER_REFLECT_101 /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////Macro for border type//////////////////////////////////////////// From 0bc609257d1b3e47162761e3e6034cd031358e79 Mon Sep 17 00:00:00 2001 From: Achal Dave Date: Fri, 31 May 2013 18:34:20 -0600 Subject: [PATCH 025/139] Comparing rejected stage index with minimum required neighbors does not make sense As described here, http://code.opencv.org/issues/3064, the test before led to a case where we compared the index of the latest stage at which a rectangle was rejected with the minimum number of neighbors required as specified in `detectMultiScale`. Please see the issue for more information. --- modules/objdetect/src/cascadedetect.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/objdetect/src/cascadedetect.cpp b/modules/objdetect/src/cascadedetect.cpp index 9373f1c1dd..2be6f1b182 100644 --- a/modules/objdetect/src/cascadedetect.cpp +++ b/modules/objdetect/src/cascadedetect.cpp @@ -196,8 +196,11 @@ void groupRectangles(std::vector& rectList, int groupThreshold, double eps for( i = 0; i < nclasses; i++ ) { Rect r1 = rrects[i]; - int n1 = levelWeights ? 
rejectLevels[i] : rweights[i]; + int n1 = rweights[i]; double w1 = rejectWeights[i]; + int l1 = rejectLevels[i]; + + // filter out rectangles which don't have enough similar rectangles if( n1 <= groupThreshold ) continue; // filter out small face rectangles inside large rectangles @@ -225,7 +228,7 @@ void groupRectangles(std::vector& rectList, int groupThreshold, double eps { rectList.push_back(r1); if( weights ) - weights->push_back(n1); + weights->push_back(l1); if( levelWeights ) levelWeights->push_back(w1); } @@ -988,7 +991,7 @@ public: { if( result == 1 ) result = -(int)classifier->data.stages.size(); - if( classifier->data.stages.size() + result < 4 ) + if( classifier->data.stages.size() + result == 0 ) { mtx->lock(); rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor), winSize.width, winSize.height)); From c1dd38cbff27957bba22571bdd62b0ece381b37b Mon Sep 17 00:00:00 2001 From: Changlin Hsieh Date: Wed, 14 Aug 2013 16:16:03 +0800 Subject: [PATCH 026/139] Fix GPU example build failed with cuda 5.5 --- samples/gpu/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/gpu/CMakeLists.txt b/samples/gpu/CMakeLists.txt index 85bee50585..8d14e15469 100644 --- a/samples/gpu/CMakeLists.txt +++ b/samples/gpu/CMakeLists.txt @@ -36,7 +36,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) set(the_target "example_${project}_${name}") add_executable(${the_target} ${srcs}) - target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_GPU_SAMPLES_REQUIRED_DEPS}) + target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_GPU_SAMPLES_REQUIRED_DEPS} cuda) if(HAVE_opencv_nonfree) target_link_libraries(${the_target} opencv_nonfree) endif() From 4eb85189e547f6175ef0f626565ebf9ad51313de Mon Sep 17 00:00:00 2001 From: Dmitry Retinskiy Date: Wed, 14 Aug 2013 15:30:35 +0400 Subject: [PATCH 027/139] bug #3094: added CV_Assert to PyrDown_ to check if source matrix is not empty --- 
modules/imgproc/src/pyramids.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp index e7d315cb52..01e510e7de 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -191,6 +191,7 @@ pyrDown_( const Mat& _src, Mat& _dst, int borderType ) typedef typename CastOp::type1 WT; typedef typename CastOp::rtype T; + CV_Assert( !_src.empty() ); Size ssize = _src.size(), dsize = _dst.size(); int cn = _src.channels(); int bufstep = (int)alignSize(dsize.width*cn, 16); From 7ff07e94a3e66dfcea6bd6e290a07a41a2712098 Mon Sep 17 00:00:00 2001 From: kamjagin Date: Wed, 14 Aug 2013 13:33:47 +0200 Subject: [PATCH 028/139] Fix for bug Bug #3215. Added HAVE_QTKIT as a separate mode from HAVE_QUICKTIME --- CMakeLists.txt | 4 ++-- cmake/OpenCVFindLibsVideo.cmake | 2 ++ cmake/templates/cvconfig.h.cmake | 3 +++ modules/highgui/CMakeLists.txt | 2 +- modules/highgui/src/cap.cpp | 9 +++++---- 5 files changed, 13 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 46881c4531..0e745e7f97 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -745,8 +745,8 @@ if(DEFINED WITH_GIGEAPI) endif(DEFINED WITH_GIGEAPI) if(DEFINED WITH_QUICKTIME) - status(" QuickTime:" WITH_QUICKTIME THEN YES ELSE NO) - status(" QTKit:" WITH_QUICKTIME THEN NO ELSE YES) + status(" QuickTime:" HAVE_QUICKTIME THEN YES ELSE NO) + status(" QTKit:" HAVE_QTKIT THEN YES ELSE NO) endif(DEFINED WITH_QUICKTIME) if(DEFINED WITH_UNICAP) diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index 027b53668a..d80531bf44 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -244,4 +244,6 @@ endif() # --- QuickTime --- if(WITH_QUICKTIME) set(HAVE_QUICKTIME YES) +elseif(APPLE) + set(HAVE_QTKIT YES) endif() diff --git a/cmake/templates/cvconfig.h.cmake b/cmake/templates/cvconfig.h.cmake index 7587eef6be..833d1eb3d8 100644 --- a/cmake/templates/cvconfig.h.cmake 
+++ b/cmake/templates/cvconfig.h.cmake @@ -133,6 +133,9 @@ /* QuickTime video libraries */ #cmakedefine HAVE_QUICKTIME +/* QTKit video libraries */ +#cmakedefine HAVE_QTKIT + /* Intel Threading Building Blocks */ #cmakedefine HAVE_TBB diff --git a/modules/highgui/CMakeLists.txt b/modules/highgui/CMakeLists.txt index 3d7667b65f..b49d93a965 100644 --- a/modules/highgui/CMakeLists.txt +++ b/modules/highgui/CMakeLists.txt @@ -213,7 +213,7 @@ endif() if(HAVE_QUICKTIME) list(APPEND highgui_srcs src/cap_qt.cpp) list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore") -elseif(APPLE) +elseif(HAVE_QTKIT) list(APPEND highgui_srcs src/cap_qtkit.mm) list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit") endif() diff --git a/modules/highgui/src/cap.cpp b/modules/highgui/src/cap.cpp index b5cdc5e9f9..e9242b849d 100644 --- a/modules/highgui/src/cap.cpp +++ b/modules/highgui/src/cap.cpp @@ -135,7 +135,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) #ifdef HAVE_MIL CV_CAP_MIL, #endif -#ifdef HAVE_QUICKTIME +if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) CV_CAP_QT, #endif #ifdef HAVE_UNICAP @@ -185,6 +185,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) defined(HAVE_CMU1394) || \ defined(HAVE_MIL) || \ defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ defined(HAVE_UNICAP) || \ defined(HAVE_PVAPI) || \ defined(HAVE_OPENNI) || \ @@ -277,7 +278,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) break; #endif -#ifdef HAVE_QUICKTIME +#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) case CV_CAP_QT: capture = cvCreateCameraCapture_QT (index); if (capture) @@ -378,7 +379,7 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename) result = cvCreateCapture_GStreamer (CV_CAP_GSTREAMER_FILE, filename); #endif -#ifdef HAVE_QUICKTIME +#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) if (! 
result) result = cvCreateFileCapture_QT (filename); #endif @@ -436,7 +437,7 @@ CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc, result = cvCreateVideoWriter_AVFoundation(filename, fourcc, fps, frameSize, is_color); #endif -#ifdef HAVE_QUICKTIME +#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) if(!result) result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color); #endif From c24c1a5926c139ea1b5c48fda7e9332a69e00719 Mon Sep 17 00:00:00 2001 From: kamjagin Date: Wed, 14 Aug 2013 13:40:50 +0200 Subject: [PATCH 029/139] added HAVE_QTKIT to tests --- modules/gpu/perf/perf_video.cpp | 1 + modules/gpu/test/test_bgfg.cpp | 1 + modules/highgui/perf/perf_precomp.hpp | 2 ++ modules/highgui/test/test_precomp.hpp | 3 +++ 4 files changed, 7 insertions(+) diff --git a/modules/gpu/perf/perf_video.cpp b/modules/gpu/perf/perf_video.cpp index 672d657b21..f0f33970fe 100644 --- a/modules/gpu/perf/perf_video.cpp +++ b/modules/gpu/perf/perf_video.cpp @@ -49,6 +49,7 @@ using namespace perf; #if defined(HAVE_XINE) || \ defined(HAVE_GSTREAMER) || \ defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ defined(HAVE_AVFOUNDATION) || \ defined(HAVE_FFMPEG) || \ defined(WIN32) /* assume that we have ffmpeg */ diff --git a/modules/gpu/test/test_bgfg.cpp b/modules/gpu/test/test_bgfg.cpp index 0ab89842d6..6ef93c51b4 100644 --- a/modules/gpu/test/test_bgfg.cpp +++ b/modules/gpu/test/test_bgfg.cpp @@ -49,6 +49,7 @@ using namespace cvtest; #if defined(HAVE_XINE) || \ defined(HAVE_GSTREAMER) || \ defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ defined(HAVE_AVFOUNDATION) || \ defined(HAVE_FFMPEG) || \ defined(WIN32) /* assume that we have ffmpeg */ diff --git a/modules/highgui/perf/perf_precomp.hpp b/modules/highgui/perf/perf_precomp.hpp index d6b28b6d23..7a9ea8adcc 100644 --- a/modules/highgui/perf/perf_precomp.hpp +++ b/modules/highgui/perf/perf_precomp.hpp @@ -19,6 +19,7 @@ #if defined(HAVE_XINE) || \ defined(HAVE_GSTREAMER) || \ 
defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ defined(HAVE_AVFOUNDATION) || \ defined(HAVE_FFMPEG) || \ defined(HAVE_MSMF) || \ @@ -33,6 +34,7 @@ #if /*defined(HAVE_XINE) || */\ defined(HAVE_GSTREAMER) || \ defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ defined(HAVE_AVFOUNDATION) || \ defined(HAVE_FFMPEG) || \ defined(HAVE_MSMF) || \ diff --git a/modules/highgui/test/test_precomp.hpp b/modules/highgui/test/test_precomp.hpp index 5e030810fa..7e9f4c63af 100644 --- a/modules/highgui/test/test_precomp.hpp +++ b/modules/highgui/test/test_precomp.hpp @@ -27,6 +27,7 @@ defined(HAVE_CMU1394) || \ defined(HAVE_MIL) || \ defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ defined(HAVE_UNICAP) || \ defined(HAVE_PVAPI) || \ defined(HAVE_OPENNI) || \ @@ -43,6 +44,7 @@ #if defined(HAVE_XINE) || \ defined(HAVE_GSTREAMER) || \ defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ defined(HAVE_AVFOUNDATION) || \ /*defined(HAVE_OPENNI) || too specialized */ \ defined(HAVE_FFMPEG) || \ @@ -55,6 +57,7 @@ #if /*defined(HAVE_XINE) || */\ defined(HAVE_GSTREAMER) || \ defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ defined(HAVE_AVFOUNDATION) || \ defined(HAVE_FFMPEG) || \ defined(HAVE_MSMF) From 69287c936bb5c3a6d62169c72e48211a49efcd9f Mon Sep 17 00:00:00 2001 From: kamjagin Date: Wed, 14 Aug 2013 14:25:46 +0200 Subject: [PATCH 030/139] missed # in if defined --- modules/highgui/src/cap.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/highgui/src/cap.cpp b/modules/highgui/src/cap.cpp index e9242b849d..c97db180fe 100644 --- a/modules/highgui/src/cap.cpp +++ b/modules/highgui/src/cap.cpp @@ -135,7 +135,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index) #ifdef HAVE_MIL CV_CAP_MIL, #endif -if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) +#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) CV_CAP_QT, #endif #ifdef HAVE_UNICAP From 21641d04a73aeb612aa614611595bc0fad165fc6 Mon Sep 17 00:00:00 2001 From: Nghia Ho Date: Wed, 14 
Aug 2013 23:36:29 +1000 Subject: [PATCH 031/139] Fixed mismatching allocation and deallocation: el_ptr --- samples/c/mushroom.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/c/mushroom.cpp b/samples/c/mushroom.cpp index 5763b12464..60eb9f066c 100644 --- a/samples/c/mushroom.cpp +++ b/samples/c/mushroom.cpp @@ -90,7 +90,7 @@ static int mushroom_read_database( const char* filename, CvMat** data, CvMat** m } cvReleaseMemStorage( &storage ); - delete el_ptr; + delete [] el_ptr; return 1; } From f013baf73955fd176b0fdca549ce1c6a2722c213 Mon Sep 17 00:00:00 2001 From: Dmitry Retinskiy Date: Wed, 14 Aug 2013 17:08:58 +0400 Subject: [PATCH 032/139] bug #1922: restored most of the class definitions in basic_structures.rst --- modules/core/doc/basic_structures.rst | 318 +++++++++++++++++++++++++- 1 file changed, 308 insertions(+), 10 deletions(-) diff --git a/modules/core/doc/basic_structures.rst b/modules/core/doc/basic_structures.rst index 981ac5c7f2..ff8470183c 100644 --- a/modules/core/doc/basic_structures.rst +++ b/modules/core/doc/basic_structures.rst @@ -49,6 +49,43 @@ Point\_ ------- .. ocv:class:: Point_ +:: + + template class CV_EXPORTS Point_ + { + public: + typedef _Tp value_type; + + // various constructors + Point_(); + Point_(_Tp _x, _Tp _y); + Point_(const Point_& pt); + Point_(const CvPoint& pt); + Point_(const CvPoint2D32f& pt); + Point_(const Size_<_Tp>& sz); + Point_(const Vec<_Tp, 2>& v); + + Point_& operator = (const Point_& pt); + //! conversion to another data type + template operator Point_<_Tp2>() const; + + //! conversion to the old-style C structures + operator CvPoint() const; + operator CvPoint2D32f() const; + operator Vec<_Tp, 2>() const; + + //! dot product + _Tp dot(const Point_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point_& pt) const; + //! cross-product + double cross(const Point_& pt) const; + //! 
checks whether the point is inside the specified rectangle + bool inside(const Rect_<_Tp>& r) const; + + _Tp x, y; //< the point coordinates + }; + Template class for 2D points specified by its coordinates :math:`x` and :math:`y` . @@ -84,6 +121,39 @@ Point3\_ -------- .. ocv:class:: Point3_ +:: + + template class CV_EXPORTS Point3_ + { + public: + typedef _Tp value_type; + + // various constructors + Point3_(); + Point3_(_Tp _x, _Tp _y, _Tp _z); + Point3_(const Point3_& pt); + explicit Point3_(const Point_<_Tp>& pt); + Point3_(const CvPoint3D32f& pt); + Point3_(const Vec<_Tp, 3>& v); + + Point3_& operator = (const Point3_& pt); + //! conversion to another data type + template operator Point3_<_Tp2>() const; + //! conversion to the old-style CvPoint... + operator CvPoint3D32f() const; + //! conversion to cv::Vec<> + operator Vec<_Tp, 3>() const; + + //! dot product + _Tp dot(const Point3_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point3_& pt) const; + //! cross product of the 2 3D points + Point3_ cross(const Point3_& pt) const; + + _Tp x, y, z; //< the point coordinates + }; + Template class for 3D points specified by its coordinates :math:`x`, :math:`y` and @@ -100,6 +170,35 @@ Size\_ ------ .. ocv:class:: Size_ +:: + + template class CV_EXPORTS Size_ + { + public: + typedef _Tp value_type; + + //! various constructors + Size_(); + Size_(_Tp _width, _Tp _height); + Size_(const Size_& sz); + Size_(const CvSize& sz); + Size_(const CvSize2D32f& sz); + Size_(const Point_<_Tp>& pt); + + Size_& operator = (const Size_& sz); + //! the area (width*height) + _Tp area() const; + + //! conversion of another data type. + template operator Size_<_Tp2>() const; + + //! conversion to the old-style OpenCV types + operator CvSize() const; + operator CvSize2D32f() const; + + _Tp width, height; // the width and the height + }; + Template class for specifying the size of an image or rectangle. 
The class includes two members called ``width`` and ``height``. The structure can be converted to and from the old OpenCV structures ``CvSize`` and ``CvSize2D32f`` . The same set of arithmetic and comparison operations as for ``Point_`` is available. @@ -113,6 +212,43 @@ Rect\_ ------ .. ocv:class:: Rect_ +:: + + template class CV_EXPORTS Rect_ + { + public: + typedef _Tp value_type; + + //! various constructors + Rect_(); + Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height); + Rect_(const Rect_& r); + Rect_(const CvRect& r); + Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz); + Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2); + + Rect_& operator = ( const Rect_& r ); + //! the top-left corner + Point_<_Tp> tl() const; + //! the bottom-right corner + Point_<_Tp> br() const; + + //! size (width, height) of the rectangle + Size_<_Tp> size() const; + //! area (width*height) of the rectangle + _Tp area() const; + + //! conversion to another data type + template operator Rect_<_Tp2>() const; + //! conversion to the old-style CvRect + operator CvRect() const; + + //! checks whether the rectangle contains the point + bool contains(const Point_<_Tp>& pt) const; + + _Tp x, y, width, height; //< the top-left corner, as well as width and height of the rectangle + }; + Template class for 2D rectangles, described by the following parameters: * Coordinates of the top-left corner. This is a default interpretation of ``Rect_::x`` and ``Rect_::y`` in OpenCV. Though, in your algorithms you may count ``x`` and ``y`` from the bottom-left corner. @@ -171,6 +307,28 @@ RotatedRect ----------- .. ocv:class:: RotatedRect +:: + + class CV_EXPORTS RotatedRect + { + public: + //! various constructors + RotatedRect(); + RotatedRect(const Point2f& center, const Size2f& size, float angle); + RotatedRect(const CvBox2D& box); + + //! returns 4 vertices of the rectangle + void points(Point2f pts[]) const; + //! 
returns the minimal up-right rectangle containing the rotated rectangle + Rect boundingRect() const; + //! conversion to the old-style CvBox2D structure + operator CvBox2D() const; + + Point2f center; //< the rectangle mass center + Size2f size; //< width and height of the rectangle + float angle; //< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. + }; + The class represents rotated (i.e. not up-right) rectangles on a plane. Each rectangle is specified by the center point (mass center), length of each side (represented by cv::Size2f structure) and the rotation angle in degrees. .. ocv:function:: RotatedRect::RotatedRect() @@ -219,7 +377,33 @@ TermCriteria ------------ .. ocv:class:: TermCriteria - The class defining termination criteria for iterative algorithms. You can initialize it by default constructor and then override any parameters, or the structure may be fully initialized using the advanced variant of the constructor. +:: + + class CV_EXPORTS TermCriteria + { + public: + enum + { + COUNT=1, //!< the maximum number of iterations or elements to compute + MAX_ITER=COUNT, //!< ditto + EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops + }; + + //! default constructor + TermCriteria(); + //! full constructor + TermCriteria(int type, int maxCount, double epsilon); + //! conversion from CvTermCriteria + TermCriteria(const CvTermCriteria& criteria); + //! conversion to CvTermCriteria + operator CvTermCriteria() const; + + int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS + int maxCount; // the maximum number of iterations/elements + double epsilon; // the desired accuracy + }; + +The class defining termination criteria for iterative algorithms. You can initialize it by default constructor and then override any parameters, or the structure may be fully initialized using the advanced variant of the constructor. 
TermCriteria::TermCriteria -------------------------- @@ -330,6 +514,35 @@ Scalar\_ -------- .. ocv:class:: Scalar_ +:: + + template class CV_EXPORTS Scalar_ : public Vec<_Tp, 4> + { + public: + //! various constructors + Scalar_(); + Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0); + Scalar_(const CvScalar& s); + Scalar_(_Tp v0); + + //! returns a scalar with all elements set to v0 + static Scalar_<_Tp> all(_Tp v0); + //! conversion to the old-style CvScalar + operator CvScalar() const; + + //! conversion to another data type + template operator Scalar_() const; + + //! per-element product + Scalar_<_Tp> mul(const Scalar_<_Tp>& t, double scale=1 ) const; + + // returns (v0, -v1, -v2, -v3) + Scalar_<_Tp> conj() const; + + // returns true iff v1 == v2 == v3 == 0 + bool isReal() const; + }; + Template class for a 4-element vector derived from Vec. :: template class Scalar_ : public Vec<_Tp, 4> { ... }; @@ -342,12 +555,21 @@ Range ----- .. ocv:class:: Range -Template class specifying a continuous subsequence (slice) of a sequence. :: +Template class specifying a continuous subsequence (slice) of a sequence. + +:: - class Range + class CV_EXPORTS Range { public: - ... + Range(); + Range(int _start, int _end); + Range(const CvSlice& slice); + int size() const; + bool empty() const; + static Range all(); + operator CvSlice() const; + int start, end; }; @@ -536,8 +758,8 @@ Ptr::operator -> ---------------- Provide access to the object fields and methods. - .. ocv:function:: template _Tp* Ptr::operator -> () - .. ocv:function:: template const _Tp* Ptr::operator -> () const +.. ocv:function:: template _Tp* Ptr::operator -> () +.. ocv:function:: template const _Tp* Ptr::operator -> () const Ptr::operator _Tp* @@ -545,15 +767,16 @@ Ptr::operator _Tp* Returns the underlying object pointer. Thanks to the methods, the ``Ptr<_Tp>`` can be used instead of ``_Tp*``. - .. ocv:function:: template Ptr::operator _Tp* () - .. ocv:function:: template Ptr::operator const _Tp*() const +.. 
ocv:function:: template Ptr::operator _Tp* () +.. ocv:function:: template Ptr::operator const _Tp*() const Mat --- .. ocv:class:: Mat -OpenCV C++ n-dimensional dense array class :: +OpenCV C++ n-dimensional dense array class +:: class CV_EXPORTS Mat { @@ -583,7 +806,6 @@ OpenCV C++ n-dimensional dense array class :: ... }; - The class ``Mat`` represents an n-dimensional dense numerical single-channel or multi-channel array. It can be used to store real or complex-valued vectors and matrices, grayscale or color images, voxel volumes, vector fields, point clouds, tensors, histograms (though, very high-dimensional histograms may be better stored in a ``SparseMat`` ). The data layout of the array :math:`M` is defined by the array ``M.step[]``, so that the address of element :math:`(i_0,...,i_{M.dims-1})`, where @@ -2442,6 +2664,82 @@ Algorithm --------- .. ocv:class:: Algorithm +:: + + class CV_EXPORTS_W Algorithm + { + public: + Algorithm(); + virtual ~Algorithm(); + string name() const; + + template typename ParamType<_Tp>::member_type get(const string& name) const; + template typename ParamType<_Tp>::member_type get(const char* name) const; + + CV_WRAP int getInt(const string& name) const; + CV_WRAP double getDouble(const string& name) const; + CV_WRAP bool getBool(const string& name) const; + CV_WRAP string getString(const string& name) const; + CV_WRAP Mat getMat(const string& name) const; + CV_WRAP vector getMatVector(const string& name) const; + CV_WRAP Ptr getAlgorithm(const string& name) const; + + void set(const string& name, int value); + void set(const string& name, double value); + void set(const string& name, bool value); + void set(const string& name, const string& value); + void set(const string& name, const Mat& value); + void set(const string& name, const vector& value); + void set(const string& name, const Ptr& value); + template void set(const string& name, const Ptr<_Tp>& value); + + CV_WRAP void setInt(const string& name, int value); + CV_WRAP 
void setDouble(const string& name, double value); + CV_WRAP void setBool(const string& name, bool value); + CV_WRAP void setString(const string& name, const string& value); + CV_WRAP void setMat(const string& name, const Mat& value); + CV_WRAP void setMatVector(const string& name, const vector& value); + CV_WRAP void setAlgorithm(const string& name, const Ptr& value); + template void setAlgorithm(const string& name, const Ptr<_Tp>& value); + + void set(const char* name, int value); + void set(const char* name, double value); + void set(const char* name, bool value); + void set(const char* name, const string& value); + void set(const char* name, const Mat& value); + void set(const char* name, const vector& value); + void set(const char* name, const Ptr& value); + template void set(const char* name, const Ptr<_Tp>& value); + + void setInt(const char* name, int value); + void setDouble(const char* name, double value); + void setBool(const char* name, bool value); + void setString(const char* name, const string& value); + void setMat(const char* name, const Mat& value); + void setMatVector(const char* name, const vector& value); + void setAlgorithm(const char* name, const Ptr& value); + template void setAlgorithm(const char* name, const Ptr<_Tp>& value); + + CV_WRAP string paramHelp(const string& name) const; + int paramType(const char* name) const; + CV_WRAP int paramType(const string& name) const; + CV_WRAP void getParams(CV_OUT vector& names) const; + + + virtual void write(FileStorage& fs) const; + virtual void read(const FileNode& fn); + + typedef Algorithm* (*Constructor)(void); + typedef int (Algorithm::*Getter)() const; + typedef void (Algorithm::*Setter)(int); + + CV_WRAP static void getList(CV_OUT vector& algorithms); + CV_WRAP static Ptr _create(const string& name); + template static Ptr<_Tp> create(const string& name); + + virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; } + }; + This is a base class for all more or less complex 
algorithms in OpenCV, especially for classes of algorithms, for which there can be multiple implementations. The examples are stereo correspondence (for which there are algorithms like block matching, semi-global block matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck etc.). The class provides the following features for all derived classes: From 6cf9070b9a18ba5cb5e11137f49d04e44ee3f619 Mon Sep 17 00:00:00 2001 From: Bahram Dahi Date: Wed, 14 Aug 2013 15:08:34 -0700 Subject: [PATCH 033/139] Added ability to read several opencv types (Size, Point, etc.) to FileStorage. Solves issue #3196 --- .../core/include/opencv2/core/operations.hpp | 52 +++++++++++++++++++ modules/core/test/test_io.cpp | 36 ++++++++++++- 2 files changed, 87 insertions(+), 1 deletion(-) diff --git a/modules/core/include/opencv2/core/operations.hpp b/modules/core/include/opencv2/core/operations.hpp index 9d8696a05d..0c697275bb 100644 --- a/modules/core/include/opencv2/core/operations.hpp +++ b/modules/core/include/opencv2/core/operations.hpp @@ -3001,6 +3001,58 @@ static inline void read(const FileNode& node, string& value, const string& defau value = !node.node ? default_value : CV_NODE_IS_STRING(node.node->tag) ? string(node.node->data.str.ptr) : string(""); } +template static inline void read(const FileNode& node, Point_<_Tp>& value, const Point_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Point_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Point3_<_Tp>& value, const Point3_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 3 ? 
default_value : Point3_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2])); +} + +template static inline void read(const FileNode& node, Size_<_Tp>& value, const Size_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Size_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Complex<_Tp>& value, const Complex<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Complex<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Rect_<_Tp>& value, Rect_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +template static inline void read(const FileNode& node, Vec<_Tp, cn>& value, const Vec<_Tp, cn>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != cn ? default_value : Vec<_Tp, cn>(&temp[0]); +} + +template static inline void read(const FileNode& node, Scalar_<_Tp>& value, const Scalar_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? 
default_value : Scalar_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +static inline void read(const FileNode& node, Range& value, const Range& default_value) +{ + Point2i temp(value.start, value.end); const Point2i default_temp = Point2i(default_value.start, default_value.end); + read(node, temp, default_temp); + value.start = temp.x; value.end = temp.y; +} + CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() ); CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() ); diff --git a/modules/core/test/test_io.cpp b/modules/core/test/test_io.cpp index 3526e83768..58c8817a4d 100644 --- a/modules/core/test/test_io.cpp +++ b/modules/core/test/test_io.cpp @@ -390,7 +390,6 @@ protected: try { string fname = cv::tempfile(".xml"); - FileStorage fs(fname, FileStorage::WRITE); vector mi, mi2, mi3, mi4; vector mv, mv2, mv3, mv4; Mat m(10, 9, CV_32F); @@ -398,24 +397,59 @@ protected: randu(m, 0, 1); mi3.push_back(5); mv3.push_back(m); + Point_ p1(1.1f, 2.2f), op1; + Point3i p2(3, 4, 5), op2; + Size s1(6, 7), os1; + Complex c1(9, 10), oc1; + Rect r1(11, 12, 13, 14), or1; + Vec v1(15, 16, 17, 18, 19), ov1; + Scalar sc1(20.0, 21.1, 22.2, 23.3), osc1; + Range g1(7, 8), og1; + + FileStorage fs(fname, FileStorage::WRITE); fs << "mi" << mi; fs << "mv" << mv; fs << "mi3" << mi3; fs << "mv3" << mv3; fs << "empty" << empty; + fs << "p1" << p1; + fs << "p2" << p2; + fs << "s1" << s1; + fs << "c1" << c1; + fs << "r1" << r1; + fs << "v1" << v1; + fs << "sc1" << sc1; + fs << "g1" << g1; fs.release(); + fs.open(fname, FileStorage::READ); fs["mi"] >> mi2; fs["mv"] >> mv2; fs["mi3"] >> mi4; fs["mv3"] >> mv4; fs["empty"] >> empty; + fs["p1"] >> op1; + fs["p2"] >> op2; + fs["s1"] >> os1; + fs["c1"] >> oc1; + fs["r1"] >> or1; + fs["v1"] >> ov1; + fs["sc1"] >> osc1; + fs["g1"] >> og1; CV_Assert( mi2.empty() ); CV_Assert( mv2.empty() ); 
CV_Assert( norm(mi3, mi4, CV_C) == 0 ); CV_Assert( mv4.size() == 1 ); double n = norm(mv3[0], mv4[0], CV_C); CV_Assert( n == 0 ); + CV_Assert( op1 == p1 ); + CV_Assert( op2 == p2 ); + CV_Assert( os1 == s1 ); + CV_Assert( oc1 == c1 ); + CV_Assert( or1 == r1 ); + CV_Assert( ov1 == v1 ); + CV_Assert( osc1 == sc1 ); + CV_Assert( og1 == g1 ); } catch(...) { From 83e21568ee1740fd69838f797fd2d8472e93c5f8 Mon Sep 17 00:00:00 2001 From: Bahram Dahi Date: Wed, 14 Aug 2013 15:54:13 -0700 Subject: [PATCH 034/139] Minor bug fix, missing const in method signature --- modules/core/include/opencv2/core/operations.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/include/opencv2/core/operations.hpp b/modules/core/include/opencv2/core/operations.hpp index 0c697275bb..7b2c94007a 100644 --- a/modules/core/include/opencv2/core/operations.hpp +++ b/modules/core/include/opencv2/core/operations.hpp @@ -3026,7 +3026,7 @@ template static inline void read(const FileNode& node, Complex<_Tp value = temp.size() != 2 ? default_value : Complex<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); } -template static inline void read(const FileNode& node, Rect_<_Tp>& value, Rect_<_Tp>& default_value) +template static inline void read(const FileNode& node, Rect_<_Tp>& value, const Rect_<_Tp>& default_value) { vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; value = temp.size() != 4 ? 
default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), From ab8c0fde522426ed41815cf36fee8aaaa20e0b25 Mon Sep 17 00:00:00 2001 From: Dmitry Retinskiy Date: Thu, 15 Aug 2013 16:18:01 +0400 Subject: [PATCH 035/139] applied comments after review --- modules/core/doc/basic_structures.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/modules/core/doc/basic_structures.rst b/modules/core/doc/basic_structures.rst index ff8470183c..66ea59b6b8 100644 --- a/modules/core/doc/basic_structures.rst +++ b/modules/core/doc/basic_structures.rst @@ -514,6 +514,8 @@ Scalar\_ -------- .. ocv:class:: Scalar_ +Template class for a 4-element vector derived from Vec. + :: template class CV_EXPORTS Scalar_ : public Vec<_Tp, 4> @@ -543,10 +545,6 @@ Scalar\_ bool isReal() const; }; -Template class for a 4-element vector derived from Vec. :: - - template class Scalar_ : public Vec<_Tp, 4> { ... }; - typedef Scalar_ Scalar; Being derived from ``Vec<_Tp, 4>`` , ``Scalar_`` and ``Scalar`` can be used just as typical 4-element vectors. In addition, they can be converted to/from ``CvScalar`` . The type ``Scalar`` is widely used in OpenCV to pass pixel values. From 9af2c1661a8cfa44b3dabfe502d01f1a93a26636 Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Mon, 12 Aug 2013 06:30:40 -0700 Subject: [PATCH 036/139] Restricted Win32 API was fixed in Highgui and some 3rd party libs. Patch for libpng added. 
--- 3rdparty/libpng/CMakeLists.txt | 4 ++++ 3rdparty/libpng/opencv-libpng.path | 22 +++++++++++++++++++ 3rdparty/libpng/pngpriv.h | 4 ++-- modules/core/include/opencv2/core/version.hpp | 3 +++ modules/highgui/src/cap_ffmpeg.cpp | 12 ++++++++++ modules/highgui/src/cap_ffmpeg_impl.hpp | 10 ++++++++- 6 files changed, 52 insertions(+), 3 deletions(-) create mode 100644 3rdparty/libpng/opencv-libpng.path diff --git a/3rdparty/libpng/CMakeLists.txt b/3rdparty/libpng/CMakeLists.txt index d47dd53fcd..59dca6990e 100644 --- a/3rdparty/libpng/CMakeLists.txt +++ b/3rdparty/libpng/CMakeLists.txt @@ -29,6 +29,10 @@ if(MSVC) add_definitions(-D_CRT_SECURE_NO_DEPRECATE) endif(MSVC) +if (HAVE_WINRT) + add_definitions(-DHAVE_WINRT) +endif() + add_library(${PNG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs}) target_link_libraries(${PNG_LIBRARY} ${ZLIB_LIBRARY}) diff --git a/3rdparty/libpng/opencv-libpng.path b/3rdparty/libpng/opencv-libpng.path new file mode 100644 index 0000000000..6ca96392a0 --- /dev/null +++ b/3rdparty/libpng/opencv-libpng.path @@ -0,0 +1,22 @@ +diff --git a/3rdparty/libpng/pngpriv.h b/3rdparty/libpng/pngpriv.h +index 07b2b0b..e7824b8 100644 +--- a/3rdparty/libpng/pngpriv.h ++++ b/3rdparty/libpng/pngpriv.h +@@ -360,7 +360,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp; + + /* Memory model/platform independent fns */ + #ifndef PNG_ABORT +-# ifdef _WINDOWS_ ++# if defined(_WINDOWS_) && !defined(HAVE_WINRT) + # define PNG_ABORT() ExitProcess(0) + # else + # define PNG_ABORT() abort() +@@ -378,7 +378,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp; + # define png_memcpy _fmemcpy + # define png_memset _fmemset + #else +-# ifdef _WINDOWS_ /* Favor Windows over C runtime fns */ ++# if defined(_WINDOWS_) && !defined(HAVE_WINRT) /* Favor Windows over C runtime fns */ + # define CVT_PTR(ptr) (ptr) + # define CVT_PTR_NOCHECK(ptr) (ptr) + # define png_strlen lstrlenA diff --git a/3rdparty/libpng/pngpriv.h b/3rdparty/libpng/pngpriv.h index 
07b2b0b05b..e7824b839e 100644 --- a/3rdparty/libpng/pngpriv.h +++ b/3rdparty/libpng/pngpriv.h @@ -360,7 +360,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp; /* Memory model/platform independent fns */ #ifndef PNG_ABORT -# ifdef _WINDOWS_ +# if defined(_WINDOWS_) && !defined(HAVE_WINRT) # define PNG_ABORT() ExitProcess(0) # else # define PNG_ABORT() abort() @@ -378,7 +378,7 @@ typedef PNG_CONST png_uint_16p FAR * png_const_uint_16pp; # define png_memcpy _fmemcpy # define png_memset _fmemset #else -# ifdef _WINDOWS_ /* Favor Windows over C runtime fns */ +# if defined(_WINDOWS_) && !defined(HAVE_WINRT) /* Favor Windows over C runtime fns */ # define CVT_PTR(ptr) (ptr) # define CVT_PTR_NOCHECK(ptr) (ptr) # define png_strlen lstrlenA diff --git a/modules/core/include/opencv2/core/version.hpp b/modules/core/include/opencv2/core/version.hpp index ba71a82592..bd95e6359b 100644 --- a/modules/core/include/opencv2/core/version.hpp +++ b/modules/core/include/opencv2/core/version.hpp @@ -55,6 +55,9 @@ #define CVAUX_STR_EXP(__A) #__A #define CVAUX_STR(__A) CVAUX_STR_EXP(__A) +#define CVAUX_STRW_EXP(__A) L#__A +#define CVAUX_STRW(__A) CVAUX_STRW_EXP(__A) + #if CV_VERSION_REVISION # define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." 
CVAUX_STR(CV_VERSION_REVISION) #else diff --git a/modules/highgui/src/cap_ffmpeg.cpp b/modules/highgui/src/cap_ffmpeg.cpp index bf73c0810f..00f0494d89 100644 --- a/modules/highgui/src/cap_ffmpeg.cpp +++ b/modules/highgui/src/cap_ffmpeg.cpp @@ -85,6 +85,16 @@ private: icvInitFFMPEG() { #if defined WIN32 || defined _WIN32 + # ifdef HAVE_WINRT + const wchar_t* module_name = L"opencv_ffmpeg" + CVAUX_STRW(CV_MAJOR_VERSION) CVAUX_STRW(CV_MINOR_VERSION) CVAUX_STRW(CV_SUBMINOR_VERSION) + #if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__) + L"_64" + #endif + L".dll"; + + icvFFOpenCV = LoadPackagedLibrary( module_name, 0 ); + # else const char* module_name = "opencv_ffmpeg" CVAUX_STR(CV_MAJOR_VERSION) CVAUX_STR(CV_MINOR_VERSION) CVAUX_STR(CV_SUBMINOR_VERSION) #if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__) @@ -93,6 +103,8 @@ private: ".dll"; icvFFOpenCV = LoadLibrary( module_name ); + # endif + if( icvFFOpenCV ) { icvCreateFileCapture_FFMPEG_p = diff --git a/modules/highgui/src/cap_ffmpeg_impl.hpp b/modules/highgui/src/cap_ffmpeg_impl.hpp index d494948f96..5da449618d 100644 --- a/modules/highgui/src/cap_ffmpeg_impl.hpp +++ b/modules/highgui/src/cap_ffmpeg_impl.hpp @@ -366,7 +366,15 @@ private: struct ImplMutex::Impl { - void init() { InitializeCriticalSection(&cs); refcount = 1; } + void init() + { +#if (_WIN32_WINNT >= 0x0600) + ::InitializeCriticalSectionEx(&cs, 1000, 0); +#else + ::InitializeCriticalSection(&cs); +#endif + refcount = 1; + } void destroy() { DeleteCriticalSection(&cs); } void lock() { EnterCriticalSection(&cs); } From 5a9bd3f080f3050a771b237ed5235436b301cca1 Mon Sep 17 00:00:00 2001 From: Andrey Pavlenko Date: Thu, 15 Aug 2013 23:49:32 +0400 Subject: [PATCH 037/139] restoring binary compatibility --- .../generator/src/cpp/videocap_compat.cpp | 173 ++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 modules/java/generator/src/cpp/videocap_compat.cpp diff --git 
a/modules/java/generator/src/cpp/videocap_compat.cpp b/modules/java/generator/src/cpp/videocap_compat.cpp new file mode 100644 index 0000000000..4c4e64bf84 --- /dev/null +++ b/modules/java/generator/src/cpp/videocap_compat.cpp @@ -0,0 +1,173 @@ +// emulating the 'old' JNI names existed before the VideoCapture wrapping became automatic + +#define LOG_TAG "org.opencv.highgui.VideoCapture" +#include "common.h" + +#include "opencv2/opencv_modules.hpp" +#ifdef HAVE_OPENCV_HIGHGUI + +#include "opencv2/core/version.hpp" + +#if (CV_VERSION_EPOCH == 2) && (CV_VERSION_MAJOR == 4) +extern "C" { + +JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_n_1VideoCapture__ + (JNIEnv* env, jclass c); + +JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_VideoCapture_10 (JNIEnv*, jclass); + +JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_n_1VideoCapture__ + (JNIEnv* env, jclass c) +{ + return Java_org_opencv_highgui_VideoCapture_VideoCapture_10(env, c); +} + + +JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_n_1VideoCapture__I + (JNIEnv* env, jclass c, jint device); + +JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_VideoCapture_12 (JNIEnv*, jclass, jint); + +JNIEXPORT jlong JNICALL Java_org_opencv_highgui_VideoCapture_n_1VideoCapture__I + (JNIEnv* env, jclass c, jint device) +{ + return Java_org_opencv_highgui_VideoCapture_VideoCapture_12(env, c, device); +} + + +JNIEXPORT jdouble JNICALL Java_org_opencv_highgui_VideoCapture_n_1get + (JNIEnv* env, jclass c, jlong self, jint propId); + +JNIEXPORT jdouble JNICALL Java_org_opencv_highgui_VideoCapture_get_10 (JNIEnv*, jclass, jlong, jint); + +JNIEXPORT jdouble JNICALL Java_org_opencv_highgui_VideoCapture_n_1get + (JNIEnv* env, jclass c, jlong self, jint propId) +{ + return Java_org_opencv_highgui_VideoCapture_get_10(env, c, self, propId); +} + + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1grab + (JNIEnv* env, jclass c, jlong self); + +JNIEXPORT jboolean 
JNICALL Java_org_opencv_highgui_VideoCapture_grab_10 (JNIEnv*, jclass, jlong); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1grab + (JNIEnv* env, jclass c, jlong self) +{ + return Java_org_opencv_highgui_VideoCapture_grab_10(env, c, self); +} + + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1isOpened + (JNIEnv* env, jclass c, jlong self); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_isOpened_10 (JNIEnv*, jclass, jlong); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1isOpened + (JNIEnv* env, jclass c, jlong self) +{ + return Java_org_opencv_highgui_VideoCapture_isOpened_10(env, c, self); +} + + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1open__JI + (JNIEnv* env, jclass c, jlong self, jint device); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_open_11 (JNIEnv*, jclass, jlong, jint); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1open__JI + (JNIEnv* env, jclass c, jlong self, jint device) +{ + return Java_org_opencv_highgui_VideoCapture_open_11(env, c, self, device); +} + + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1read + (JNIEnv* env, jclass c, jlong self, jlong image_nativeObj); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_read_10 (JNIEnv*, jclass, jlong, jlong); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1read + (JNIEnv* env, jclass c, jlong self, jlong image_nativeObj) +{ + return Java_org_opencv_highgui_VideoCapture_read_10(env, c, self, image_nativeObj); +} + + +JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_n_1release + (JNIEnv* env, jclass c, jlong self); + +JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_release_10 (JNIEnv*, jclass, jlong); + +JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_n_1release + (JNIEnv* env, jclass c, jlong self) +{ + 
Java_org_opencv_highgui_VideoCapture_release_10(env, c, self); +} + + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJI + (JNIEnv* env, jclass c, jlong self, jlong image_nativeObj, jint channel); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_retrieve_10 (JNIEnv*, jclass, jlong, jlong, jint); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJI + (JNIEnv* env, jclass c, jlong self, jlong image_nativeObj, jint channel) +{ + return Java_org_opencv_highgui_VideoCapture_retrieve_10(env, c, self, image_nativeObj, channel); +} + + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJ + (JNIEnv* env, jclass c, jlong self, jlong image_nativeObj); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_retrieve_11 (JNIEnv*, jclass, jlong, jlong); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1retrieve__JJ + (JNIEnv* env, jclass c, jlong self, jlong image_nativeObj) +{ + return Java_org_opencv_highgui_VideoCapture_retrieve_11(env, c, self, image_nativeObj); +} + + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1set + (JNIEnv* env, jclass c, jlong self, jint propId, jdouble value); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_set_10 (JNIEnv*, jclass, jlong, jint, jdouble); + +JNIEXPORT jboolean JNICALL Java_org_opencv_highgui_VideoCapture_n_1set + (JNIEnv* env, jclass c, jlong self, jint propId, jdouble value) +{ + return Java_org_opencv_highgui_VideoCapture_set_10(env, c, self, propId, value); +} + + +JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_n_1getSupportedPreviewSizes + (JNIEnv *env, jclass c, jlong self); + +JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_getSupportedPreviewSizes_10 + (JNIEnv *env, jclass, jlong self); + +JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_n_1getSupportedPreviewSizes + (JNIEnv *env, jclass c, jlong self) +{ + 
return Java_org_opencv_highgui_VideoCapture_getSupportedPreviewSizes_10(env, c, self); +} + + +JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_n_1delete + (JNIEnv *env, jclass c, jlong self); + +JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_delete(JNIEnv*, jclass, jlong); + +JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_n_1delete + (JNIEnv *env, jclass c, jlong self) +{ + Java_org_opencv_highgui_VideoCapture_delete(env, c, self); +} + + +} // extern "C" +#endif // (CV_VERSION_EPOCH == 2) && (CV_VERSION_MAJOR == 4) +#endif // HAVE_OPENCV_HIGHGUI From a3dd13450cbb038abe004c21e20e54bce4d7e604 Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Thu, 15 Aug 2013 00:01:40 -0700 Subject: [PATCH 038/139] All optimized function tables wrapped to getters to change its initialization time. --- modules/core/src/arithm.cpp | 219 ++++++++++++++++++++------------- modules/core/src/convert.cpp | 228 +++++++++++++++++++---------------- modules/core/src/matmul.cpp | 51 +++++--- modules/core/src/stat.cpp | 155 ++++++++++++++---------- 4 files changed, 385 insertions(+), 268 deletions(-) diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 5fda1415cf..99390402e0 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -1131,23 +1131,33 @@ static void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst, } } -static BinaryFunc maxTab[] = +static BinaryFunc* getMaxTab() { - (BinaryFunc)GET_OPTIMIZED(max8u), (BinaryFunc)GET_OPTIMIZED(max8s), - (BinaryFunc)GET_OPTIMIZED(max16u), (BinaryFunc)GET_OPTIMIZED(max16s), - (BinaryFunc)GET_OPTIMIZED(max32s), - (BinaryFunc)GET_OPTIMIZED(max32f), (BinaryFunc)max64f, - 0 -}; + static BinaryFunc maxTab[] = + { + (BinaryFunc)GET_OPTIMIZED(max8u), (BinaryFunc)GET_OPTIMIZED(max8s), + (BinaryFunc)GET_OPTIMIZED(max16u), (BinaryFunc)GET_OPTIMIZED(max16s), + (BinaryFunc)GET_OPTIMIZED(max32s), + (BinaryFunc)GET_OPTIMIZED(max32f), (BinaryFunc)max64f, + 0 + }; + 
+ return maxTab; +} -static BinaryFunc minTab[] = +static BinaryFunc* getMinTab() { - (BinaryFunc)GET_OPTIMIZED(min8u), (BinaryFunc)GET_OPTIMIZED(min8s), - (BinaryFunc)GET_OPTIMIZED(min16u), (BinaryFunc)GET_OPTIMIZED(min16s), - (BinaryFunc)GET_OPTIMIZED(min32s), - (BinaryFunc)GET_OPTIMIZED(min32f), (BinaryFunc)min64f, - 0 -}; + static BinaryFunc minTab[] = + { + (BinaryFunc)GET_OPTIMIZED(min8u), (BinaryFunc)GET_OPTIMIZED(min8s), + (BinaryFunc)GET_OPTIMIZED(min16u), (BinaryFunc)GET_OPTIMIZED(min16s), + (BinaryFunc)GET_OPTIMIZED(min32s), + (BinaryFunc)GET_OPTIMIZED(min32f), (BinaryFunc)min64f, + 0 + }; + + return minTab; +} } @@ -1177,36 +1187,36 @@ void cv::bitwise_not(InputArray a, OutputArray c, InputArray mask) void cv::max( InputArray src1, InputArray src2, OutputArray dst ) { - binary_op(src1, src2, dst, noArray(), maxTab, false ); + binary_op(src1, src2, dst, noArray(), getMaxTab(), false ); } void cv::min( InputArray src1, InputArray src2, OutputArray dst ) { - binary_op(src1, src2, dst, noArray(), minTab, false ); + binary_op(src1, src2, dst, noArray(), getMinTab(), false ); } void cv::max(const Mat& src1, const Mat& src2, Mat& dst) { OutputArray _dst(dst); - binary_op(src1, src2, _dst, noArray(), maxTab, false ); + binary_op(src1, src2, _dst, noArray(), getMaxTab(), false ); } void cv::min(const Mat& src1, const Mat& src2, Mat& dst) { OutputArray _dst(dst); - binary_op(src1, src2, _dst, noArray(), minTab, false ); + binary_op(src1, src2, _dst, noArray(), getMinTab(), false ); } void cv::max(const Mat& src1, double src2, Mat& dst) { OutputArray _dst(dst); - binary_op(src1, src2, _dst, noArray(), maxTab, false ); + binary_op(src1, src2, _dst, noArray(), getMaxTab(), false ); } void cv::min(const Mat& src1, double src2, Mat& dst) { OutputArray _dst(dst); - binary_op(src1, src2, _dst, noArray(), minTab, false ); + binary_op(src1, src2, _dst, noArray(), getMinTab(), false ); } 
/****************************************************************************************\ @@ -1493,39 +1503,54 @@ static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst, } } -static BinaryFunc addTab[] = +static BinaryFunc* getAddTab() { - (BinaryFunc)GET_OPTIMIZED(add8u), (BinaryFunc)GET_OPTIMIZED(add8s), - (BinaryFunc)GET_OPTIMIZED(add16u), (BinaryFunc)GET_OPTIMIZED(add16s), - (BinaryFunc)GET_OPTIMIZED(add32s), - (BinaryFunc)GET_OPTIMIZED(add32f), (BinaryFunc)add64f, - 0 -}; + static BinaryFunc addTab[] = + { + (BinaryFunc)GET_OPTIMIZED(add8u), (BinaryFunc)GET_OPTIMIZED(add8s), + (BinaryFunc)GET_OPTIMIZED(add16u), (BinaryFunc)GET_OPTIMIZED(add16s), + (BinaryFunc)GET_OPTIMIZED(add32s), + (BinaryFunc)GET_OPTIMIZED(add32f), (BinaryFunc)add64f, + 0 + }; -static BinaryFunc subTab[] = + return addTab; +} + +static BinaryFunc* getSubTab() { - (BinaryFunc)GET_OPTIMIZED(sub8u), (BinaryFunc)GET_OPTIMIZED(sub8s), - (BinaryFunc)GET_OPTIMIZED(sub16u), (BinaryFunc)GET_OPTIMIZED(sub16s), - (BinaryFunc)GET_OPTIMIZED(sub32s), - (BinaryFunc)GET_OPTIMIZED(sub32f), (BinaryFunc)sub64f, - 0 -}; + static BinaryFunc subTab[] = + { + (BinaryFunc)GET_OPTIMIZED(sub8u), (BinaryFunc)GET_OPTIMIZED(sub8s), + (BinaryFunc)GET_OPTIMIZED(sub16u), (BinaryFunc)GET_OPTIMIZED(sub16s), + (BinaryFunc)GET_OPTIMIZED(sub32s), + (BinaryFunc)GET_OPTIMIZED(sub32f), (BinaryFunc)sub64f, + 0 + }; + + return subTab; +} -static BinaryFunc absdiffTab[] = +static BinaryFunc* getAbsDiffTab() { - (BinaryFunc)GET_OPTIMIZED(absdiff8u), (BinaryFunc)GET_OPTIMIZED(absdiff8s), - (BinaryFunc)GET_OPTIMIZED(absdiff16u), (BinaryFunc)GET_OPTIMIZED(absdiff16s), - (BinaryFunc)GET_OPTIMIZED(absdiff32s), - (BinaryFunc)GET_OPTIMIZED(absdiff32f), (BinaryFunc)absdiff64f, - 0 -}; + static BinaryFunc absDiffTab[] = + { + (BinaryFunc)GET_OPTIMIZED(absdiff8u), (BinaryFunc)GET_OPTIMIZED(absdiff8s), + (BinaryFunc)GET_OPTIMIZED(absdiff16u), (BinaryFunc)GET_OPTIMIZED(absdiff16s), + (BinaryFunc)GET_OPTIMIZED(absdiff32s), + 
(BinaryFunc)GET_OPTIMIZED(absdiff32f), (BinaryFunc)absdiff64f, + 0 + }; + + return absDiffTab; +} } void cv::add( InputArray src1, InputArray src2, OutputArray dst, InputArray mask, int dtype ) { - arithm_op(src1, src2, dst, mask, dtype, addTab ); + arithm_op(src1, src2, dst, mask, dtype, getAddTab() ); } void cv::subtract( InputArray src1, InputArray src2, OutputArray dst, @@ -1560,12 +1585,12 @@ void cv::subtract( InputArray src1, InputArray src2, OutputArray dst, } } #endif - arithm_op(src1, src2, dst, mask, dtype, subTab ); + arithm_op(src1, src2, dst, mask, dtype, getSubTab() ); } void cv::absdiff( InputArray src1, InputArray src2, OutputArray dst ) { - arithm_op(src1, src2, dst, noArray(), -1, absdiffTab); + arithm_op(src1, src2, dst, noArray(), -1, getAbsDiffTab()); } /****************************************************************************************\ @@ -1855,46 +1880,60 @@ static void recip64f( const double* src1, size_t step1, const double* src2, size } -static BinaryFunc mulTab[] = +static BinaryFunc* getMulTab() { - (BinaryFunc)mul8u, (BinaryFunc)mul8s, (BinaryFunc)mul16u, - (BinaryFunc)mul16s, (BinaryFunc)mul32s, (BinaryFunc)mul32f, - (BinaryFunc)mul64f, 0 -}; + static BinaryFunc mulTab[] = + { + (BinaryFunc)mul8u, (BinaryFunc)mul8s, (BinaryFunc)mul16u, + (BinaryFunc)mul16s, (BinaryFunc)mul32s, (BinaryFunc)mul32f, + (BinaryFunc)mul64f, 0 + }; + + return mulTab; +} -static BinaryFunc divTab[] = +static BinaryFunc* getDivTab() { - (BinaryFunc)div8u, (BinaryFunc)div8s, (BinaryFunc)div16u, - (BinaryFunc)div16s, (BinaryFunc)div32s, (BinaryFunc)div32f, - (BinaryFunc)div64f, 0 -}; + static BinaryFunc divTab[] = + { + (BinaryFunc)div8u, (BinaryFunc)div8s, (BinaryFunc)div16u, + (BinaryFunc)div16s, (BinaryFunc)div32s, (BinaryFunc)div32f, + (BinaryFunc)div64f, 0 + }; -static BinaryFunc recipTab[] = + return divTab; +} + +static BinaryFunc* getRecipTab() { - (BinaryFunc)recip8u, (BinaryFunc)recip8s, (BinaryFunc)recip16u, - (BinaryFunc)recip16s, 
(BinaryFunc)recip32s, (BinaryFunc)recip32f, - (BinaryFunc)recip64f, 0 -}; + static BinaryFunc recipTab[] = + { + (BinaryFunc)recip8u, (BinaryFunc)recip8s, (BinaryFunc)recip16u, + (BinaryFunc)recip16s, (BinaryFunc)recip32s, (BinaryFunc)recip32f, + (BinaryFunc)recip64f, 0 + }; + return recipTab; +} } void cv::multiply(InputArray src1, InputArray src2, OutputArray dst, double scale, int dtype) { - arithm_op(src1, src2, dst, noArray(), dtype, mulTab, true, &scale); + arithm_op(src1, src2, dst, noArray(), dtype, getMulTab(), true, &scale); } void cv::divide(InputArray src1, InputArray src2, OutputArray dst, double scale, int dtype) { - arithm_op(src1, src2, dst, noArray(), dtype, divTab, true, &scale); + arithm_op(src1, src2, dst, noArray(), dtype, getDivTab(), true, &scale); } void cv::divide(double scale, InputArray src2, OutputArray dst, int dtype) { - arithm_op(src2, src2, dst, noArray(), dtype, recipTab, true, &scale); + arithm_op(src2, src2, dst, noArray(), dtype, getRecipTab(), true, &scale); } /****************************************************************************************\ @@ -2037,12 +2076,17 @@ static void addWeighted64f( const double* src1, size_t step1, const double* src2 addWeighted_(src1, step1, src2, step2, dst, step, sz, scalars); } -static BinaryFunc addWeightedTab[] = +static BinaryFunc* getAddWeightedTab() { - (BinaryFunc)GET_OPTIMIZED(addWeighted8u), (BinaryFunc)GET_OPTIMIZED(addWeighted8s), (BinaryFunc)GET_OPTIMIZED(addWeighted16u), - (BinaryFunc)GET_OPTIMIZED(addWeighted16s), (BinaryFunc)GET_OPTIMIZED(addWeighted32s), (BinaryFunc)addWeighted32f, - (BinaryFunc)addWeighted64f, 0 -}; + static BinaryFunc addWeightedTab[] = + { + (BinaryFunc)GET_OPTIMIZED(addWeighted8u), (BinaryFunc)GET_OPTIMIZED(addWeighted8s), (BinaryFunc)GET_OPTIMIZED(addWeighted16u), + (BinaryFunc)GET_OPTIMIZED(addWeighted16s), (BinaryFunc)GET_OPTIMIZED(addWeighted32s), (BinaryFunc)addWeighted32f, + (BinaryFunc)addWeighted64f, 0 + }; + + return addWeightedTab; +} } @@ 
-2050,7 +2094,7 @@ void cv::addWeighted( InputArray src1, double alpha, InputArray src2, double beta, double gamma, OutputArray dst, int dtype ) { double scalars[] = {alpha, beta, gamma}; - arithm_op(src1, src2, dst, noArray(), dtype, addWeightedTab, true, scalars); + arithm_op(src1, src2, dst, noArray(), dtype, getAddWeightedTab(), true, scalars); } @@ -2310,15 +2354,19 @@ static void cmp64f(const double* src1, size_t step1, const double* src2, size_t cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop); } -static BinaryFunc cmpTab[] = +static BinaryFunc getCmpFunc(int depth) { - (BinaryFunc)GET_OPTIMIZED(cmp8u), (BinaryFunc)GET_OPTIMIZED(cmp8s), - (BinaryFunc)GET_OPTIMIZED(cmp16u), (BinaryFunc)GET_OPTIMIZED(cmp16s), - (BinaryFunc)GET_OPTIMIZED(cmp32s), - (BinaryFunc)GET_OPTIMIZED(cmp32f), (BinaryFunc)cmp64f, - 0 -}; + static BinaryFunc cmpTab[] = + { + (BinaryFunc)GET_OPTIMIZED(cmp8u), (BinaryFunc)GET_OPTIMIZED(cmp8s), + (BinaryFunc)GET_OPTIMIZED(cmp16u), (BinaryFunc)GET_OPTIMIZED(cmp16s), + (BinaryFunc)GET_OPTIMIZED(cmp32s), + (BinaryFunc)GET_OPTIMIZED(cmp32f), (BinaryFunc)cmp64f, + 0 + }; + return cmpTab[depth]; +} static double getMinVal(int depth) { @@ -2348,7 +2396,7 @@ void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op) _dst.create(src1.size(), CV_8UC(cn)); Mat dst = _dst.getMat(); Size sz = getContinuousSize(src1, src2, dst, src1.channels()); - cmpTab[src1.depth()](src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, &op); + getCmpFunc(src1.depth())(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, &op); return; } @@ -2380,7 +2428,7 @@ void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op) size_t esz = src1.elemSize(); size_t blocksize0 = (size_t)(BLOCK_SIZE + esz-1)/esz; - BinaryFunc func = cmpTab[depth1]; + BinaryFunc func = getCmpFunc(depth1); if( !haveScalar ) { @@ -2557,12 +2605,17 @@ static void inRangeReduce(const uchar* src, uchar* dst, size_t len, int cn) 
typedef void (*InRangeFunc)( const uchar* src1, size_t step1, const uchar* src2, size_t step2, const uchar* src3, size_t step3, uchar* dst, size_t step, Size sz ); -static InRangeFunc inRangeTab[] = +static InRangeFunc getInRangeFunc(int depth) { - (InRangeFunc)GET_OPTIMIZED(inRange8u), (InRangeFunc)GET_OPTIMIZED(inRange8s), (InRangeFunc)GET_OPTIMIZED(inRange16u), - (InRangeFunc)GET_OPTIMIZED(inRange16s), (InRangeFunc)GET_OPTIMIZED(inRange32s), (InRangeFunc)GET_OPTIMIZED(inRange32f), - (InRangeFunc)inRange64f, 0 -}; + static InRangeFunc inRangeTab[] = + { + (InRangeFunc)GET_OPTIMIZED(inRange8u), (InRangeFunc)GET_OPTIMIZED(inRange8s), (InRangeFunc)GET_OPTIMIZED(inRange16u), + (InRangeFunc)GET_OPTIMIZED(inRange16s), (InRangeFunc)GET_OPTIMIZED(inRange32s), (InRangeFunc)GET_OPTIMIZED(inRange32f), + (InRangeFunc)inRange64f, 0 + }; + + return inRangeTab[depth]; +} } @@ -2601,7 +2654,7 @@ void cv::inRange(InputArray _src, InputArray _lowerb, _dst.create(src.dims, src.size, CV_8U); Mat dst = _dst.getMat(); - InRangeFunc func = inRangeTab[depth]; + InRangeFunc func = getInRangeFunc(depth); const Mat* arrays_sc[] = { &src, &dst, 0 }; const Mat* arrays_nosc[] = { &src, &dst, &lb, &ub, 0 }; diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index d313f3362e..288eb1dfab 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -194,17 +194,27 @@ static void merge64s(const int64** src, int64* dst, int len, int cn ) typedef void (*SplitFunc)(const uchar* src, uchar** dst, int len, int cn); typedef void (*MergeFunc)(const uchar** src, uchar* dst, int len, int cn); -static SplitFunc splitTab[] = +static SplitFunc getSplitFunc(int depth) { - (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split16u), (SplitFunc)GET_OPTIMIZED(split16u), - (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split64s), 0 -}; + static SplitFunc splitTab[] = + { + 
(SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split8u), (SplitFunc)GET_OPTIMIZED(split16u), (SplitFunc)GET_OPTIMIZED(split16u), + (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split32s), (SplitFunc)GET_OPTIMIZED(split64s), 0 + }; + + return splitTab[depth]; +} -static MergeFunc mergeTab[] = +static MergeFunc getMergeFunc(int depth) { - (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge16u), (MergeFunc)GET_OPTIMIZED(merge16u), - (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge64s), 0 -}; + static MergeFunc mergeTab[] = + { + (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge16u), (MergeFunc)GET_OPTIMIZED(merge16u), + (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge64s), 0 + }; + + return mergeTab[depth]; +} } @@ -217,7 +227,7 @@ void cv::split(const Mat& src, Mat* mv) return; } - SplitFunc func = splitTab[depth]; + SplitFunc func = getSplitFunc(depth); CV_Assert( func != 0 ); int esz = (int)src.elemSize(), esz1 = (int)src.elemSize1(); @@ -328,7 +338,7 @@ void cv::merge(const Mat* mv, size_t n, OutputArray _dst) NAryMatIterator it(arrays, ptrs, cn+1); int total = (int)it.size, blocksize = cn <= 4 ? 
total : std::min(total, blocksize0); - MergeFunc func = mergeTab[depth]; + MergeFunc func = getMergeFunc(depth); for( i = 0; i < it.nplanes; i++, ++it ) { @@ -429,12 +439,17 @@ static void mixChannels64s( const int64** src, const int* sdelta, typedef void (*MixChannelsFunc)( const uchar** src, const int* sdelta, uchar** dst, const int* ddelta, int len, int npairs ); -static MixChannelsFunc mixchTab[] = +static MixChannelsFunc getMixchFunc(int depth) { - (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u, - (MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s, - (MixChannelsFunc)mixChannels64s, 0 -}; + static MixChannelsFunc mixchTab[] = + { + (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u, + (MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s, + (MixChannelsFunc)mixChannels64s, 0 + }; + + return mixchTab[depth]; +} } @@ -489,7 +504,7 @@ void cv::mixChannels( const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, cons NAryMatIterator it(arrays, ptrs, (int)(nsrcs + ndsts)); int total = (int)it.size, blocksize = std::min(total, (int)((BLOCK_SIZE + esz1-1)/esz1)); - MixChannelsFunc func = mixchTab[depth]; + MixChannelsFunc func = getMixchFunc(depth); for( i = 0; i < it.nplanes; i++, ++it ) { @@ -941,104 +956,109 @@ DEF_CVT_FUNC(32s64f, int, double); DEF_CVT_FUNC(32f64f, float, double); DEF_CPY_FUNC(64s, int64); -static BinaryFunc cvtScaleAbsTab[] = -{ - (BinaryFunc)cvtScaleAbs8u, (BinaryFunc)cvtScaleAbs8s8u, (BinaryFunc)cvtScaleAbs16u8u, - (BinaryFunc)cvtScaleAbs16s8u, (BinaryFunc)cvtScaleAbs32s8u, (BinaryFunc)cvtScaleAbs32f8u, - (BinaryFunc)cvtScaleAbs64f8u, 0 -}; - -static BinaryFunc cvtScaleTab[][8] = +static BinaryFunc getCvtScaleAbsFunc(int depth) { + static BinaryFunc cvtScaleAbsTab[] = { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s8u), 
(BinaryFunc)GET_OPTIMIZED(cvtScale16u8u), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8u), - (BinaryFunc)cvtScale64f8u, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u8s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8s), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8s), - (BinaryFunc)cvtScale64f8s, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u16u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16u), - (BinaryFunc)cvtScale64f16u, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u16s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u16s), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16s), - (BinaryFunc)cvtScale64f16s, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u32s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32s), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f32s), - (BinaryFunc)cvtScale64f32s, 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvtScale8u32f), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32f), - (BinaryFunc)GET_OPTIMIZED(cvtScale16s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32f), - (BinaryFunc)cvtScale64f32f, 0 - }, - { - (BinaryFunc)cvtScale8u64f, (BinaryFunc)cvtScale8s64f, (BinaryFunc)cvtScale16u64f, - (BinaryFunc)cvtScale16s64f, (BinaryFunc)cvtScale32s64f, (BinaryFunc)cvtScale32f64f, - (BinaryFunc)cvtScale64f, 0 - }, - { - 0, 0, 0, 0, 0, 0, 0, 0 - } -}; + (BinaryFunc)cvtScaleAbs8u, (BinaryFunc)cvtScaleAbs8s8u, 
(BinaryFunc)cvtScaleAbs16u8u, + (BinaryFunc)cvtScaleAbs16s8u, (BinaryFunc)cvtScaleAbs32s8u, (BinaryFunc)cvtScaleAbs32f8u, + (BinaryFunc)cvtScaleAbs64f8u, 0 + }; -static BinaryFunc cvtTab[][8] = -{ - { - (BinaryFunc)(cvt8u), (BinaryFunc)GET_OPTIMIZED(cvt8s8u), (BinaryFunc)GET_OPTIMIZED(cvt16u8u), - (BinaryFunc)GET_OPTIMIZED(cvt16s8u), (BinaryFunc)GET_OPTIMIZED(cvt32s8u), (BinaryFunc)GET_OPTIMIZED(cvt32f8u), - (BinaryFunc)GET_OPTIMIZED(cvt64f8u), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u8s), (BinaryFunc)cvt8u, (BinaryFunc)GET_OPTIMIZED(cvt16u8s), - (BinaryFunc)GET_OPTIMIZED(cvt16s8s), (BinaryFunc)GET_OPTIMIZED(cvt32s8s), (BinaryFunc)GET_OPTIMIZED(cvt32f8s), - (BinaryFunc)GET_OPTIMIZED(cvt64f8s), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u16u), (BinaryFunc)GET_OPTIMIZED(cvt8s16u), (BinaryFunc)cvt16u, - (BinaryFunc)GET_OPTIMIZED(cvt16s16u), (BinaryFunc)GET_OPTIMIZED(cvt32s16u), (BinaryFunc)GET_OPTIMIZED(cvt32f16u), - (BinaryFunc)GET_OPTIMIZED(cvt64f16u), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u16s), (BinaryFunc)GET_OPTIMIZED(cvt8s16s), (BinaryFunc)GET_OPTIMIZED(cvt16u16s), - (BinaryFunc)cvt16u, (BinaryFunc)GET_OPTIMIZED(cvt32s16s), (BinaryFunc)GET_OPTIMIZED(cvt32f16s), - (BinaryFunc)GET_OPTIMIZED(cvt64f16s), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u32s), (BinaryFunc)GET_OPTIMIZED(cvt8s32s), (BinaryFunc)GET_OPTIMIZED(cvt16u32s), - (BinaryFunc)GET_OPTIMIZED(cvt16s32s), (BinaryFunc)cvt32s, (BinaryFunc)GET_OPTIMIZED(cvt32f32s), - (BinaryFunc)GET_OPTIMIZED(cvt64f32s), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u32f), (BinaryFunc)GET_OPTIMIZED(cvt8s32f), (BinaryFunc)GET_OPTIMIZED(cvt16u32f), - (BinaryFunc)GET_OPTIMIZED(cvt16s32f), (BinaryFunc)GET_OPTIMIZED(cvt32s32f), (BinaryFunc)cvt32s, - (BinaryFunc)GET_OPTIMIZED(cvt64f32f), 0 - }, - { - (BinaryFunc)GET_OPTIMIZED(cvt8u64f), (BinaryFunc)GET_OPTIMIZED(cvt8s64f), (BinaryFunc)GET_OPTIMIZED(cvt16u64f), - (BinaryFunc)GET_OPTIMIZED(cvt16s64f), (BinaryFunc)GET_OPTIMIZED(cvt32s64f), (BinaryFunc)GET_OPTIMIZED(cvt32f64f), 
- (BinaryFunc)(cvt64s), 0 - }, - { - 0, 0, 0, 0, 0, 0, 0, 0 - } -}; + return cvtScaleAbsTab[depth]; +} BinaryFunc getConvertFunc(int sdepth, int ddepth) { + static BinaryFunc cvtTab[][8] = + { + { + (BinaryFunc)(cvt8u), (BinaryFunc)GET_OPTIMIZED(cvt8s8u), (BinaryFunc)GET_OPTIMIZED(cvt16u8u), + (BinaryFunc)GET_OPTIMIZED(cvt16s8u), (BinaryFunc)GET_OPTIMIZED(cvt32s8u), (BinaryFunc)GET_OPTIMIZED(cvt32f8u), + (BinaryFunc)GET_OPTIMIZED(cvt64f8u), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u8s), (BinaryFunc)cvt8u, (BinaryFunc)GET_OPTIMIZED(cvt16u8s), + (BinaryFunc)GET_OPTIMIZED(cvt16s8s), (BinaryFunc)GET_OPTIMIZED(cvt32s8s), (BinaryFunc)GET_OPTIMIZED(cvt32f8s), + (BinaryFunc)GET_OPTIMIZED(cvt64f8s), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u16u), (BinaryFunc)GET_OPTIMIZED(cvt8s16u), (BinaryFunc)cvt16u, + (BinaryFunc)GET_OPTIMIZED(cvt16s16u), (BinaryFunc)GET_OPTIMIZED(cvt32s16u), (BinaryFunc)GET_OPTIMIZED(cvt32f16u), + (BinaryFunc)GET_OPTIMIZED(cvt64f16u), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u16s), (BinaryFunc)GET_OPTIMIZED(cvt8s16s), (BinaryFunc)GET_OPTIMIZED(cvt16u16s), + (BinaryFunc)cvt16u, (BinaryFunc)GET_OPTIMIZED(cvt32s16s), (BinaryFunc)GET_OPTIMIZED(cvt32f16s), + (BinaryFunc)GET_OPTIMIZED(cvt64f16s), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u32s), (BinaryFunc)GET_OPTIMIZED(cvt8s32s), (BinaryFunc)GET_OPTIMIZED(cvt16u32s), + (BinaryFunc)GET_OPTIMIZED(cvt16s32s), (BinaryFunc)cvt32s, (BinaryFunc)GET_OPTIMIZED(cvt32f32s), + (BinaryFunc)GET_OPTIMIZED(cvt64f32s), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u32f), (BinaryFunc)GET_OPTIMIZED(cvt8s32f), (BinaryFunc)GET_OPTIMIZED(cvt16u32f), + (BinaryFunc)GET_OPTIMIZED(cvt16s32f), (BinaryFunc)GET_OPTIMIZED(cvt32s32f), (BinaryFunc)cvt32s, + (BinaryFunc)GET_OPTIMIZED(cvt64f32f), 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvt8u64f), (BinaryFunc)GET_OPTIMIZED(cvt8s64f), (BinaryFunc)GET_OPTIMIZED(cvt16u64f), + (BinaryFunc)GET_OPTIMIZED(cvt16s64f), (BinaryFunc)GET_OPTIMIZED(cvt32s64f), (BinaryFunc)GET_OPTIMIZED(cvt32f64f), + 
(BinaryFunc)(cvt64s), 0 + }, + { + 0, 0, 0, 0, 0, 0, 0, 0 + } + }; + return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)]; } BinaryFunc getConvertScaleFunc(int sdepth, int ddepth) { + static BinaryFunc cvtScaleTab[][8] = + { + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8u), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8u), + (BinaryFunc)cvtScale64f8u, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u8s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u8s), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s8s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f8s), + (BinaryFunc)cvtScale64f8s, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u16u), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale16u), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16u), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16u), + (BinaryFunc)cvtScale64f16u, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u16s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u16s), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s16s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f16s), + (BinaryFunc)cvtScale64f16s, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u32s), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32s), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32s), (BinaryFunc)GET_OPTIMIZED(cvtScale32f32s), + (BinaryFunc)cvtScale64f32s, 0 + }, + { + (BinaryFunc)GET_OPTIMIZED(cvtScale8u32f), (BinaryFunc)GET_OPTIMIZED(cvtScale8s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale16u32f), + (BinaryFunc)GET_OPTIMIZED(cvtScale16s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32s32f), (BinaryFunc)GET_OPTIMIZED(cvtScale32f), + (BinaryFunc)cvtScale64f32f, 
0 + }, + { + (BinaryFunc)cvtScale8u64f, (BinaryFunc)cvtScale8s64f, (BinaryFunc)cvtScale16u64f, + (BinaryFunc)cvtScale16s64f, (BinaryFunc)cvtScale32s64f, (BinaryFunc)cvtScale32f64f, + (BinaryFunc)cvtScale64f, 0 + }, + { + 0, 0, 0, 0, 0, 0, 0, 0 + } + }; + return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)]; } @@ -1051,7 +1071,7 @@ void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, doubl double scale[] = {alpha, beta}; _dst.create( src.dims, src.size, CV_8UC(cn) ); Mat dst = _dst.getMat(); - BinaryFunc func = cvtScaleAbsTab[src.depth()]; + BinaryFunc func = getCvtScaleAbsFunc(src.depth()); CV_Assert( func != 0 ); if( src.dims <= 2 ) diff --git a/modules/core/src/matmul.cpp b/modules/core/src/matmul.cpp index 05a0c55524..19443cb054 100644 --- a/modules/core/src/matmul.cpp +++ b/modules/core/src/matmul.cpp @@ -1725,19 +1725,29 @@ diagtransform_64f(const double* src, double* dst, const double* m, int len, int typedef void (*TransformFunc)( const uchar* src, uchar* dst, const uchar* m, int, int, int ); -static TransformFunc transformTab[] = +static TransformFunc getTransformFunc(int depth) { - (TransformFunc)transform_8u, (TransformFunc)transform_8s, (TransformFunc)transform_16u, - (TransformFunc)transform_16s, (TransformFunc)transform_32s, (TransformFunc)transform_32f, - (TransformFunc)transform_64f, 0 -}; + static TransformFunc transformTab[] = + { + (TransformFunc)transform_8u, (TransformFunc)transform_8s, (TransformFunc)transform_16u, + (TransformFunc)transform_16s, (TransformFunc)transform_32s, (TransformFunc)transform_32f, + (TransformFunc)transform_64f, 0 + }; + + return transformTab[depth]; +} -static TransformFunc diagTransformTab[] = +static TransformFunc getDiagTransformFunc(int depth) { - (TransformFunc)diagtransform_8u, (TransformFunc)diagtransform_8s, (TransformFunc)diagtransform_16u, - (TransformFunc)diagtransform_16s, (TransformFunc)diagtransform_32s, (TransformFunc)diagtransform_32f, - (TransformFunc)diagtransform_64f, 
0 -}; + static TransformFunc diagTransformTab[] = + { + (TransformFunc)diagtransform_8u, (TransformFunc)diagtransform_8s, (TransformFunc)diagtransform_16u, + (TransformFunc)diagtransform_16s, (TransformFunc)diagtransform_32s, (TransformFunc)diagtransform_32f, + (TransformFunc)diagtransform_64f, 0 + }; + + return diagTransformTab[depth]; +} } @@ -1800,7 +1810,7 @@ void cv::transform( InputArray _src, OutputArray _dst, InputArray _mtx ) } } - TransformFunc func = isDiag ? diagTransformTab[depth] : transformTab[depth]; + TransformFunc func = isDiag ? getDiagTransformFunc(depth): getTransformFunc(depth); CV_Assert( func != 0 ); const Mat* arrays[] = {&src, &dst, 0}; @@ -2766,19 +2776,24 @@ static double dotProd_64f(const double* src1, const double* src2, int len) typedef double (*DotProdFunc)(const uchar* src1, const uchar* src2, int len); -static DotProdFunc dotProdTab[] = +static DotProdFunc getDotProdFunc(int depth) { - (DotProdFunc)GET_OPTIMIZED(dotProd_8u), (DotProdFunc)GET_OPTIMIZED(dotProd_8s), - (DotProdFunc)dotProd_16u, (DotProdFunc)dotProd_16s, - (DotProdFunc)dotProd_32s, (DotProdFunc)GET_OPTIMIZED(dotProd_32f), - (DotProdFunc)dotProd_64f, 0 -}; + static DotProdFunc dotProdTab[] = + { + (DotProdFunc)GET_OPTIMIZED(dotProd_8u), (DotProdFunc)GET_OPTIMIZED(dotProd_8s), + (DotProdFunc)dotProd_16u, (DotProdFunc)dotProd_16s, + (DotProdFunc)dotProd_32s, (DotProdFunc)GET_OPTIMIZED(dotProd_32f), + (DotProdFunc)dotProd_64f, 0 + }; + + return dotProdTab[depth]; +} double Mat::dot(InputArray _mat) const { Mat mat = _mat.getMat(); int cn = channels(); - DotProdFunc func = dotProdTab[depth()]; + DotProdFunc func = getDotProdFunc(depth()); CV_Assert( mat.type() == type() && mat.size == size && func != 0 ); if( isContinuous() && mat.isContinuous() ) diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index d8f28e204f..1509c9218f 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -199,14 +199,19 @@ static int sum64f( const double* src, 
const uchar* mask, double* dst, int len, i typedef int (*SumFunc)(const uchar*, const uchar* mask, uchar*, int, int); -static SumFunc sumTab[] = +static SumFunc getSumFunc(int depth) { - (SumFunc)GET_OPTIMIZED(sum8u), (SumFunc)sum8s, - (SumFunc)sum16u, (SumFunc)sum16s, - (SumFunc)sum32s, - (SumFunc)GET_OPTIMIZED(sum32f), (SumFunc)sum64f, - 0 -}; + static SumFunc sumTab[] = + { + (SumFunc)GET_OPTIMIZED(sum8u), (SumFunc)sum8s, + (SumFunc)sum16u, (SumFunc)sum16s, + (SumFunc)sum32s, + (SumFunc)GET_OPTIMIZED(sum32f), (SumFunc)sum64f, + 0 + }; + + return sumTab[depth]; +} template static int countNonZero_(const T* src, int len ) @@ -271,14 +276,18 @@ static int countNonZero64f( const double* src, int len ) typedef int (*CountNonZeroFunc)(const uchar*, int); -static CountNonZeroFunc countNonZeroTab[] = +static CountNonZeroFunc getCountNonZeroTab(int depth) { - (CountNonZeroFunc)GET_OPTIMIZED(countNonZero8u), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero8u), - (CountNonZeroFunc)GET_OPTIMIZED(countNonZero16u), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero16u), - (CountNonZeroFunc)GET_OPTIMIZED(countNonZero32s), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero32f), - (CountNonZeroFunc)GET_OPTIMIZED(countNonZero64f), 0 -}; + static CountNonZeroFunc countNonZeroTab[] = + { + (CountNonZeroFunc)GET_OPTIMIZED(countNonZero8u), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero8u), + (CountNonZeroFunc)GET_OPTIMIZED(countNonZero16u), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero16u), + (CountNonZeroFunc)GET_OPTIMIZED(countNonZero32s), (CountNonZeroFunc)GET_OPTIMIZED(countNonZero32f), + (CountNonZeroFunc)GET_OPTIMIZED(countNonZero64f), 0 + }; + return countNonZeroTab[depth]; +} template static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn ) @@ -427,11 +436,16 @@ static int sqsum64f( const double* src, const uchar* mask, double* sum, double* typedef int (*SumSqrFunc)(const uchar*, const uchar* mask, uchar*, uchar*, int, int); -static SumSqrFunc sumSqrTab[] = 
+static SumSqrFunc getSumSqrTab(int depth) { - (SumSqrFunc)GET_OPTIMIZED(sqsum8u), (SumSqrFunc)sqsum8s, (SumSqrFunc)sqsum16u, (SumSqrFunc)sqsum16s, - (SumSqrFunc)sqsum32s, (SumSqrFunc)GET_OPTIMIZED(sqsum32f), (SumSqrFunc)sqsum64f, 0 -}; + static SumSqrFunc sumSqrTab[] = + { + (SumSqrFunc)GET_OPTIMIZED(sqsum8u), (SumSqrFunc)sqsum8s, (SumSqrFunc)sqsum16u, (SumSqrFunc)sqsum16s, + (SumSqrFunc)sqsum32s, (SumSqrFunc)GET_OPTIMIZED(sqsum32f), (SumSqrFunc)sqsum64f, 0 + }; + + return sumSqrTab[depth]; +} } @@ -478,7 +492,7 @@ cv::Scalar cv::sum( InputArray _src ) } #endif - SumFunc func = sumTab[depth]; + SumFunc func = getSumFunc(depth); CV_Assert( cn <= 4 && func != 0 ); @@ -530,7 +544,7 @@ cv::Scalar cv::sum( InputArray _src ) int cv::countNonZero( InputArray _src ) { Mat src = _src.getMat(); - CountNonZeroFunc func = countNonZeroTab[src.depth()]; + CountNonZeroFunc func = getCountNonZeroTab(src.depth()); CV_Assert( src.channels() == 1 && func != 0 ); @@ -626,7 +640,7 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask ) } #endif - SumFunc func = sumTab[depth]; + SumFunc func = getSumFunc(depth); CV_Assert( cn <= 4 && func != 0 ); @@ -685,7 +699,7 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input CV_Assert( mask.empty() || mask.type() == CV_8U ); int k, cn = src.channels(), depth = src.depth(); - SumSqrFunc func = sumSqrTab[depth]; + SumSqrFunc func = getSumSqrTab(depth); CV_Assert( func != 0 ); @@ -859,14 +873,19 @@ static void minMaxIdx_64f(const double* src, const uchar* mask, double* minval, typedef void (*MinMaxIdxFunc)(const uchar*, const uchar*, int*, int*, size_t*, size_t*, int, size_t); -static MinMaxIdxFunc minmaxTab[] = +static MinMaxIdxFunc getMinmaxTab(int depth) { - (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_8u), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_8s), - (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_16u), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_16s), - (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_32s), - 
(MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_32f), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_64f), - 0 -}; + static MinMaxIdxFunc minmaxTab[] = + { + (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_8u), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_8s), + (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_16u), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_16s), + (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_32s), + (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_32f), (MinMaxIdxFunc)GET_OPTIMIZED(minMaxIdx_64f), + 0 + }; + + return minmaxTab[depth]; +} static void ofs2idx(const Mat& a, size_t ofs, int* idx) { @@ -899,7 +918,7 @@ void cv::minMaxIdx(InputArray _src, double* minVal, CV_Assert( (cn == 1 && (mask.empty() || mask.type() == CV_8U)) || (cn >= 1 && mask.empty() && !minIdx && !maxIdx) ); - MinMaxIdxFunc func = minmaxTab[depth]; + MinMaxIdxFunc func = getMinmaxTab(depth); CV_Assert( func != 0 ); const Mat* arrays[] = {&src, &mask, 0}; @@ -1362,43 +1381,53 @@ CV_DEF_NORM_ALL(64f, double, double, double, double) typedef int (*NormFunc)(const uchar*, const uchar*, uchar*, int, int); typedef int (*NormDiffFunc)(const uchar*, const uchar*, const uchar*, uchar*, int, int); -static NormFunc normTab[3][8] = +static NormFunc getNormFunc(int normType, int depth) { + static NormFunc normTab[3][8] = { - (NormFunc)GET_OPTIMIZED(normInf_8u), (NormFunc)GET_OPTIMIZED(normInf_8s), (NormFunc)GET_OPTIMIZED(normInf_16u), (NormFunc)GET_OPTIMIZED(normInf_16s), - (NormFunc)GET_OPTIMIZED(normInf_32s), (NormFunc)GET_OPTIMIZED(normInf_32f), (NormFunc)normInf_64f, 0 - }, - { - (NormFunc)GET_OPTIMIZED(normL1_8u), (NormFunc)GET_OPTIMIZED(normL1_8s), (NormFunc)GET_OPTIMIZED(normL1_16u), (NormFunc)GET_OPTIMIZED(normL1_16s), - (NormFunc)GET_OPTIMIZED(normL1_32s), (NormFunc)GET_OPTIMIZED(normL1_32f), (NormFunc)normL1_64f, 0 - }, - { - (NormFunc)GET_OPTIMIZED(normL2_8u), (NormFunc)GET_OPTIMIZED(normL2_8s), (NormFunc)GET_OPTIMIZED(normL2_16u), (NormFunc)GET_OPTIMIZED(normL2_16s), - (NormFunc)GET_OPTIMIZED(normL2_32s), 
(NormFunc)GET_OPTIMIZED(normL2_32f), (NormFunc)normL2_64f, 0 - } -}; + { + (NormFunc)GET_OPTIMIZED(normInf_8u), (NormFunc)GET_OPTIMIZED(normInf_8s), (NormFunc)GET_OPTIMIZED(normInf_16u), (NormFunc)GET_OPTIMIZED(normInf_16s), + (NormFunc)GET_OPTIMIZED(normInf_32s), (NormFunc)GET_OPTIMIZED(normInf_32f), (NormFunc)normInf_64f, 0 + }, + { + (NormFunc)GET_OPTIMIZED(normL1_8u), (NormFunc)GET_OPTIMIZED(normL1_8s), (NormFunc)GET_OPTIMIZED(normL1_16u), (NormFunc)GET_OPTIMIZED(normL1_16s), + (NormFunc)GET_OPTIMIZED(normL1_32s), (NormFunc)GET_OPTIMIZED(normL1_32f), (NormFunc)normL1_64f, 0 + }, + { + (NormFunc)GET_OPTIMIZED(normL2_8u), (NormFunc)GET_OPTIMIZED(normL2_8s), (NormFunc)GET_OPTIMIZED(normL2_16u), (NormFunc)GET_OPTIMIZED(normL2_16s), + (NormFunc)GET_OPTIMIZED(normL2_32s), (NormFunc)GET_OPTIMIZED(normL2_32f), (NormFunc)normL2_64f, 0 + } + }; -static NormDiffFunc normDiffTab[3][8] = + return normTab[normType][depth]; +} + +static NormDiffFunc getNormDiffFunc(int normType, int depth) { + static NormDiffFunc normDiffTab[3][8] = { - (NormDiffFunc)GET_OPTIMIZED(normDiffInf_8u), (NormDiffFunc)normDiffInf_8s, - (NormDiffFunc)normDiffInf_16u, (NormDiffFunc)normDiffInf_16s, - (NormDiffFunc)normDiffInf_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffInf_32f), - (NormDiffFunc)normDiffInf_64f, 0 - }, - { - (NormDiffFunc)GET_OPTIMIZED(normDiffL1_8u), (NormDiffFunc)normDiffL1_8s, - (NormDiffFunc)normDiffL1_16u, (NormDiffFunc)normDiffL1_16s, - (NormDiffFunc)normDiffL1_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffL1_32f), - (NormDiffFunc)normDiffL1_64f, 0 - }, - { - (NormDiffFunc)GET_OPTIMIZED(normDiffL2_8u), (NormDiffFunc)normDiffL2_8s, - (NormDiffFunc)normDiffL2_16u, (NormDiffFunc)normDiffL2_16s, - (NormDiffFunc)normDiffL2_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffL2_32f), - (NormDiffFunc)normDiffL2_64f, 0 - } -}; + { + (NormDiffFunc)GET_OPTIMIZED(normDiffInf_8u), (NormDiffFunc)normDiffInf_8s, + (NormDiffFunc)normDiffInf_16u, (NormDiffFunc)normDiffInf_16s, + (NormDiffFunc)normDiffInf_32s, 
(NormDiffFunc)GET_OPTIMIZED(normDiffInf_32f), + (NormDiffFunc)normDiffInf_64f, 0 + }, + { + (NormDiffFunc)GET_OPTIMIZED(normDiffL1_8u), (NormDiffFunc)normDiffL1_8s, + (NormDiffFunc)normDiffL1_16u, (NormDiffFunc)normDiffL1_16s, + (NormDiffFunc)normDiffL1_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffL1_32f), + (NormDiffFunc)normDiffL1_64f, 0 + }, + { + (NormDiffFunc)GET_OPTIMIZED(normDiffL2_8u), (NormDiffFunc)normDiffL2_8s, + (NormDiffFunc)normDiffL2_16u, (NormDiffFunc)normDiffL2_16s, + (NormDiffFunc)normDiffL2_32s, (NormDiffFunc)GET_OPTIMIZED(normDiffL2_32f), + (NormDiffFunc)normDiffL2_64f, 0 + } + }; + + return normDiffTab[normType][depth]; +} } @@ -1482,7 +1511,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask ) return result; } - NormFunc func = normTab[normType >> 1][depth]; + NormFunc func = getNormFunc(normType >> 1, depth); CV_Assert( func != 0 ); const Mat* arrays[] = {&src, &mask, 0}; @@ -1623,7 +1652,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m return result; } - NormDiffFunc func = normDiffTab[normType >> 1][depth]; + NormDiffFunc func = getNormDiffFunc(normType >> 1, depth); CV_Assert( func != 0 ); const Mat* arrays[] = {&src1, &src2, &mask, 0}; From 8fb6b689c69992c8b83d9bd9e4d9a976fe4a8589 Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Fri, 16 Aug 2013 16:30:40 +0800 Subject: [PATCH 039/139] 1. Added OpenCL version of superres to the sample. 2. Modified according to the feedback of the community. 
--- samples/gpu/CMakeLists.txt | 9 +- samples/gpu/super_resolution.cpp | 140 ++++++++++++++++++++++++++++--- 2 files changed, 136 insertions(+), 13 deletions(-) diff --git a/samples/gpu/CMakeLists.txt b/samples/gpu/CMakeLists.txt index 85bee50585..80889fbf4a 100644 --- a/samples/gpu/CMakeLists.txt +++ b/samples/gpu/CMakeLists.txt @@ -2,7 +2,6 @@ SET(OPENCV_GPU_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc ope opencv_ml opencv_video opencv_objdetect opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_gpu opencv_superres) - ocv_check_dependencies(${OPENCV_GPU_SAMPLES_REQUIRED_DEPS}) if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) @@ -25,6 +24,10 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) ocv_include_directories(${CUDA_INCLUDE_DIRS}) endif() + if(HAVE_OPENCL) + ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/ocl/include") + endif() + if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function") endif() @@ -41,6 +44,10 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) target_link_libraries(${the_target} opencv_nonfree) endif() + if(HAVE_OPENCL) + target_link_libraries(${the_target} opencv_ocl) + endif() + set_target_properties(${the_target} PROPERTIES OUTPUT_NAME "${project}-example-${name}" PROJECT_LABEL "(EXAMPLE_${project_upper}) ${name}") diff --git a/samples/gpu/super_resolution.cpp b/samples/gpu/super_resolution.cpp index 80aa480290..d62b42a238 100644 --- a/samples/gpu/super_resolution.cpp +++ b/samples/gpu/super_resolution.cpp @@ -7,11 +7,16 @@ #include "opencv2/contrib/contrib.hpp" #include "opencv2/superres/superres.hpp" #include "opencv2/superres/optical_flow.hpp" +#include "opencv2/opencv_modules.hpp" + +#if defined(HAVE_OPENCV_OCL) +#include "opencv2/ocl/ocl.hpp" +#endif using namespace std; using namespace cv; using namespace cv::superres; - +bool useOclChanged; #define MEASURE_TIME(op) \ { \ TickMeter tm; \ @@ -49,9 +54,38 @@ static Ptr createOptFlow(const 
string& name, bool useGpu) exit(-1); } } - +#if defined(HAVE_OPENCV_OCL) +static Ptr createOptFlow(const string& name) +{ + if (name == "farneback") + { + return createOptFlow_Farneback_OCL(); + } + else if (name == "simple") + { + useOclChanged = true; + std::cout<<"simple on OpenCL has not been implemented. Use CPU instead!\n"; + return createOptFlow_Simple(); + } + else if (name == "tvl1") + return createOptFlow_DualTVL1_OCL(); + else if (name == "brox") + { + std::cout<<"brox has not been implemented!\n"; + return NULL; + } + else if (name == "pyrlk") + return createOptFlow_PyrLK_OCL(); + else + { + cerr << "Incorrect Optical Flow algorithm - " << name << endl; + } + return 0; +} +#endif int main(int argc, const char* argv[]) { + useOclChanged = false; CommandLineParser cmd(argc, argv, "{ v | video | | Input video }" "{ o | output | | Output video }" @@ -59,7 +93,7 @@ int main(int argc, const char* argv[]) "{ i | iterations | 180 | Iteration count }" "{ t | temporal | 4 | Radius of the temporal search area }" "{ f | flow | farneback | Optical flow algorithm (farneback, simple, tvl1, brox, pyrlk) }" - "{ gpu | gpu | false | Use GPU }" + "{ g | gpu | | CPU as default device, cuda for CUDA and ocl for OpenCL }" "{ h | help | false | Print help message }" ); @@ -76,21 +110,79 @@ int main(int argc, const char* argv[]) const int iterations = cmd.get("iterations"); const int temporalAreaRadius = cmd.get("temporal"); const string optFlow = cmd.get("flow"); - const bool useGpu = cmd.get("gpu"); + string gpuOption = cmd.get("gpu"); + + std::transform(gpuOption.begin(), gpuOption.end(), gpuOption.begin(), ::tolower); + bool useCuda = false; + bool useOcl = false; + + if(gpuOption.compare("ocl") == 0) + useOcl = true; + else if(gpuOption.compare("cuda") == 0) + useCuda = true; + +#ifndef HAVE_OPENCV_OCL + if(useOcl) + { + { + cout<<"OPENCL is not compiled\n"; + return 0; + } + } +#endif +#if defined(HAVE_OPENCV_OCL) + std::vectorinfo; + if(useCuda) + { + 
CV_Assert(!useOcl); + info.clear(); + } + + if(useOcl) + { + CV_Assert(!useCuda); + cv::ocl::getDevice(info); + } +#endif Ptr superRes; - if (useGpu) - superRes = createSuperResolution_BTVL1_GPU(); + + +#if defined(HAVE_OPENCV_OCL) + if(useOcl) + { + Ptr of = createOptFlow(optFlow); + if (of.empty()) + exit(-1); + if(useOclChanged) + { + superRes = createSuperResolution_BTVL1(); + useOcl = !useOcl; + }else + superRes = createSuperResolution_BTVL1_OCL(); + superRes->set("opticalFlow", of); + } else - superRes = createSuperResolution_BTVL1(); +#endif + { + if (useCuda) + superRes = createSuperResolution_BTVL1_GPU(); + else + superRes = createSuperResolution_BTVL1(); + + Ptr of = createOptFlow(optFlow, useCuda); + + if (of.empty()) + exit(-1); + superRes->set("opticalFlow", of); + } superRes->set("scale", scale); superRes->set("iterations", iterations); superRes->set("temporalAreaRadius", temporalAreaRadius); - superRes->set("opticalFlow", createOptFlow(optFlow, useGpu)); Ptr frameSource; - if (useGpu) + if (useCuda) { // Try to use gpu Video Decoding try @@ -116,7 +208,11 @@ int main(int argc, const char* argv[]) cout << "Iterations : " << iterations << endl; cout << "Temporal radius : " << temporalAreaRadius << endl; cout << "Optical Flow : " << optFlow << endl; - cout << "Mode : " << (useGpu ? "GPU" : "CPU") << endl; +#if defined(HAVE_OPENCV_OCL) + cout << "Mode : " << (useCuda ? "CUDA" : useOcl? "OpenCL" : "CPU") << endl; +#else + cout << "Mode : " << (useGpu ? 
"CUDA" : "CPU") << endl; +#endif } superRes->setInput(frameSource); @@ -126,10 +222,30 @@ int main(int argc, const char* argv[]) for (int i = 0;; ++i) { cout << '[' << setw(3) << i << "] : "; - Mat result; - MEASURE_TIME(superRes->nextFrame(result)); +#if defined(HAVE_OPENCV_OCL) + cv::ocl::oclMat result_; + + if(useOcl) + { + MEASURE_TIME(superRes->nextFrame(result_)); + } + else +#endif + { + MEASURE_TIME(superRes->nextFrame(result)); + } + +#ifdef HAVE_OPENCV_OCL + if(useOcl) + { + if(!result_.empty()) + { + result_.download(result); + } + } +#endif if (result.empty()) break; From 2304a561ca87d0ab3e1869cdc7dd1ffed4d462e8 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Fri, 16 Aug 2013 13:53:37 +0400 Subject: [PATCH 040/139] Fixed a -Wenum-compare warning in cudev. --- modules/cudev/include/opencv2/cudev/util/type_traits.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/cudev/include/opencv2/cudev/util/type_traits.hpp b/modules/cudev/include/opencv2/cudev/util/type_traits.hpp index 992e50bb6c..ca800c0b7a 100644 --- a/modules/cudev/include/opencv2/cudev/util/type_traits.hpp +++ b/modules/cudev/include/opencv2/cudev/util/type_traits.hpp @@ -152,7 +152,7 @@ template struct TypeTraits template struct LargerType { typedef typename SelectIf< - VecTraits::cn != VecTraits::cn, + unsigned(VecTraits::cn) != unsigned(VecTraits::cn), void, typename MakeVec< typename type_traits_detail::LargerDepth< From 7d768d2159525d93eb28889154ae4124ac8369f7 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Fri, 16 Aug 2013 14:43:18 +0400 Subject: [PATCH 041/139] In calcOpticalFlowSF, fixed several uninitialized uses of matrices. This should fix that pesky test failure that pops up from time to time. I don't actually know if the default values should be zeros, but the tests pass, so... 
--- modules/video/src/simpleflow.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/video/src/simpleflow.cpp b/modules/video/src/simpleflow.cpp index 1de8084c58..7a32245555 100644 --- a/modules/video/src/simpleflow.cpp +++ b/modules/video/src/simpleflow.cpp @@ -287,7 +287,7 @@ static Mat upscaleOpticalFlow(int new_rows, static Mat calcIrregularityMat(const Mat& flow, int radius) { const int rows = flow.rows; const int cols = flow.cols; - Mat irregularity(rows, cols, CV_32F); + Mat irregularity = Mat::zeros(rows, cols, CV_32F); for (int r = 0; r < rows; ++r) { const int start_row = max(0, r - radius); const int end_row = min(rows - 1, r + radius); @@ -409,7 +409,7 @@ static void extrapolateFlow(Mat& flow, const Mat& speed_up) { const int rows = flow.rows; const int cols = flow.cols; - Mat done(rows, cols, CV_8U); + Mat done = Mat::zeros(rows, cols, CV_8U); for (int r = 0; r < rows; ++r) { for (int c = 0; c < cols; ++c) { if (!done.at(r, c) && speed_up.at(r, c) > 1) { @@ -504,8 +504,8 @@ CV_EXPORTS_W void calcOpticalFlowSF(Mat& from, Mat mask = Mat::ones(curr_from.size(), CV_8U); Mat mask_inv = Mat::ones(curr_from.size(), CV_8U); - Mat flow(curr_from.size(), CV_32FC2); - Mat flow_inv(curr_to.size(), CV_32FC2); + Mat flow = Mat::zeros(curr_from.size(), CV_32FC2); + Mat flow_inv = Mat::zeros(curr_to.size(), CV_32FC2); Mat confidence; Mat confidence_inv; From ddbeac503cc4bc8cc8a6664b65c16f5bb8be49a6 Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Thu, 15 Aug 2013 06:01:38 -0700 Subject: [PATCH 042/139] WindowsRT availability check reorganized. Build script updated. 
--- cmake/OpenCVCRTLinkage.cmake | 56 ++++++++++++++++--------------- platforms/scripts/cmake_winrt.cmd | 7 ++-- 2 files changed, 34 insertions(+), 29 deletions(-) diff --git a/cmake/OpenCVCRTLinkage.cmake b/cmake/OpenCVCRTLinkage.cmake index 295b914b68..62d89af4a9 100644 --- a/cmake/OpenCVCRTLinkage.cmake +++ b/cmake/OpenCVCRTLinkage.cmake @@ -4,37 +4,39 @@ endif() #INCLUDE (CheckIncludeFiles) -if (ENABLE_WINRT_MODE) - set(HAVE_WINRT True) +set(HAVE_WINRT FALSE) - # search Windows Platform SDK - message(STATUS "Checking for Windows Platfrom SDK") - GET_FILENAME_COMPONENT(WINDOWS_SDK_PATH "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v8.0;InstallationFolder]" ABSOLUTE CACHE) - if (WINDOWS_SDK_PATH STREQUAL "") - message(ERROR "Windows Platform SDK 8.0 was not found!") - set(HAVE_WINRT False) - endif() +# search Windows Platform SDK +message(STATUS "Checking for Windows Platform SDK") +GET_FILENAME_COMPONENT(WINDOWS_SDK_PATH "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v8.0;InstallationFolder]" ABSOLUTE CACHE) +if (WINDOWS_SDK_PATH STREQUAL "") + set(HAVE_MSPDK FALSE) + message(STATUS "Windows Platform SDK 8.0 was not found") +else() + set(HAVE_MSPDK TRUE) +endif() - #search for Visual Studio 11.0 install directory - message(STATUS "Checking for Visual Studio 2012") - GET_FILENAME_COMPONENT(VISUAL_STUDIO_PATH [HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\11.0\\Setup\\VS;ProductDir] REALPATH CACHE) - if (VISUAL_STUDIO_PATH STREQUAL "") - message(ERROR "Visual Studio 2012 was not found!") - set(HAVE_WINRT False) - endif() +#search for Visual Studio 11.0 install directory +message(STATUS "Checking for Visual Studio 2012") +GET_FILENAME_COMPONENT(VISUAL_STUDIO_PATH [HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\11.0\\Setup\\VS;ProductDir] REALPATH CACHE) +if (VISUAL_STUDIO_PATH STREQUAL "") + set(HAVE_MSVC2012 FALSE) + message(STATUS "Visual Studio 2012 was not found") +else() + set(HAVE_MSVC2012 TRUE) 
+endif() - if (HAVE_WINRT) - TRY_COMPILE(HAVE_WINRT - "${OPENCV_BINARY_DIR}/CMakeFiles/CMakeTmp" - "${OpenCV_SOURCE_DIR}/cmake/checks/winrttest.cpp" - CMAKE_FLAGS "\"kernel.lib\" \"user32.lib\"" - OUTPUT_VARIABLE OUTPUT) - endif() +TRY_COMPILE(HAVE_WINRT_SDK + "${OPENCV_BINARY_DIR}/CMakeFiles/CMakeTmp" + "${OpenCV_SOURCE_DIR}/cmake/checks/winrttest.cpp") - if (HAVE_WINRT) - add_definitions(/DWINVER=0x0602 /DNTDDI_VERSION=NTDDI_WIN8 /D_WIN32_WINNT=0x0602) - endif() -endif(ENABLE_WINRT_MODE) +if (ENABLE_WINRT_MODE AND HAVE_WINRT_SDK AND HAVE_MSVC2012 AND HAVE_MSPDK) + set(HAVE_WINRT TRUE) +endif() + +if (HAVE_WINRT) + add_definitions(/DWINVER=0x0602 /DNTDDI_VERSION=NTDDI_WIN8 /D_WIN32_WINNT=0x0602) +endif() if(NOT BUILD_SHARED_LIBS AND BUILD_WITH_STATIC_CRT) foreach(flag_var diff --git a/platforms/scripts/cmake_winrt.cmd b/platforms/scripts/cmake_winrt.cmd index df70e856c5..3cdff97b75 100644 --- a/platforms/scripts/cmake_winrt.cmd +++ b/platforms/scripts/cmake_winrt.cmd @@ -1,6 +1,9 @@ mkdir build_winrt_arm cd build_winrt_arm -rem call "C:\Program Files\Microsoft Visual Studio 11.0\VC\bin\x86_arm\vcvarsx86_arm.bat" +set msvc_path=C:\Program Files\Microsoft Visual Studio 11.0 -cmake.exe -GNinja -DWITH_TBB=ON -DBUILD_TBB=ON -DCMAKE_BUILD_TYPE=Release -DWITH_FFMPEG=OFF -DBUILD_opencv_gpu=OFF -DBUILD_opencv_python=OFF -DCMAKE_TOOLCHAIN_FILE=..\winrt\arm.winrt.toolchain.cmake ..\.. +call "%msvc_path%\Common7\Tools\VsDevCmd.bat" +call "%msvc_path%\VC\bin\x86_arm\vcvarsx86_arm.bat" + +cmake.exe -GNinja -DCMAKE_BUILD_TYPE=Release -DENABLE_WINRT_MODE=ON -DWITH_FFMPEG=OFF -DWITH_MSMF=OFF -DWITH_DSHOW=OFF -DWITH_VFW=OFF -DWITH_TIFF=OFF -DWITH_OPENEXR=OFF -DWITH_CUDA=OFF -DBUILD_opencv_gpu=OFF -DBUILD_opencv_python=OFF -DBUILD_opencv_java=OFF -DCMAKE_TOOLCHAIN_FILE=..\winrt\arm.winrt.toolchain.cmake %* ..\.. 
\ No newline at end of file From 230bc03d97789683c70540b647a180fad3dfe4fa Mon Sep 17 00:00:00 2001 From: Bahram Dahi Date: Fri, 16 Aug 2013 08:14:32 -0700 Subject: [PATCH 043/139] Moved Scalar variable 'value' to int main() to work around a GCC -Wshadow warning in operations.hpp --- samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp b/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp index 4b2783e31c..a4b8bad4dd 100644 --- a/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp @@ -15,7 +15,6 @@ using namespace cv; Mat src, dst; int top, bottom, left, right; int borderType; -Scalar value; const char* window_name = "copyMakeBorder Demo"; RNG rng(12345); @@ -64,7 +63,7 @@ int main( int, char** argv ) else if( (char)c == 'r' ) { borderType = BORDER_REPLICATE; } - value = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) ); + Scalar value( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) ); copyMakeBorder( src, dst, top, bottom, left, right, borderType, value ); imshow( window_name, dst ); From c6858c3fb0939e088237fbe8a0adc6c5dede0f43 Mon Sep 17 00:00:00 2001 From: Nghia Ho Date: Sat, 17 Aug 2013 20:32:13 +1000 Subject: [PATCH 044/139] Buffer is accessed out of bounds. 
--- modules/ocl/src/haar.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ocl/src/haar.cpp b/modules/ocl/src/haar.cpp index 6283ac8d9f..934957e593 100644 --- a/modules/ocl/src/haar.cpp +++ b/modules/ocl/src/haar.cpp @@ -262,7 +262,7 @@ static GpuHidHaarClassifierCascade * gpuCreateHidHaarClassifierCascade( CvHaarCl int datasize; int total_classifiers = 0; int total_nodes = 0; - char errorstr[100]; + char errorstr[256]; GpuHidHaarStageClassifier *stage_classifier_ptr; GpuHidHaarClassifier *haar_classifier_ptr; From 3c9f307193d39c5f76e6b28c972201ae77d8502e Mon Sep 17 00:00:00 2001 From: kdrobnyh Date: Tue, 13 Aug 2013 14:49:32 +0400 Subject: [PATCH 045/139] macros -> template in IPP support cvtColor --- modules/imgproc/src/color.cpp | 922 ++++++++++++++++++---------------- 1 file changed, 490 insertions(+), 432 deletions(-) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index f2c32f5de6..0c20bb046f 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -94,9 +94,9 @@ #include #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) -#define MAX_IPP8u 255 -#define MAX_IPP16u 65535 -#define MAX_IPP32f 1.0 +#define MAX_IPP8u 255 +#define MAX_IPP16u 65535 +#define MAX_IPP32f 1.0 static IppStatus sts = ippInit(); #endif @@ -198,283 +198,298 @@ void CvtColorLoop(const Mat& src, Mat& dst, const Cvt& cvt) } #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) -#define CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(func, src, dst ) \ - if( depth == CV_8U ) { \ - CV_DEF_IPP_PARALLEL_FOR(func##_8u, src, dst); } \ - else if( depth == CV_16U ) { \ - CV_DEF_IPP_PARALLEL_FOR(func##_16u, src, dst); } \ - else { CV_DEF_IPP_PARALLEL_FOR(func##_32f, src, dst); } - -#define CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(func, src, dst ) \ - if( depth == CV_8U ) { \ - CV_DEF_IPP_PARALLEL_FOR(func##_8u, src, dst); } \ - else if( depth == CV_16U ){ \ - CV_DEF_IPP_PARALLEL_FOR(func##_16u, src, dst); } - -#define 
CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(func, src, dst ) \ - Mat temp; Mat &source = src; \ - if( src.data == dst.data ) { \ - src.copyTo(temp); source = temp; } \ - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(func, source, dst) - -#define CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(func, src, dst ) \ - Mat temp; Mat &source = src; \ - if( src.data == dst.data ) { \ - src.copyTo(temp); source = temp; } \ - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(func, source, dst) - -#define CV_DEF_IPP_PARALLEL_FOR(Functor, src, dst) \ - bool ok; \ - Functor invoker(src, dst, &ok); \ - parallel_for_(Range(0, src.rows), invoker, src.total()/(double)(1<<16) ); \ - if( ok ) { return; } - -#define CV_DEF_IPP_COLORLOOP_BODY(mode, type, funcbody) \ -class IPPColorLoop_Invoker_##mode##_##type## : public ParallelLoopBody { \ -public: IPPColorLoop_Invoker_##mode##_##type##(const Mat& _src, Mat& _dst, bool *_ok) : \ - ParallelLoopBody(), src(_src), dst(_dst), ok(_ok) \ -{ *ok = true; } \ - virtual void operator()(const Range& range) const \ -{ funcbody(type) } \ -private: const Mat& src; \ - Mat& dst; bool *ok; \ -const IPPColorLoop_Invoker_##mode##_##type##& operator= (const IPPColorLoop_Invoker_##mode##_##type##&); }; - -#define CV_DEF_IPP_FUNCPROC(func) \ - if( func < 0 ) { *ok = false; return; } - -#define CV_DEF_IPP_SWAP_CHANNELS_C3C4(order1, order2, order3, order4, type) \ - int order[4] = { order1, order2, order3, order4 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) - -#define CV_DEF_IPP_SWAP_CHANNELS_C3C4_0123(type) \ - CV_DEF_IPP_SWAP_CHANNELS_C3C4(0, 1, 2, 3, type) - -#define CV_DEF_IPP_SWAP_CHANNELS_C3C4_2103(type) \ - CV_DEF_IPP_SWAP_CHANNELS_C3C4(2, 1, 0, 3, type) - -#define CV_DEF_IPP_COPY_AC4C3(type) \ - CV_DEF_IPP_FUNCPROC(ippiCopy_##type##_AC4C3R( (const Ipp##type *)src.ptr(range.start), 
(int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ))) - -#define CV_DEF_IPP_SWAP_CHANNELS_C4C3(order1, order2, order3, type) \ - int order[3] = { order1, order2, order3 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) - -#define CV_DEF_IPP_SWAP_CHANNELS_C4C3_210(type) \ - CV_DEF_IPP_SWAP_CHANNELS_C4C3(2, 1, 0, type) - -#define CV_DEF_IPP_SWAP_CHANNELS_C3(order1, order2, order3, type) \ - int order[3] = { order1, order2, order3 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) - -#define CV_DEF_IPP_SWAP_CHANNELS_C3_210(type) \ - CV_DEF_IPP_SWAP_CHANNELS_C3(2, 1, 0, type) - -#define CV_DEF_IPP_SWAP_CHANNELS_C4(order1, order2, order3, type) \ - int order[3] = { order1, order2, order3 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_AC4R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) - -#define CV_DEF_IPP_SWAP_CHANNELS_C4_2103(type) \ - CV_DEF_IPP_SWAP_CHANNELS_C4(2, 1, 0, type) - -#define CV_DEF_IPP_BGR2GRAY(type) \ - Ipp32f coeff[3] = { 0.114f, 0.587f, 0.299f }; \ - CV_DEF_IPP_FUNCPROC(ippiColorToGray_##type##_C3C1R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), coeff )) - -#define CV_DEF_IPP_RGB2GRAY(type) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToGray_##type##_C3C1R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ))) - 
-#define CV_DEF_IPP_BGRA2GRAY(type) \ - Ipp32f coeff[3] = { 0.114f, 0.587f, 0.299f }; \ - CV_DEF_IPP_FUNCPROC(ippiColorToGray_##type##_AC4C1R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), coeff )) - -#define CV_DEF_IPP_RGBA2GRAY(type) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToGray_##type##_AC4C1R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ))) - -#define CV_DEF_IPP_Copy_P3C3(type) \ - Ipp##type *pointer = (Ipp##type *)src.ptr(range.start); \ - Ipp##type* srcarray[3] = { pointer, pointer, pointer }; \ - CV_DEF_IPP_FUNCPROC(ippiCopy_##type##_P3C3R( srcarray, (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ))) - -#define CV_DEF_IPP_GRAY2BGRA(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - Ipp##type *pointer = (Ipp##type *)src.ptr(range.start); \ - Ipp##type* srcarray[3] = { pointer, pointer, pointer }; \ - CV_DEF_IPP_FUNCPROC(ippiCopy_##type##_P3C3R( srcarray, (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ))) \ - int order[4] = { 0, 1, 2, 3 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) - -#define CV_DEF_IPP_BGR2XYZ(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - int order[3] = { 2, 1, 0 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ - 
CV_DEF_IPP_FUNCPROC(ippiRGBToXYZ_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_BGRA2XYZ(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - int order[3] = { 2, 1, 0 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToXYZ_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_RGB2XYZ(type) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToXYZ_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_RGBA2XYZ(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - int order[3] = { 0, 1, 2 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToXYZ_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_XYZ2BGR(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - CV_DEF_IPP_FUNCPROC(ippiXYZToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ - int order[3] = { 2, 1, 0 }; \ - 
CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) - -#define CV_DEF_IPP_XYZ2BGRA_FAMILY(order1, order2, order3, type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - CV_DEF_IPP_FUNCPROC(ippiXYZToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ - int order[4] = { order1, order2, order3, 3 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) - -#define CV_DEF_IPP_XYZ2BGRA(type) \ - CV_DEF_IPP_XYZ2BGRA_FAMILY(2, 1, 0, type) - -#define CV_DEF_IPP_XYZ2RGB(type) \ - CV_DEF_IPP_FUNCPROC(ippiXYZToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_XYZ2RGBA(type) \ - CV_DEF_IPP_XYZ2BGRA_FAMILY(0, 1, 2, type) - -#define CV_DEF_IPP_BGR2HSV_FULL(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - int order[3] = { 2, 1, 0 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToHSV_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_BGRA2HSV_FULL(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - int order[3] = { 2, 
1, 0 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToHSV_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_RGB2HSV_FULL(type) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToHSV_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_RGBA2HSV_FULL(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - int order[3] = { 0, 1, 2 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToHSV_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_BGR2HLS_FULL(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - int order[3] = { 2, 1, 0 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToHLS_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_BGRA2HLS_FULL(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - int order[3] = { 2, 1, 
0 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToHLS_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_RGB2HLS_FULL(type) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToHLS_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_RGBA2HLS_FULL(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - int order[3] = { 0, 1, 2 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C4C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ), order )) \ - CV_DEF_IPP_FUNCPROC(ippiRGBToHLS_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_HSV2BGR_FULL(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - CV_DEF_IPP_FUNCPROC(ippiHSVToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ - int order[3] = { 2, 1, 0 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) - -#define CV_DEF_IPP_HSV2BGRA_FULL_FAMILY(order1, order2, order3, type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); 
\ - CV_DEF_IPP_FUNCPROC(ippiHSVToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ - int order[4] = { order1, order2, order3, 3 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) - -#define CV_DEF_IPP_HSV2BGRA_FULL(type) \ - CV_DEF_IPP_HSV2BGRA_FULL_FAMILY(2, 1, 0, type) \ - -#define CV_DEF_IPP_HSV2RGB_FULL(type) \ - CV_DEF_IPP_FUNCPROC(ippiHSVToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_HSV2RGBA_FULL(type) \ - CV_DEF_IPP_HSV2BGRA_FULL_FAMILY(0, 1, 2, type) \ - -#define CV_DEF_IPP_HLS2BGR_FULL(type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - CV_DEF_IPP_FUNCPROC(ippiHLSToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ - int order[3] = { 2, 1, 0 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3R( (const Ipp##type *)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order )) - -#define CV_DEF_IPP_HLS2BGRA_FULL_FAMILY(order1, order2, order3, type) \ - Mat temp; temp.create(range.end - range.start, dst.cols, CV_MAKETYPE(src.depth(), 3)); \ - CV_DEF_IPP_FUNCPROC(ippiHLSToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)temp.data, (int)temp.step[0], ippiSize( src.cols, range.end - range.start ) )) \ - int order[4] = { order1, order2, order3, 3 }; \ - CV_DEF_IPP_FUNCPROC(ippiSwapChannels_##type##_C3C4R( (const Ipp##type 
*)temp.data, (int)temp.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ), order, MAX_IPP##type )) - -#define CV_DEF_IPP_HLS2BGRA_FULL(type) \ - CV_DEF_IPP_HLS2BGRA_FULL_FAMILY(2, 1, 0, type) \ - -#define CV_DEF_IPP_HLS2RGB_FULL(type) \ - CV_DEF_IPP_FUNCPROC(ippiHLSToRGB_##type##_C3R( (const Ipp##type *)src.ptr(range.start), (int)src.step[0], (Ipp##type *)dst.ptr(range.start), (int)dst.step[0], ippiSize( src.cols, range.end - range.start ) )) - -#define CV_DEF_IPP_HLS2RGBA_FULL(type) \ - CV_DEF_IPP_HLS2BGRA_FULL_FAMILY(0, 1, 2, type) \ - -#define CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(mode, funcbody) \ - CV_DEF_IPP_COLORLOOP_BODY(mode, 8u, funcbody) \ - CV_DEF_IPP_COLORLOOP_BODY(mode, 16u, funcbody) \ - CV_DEF_IPP_COLORLOOP_BODY(mode, 32f, funcbody) - -#define CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(mode, funcbody) \ - CV_DEF_IPP_COLORLOOP_BODY(mode, 8u, funcbody) \ - CV_DEF_IPP_COLORLOOP_BODY(mode, 16u, funcbody) - -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGR2BGRA, CV_DEF_IPP_SWAP_CHANNELS_C3C4_0123) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGRA2BGR, CV_DEF_IPP_COPY_AC4C3) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGR2RGBA, CV_DEF_IPP_SWAP_CHANNELS_C3C4_2103) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGBA2BGR, CV_DEF_IPP_SWAP_CHANNELS_C4C3_210) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGB2BGR, CV_DEF_IPP_SWAP_CHANNELS_C3_210) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGBA2BGRA, CV_DEF_IPP_SWAP_CHANNELS_C4_2103) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGR2GRAY, CV_DEF_IPP_BGR2GRAY) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGB2GRAY, CV_DEF_IPP_RGB2GRAY) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGRA2GRAY, CV_DEF_IPP_BGRA2GRAY) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGBA2GRAY, CV_DEF_IPP_RGBA2GRAY) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(GRAY2BGR, CV_DEF_IPP_Copy_P3C3) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(GRAY2BGRA, CV_DEF_IPP_GRAY2BGRA) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGR2XYZ, CV_DEF_IPP_BGR2XYZ) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(BGRA2XYZ, 
CV_DEF_IPP_BGRA2XYZ) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGB2XYZ, CV_DEF_IPP_RGB2XYZ) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(RGBA2XYZ, CV_DEF_IPP_RGBA2XYZ) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(XYZ2BGR, CV_DEF_IPP_XYZ2BGR) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(XYZ2BGRA, CV_DEF_IPP_XYZ2BGRA) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(XYZ2RGB, CV_DEF_IPP_XYZ2RGB) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U_32F(XYZ2RGBA, CV_DEF_IPP_XYZ2RGBA) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(BGR2HSV_FULL, CV_DEF_IPP_BGR2HSV_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(BGRA2HSV_FULL, CV_DEF_IPP_BGRA2HSV_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(RGB2HSV_FULL, CV_DEF_IPP_RGB2HSV_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(RGBA2HSV_FULL, CV_DEF_IPP_RGBA2HSV_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(BGR2HLS_FULL, CV_DEF_IPP_BGR2HLS_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(BGRA2HLS_FULL, CV_DEF_IPP_BGRA2HLS_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(RGB2HLS_FULL, CV_DEF_IPP_RGB2HLS_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(RGBA2HLS_FULL, CV_DEF_IPP_RGBA2HLS_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HSV2BGR_FULL, CV_DEF_IPP_HSV2BGR_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HSV2BGRA_FULL, CV_DEF_IPP_HSV2BGRA_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HSV2RGB_FULL, CV_DEF_IPP_HSV2RGB_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HSV2RGBA_FULL, CV_DEF_IPP_HSV2RGBA_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HLS2BGR_FULL, CV_DEF_IPP_HLS2BGR_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HLS2BGRA_FULL, CV_DEF_IPP_HLS2BGRA_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HLS2RGB_FULL, CV_DEF_IPP_HLS2RGB_FULL) -CV_DEF_IPP_COLORLOOP_IMPL_8U_16U(HLS2RGBA_FULL, CV_DEF_IPP_HLS2RGBA_FULL) +typedef IppStatus (CV_STDCALL* ippiReorderFunc)(const void *, int, void *, int, IppiSize, const int *); +typedef IppStatus (CV_STDCALL* ippiGeneralFunc)(const void *, int, void *, int, IppiSize); +typedef IppStatus (CV_STDCALL* ippiColor2GrayFunc)(const void *, int, void *, int, IppiSize, const Ipp32f *); + +template +class CvtColorIPPLoop_Invoker : public ParallelLoopBody +{ 
+public: + + CvtColorIPPLoop_Invoker(const Mat& _src, Mat& _dst, const Cvt& _cvt, bool *_ok) : + ParallelLoopBody(), src(_src), dst(_dst), cvt(_cvt), ok(_ok) + { + *ok = true; + } + + virtual void operator()(const Range& range) const + { + const void *yS = src.ptr(range.start); + void *yD = dst.ptr(range.start); + if( cvt(yS, (int)src.step[0], yD, (int)dst.step[0], src.cols, range.end - range.start) < 0 ) + *ok = false; + } + +private: + const Mat& src; + Mat& dst; + const Cvt& cvt; + bool *ok; + + const CvtColorIPPLoop_Invoker& operator= (const CvtColorIPPLoop_Invoker&); +}; + +template +bool CvtColorIPPLoop(const Mat& src, Mat& dst, const Cvt& cvt) +{ + bool ok; + parallel_for_(Range(0, src.rows), CvtColorIPPLoop_Invoker(src, dst, cvt, &ok), src.total()/(double)(1<<16) ); + return ok; +} + +template +bool CvtColorIPPLoopCopy(Mat& src, Mat& dst, const Cvt& cvt) +{ + Mat temp; + Mat &source = src; + if( src.data == dst.data ) + { + src.copyTo(temp); + source = temp; + } + bool ok; + parallel_for_(Range(0, source.rows), CvtColorIPPLoop_Invoker(source, dst, cvt, &ok), source.total()/(double)(1<<16) ); + return ok; +} + +IppStatus __stdcall ippiSwapChannels_8u_C3C4Rf(const Ipp8u* pSrc, int srcStep, Ipp8u* pDst, int dstStep, + IppiSize roiSize, const int *dstOrder) +{ + return ippiSwapChannels_8u_C3C4R(pSrc, srcStep, pDst, dstStep, roiSize, dstOrder, MAX_IPP8u); +} + +IppStatus __stdcall ippiSwapChannels_16u_C3C4Rf(const Ipp16u* pSrc, int srcStep, Ipp16u* pDst, int dstStep, + IppiSize roiSize, const int *dstOrder) +{ + return ippiSwapChannels_16u_C3C4R(pSrc, srcStep, pDst, dstStep, roiSize, dstOrder, MAX_IPP16u); +} + +IppStatus __stdcall ippiSwapChannels_32f_C3C4Rf(const Ipp32f* pSrc, int srcStep, Ipp32f* pDst, int dstStep, + IppiSize roiSize, const int *dstOrder) +{ + return ippiSwapChannels_32f_C3C4R(pSrc, srcStep, pDst, dstStep, roiSize, dstOrder, MAX_IPP32f); +} + +static ippiReorderFunc ippiSwapChannelsC3C4RTab[] = +{ + 
(ippiReorderFunc)ippiSwapChannels_8u_C3C4Rf, 0, (ippiReorderFunc)ippiSwapChannels_16u_C3C4Rf, 0, + 0, (ippiReorderFunc)ippiSwapChannels_32f_C3C4Rf, 0, 0 +}; + +static ippiGeneralFunc ippiCopyAC4C3RTab[] = +{ + (ippiGeneralFunc)ippiCopy_8u_AC4C3R, 0, (ippiGeneralFunc)ippiCopy_16u_AC4C3R, 0, + 0, (ippiGeneralFunc)ippiCopy_32f_AC4C3R, 0, 0 +}; + +static ippiReorderFunc ippiSwapChannelsC4C3RTab[] = +{ + (ippiReorderFunc)ippiSwapChannels_8u_C4C3R, 0, (ippiReorderFunc)ippiSwapChannels_16u_C4C3R, 0, + 0, (ippiReorderFunc)ippiSwapChannels_32f_C4C3R, 0, 0 +}; + +static ippiReorderFunc ippiSwapChannelsC3RTab[] = +{ + (ippiReorderFunc)ippiSwapChannels_8u_C3R, 0, (ippiReorderFunc)ippiSwapChannels_16u_C3R, 0, + 0, (ippiReorderFunc)ippiSwapChannels_32f_C3R, 0, 0 +}; + +static ippiReorderFunc ippiSwapChannelsC4RTab[] = +{ + (ippiReorderFunc)ippiSwapChannels_8u_AC4R, 0, (ippiReorderFunc)ippiSwapChannels_16u_AC4R, 0, + 0, (ippiReorderFunc)ippiSwapChannels_32f_AC4R, 0, 0 +}; + +static ippiColor2GrayFunc ippiColor2GrayC3Tab[] = +{ + (ippiColor2GrayFunc)ippiColorToGray_8u_C3C1R, 0, (ippiColor2GrayFunc)ippiColorToGray_16u_C3C1R, 0, + 0, (ippiColor2GrayFunc)ippiColorToGray_32f_C3C1R, 0, 0 +}; + +static ippiColor2GrayFunc ippiColor2GrayC4Tab[] = +{ + (ippiColor2GrayFunc)ippiColorToGray_8u_AC4C1R, 0, (ippiColor2GrayFunc)ippiColorToGray_16u_AC4C1R, 0, + 0, (ippiColor2GrayFunc)ippiColorToGray_32f_AC4C1R, 0, 0 +}; + +static ippiGeneralFunc ippiRGB2GrayC3Tab[] = +{ + (ippiGeneralFunc)ippiRGBToGray_8u_C3C1R, 0, (ippiGeneralFunc)ippiRGBToGray_16u_C3C1R, 0, + 0, (ippiGeneralFunc)ippiRGBToGray_32f_C3C1R, 0, 0 +}; + +static ippiGeneralFunc ippiRGB2GrayC4Tab[] = +{ + (ippiGeneralFunc)ippiRGBToGray_8u_AC4C1R, 0, (ippiGeneralFunc)ippiRGBToGray_16u_AC4C1R, 0, + 0, (ippiGeneralFunc)ippiRGBToGray_32f_AC4C1R, 0, 0 +}; + +static ippiGeneralFunc ippiCopyP3C3RTab[] = +{ + (ippiGeneralFunc)ippiCopy_8u_P3C3R, 0, (ippiGeneralFunc)ippiCopy_16u_P3C3R, 0, + 0, (ippiGeneralFunc)ippiCopy_32f_P3C3R, 0, 0 +}; + 
+static ippiGeneralFunc ippiRGB2XYZTab[] = +{ + (ippiGeneralFunc)ippiRGBToXYZ_8u_C3R, 0, (ippiGeneralFunc)ippiRGBToXYZ_16u_C3R, 0, + 0, (ippiGeneralFunc)ippiRGBToXYZ_32f_C3R, 0, 0 +}; + +static ippiGeneralFunc ippiXYZ2RGBTab[] = +{ + (ippiGeneralFunc)ippiXYZToRGB_8u_C3R, 0, (ippiGeneralFunc)ippiXYZToRGB_16u_C3R, 0, + 0, (ippiGeneralFunc)ippiXYZToRGB_32f_C3R, 0, 0 +}; + +static ippiGeneralFunc ippiRGB2HSVTab[] = +{ + (ippiGeneralFunc)ippiRGBToHSV_8u_C3R, 0, (ippiGeneralFunc)ippiRGBToHSV_16u_C3R, 0, + 0, 0, 0, 0 +}; + +static ippiGeneralFunc ippiHSV2RGBTab[] = +{ + (ippiGeneralFunc)ippiHSVToRGB_8u_C3R, 0, (ippiGeneralFunc)ippiHSVToRGB_16u_C3R, 0, + 0, 0, 0, 0 +}; + +static ippiGeneralFunc ippiRGB2HLSTab[] = +{ + (ippiGeneralFunc)ippiRGBToHLS_8u_C3R, 0, (ippiGeneralFunc)ippiRGBToHLS_16u_C3R, 0, + 0, (ippiGeneralFunc)ippiRGBToHLS_32f_C3R, 0, 0 +}; + +static ippiGeneralFunc ippiHLS2RGBTab[] = +{ + (ippiGeneralFunc)ippiHLSToRGB_8u_C3R, 0, (ippiGeneralFunc)ippiHLSToRGB_16u_C3R, 0, + 0, (ippiGeneralFunc)ippiHLSToRGB_32f_C3R, 0, 0 +}; + +struct IPPGeneralFunctor +{ + IPPGeneralFunctor(ippiGeneralFunc _func) : func(_func){} + bool operator()(const void *src, int srcStep, void *dst, int dstStep, int cols, int rows) const + { + return func(src, srcStep, dst, dstStep, ippiSize(cols, rows)) >= 0; + } +private: + ippiGeneralFunc func; +}; + +struct IPPReorderFunctor +{ + IPPReorderFunctor(ippiReorderFunc _func, int _order0, int _order1, int _order2) : func(_func) + { + order[0] = _order0; + order[1] = _order1; + order[2] = _order2; + order[3] = 3; + } + bool operator()(const void *src, int srcStep, void *dst, int dstStep, int cols, int rows) const + { + return func(src, srcStep, dst, dstStep, ippiSize(cols, rows), order) >= 0; + } +private: + ippiReorderFunc func; + int order[4]; +}; + +struct IPPColor2GrayFunctor +{ + IPPColor2GrayFunctor(ippiColor2GrayFunc _func) : func(_func) + { + coeffs[0] = 0.114f; + coeffs[1] = 0.587f; + coeffs[2] = 0.299f; + } + bool operator()(const void 
*src, int srcStep, void *dst, int dstStep, int cols, int rows) const + { + return func(src, srcStep, dst, dstStep, ippiSize(cols, rows), coeffs) >= 0; + } +private: + ippiColor2GrayFunc func; + Ipp32f coeffs[3]; +}; + +struct IPPGray2BGRFunctor +{ + IPPGray2BGRFunctor(ippiGeneralFunc _func) : func(_func){} + bool operator()(const void *src, int srcStep, void *dst, int dstStep, int cols, int rows) const + { + const void* srcarray[3] = { src, src, src }; + return func(srcarray, srcStep, dst, dstStep, ippiSize(cols, rows)) >= 0; + } +private: + ippiGeneralFunc func; +}; + +struct IPPGray2BGRAFunctor +{ + IPPGray2BGRAFunctor(ippiGeneralFunc _func1, ippiReorderFunc _func2, int _depth) : func1(_func1), func2(_func2), depth(_depth){} + bool operator()(const void *src, int srcStep, void *dst, int dstStep, int cols, int rows) const + { + const void* srcarray[3] = { src, src, src }; + Mat temp(rows, cols, CV_MAKETYPE(depth, 3)); + if(func1(srcarray, srcStep, temp.data, (int)temp.step[0], ippiSize(cols, rows)) < 0) + return false; + int order[4] = {0, 1, 2, 3}; + return func2(temp.data, (int)temp.step[0], dst, dstStep, ippiSize(cols, rows), order) >= 0; + } +private: + ippiGeneralFunc func1; + ippiReorderFunc func2; + int depth; +}; + +struct IPPReorderGeneralFunctor +{ + IPPReorderGeneralFunctor(ippiReorderFunc _func1, ippiGeneralFunc _func2, int _order0, int _order1, int _order2, int _depth) : func1(_func1), func2(_func2), depth(_depth) + { + order[0] = _order0; + order[1] = _order1; + order[2] = _order2; + order[3] = 3; + } + bool operator()(const void *src, int srcStep, void *dst, int dstStep, int cols, int rows) const + { + Mat temp; + temp.create(rows, cols, CV_MAKETYPE(depth, 3)); + if(func1(src, srcStep, temp.data, (int)temp.step[0], ippiSize(cols, rows), order) < 0) + return false; + return func2(temp.data, (int)temp.step[0], dst, dstStep, ippiSize(cols, rows)) >= 0; + } +private: + ippiReorderFunc func1; + ippiGeneralFunc func2; + int order[4]; + int depth; +}; + 
+struct IPPGeneralReorderFunctor +{ + IPPGeneralReorderFunctor(ippiGeneralFunc _func1, ippiReorderFunc _func2, int _order0, int _order1, int _order2, int _depth) : func1(_func1), func2(_func2), depth(_depth) + { + order[0] = _order0; + order[1] = _order1; + order[2] = _order2; + order[3] = 3; + } + bool operator()(const void *src, int srcStep, void *dst, int dstStep, int cols, int rows) const + { + Mat temp; + temp.create(rows, cols, CV_MAKETYPE(depth, 3)); + if(func1(src, srcStep, temp.data, (int)temp.step[0], ippiSize(cols, rows)) < 0) + return false; + return func2(temp.data, (int)temp.step[0], dst, dstStep, ippiSize(cols, rows), order) >= 0; + } +private: + ippiGeneralFunc func1; + ippiReorderFunc func2; + int order[4]; + int depth; +}; #endif ////////////////// Various 3/4-channel to 3/4-channel RGB transformations ///////////////// @@ -3636,32 +3651,38 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create( sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); - -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - if( code == CV_BGR2BGRA || code == CV_RGB2RGBA) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGR2BGRA, src, dst) - } - else if( code == CV_BGRA2BGR ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGRA2BGR, src, dst) - } - else if( code == CV_BGR2RGBA ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGR2RGBA, src, dst) - } - else if( code == CV_RGBA2BGR ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_RGBA2BGR, src, dst) - } - else if( code == CV_RGB2BGR ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_RGB2BGR, src, dst) - } - else if( code == CV_RGBA2BGRA ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_RGBA2BGRA, src, dst) - } + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if( code == CV_BGR2BGRA || code == CV_RGB2RGBA) + { + if ( CvtColorIPPLoop(src, dst, 
IPPReorderFunctor(ippiSwapChannelsC3C4RTab[depth], 0, 1, 2)) ) + return; + } + else if( code == CV_BGRA2BGR ) + { + if ( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiCopyAC4C3RTab[depth])) ) + return; + } + else if( code == CV_BGR2RGBA ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderFunctor(ippiSwapChannelsC3C4RTab[depth], 2, 1, 0)) ) + return; + } + else if( code == CV_RGBA2BGR ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderFunctor(ippiSwapChannelsC4C3RTab[depth], 2, 1, 0)) ) + return; + } + else if( code == CV_RGB2BGR ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderFunctor(ippiSwapChannelsC3RTab[depth], 2, 1, 0)) ) + return; + } + else if( code == CV_RGBA2BGRA ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderFunctor(ippiSwapChannelsC4RTab[depth], 2, 1, 0)) ) + return; + } #endif if( depth == CV_8U ) @@ -3716,24 +3737,28 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) CV_Assert( scn == 3 || scn == 4 ); _dst.create(sz, CV_MAKETYPE(depth, 1)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - if( code == CV_BGR2GRAY ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGR2GRAY, src, dst) - } - else if( code == CV_RGB2GRAY ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_RGB2GRAY, src, dst) - } - else if( code == CV_BGRA2GRAY ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGRA2GRAY, src, dst) - } - else if( code == CV_RGBA2GRAY ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_RGBA2GRAY, src, dst) - } + if( code == CV_BGR2GRAY ) + { + if( CvtColorIPPLoop(src, dst, IPPColor2GrayFunctor(ippiColor2GrayC3Tab[depth])) ) + return; + } + else if( code == CV_RGB2GRAY ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiRGB2GrayC3Tab[depth])) ) + return; + } + else if( code == CV_BGRA2GRAY ) + { + if( CvtColorIPPLoop(src, dst, IPPColor2GrayFunctor(ippiColor2GrayC4Tab[depth])) ) + return; + } + else if( code == CV_RGBA2GRAY ) + 
{ + if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiRGB2GrayC4Tab[depth])) ) + return; + } #endif bidx = code == CV_BGR2GRAY || code == CV_BGRA2GRAY ? 0 : 2; @@ -3764,18 +3789,21 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) CV_Assert( scn == 1 && (dcn == 3 || dcn == 4)); _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - if( code == CV_GRAY2BGR ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_GRAY2BGR, src, dst) - } - else if( code == CV_GRAY2BGRA ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_GRAY2BGRA, src, dst) - } + if( code == CV_GRAY2BGR ) + { + if( CvtColorIPPLoop(src, dst, IPPGray2BGRFunctor(ippiCopyP3C3RTab[depth])) ) + return; + } + else if( code == CV_GRAY2BGRA ) + { + if( CvtColorIPPLoop(src, dst, IPPGray2BGRAFunctor(ippiCopyP3C3RTab[depth], ippiSwapChannelsC3C4RTab[depth], depth)) ) + return; + } #endif + if( depth == CV_8U ) { #ifdef HAVE_TEGRA_OPTIMIZATION @@ -3854,24 +3882,28 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - if( code == CV_BGR2XYZ && scn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_BGR2XYZ, src, dst) - } - else if( code == CV_BGR2XYZ && scn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_BGRA2XYZ, src, dst) - } - else if( code == CV_RGB2XYZ && scn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_RGB2XYZ, src, dst) - } - else if( code == CV_RGB2XYZ && scn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_RGBA2XYZ, src, dst) - } + if( code == CV_BGR2XYZ && scn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2XYZTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == 
CV_BGR2XYZ && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2XYZTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_RGB2XYZ && scn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2XYZTab[depth])) ) + return; + } + else if( code == CV_RGB2XYZ && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2XYZTab[depth], 0, 1, 2, depth)) ) + return; + } #endif if( depth == CV_8U ) @@ -3889,24 +3921,28 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - if( code == CV_XYZ2BGR && dcn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_XYZ2BGR, src, dst) - } - else if( code == CV_XYZ2BGR && dcn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_XYZ2BGRA, src, dst) - } - else if( code == CV_XYZ2RGB && dcn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F_COPY(IPPColorLoop_Invoker_XYZ2RGB, src, dst) - } - else if( code == CV_XYZ2RGB && dcn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_32F(IPPColorLoop_Invoker_XYZ2RGBA, src, dst) - } + if( code == CV_XYZ2BGR && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiXYZ2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_XYZ2BGR && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiXYZ2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) + return; + } + if( code == CV_XYZ2RGB && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiXYZ2RGBTab[depth])) ) + return; + } + else if( code == CV_XYZ2RGB && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiXYZ2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) 
+ return; + } #endif if( depth == CV_8U ) @@ -3928,40 +3964,51 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - if( code == CV_BGR2HSV_FULL && scn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_BGR2HSV_FULL, src, dst) - } - else if( code == CV_BGR2HSV_FULL && scn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_BGRA2HSV_FULL, src, dst) - } - else if( code == CV_RGB2HSV_FULL && scn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_RGB2HSV_FULL, src, dst) - } - else if( code == CV_RGB2HSV_FULL && scn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_RGBA2HSV_FULL, src, dst) - } - else if( code == CV_BGR2HLS_FULL && scn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_BGR2HLS_FULL, src, dst) - } - else if( code == CV_BGR2HLS_FULL && scn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_BGRA2HLS_FULL, src, dst) - } - else if( code == CV_RGB2HLS_FULL && scn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_RGB2HLS_FULL, src, dst) - } - else if( code == CV_RGB2HLS_FULL && scn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_RGBA2HLS_FULL, src, dst) - } + if( depth == CV_8U || depth == CV_16U ) + { + if( code == CV_BGR2HSV_FULL && scn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2HSVTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_BGR2HSV_FULL && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HSVTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_RGB2HSV_FULL && scn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2HSVTab[depth])) ) + return; + } + else if( code 
== CV_RGB2HSV_FULL && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HSVTab[depth], 0, 1, 2, depth)) ) + return; + } + else if( code == CV_BGR2HLS_FULL && scn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2HLSTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_BGR2HLS_FULL && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HLSTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_RGB2HLS_FULL && scn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2HLSTab[depth])) ) + return; + } + else if( code == CV_RGB2HLS_FULL && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HLSTab[depth], 0, 1, 2, depth)) ) + return; + } + } #endif if( code == CV_BGR2HSV || code == CV_RGB2HSV || @@ -3998,40 +4045,51 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - if( code == CV_HSV2BGR_FULL && dcn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_HSV2BGR_FULL, src, dst) - } - else if( code == CV_HSV2BGR_FULL && dcn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_HSV2BGRA_FULL, src, dst) - } - else if( code == CV_HSV2RGB_FULL && dcn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_HSV2RGB_FULL, src, dst) - } - else if( code == CV_HSV2RGB_FULL && dcn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_HSV2RGBA_FULL, src, dst) - } - else if( code == CV_HLS2BGR_FULL && dcn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_HLS2BGR_FULL, src, dst) - } - else if( code == CV_HLS2BGR_FULL && dcn == 4 ) - { - 
CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_HLS2BGRA_FULL, src, dst) - } - else if( code == CV_HLS2RGB_FULL && dcn == 3 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U_COPY(IPPColorLoop_Invoker_HLS2RGB_FULL, src, dst) - } - else if( code == CV_HLS2RGB_FULL && dcn == 4 ) - { - CV_DEF_IPP_PARALLEL_FOR_CALL_8U_16U(IPPColorLoop_Invoker_HLS2RGBA_FULL, src, dst) - } + if( depth == CV_8U || depth == CV_16U ) + { + if( code == CV_HSV2BGR_FULL && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_HSV2BGR_FULL && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_HSV2RGB_FULL && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiHSV2RGBTab[depth])) ) + return; + } + else if( code == CV_HSV2RGB_FULL && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) + return; + } + else if( code == CV_HLS2BGR_FULL && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_HLS2BGR_FULL && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) + return; + } + else if( code == CV_HLS2RGB_FULL && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiHLS2RGBTab[depth])) ) + return; + } + else if( code == CV_HLS2RGB_FULL && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) + return; + } + } #endif if( code == CV_HSV2BGR || code == CV_HSV2RGB || From 
cfbd791465dc03f5ed07c7287d91c4cdf671d49f Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Mon, 19 Aug 2013 11:56:58 +0400 Subject: [PATCH 046/139] fixed gpu cvtColor for BGR <-> YUV --- .../gpu/include/opencv2/gpu/device/color.hpp | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/modules/gpu/include/opencv2/gpu/device/color.hpp b/modules/gpu/include/opencv2/gpu/device/color.hpp index c087d179b8..5af64bf614 100644 --- a/modules/gpu/include/opencv2/gpu/device/color.hpp +++ b/modules/gpu/include/opencv2/gpu/device/color.hpp @@ -107,25 +107,25 @@ namespace cv { namespace gpu { namespace device #undef OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 2) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 2) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 2) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0) #undef OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 2) - 
OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 2) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 2) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0) #undef OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS From 9f33de7071b83c20088c163f3cfb594a80219b04 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Mon, 19 Aug 2013 12:46:57 +0400 Subject: [PATCH 047/139] added cvconfig.hpp header file to gpu tests it uses HAVE_CUDA, HAVE_NVCUVID, etc. macroses --- modules/cudev/test/test_precomp.hpp | 2 ++ modules/gpu/test/test_precomp.hpp | 2 ++ modules/gpuarithm/test/test_precomp.hpp | 2 ++ modules/gpubgsegm/test/test_precomp.hpp | 1 + modules/gpucodec/test/test_precomp.hpp | 2 ++ modules/gpufeatures2d/test/test_precomp.hpp | 2 ++ modules/gpufilters/test/test_precomp.hpp | 2 ++ modules/gpuimgproc/test/test_precomp.hpp | 2 ++ modules/gpulegacy/test/test_precomp.hpp | 2 ++ modules/gpuoptflow/test/test_precomp.hpp | 2 ++ modules/gpustereo/test/test_precomp.hpp | 2 ++ modules/gpuwarping/test/test_precomp.hpp | 2 ++ modules/nonfree/perf/perf_precomp.hpp | 1 + modules/nonfree/test/test_precomp.hpp | 1 + modules/photo/test/test_denoising_gpu.cpp | 1 + modules/softcascade/test/test_cuda_softcascade.cpp | 2 +- modules/softcascade/test/test_precomp.hpp | 11 ++++++----- modules/superres/test/test_precomp.hpp | 1 + modules/superres/test/test_superres.cpp | 6 +++++- modules/ts/include/opencv2/ts/gpu_perf.hpp | 1 + modules/ts/include/opencv2/ts/gpu_test.hpp | 1 + 21 files changed, 41 
insertions(+), 7 deletions(-) diff --git a/modules/cudev/test/test_precomp.hpp b/modules/cudev/test/test_precomp.hpp index b2ed2d0348..18e7cc1ce2 100644 --- a/modules/cudev/test/test_precomp.hpp +++ b/modules/cudev/test/test_precomp.hpp @@ -52,4 +52,6 @@ #include "opencv2/ts.hpp" #include "opencv2/ts/gpu_test.hpp" +#include "cvconfig.h" + #endif diff --git a/modules/gpu/test/test_precomp.hpp b/modules/gpu/test/test_precomp.hpp index f2b0bf4051..0a9c1df3dc 100644 --- a/modules/gpu/test/test_precomp.hpp +++ b/modules/gpu/test/test_precomp.hpp @@ -62,4 +62,6 @@ #include "opencv2/calib3d.hpp" #include "opencv2/objdetect.hpp" +#include "cvconfig.h" + #endif diff --git a/modules/gpuarithm/test/test_precomp.hpp b/modules/gpuarithm/test/test_precomp.hpp index d25aa0c264..faa0f5adbd 100644 --- a/modules/gpuarithm/test/test_precomp.hpp +++ b/modules/gpuarithm/test/test_precomp.hpp @@ -60,4 +60,6 @@ #include "opencv2/core.hpp" #include "opencv2/imgproc.hpp" +#include "cvconfig.h" + #endif diff --git a/modules/gpubgsegm/test/test_precomp.hpp b/modules/gpubgsegm/test/test_precomp.hpp index 9422c2a80b..783e9e64cd 100644 --- a/modules/gpubgsegm/test/test_precomp.hpp +++ b/modules/gpubgsegm/test/test_precomp.hpp @@ -60,5 +60,6 @@ #include "opencv2/video.hpp" #include "opencv2/opencv_modules.hpp" +#include "cvconfig.h" #endif diff --git a/modules/gpucodec/test/test_precomp.hpp b/modules/gpucodec/test/test_precomp.hpp index 0dc79935dd..95e70d46df 100644 --- a/modules/gpucodec/test/test_precomp.hpp +++ b/modules/gpucodec/test/test_precomp.hpp @@ -57,4 +57,6 @@ #include "opencv2/gpucodec.hpp" #include "opencv2/highgui.hpp" +#include "cvconfig.h" + #endif diff --git a/modules/gpufeatures2d/test/test_precomp.hpp b/modules/gpufeatures2d/test/test_precomp.hpp index 7725d3f3d6..4ce8f78496 100644 --- a/modules/gpufeatures2d/test/test_precomp.hpp +++ b/modules/gpufeatures2d/test/test_precomp.hpp @@ -57,4 +57,6 @@ #include "opencv2/gpufeatures2d.hpp" #include "opencv2/features2d.hpp" 
+#include "cvconfig.h" + #endif diff --git a/modules/gpufilters/test/test_precomp.hpp b/modules/gpufilters/test/test_precomp.hpp index 95984929fa..7dfe713d3a 100644 --- a/modules/gpufilters/test/test_precomp.hpp +++ b/modules/gpufilters/test/test_precomp.hpp @@ -57,4 +57,6 @@ #include "opencv2/gpufilters.hpp" #include "opencv2/imgproc.hpp" +#include "cvconfig.h" + #endif diff --git a/modules/gpuimgproc/test/test_precomp.hpp b/modules/gpuimgproc/test/test_precomp.hpp index 71ba4020ce..7a8b1cbcd1 100644 --- a/modules/gpuimgproc/test/test_precomp.hpp +++ b/modules/gpuimgproc/test/test_precomp.hpp @@ -57,4 +57,6 @@ #include "opencv2/gpuimgproc.hpp" #include "opencv2/imgproc.hpp" +#include "cvconfig.h" + #endif diff --git a/modules/gpulegacy/test/test_precomp.hpp b/modules/gpulegacy/test/test_precomp.hpp index 90485a90a1..e1f2d5def4 100644 --- a/modules/gpulegacy/test/test_precomp.hpp +++ b/modules/gpulegacy/test/test_precomp.hpp @@ -74,6 +74,8 @@ #include "opencv2/core/private.gpu.hpp" +#include "cvconfig.h" + #include "NCVTest.hpp" #include "NCVAutoTestLister.hpp" #include "NCVTestSourceProvider.hpp" diff --git a/modules/gpuoptflow/test/test_precomp.hpp b/modules/gpuoptflow/test/test_precomp.hpp index 32a7443e8e..afac60d876 100644 --- a/modules/gpuoptflow/test/test_precomp.hpp +++ b/modules/gpuoptflow/test/test_precomp.hpp @@ -59,4 +59,6 @@ #include "opencv2/gpuoptflow.hpp" #include "opencv2/video.hpp" +#include "cvconfig.h" + #endif diff --git a/modules/gpustereo/test/test_precomp.hpp b/modules/gpustereo/test/test_precomp.hpp index d55b1ec0d9..eb34fcb568 100644 --- a/modules/gpustereo/test/test_precomp.hpp +++ b/modules/gpustereo/test/test_precomp.hpp @@ -57,4 +57,6 @@ #include "opencv2/gpustereo.hpp" #include "opencv2/calib3d.hpp" +#include "cvconfig.h" + #endif diff --git a/modules/gpuwarping/test/test_precomp.hpp b/modules/gpuwarping/test/test_precomp.hpp index 90a28fe1a7..9ac7fc0b83 100644 --- a/modules/gpuwarping/test/test_precomp.hpp +++ 
b/modules/gpuwarping/test/test_precomp.hpp @@ -57,6 +57,8 @@ #include "opencv2/gpuwarping.hpp" #include "opencv2/imgproc.hpp" +#include "cvconfig.h" + #include "interpolation.hpp" #endif diff --git a/modules/nonfree/perf/perf_precomp.hpp b/modules/nonfree/perf/perf_precomp.hpp index 5e3f3c3cb3..e8fad3b7be 100644 --- a/modules/nonfree/perf/perf_precomp.hpp +++ b/modules/nonfree/perf/perf_precomp.hpp @@ -14,6 +14,7 @@ #include "opencv2/highgui.hpp" #include "opencv2/opencv_modules.hpp" +#include "cvconfig.h" #ifdef HAVE_OPENCV_OCL # include "opencv2/nonfree/ocl.hpp" diff --git a/modules/nonfree/test/test_precomp.hpp b/modules/nonfree/test/test_precomp.hpp index 563dad43be..965e651e56 100644 --- a/modules/nonfree/test/test_precomp.hpp +++ b/modules/nonfree/test/test_precomp.hpp @@ -19,6 +19,7 @@ #include "opencv2/ts/gpu_test.hpp" #include "opencv2/opencv_modules.hpp" +#include "cvconfig.h" #ifdef HAVE_OPENCV_OCL # include "opencv2/nonfree/ocl.hpp" diff --git a/modules/photo/test/test_denoising_gpu.cpp b/modules/photo/test/test_denoising_gpu.cpp index caf3b23f27..f8de826f2a 100644 --- a/modules/photo/test/test_denoising_gpu.cpp +++ b/modules/photo/test/test_denoising_gpu.cpp @@ -46,6 +46,7 @@ #include "opencv2/ts/gpu_test.hpp" #include "opencv2/opencv_modules.hpp" +#include "cvconfig.h" #if defined (HAVE_CUDA) && defined(HAVE_OPENCV_GPUARITHM) && defined(HAVE_OPENCV_GPUIMGPROC) diff --git a/modules/softcascade/test/test_cuda_softcascade.cpp b/modules/softcascade/test/test_cuda_softcascade.cpp index 4d97f92082..5973d25f21 100644 --- a/modules/softcascade/test/test_cuda_softcascade.cpp +++ b/modules/softcascade/test/test_cuda_softcascade.cpp @@ -313,4 +313,4 @@ INSTANTIATE_TEST_CASE_P(cuda_accelerated, SCascadeTestAll, testing::Combine( ALL testing::Values(Fixture("cascades/inria_caltech-17.01.2013.xml", 7), Fixture("cascades/sc_cvpr_2012_to_opencv_new_format.xml", 1291)))); -#endif \ No newline at end of file +#endif diff --git 
a/modules/softcascade/test/test_precomp.hpp b/modules/softcascade/test/test_precomp.hpp index 80bff6536d..4a39744783 100644 --- a/modules/softcascade/test/test_precomp.hpp +++ b/modules/softcascade/test/test_precomp.hpp @@ -51,10 +51,11 @@ #ifndef __OPENCV_TEST_PRECOMP_HPP__ #define __OPENCV_TEST_PRECOMP_HPP__ -# include "opencv2/ts.hpp" -# include "opencv2/softcascade.hpp" -# include "opencv2/imgproc.hpp" -# include "opencv2/highgui.hpp" -# include "utility.hpp" +#include "opencv2/ts.hpp" +#include "opencv2/softcascade.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" +#include "cvconfig.h" +#include "utility.hpp" #endif diff --git a/modules/superres/test/test_precomp.hpp b/modules/superres/test/test_precomp.hpp index 4ef73030bd..47334e324a 100644 --- a/modules/superres/test/test_precomp.hpp +++ b/modules/superres/test/test_precomp.hpp @@ -56,6 +56,7 @@ #include "opencv2/imgproc.hpp" #include "opencv2/ts.hpp" #include "opencv2/superres.hpp" +#include "cvconfig.h" #include "input_array_utility.hpp" #endif diff --git a/modules/superres/test/test_superres.cpp b/modules/superres/test/test_superres.cpp index 445dd3a014..2cede096db 100644 --- a/modules/superres/test/test_superres.cpp +++ b/modules/superres/test/test_superres.cpp @@ -274,12 +274,16 @@ TEST_F(SuperResolution, BTVL1_GPU) { RunTest(cv::superres::createSuperResolution_BTVL1_GPU()); } + #endif + #if defined(HAVE_OPENCV_OCL) && defined(HAVE_OPENCL) + TEST_F(SuperResolution, BTVL1_OCL) { std::vector infos; cv::ocl::getDevice(infos); RunTest(cv::superres::createSuperResolution_BTVL1_OCL()); } -#endif + +#endif diff --git a/modules/ts/include/opencv2/ts/gpu_perf.hpp b/modules/ts/include/opencv2/ts/gpu_perf.hpp index abc17e9942..8f1ba8abc8 100644 --- a/modules/ts/include/opencv2/ts/gpu_perf.hpp +++ b/modules/ts/include/opencv2/ts/gpu_perf.hpp @@ -47,6 +47,7 @@ #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/ts/ts_perf.hpp" +#include "cvconfig.h" namespace perf { 
diff --git a/modules/ts/include/opencv2/ts/gpu_test.hpp b/modules/ts/include/opencv2/ts/gpu_test.hpp index dab5fd3b99..2ff2ca0bfc 100644 --- a/modules/ts/include/opencv2/ts/gpu_test.hpp +++ b/modules/ts/include/opencv2/ts/gpu_test.hpp @@ -44,6 +44,7 @@ #define __OPENCV_GPU_TEST_UTILITY_HPP__ #include +#include "cvconfig.h" #include "opencv2/core.hpp" #include "opencv2/core/gpu.hpp" #include "opencv2/highgui.hpp" From 7bda6992e584c75fd1bc569cdaac931385eb76c0 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Mon, 19 Aug 2013 13:17:55 +0400 Subject: [PATCH 048/139] fixed gpu BGR <-> YUV color conversion --- .../core/include/opencv2/core/cuda/color.hpp | 32 +++++++++---------- .../opencv2/cudev/functional/color_cvt.hpp | 32 +++++++++---------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/modules/core/include/opencv2/core/cuda/color.hpp b/modules/core/include/opencv2/core/cuda/color.hpp index a2b772d8b3..d5f94c92a4 100644 --- a/modules/core/include/opencv2/core/cuda/color.hpp +++ b/modules/core/include/opencv2/core/cuda/color.hpp @@ -107,25 +107,25 @@ namespace cv { namespace gpu { namespace cudev #undef OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 0) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 2) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 2) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 2) - OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0) + 
OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0) #undef OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 0) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 2) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 2) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 2) - OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0) #undef OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS diff --git a/modules/cudev/include/opencv2/cudev/functional/color_cvt.hpp b/modules/cudev/include/opencv2/cudev/functional/color_cvt.hpp index 19846876e6..8be854780a 100644 --- a/modules/cudev/include/opencv2/cudev/functional/color_cvt.hpp +++ b/modules/cudev/include/opencv2/cudev/functional/color_cvt.hpp @@ -100,14 +100,14 @@ CV_CUDEV_GRAY2RGB_INST(GRAY_to_BGRA, 4) { \ }; -CV_CUDEV_RGB2YUV_INST(RGB_to_YUV, 3, 3, 0) -CV_CUDEV_RGB2YUV_INST(RGBA_to_YUV, 4, 3, 0) -CV_CUDEV_RGB2YUV_INST(RGB_to_YUV4, 3, 4, 0) -CV_CUDEV_RGB2YUV_INST(RGBA_to_YUV4, 4, 4, 0) -CV_CUDEV_RGB2YUV_INST(BGR_to_YUV, 3, 3, 2) -CV_CUDEV_RGB2YUV_INST(BGRA_to_YUV, 4, 3, 2) -CV_CUDEV_RGB2YUV_INST(BGR_to_YUV4, 3, 4, 2) -CV_CUDEV_RGB2YUV_INST(BGRA_to_YUV4, 4, 4, 2) +CV_CUDEV_RGB2YUV_INST(RGB_to_YUV, 3, 
3, 2) +CV_CUDEV_RGB2YUV_INST(RGBA_to_YUV, 4, 3, 2) +CV_CUDEV_RGB2YUV_INST(RGB_to_YUV4, 3, 4, 2) +CV_CUDEV_RGB2YUV_INST(RGBA_to_YUV4, 4, 4, 2) +CV_CUDEV_RGB2YUV_INST(BGR_to_YUV, 3, 3, 0) +CV_CUDEV_RGB2YUV_INST(BGRA_to_YUV, 4, 3, 0) +CV_CUDEV_RGB2YUV_INST(BGR_to_YUV4, 3, 4, 0) +CV_CUDEV_RGB2YUV_INST(BGRA_to_YUV4, 4, 4, 0) #undef CV_CUDEV_RGB2YUV_INST @@ -118,14 +118,14 @@ CV_CUDEV_RGB2YUV_INST(BGRA_to_YUV4, 4, 4, 2) { \ }; -CV_CUDEV_YUV2RGB_INST(YUV_to_RGB, 3, 3, 0) -CV_CUDEV_YUV2RGB_INST(YUV_to_RGBA, 3, 4, 0) -CV_CUDEV_YUV2RGB_INST(YUV4_to_RGB, 4, 3, 0) -CV_CUDEV_YUV2RGB_INST(YUV4_to_RGBA, 4, 4, 0) -CV_CUDEV_YUV2RGB_INST(YUV_to_BGR, 3, 3, 2) -CV_CUDEV_YUV2RGB_INST(YUV_to_BGRA, 3, 4, 2) -CV_CUDEV_YUV2RGB_INST(YUV4_to_BGR, 4, 3, 2) -CV_CUDEV_YUV2RGB_INST(YUV4_to_BGRA, 4, 4, 2) +CV_CUDEV_YUV2RGB_INST(YUV_to_RGB, 3, 3, 2) +CV_CUDEV_YUV2RGB_INST(YUV_to_RGBA, 3, 4, 2) +CV_CUDEV_YUV2RGB_INST(YUV4_to_RGB, 4, 3, 2) +CV_CUDEV_YUV2RGB_INST(YUV4_to_RGBA, 4, 4, 2) +CV_CUDEV_YUV2RGB_INST(YUV_to_BGR, 3, 3, 0) +CV_CUDEV_YUV2RGB_INST(YUV_to_BGRA, 3, 4, 0) +CV_CUDEV_YUV2RGB_INST(YUV4_to_BGR, 4, 3, 0) +CV_CUDEV_YUV2RGB_INST(YUV4_to_BGRA, 4, 4, 0) #undef CV_CUDEV_YUV2RGB_INST From abf372d75921df4036d181e087beceda95380fb5 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Mon, 19 Aug 2013 15:00:36 +0400 Subject: [PATCH 049/139] Fixed incorrect extension on 3rdparty/libpng/opencv-libpng.patch. 
--- 3rdparty/libpng/{opencv-libpng.path => opencv-libpng.patch} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename 3rdparty/libpng/{opencv-libpng.path => opencv-libpng.patch} (100%) diff --git a/3rdparty/libpng/opencv-libpng.path b/3rdparty/libpng/opencv-libpng.patch similarity index 100% rename from 3rdparty/libpng/opencv-libpng.path rename to 3rdparty/libpng/opencv-libpng.patch From cd2b7448f4d84cbe84248805c1412972b01eb7e0 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Thu, 30 May 2013 13:10:11 +0400 Subject: [PATCH 050/139] eliminate unnecessary double arithmetics in CUDA --- modules/gpuarithm/src/cuda/reduce.cu | 17 +++++++++++++---- modules/gpubgsegm/src/cuda/mog2.cu | 2 +- modules/gpufeatures2d/perf/perf_features2d.cpp | 2 +- modules/gpufeatures2d/src/cuda/orb.cu | 6 +++--- modules/gpuimgproc/src/cuda/bilateral_filter.cu | 2 +- modules/gpuimgproc/src/cuda/canny.cu | 2 +- modules/gpuoptflow/src/cuda/needle_map.cu | 2 +- modules/softcascade/src/cuda/icf-sc.cu | 8 ++++---- modules/superres/perf/perf_superres.cpp | 2 +- 9 files changed, 26 insertions(+), 17 deletions(-) diff --git a/modules/gpuarithm/src/cuda/reduce.cu b/modules/gpuarithm/src/cuda/reduce.cu index 51c354cf95..8588a3b234 100644 --- a/modules/gpuarithm/src/cuda/reduce.cu +++ b/modules/gpuarithm/src/cuda/reduce.cu @@ -72,7 +72,7 @@ namespace reduce } template - __device__ __forceinline__ T result(T r, double) const + __device__ __forceinline__ T result(T r, int) const { return r; } @@ -81,6 +81,15 @@ namespace reduce __host__ __device__ __forceinline__ Sum(const Sum&) {} }; + template struct OutputType + { + typedef float type; + }; + template <> struct OutputType + { + typedef double type; + }; + struct Avg { template @@ -96,7 +105,7 @@ namespace reduce } template - __device__ __forceinline__ typename TypeVec::cn>::vec_type result(T r, double sz) const + __device__ __forceinline__ typename TypeVec::elem_type>::type, VecTraits::cn>::vec_type result(T r, float sz) const { return r / 
sz; } @@ -121,7 +130,7 @@ namespace reduce } template - __device__ __forceinline__ T result(T r, double) const + __device__ __forceinline__ T result(T r, int) const { return r; } @@ -146,7 +155,7 @@ namespace reduce } template - __device__ __forceinline__ T result(T r, double) const + __device__ __forceinline__ T result(T r, int) const { return r; } diff --git a/modules/gpubgsegm/src/cuda/mog2.cu b/modules/gpubgsegm/src/cuda/mog2.cu index 89b43b12fa..50cb9fa56d 100644 --- a/modules/gpubgsegm/src/cuda/mog2.cu +++ b/modules/gpubgsegm/src/cuda/mog2.cu @@ -227,7 +227,7 @@ namespace cv { namespace gpu { namespace cudev //check prune if (weight < -prune) { - weight = 0.0; + weight = 0.0f; nmodes--; } diff --git a/modules/gpufeatures2d/perf/perf_features2d.cpp b/modules/gpufeatures2d/perf/perf_features2d.cpp index 9396ba2908..fd28526335 100644 --- a/modules/gpufeatures2d/perf/perf_features2d.cpp +++ b/modules/gpufeatures2d/perf/perf_features2d.cpp @@ -123,7 +123,7 @@ PERF_TEST_P(Image_NFeatures, ORB, sortKeyPoints(gpu_keypoints, gpu_descriptors); - SANITY_CHECK_KEYPOINTS(gpu_keypoints); + SANITY_CHECK_KEYPOINTS(gpu_keypoints, 1e-4); SANITY_CHECK(gpu_descriptors); } else diff --git a/modules/gpufeatures2d/src/cuda/orb.cu b/modules/gpufeatures2d/src/cuda/orb.cu index 1e88648014..571ca12bde 100644 --- a/modules/gpufeatures2d/src/cuda/orb.cu +++ b/modules/gpufeatures2d/src/cuda/orb.cu @@ -197,8 +197,8 @@ namespace cv { namespace gpu { namespace cudev if (threadIdx.x == 0) { float kp_dir = ::atan2f((float)m_01, (float)m_10); - kp_dir += (kp_dir < 0) * (2.0f * CV_PI); - kp_dir *= 180.0f / CV_PI; + kp_dir += (kp_dir < 0) * (2.0f * CV_PI_F); + kp_dir *= 180.0f / CV_PI_F; angle[ptidx] = kp_dir; } @@ -349,7 +349,7 @@ namespace cv { namespace gpu { namespace cudev if (ptidx < npoints && descidx < dsize) { float angle = angle_[ptidx]; - angle *= (float)(CV_PI / 180.f); + angle *= (float)(CV_PI_F / 180.f); float sina, cosa; ::sincosf(angle, &sina, &cosa); diff --git 
a/modules/gpuimgproc/src/cuda/bilateral_filter.cu b/modules/gpuimgproc/src/cuda/bilateral_filter.cu index 6aa5df27a6..3192f649b7 100644 --- a/modules/gpuimgproc/src/cuda/bilateral_filter.cu +++ b/modules/gpuimgproc/src/cuda/bilateral_filter.cu @@ -133,7 +133,7 @@ namespace cv { namespace gpu { namespace cudev B b(src.rows, src.cols); float sigma_spatial2_inv_half = -0.5f/(sigma_spatial * sigma_spatial); - float sigma_color2_inv_half = -0.5f/(sigma_color * sigma_color); + float sigma_color2_inv_half = -0.5f/(sigma_color * sigma_color); cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel >, cudaFuncCachePreferL1) ); bilateral_kernel<<>>((PtrStepSz)src, (PtrStepSz)dst, b, kernel_size, sigma_spatial2_inv_half, sigma_color2_inv_half); diff --git a/modules/gpuimgproc/src/cuda/canny.cu b/modules/gpuimgproc/src/cuda/canny.cu index 177d14692b..271fffbc7d 100644 --- a/modules/gpuimgproc/src/cuda/canny.cu +++ b/modules/gpuimgproc/src/cuda/canny.cu @@ -43,7 +43,7 @@ #if !defined CUDA_DISABLER #include -#include //std::swap +#include #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/emulation.hpp" #include "opencv2/core/cuda/transform.hpp" diff --git a/modules/gpuoptflow/src/cuda/needle_map.cu b/modules/gpuoptflow/src/cuda/needle_map.cu index d361bcfc63..e0b1ef6b78 100644 --- a/modules/gpuoptflow/src/cuda/needle_map.cu +++ b/modules/gpuoptflow/src/cuda/needle_map.cu @@ -140,7 +140,7 @@ namespace cv { namespace gpu { namespace cudev const float u_avg_val = u_avg(y, x); const float v_avg_val = v_avg(y, x); - const float theta = ::atan2f(v_avg_val, u_avg_val);// + CV_PI; + const float theta = ::atan2f(v_avg_val, u_avg_val); float r = ::sqrtf(v_avg_val * v_avg_val + u_avg_val * u_avg_val); r = fmin(14.0f * (r / max_flow), 14.0f); diff --git a/modules/softcascade/src/cuda/icf-sc.cu b/modules/softcascade/src/cuda/icf-sc.cu index 3003e6ccb8..b119209dbe 100644 --- a/modules/softcascade/src/cuda/icf-sc.cu +++ b/modules/softcascade/src/cuda/icf-sc.cu @@ -137,10 
+137,10 @@ typedef unsigned char uchar; template __device__ __forceinline__ int fast_angle_bin(const float& dx, const float& dy) { - const float angle_quantum = CV_PI / 6.f; + const float angle_quantum = CV_PI_F / 6.f; float angle = atan2(dx, dy) + (angle_quantum / 2.f); - if (angle < 0) angle += CV_PI; + if (angle < 0) angle += CV_PI_F; const float angle_scaling = 1.f / angle_quantum; return static_cast(angle * angle_scaling) % 6; @@ -174,8 +174,8 @@ typedef unsigned char uchar; { int i = 3; float2 bin_vector_i; - bin_vector_i.x = ::cos(i * (CV_PI / 6.f)); - bin_vector_i.y = ::sin(i * (CV_PI / 6.f)); + bin_vector_i.x = ::cos(i * (CV_PI_F / 6.f)); + bin_vector_i.y = ::sin(i * (CV_PI_F / 6.f)); const float dot_product = fabs(dx * bin_vector_i.x + dy * bin_vector_i.y); if(dot_product > max_dot) diff --git a/modules/superres/perf/perf_superres.cpp b/modules/superres/perf/perf_superres.cpp index 8651b55325..83fb76e8af 100644 --- a/modules/superres/perf/perf_superres.cpp +++ b/modules/superres/perf/perf_superres.cpp @@ -160,7 +160,7 @@ PERF_TEST_P(Size_MatType, SuperResolution_BTVL1, TEST_CYCLE_N(10) superRes->nextFrame(dst); - GPU_SANITY_CHECK(dst); + GPU_SANITY_CHECK(dst, 2); } else { From e37f7a4c73cde25f19c9cc636afca79c8d32c8b0 Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Mon, 19 Aug 2013 16:27:06 +0400 Subject: [PATCH 051/139] Typo in Java generator fixed. Typo breaks debug build for Android platform. 
--- modules/java/generator/gen_java.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index 254298d8b0..d82a593bfa 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -544,7 +544,7 @@ JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_getSupportedPrevi { static const char method_name[] = "highgui::VideoCapture_getSupportedPreviewSizes_10()"; try { - LOGD(%s, method_name); + LOGD("%s", method_name); VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL union {double prop; const char* name;} u; u.prop = me->get(CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING); From aa96d8d053c3d0c367da7c1c1f9c73ed376b3bfa Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Tue, 20 Aug 2013 12:51:33 +0400 Subject: [PATCH 052/139] Fix the name of perf tests' precompiled header. --- cmake/OpenCVModule.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake index 816758d37d..5556d52e19 100644 --- a/cmake/OpenCVModule.cmake +++ b/cmake/OpenCVModule.cmake @@ -648,7 +648,7 @@ function(ocv_add_perf_tests) set(OPENCV_PERF_${the_module}_SOURCES ${perf_srcs} ${perf_hdrs}) endif() - get_native_precompiled_header(${the_target} test_precomp.hpp) + get_native_precompiled_header(${the_target} perf_precomp.hpp) add_executable(${the_target} ${OPENCV_PERF_${the_module}_SOURCES} ${${the_target}_pch}) target_link_libraries(${the_target} ${OPENCV_MODULE_${the_module}_DEPS} ${perf_deps} ${OPENCV_LINKER_LIBS}) From 9667a8a1aef0af79c9e42a0bd312b555806d6b0c Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Tue, 20 Aug 2013 12:51:33 +0400 Subject: [PATCH 053/139] Fix the name of perf tests' precompiled header. 
(cherry picked from commit aa96d8d053c3d0c367da7c1c1f9c73ed376b3bfa) --- cmake/OpenCVModule.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake index 44e43fc2b2..80c07bda36 100644 --- a/cmake/OpenCVModule.cmake +++ b/cmake/OpenCVModule.cmake @@ -645,7 +645,7 @@ function(ocv_add_perf_tests) set(OPENCV_PERF_${the_module}_SOURCES ${perf_srcs} ${perf_hdrs}) endif() - get_native_precompiled_header(${the_target} test_precomp.hpp) + get_native_precompiled_header(${the_target} perf_precomp.hpp) add_executable(${the_target} ${OPENCV_PERF_${the_module}_SOURCES} ${${the_target}_pch}) target_link_libraries(${the_target} ${OPENCV_MODULE_${the_module}_DEPS} ${perf_deps} ${OPENCV_LINKER_LIBS}) From cb2985c52669bc91ce278c6acd4bcf23a52f757e Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Tue, 20 Aug 2013 16:28:45 +0400 Subject: [PATCH 054/139] fixed gpu bitwize operation: incorrect types for template instantiation --- modules/gpu/src/element_operations.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/gpu/src/element_operations.cpp b/modules/gpu/src/element_operations.cpp index a9b003937a..356b50aa09 100644 --- a/modules/gpu/src/element_operations.cpp +++ b/modules/gpu/src/element_operations.cpp @@ -1993,7 +1993,7 @@ void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask, St } else { - bitMatNot( + bitMatNot( PtrStepSzb(src.rows, bcols, src.data, src.step), PtrStepSzb(src.rows, bcols, dst.data, dst.step), mask, stream); @@ -2040,7 +2040,7 @@ void cv::gpu::bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, c { const int vcols = bcols >> 1; - bitMatAnd( + bitMatAnd( PtrStepSzb(src1.rows, vcols, src1.data, src1.step), PtrStepSzb(src1.rows, vcols, src2.data, src2.step), PtrStepSzb(src1.rows, vcols, dst.data, dst.step), @@ -2049,7 +2049,7 @@ void cv::gpu::bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, c else { 
- bitMatAnd( + bitMatAnd( PtrStepSzb(src1.rows, bcols, src1.data, src1.step), PtrStepSzb(src1.rows, bcols, src2.data, src2.step), PtrStepSzb(src1.rows, bcols, dst.data, dst.step), @@ -2087,7 +2087,7 @@ void cv::gpu::bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, co { const int vcols = bcols >> 1; - bitMatOr( + bitMatOr( PtrStepSzb(src1.rows, vcols, src1.data, src1.step), PtrStepSzb(src1.rows, vcols, src2.data, src2.step), PtrStepSzb(src1.rows, vcols, dst.data, dst.step), @@ -2096,7 +2096,7 @@ void cv::gpu::bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, co else { - bitMatOr( + bitMatOr( PtrStepSzb(src1.rows, bcols, src1.data, src1.step), PtrStepSzb(src1.rows, bcols, src2.data, src2.step), PtrStepSzb(src1.rows, bcols, dst.data, dst.step), @@ -2134,7 +2134,7 @@ void cv::gpu::bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, c { const int vcols = bcols >> 1; - bitMatXor( + bitMatXor( PtrStepSzb(src1.rows, vcols, src1.data, src1.step), PtrStepSzb(src1.rows, vcols, src2.data, src2.step), PtrStepSzb(src1.rows, vcols, dst.data, dst.step), @@ -2143,7 +2143,7 @@ void cv::gpu::bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, c else { - bitMatXor( + bitMatXor( PtrStepSzb(src1.rows, bcols, src1.data, src1.step), PtrStepSzb(src1.rows, bcols, src2.data, src2.step), PtrStepSzb(src1.rows, bcols, dst.data, dst.step), From 3cccded86ddfa3b85ab99a8d525c27b94556f23a Mon Sep 17 00:00:00 2001 From: peng xiao Date: Wed, 21 Aug 2013 11:18:02 +0800 Subject: [PATCH 055/139] Fix build error when WITH_OPENCL is disabled. 
--- modules/bioinspired/test/test_retina_ocl.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/bioinspired/test/test_retina_ocl.cpp b/modules/bioinspired/test/test_retina_ocl.cpp index ea40bf6aa6..ee01d901fe 100644 --- a/modules/bioinspired/test/test_retina_ocl.cpp +++ b/modules/bioinspired/test/test_retina_ocl.cpp @@ -48,10 +48,10 @@ #include "opencv2/bioinspired.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/highgui.hpp" -#include "opencv2/ocl.hpp" #if defined(HAVE_OPENCV_OCL) && defined(HAVE_OPENCL) +#include "opencv2/ocl.hpp" #define RETINA_ITERATIONS 5 static double checkNear(const cv::Mat &m1, const cv::Mat &m2) From cf39ba5801d420f164f94c621e20d31e7939f15e Mon Sep 17 00:00:00 2001 From: Hanusz Leszek Date: Mon, 19 Aug 2013 19:05:37 +0200 Subject: [PATCH 056/139] Allow to read PNG image of color_type PNG_COLOR_TYPE_PALETTE with alpha channel Correct reading PNG color type palette with or without alpha imread flags -1 or 1 Better not using pnginfo.h, using png_get_tRNS instead --- modules/highgui/src/grfmt_png.cpp | 29 +++++---- modules/highgui/test/test_grfmt.cpp | 92 +++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+), 10 deletions(-) diff --git a/modules/highgui/src/grfmt_png.cpp b/modules/highgui/src/grfmt_png.cpp index fb0fe6c391..41bd58e80c 100644 --- a/modules/highgui/src/grfmt_png.cpp +++ b/modules/highgui/src/grfmt_png.cpp @@ -171,7 +171,9 @@ bool PngDecoder::readHeader() if( !m_buf.empty() || m_f ) { png_uint_32 wdth, hght; - int bit_depth, color_type; + int bit_depth, color_type, num_trans=0; + png_bytep trans; + png_color_16p trans_values; png_read_info( png_ptr, info_ptr ); @@ -187,15 +189,22 @@ bool PngDecoder::readHeader() { switch(color_type) { - case PNG_COLOR_TYPE_RGB: - case PNG_COLOR_TYPE_PALETTE: - m_type = CV_8UC3; - break; - case PNG_COLOR_TYPE_RGB_ALPHA: - m_type = CV_8UC4; - break; - default: - m_type = CV_8UC1; + case PNG_COLOR_TYPE_RGB: + m_type = CV_8UC3; + break; + case 
PNG_COLOR_TYPE_PALETTE: + png_get_tRNS( png_ptr, info_ptr, &trans, &num_trans, &trans_values); + //Check if there is a transparency value in the palette + if ( num_trans > 0 ) + m_type = CV_8UC4; + else + m_type = CV_8UC3; + break; + case PNG_COLOR_TYPE_RGB_ALPHA: + m_type = CV_8UC4; + break; + default: + m_type = CV_8UC1; } if( bit_depth == 16 ) m_type = CV_MAKETYPE(CV_16U, CV_MAT_CN(m_type)); diff --git a/modules/highgui/test/test_grfmt.cpp b/modules/highgui/test/test_grfmt.cpp index 8366fcdffc..ed16d1cde2 100644 --- a/modules/highgui/test/test_grfmt.cpp +++ b/modules/highgui/test/test_grfmt.cpp @@ -280,6 +280,98 @@ TEST(Highgui_ImreadVSCvtColor, regression) EXPECT_LT(actual_avg_diff, MAX_MEAN_DIFF); EXPECT_LT(actual_maxval, MAX_ABS_DIFF); } + +//Test OpenCV issue 3075 is solved +class CV_GrfmtReadPNGColorPaletteWithAlphaTest : public cvtest::BaseTest +{ +public: + void run(int) + { + try + { + // First Test : Read PNG with alpha, imread flag -1 + Mat img = imread(string(ts->get_data_path()) + "readwrite/color_palette_alpha.png",-1); + if (img.empty()) ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA); + + ASSERT_TRUE(img.channels() == 4); + + uint8_t* img_data = (uint8_t*)img.data; + + // Verification first pixel is red in BGRA + ASSERT_TRUE(img_data[0] == 0x00); + ASSERT_TRUE(img_data[1] == 0x00); + ASSERT_TRUE(img_data[2] == 0xFF); + ASSERT_TRUE(img_data[3] == 0xFF); + + // Verification second pixel is red in BGRA + ASSERT_TRUE(img_data[4] == 0x00); + ASSERT_TRUE(img_data[5] == 0x00); + ASSERT_TRUE(img_data[6] == 0xFF); + ASSERT_TRUE(img_data[7] == 0xFF); + + // Second Test : Read PNG without alpha, imread flag -1 + img = imread(string(ts->get_data_path()) + "readwrite/color_palette_no_alpha.png",-1); + if (img.empty()) ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA); + + ASSERT_TRUE(img.channels() == 3); + + img_data = (uint8_t*)img.data; + + // Verification first pixel is red in BGR + ASSERT_TRUE(img_data[0] == 0x00); + 
ASSERT_TRUE(img_data[1] == 0x00); + ASSERT_TRUE(img_data[2] == 0xFF); + + // Verification second pixel is red in BGR + ASSERT_TRUE(img_data[3] == 0x00); + ASSERT_TRUE(img_data[4] == 0x00); + ASSERT_TRUE(img_data[5] == 0xFF); + + // Third Test : Read PNG with alpha, imread flag 1 + img = imread(string(ts->get_data_path()) + "readwrite/color_palette_alpha.png",1); + if (img.empty()) ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA); + + ASSERT_TRUE(img.channels() == 3); + + img_data = (uint8_t*)img.data; + + // Verification first pixel is red in BGR + ASSERT_TRUE(img_data[0] == 0x00); + ASSERT_TRUE(img_data[1] == 0x00); + ASSERT_TRUE(img_data[2] == 0xFF); + + // Verification second pixel is red in BGR + ASSERT_TRUE(img_data[3] == 0x00); + ASSERT_TRUE(img_data[4] == 0x00); + ASSERT_TRUE(img_data[5] == 0xFF); + + // Fourth Test : Read PNG without alpha, imread flag 1 + img = imread(string(ts->get_data_path()) + "readwrite/color_palette_no_alpha.png",1); + if (img.empty()) ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA); + + ASSERT_TRUE(img.channels() == 3); + + img_data = (uint8_t*)img.data; + + // Verification first pixel is red in BGR + ASSERT_TRUE(img_data[0] == 0x00); + ASSERT_TRUE(img_data[1] == 0x00); + ASSERT_TRUE(img_data[2] == 0xFF); + + // Verification second pixel is red in BGR + ASSERT_TRUE(img_data[3] == 0x00); + ASSERT_TRUE(img_data[4] == 0x00); + ASSERT_TRUE(img_data[5] == 0xFF); + } + catch(...) 
+ { + ts->set_failed_test_info(cvtest::TS::FAIL_EXCEPTION); + } + ts->set_failed_test_info(cvtest::TS::OK); + } +}; + +TEST(Highgui_Image, read_png_color_palette_with_alpha) { CV_GrfmtReadPNGColorPaletteWithAlphaTest test; test.safe_run(); } #endif #ifdef HAVE_JPEG From b7ec673001731dd07789a2fdcce8f79c9134ac56 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 21 Aug 2013 10:00:40 +0400 Subject: [PATCH 057/139] added cvconfig.h header, because ts_perf.cpp uses HAVE_CUDA macros --- modules/ts/src/precomp.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ts/src/precomp.hpp b/modules/ts/src/precomp.hpp index a74417da47..1133978f95 100644 --- a/modules/ts/src/precomp.hpp +++ b/modules/ts/src/precomp.hpp @@ -1,6 +1,7 @@ #include "opencv2/core/core_c.h" #include "opencv2/core/internal.hpp" #include "opencv2/ts/ts.hpp" +#include "cvconfig.h" #ifdef GTEST_LINKED_AS_SHARED_LIBRARY #error ts module should not have GTEST_LINKED_AS_SHARED_LIBRARY defined From 2899d558acfe861569ee5cb6766f56b82667701a Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 21 Aug 2013 10:12:44 +0400 Subject: [PATCH 058/139] record cuda gpu name to output report --- modules/ts/src/ts_perf.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/ts/src/ts_perf.cpp b/modules/ts/src/ts_perf.cpp index d84db18866..f36ff09dfe 100644 --- a/modules/ts/src/ts_perf.cpp +++ b/modules/ts/src/ts_perf.cpp @@ -741,6 +741,14 @@ void TestBase::RecordRunParameters() { ::testing::Test::RecordProperty("cv_implementation", param_impl); ::testing::Test::RecordProperty("cv_num_threads", param_threads); + +#ifdef HAVE_CUDA + if (param_impl == "cuda") + { + cv::gpu::DeviceInfo info(param_cuda_device); + ::testing::Test::RecordProperty("cv_cuda_gpu", info.name()); + } +#endif } std::string TestBase::getSelectedImpl() From 9b20b513896ca59dc01b7637c6e558b05e1d69e3 Mon Sep 17 00:00:00 2001 From: peng xiao Date: Wed, 21 Aug 2013 14:23:10 +0800 Subject: [PATCH 059/139] Fix build 
error on Mac --- modules/bioinspired/src/retina_ocl.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/bioinspired/src/retina_ocl.cpp b/modules/bioinspired/src/retina_ocl.cpp index ba98da2d71..8f1f2694ef 100644 --- a/modules/bioinspired/src/retina_ocl.cpp +++ b/modules/bioinspired/src/retina_ocl.cpp @@ -733,7 +733,7 @@ void BasicRetinaFilter::_verticalCausalFilter_Irregular(oclMat &outputFrame, con openCLExecuteKernel(ctx, &retina_kernel, "verticalCausalFilter_Irregular", globalSize, localSize, args, -1, -1); } -void cv::bioinspired::ocl::normalizeGrayOutput_0_maxOutputValue(oclMat &inputOutputBuffer, const float maxOutputValue) +void normalizeGrayOutput_0_maxOutputValue(oclMat &inputOutputBuffer, const float maxOutputValue) { double min_val, max_val; ocl::minMax(inputOutputBuffer, &min_val, &max_val); @@ -743,7 +743,7 @@ void cv::bioinspired::ocl::normalizeGrayOutput_0_maxOutputValue(oclMat &inputOut ocl::add(inputOutputBuffer, offset, inputOutputBuffer); } -void cv::bioinspired::ocl::normalizeGrayOutputCentredSigmoide(const float meanValue, const float sensitivity, oclMat &in, oclMat &out, const float maxValue) +void normalizeGrayOutputCentredSigmoide(const float meanValue, const float sensitivity, oclMat &in, oclMat &out, const float maxValue) { if (sensitivity == 1.0f) { @@ -771,7 +771,7 @@ void cv::bioinspired::ocl::normalizeGrayOutputCentredSigmoide(const float meanVa openCLExecuteKernel(ctx, &retina_kernel, "normalizeGrayOutputCentredSigmoide", globalSize, localSize, args, -1, -1); } -void cv::bioinspired::ocl::normalizeGrayOutputNearZeroCentreredSigmoide(oclMat &inputPicture, oclMat &outputBuffer, const float sensitivity, const float maxOutputValue) +void normalizeGrayOutputNearZeroCentreredSigmoide(oclMat &inputPicture, oclMat &outputBuffer, const float sensitivity, const float maxOutputValue) { float X0cube = sensitivity * sensitivity * sensitivity; @@ -791,7 +791,7 @@ void 
cv::bioinspired::ocl::normalizeGrayOutputNearZeroCentreredSigmoide(oclMat & openCLExecuteKernel(ctx, &retina_kernel, "normalizeGrayOutputNearZeroCentreredSigmoide", globalSize, localSize, args, -1, -1); } -void cv::bioinspired::ocl::centerReductImageLuminance(oclMat &inputoutput) +void centerReductImageLuminance(oclMat &inputoutput) { Scalar mean, stddev; cv::meanStdDev((Mat)inputoutput, mean, stddev); From ea5dd74af1e2924be4c7776e1e80ccf211701054 Mon Sep 17 00:00:00 2001 From: kdrobnyh Date: Wed, 14 Aug 2013 13:33:17 +0400 Subject: [PATCH 060/139] Add IPP 8.0 support in FindIPP script --- cmake/OpenCVFindIPP.cmake | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/cmake/OpenCVFindIPP.cmake b/cmake/OpenCVFindIPP.cmake index 9921d2503c..9f74d941fa 100644 --- a/cmake/OpenCVFindIPP.cmake +++ b/cmake/OpenCVFindIPP.cmake @@ -136,17 +136,20 @@ endfunction() # ------------------------------------------------------------------------ # This is auxiliary function called from set_ipp_variables() -# to set IPP_LIBRARIES variable in IPP 7.x style +# to set IPP_LIBRARIES variable in IPP 7.x and 8.x style # ------------------------------------------------------------------------ function(set_ipp_new_libraries _LATEST_VERSION) set(IPP_PREFIX "ipp") if(${_LATEST_VERSION} VERSION_LESS "8.0") - set(IPP_SUFFIX "_l") # static not threaded libs suffix + set(IPP_SUFFIX "_l") # static not threaded libs suffix IPP 7.x else() - set(IPP_SUFFIX "") # static not threaded libs suffix + if(WIN32) + set(IPP_SUFFIX "mt") # static not threaded libs suffix IPP 8.x for Windows + else() + set(IPP_SUFFIX "") # static not threaded libs suffix IPP 8.x for Linux/OS X + endif() endif() - set(IPP_THRD "_t") # static threaded libs suffix set(IPPCORE "core") # core functionality set(IPPSP "s") # signal processing set(IPPIP "i") # image processing @@ -218,7 +221,7 @@ function(set_ipp_variables _LATEST_VERSION) set(IPP_LIBRARY_DIRS ${IPP_ROOT_DIR}/lib/ia32 PARENT_SCOPE) endif() - # 
set IPP_LIBRARIES variable (7.x lib names) + # set IPP_LIBRARIES variable (7.x or 8.x lib names) set_ipp_new_libraries(${_LATEST_VERSION}) set(IPP_LIBRARIES ${IPP_LIBRARIES} PARENT_SCOPE) message(STATUS "IPP libs: ${IPP_LIBRARIES}") From 32635a68348c6f742706d75682cfa6e52b95edc8 Mon Sep 17 00:00:00 2001 From: Hanusz Leszek Date: Wed, 21 Aug 2013 12:33:51 +0200 Subject: [PATCH 061/139] using unsigned char instead of uint8_t to compile under windows --- modules/highgui/test/test_grfmt.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/highgui/test/test_grfmt.cpp b/modules/highgui/test/test_grfmt.cpp index ed16d1cde2..86954e3e10 100644 --- a/modules/highgui/test/test_grfmt.cpp +++ b/modules/highgui/test/test_grfmt.cpp @@ -295,7 +295,7 @@ public: ASSERT_TRUE(img.channels() == 4); - uint8_t* img_data = (uint8_t*)img.data; + unsigned char* img_data = (unsigned char*)img.data; // Verification first pixel is red in BGRA ASSERT_TRUE(img_data[0] == 0x00); @@ -315,7 +315,7 @@ public: ASSERT_TRUE(img.channels() == 3); - img_data = (uint8_t*)img.data; + img_data = (unsigned char*)img.data; // Verification first pixel is red in BGR ASSERT_TRUE(img_data[0] == 0x00); @@ -333,7 +333,7 @@ public: ASSERT_TRUE(img.channels() == 3); - img_data = (uint8_t*)img.data; + img_data = (unsigned char*)img.data; // Verification first pixel is red in BGR ASSERT_TRUE(img_data[0] == 0x00); @@ -351,7 +351,7 @@ public: ASSERT_TRUE(img.channels() == 3); - img_data = (uint8_t*)img.data; + img_data = (unsigned char*)img.data; // Verification first pixel is red in BGR ASSERT_TRUE(img_data[0] == 0x00); From de214950c4777ba7995ac06818cafe692ef225a4 Mon Sep 17 00:00:00 2001 From: Vladislav Vinogradov Date: Wed, 21 Aug 2013 18:17:45 +0400 Subject: [PATCH 062/139] minor gpu TVL1 optical flow optimization: don't calc diff term if it is not used for epsilon criterion --- modules/gpu/src/tvl1flow.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/modules/gpu/src/tvl1flow.cpp b/modules/gpu/src/tvl1flow.cpp index b8322e2c46..9971324af1 100644 --- a/modules/gpu/src/tvl1flow.cpp +++ b/modules/gpu/src/tvl1flow.cpp @@ -222,7 +222,8 @@ void cv::gpu::OpticalFlowDual_TVL1_GPU::procOneScale(const GpuMat& I0, const Gpu { estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, u1, u2, diff, l_t, static_cast(theta)); - error = gpu::sum(diff, norm_buf)[0]; + if (epsilon > 0) + error = gpu::sum(diff, norm_buf)[0]; estimateDualVariables(u1, u2, p11, p12, p21, p22, taut); } From d62c98527a8634774ae49fbe3ff8f3776d5d7cd0 Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Mon, 19 Aug 2013 01:25:53 -0700 Subject: [PATCH 063/139] Invalid usage of cv::Ptr for arrays fixed. --- modules/contrib/src/inputoutput.cpp | 37 ++++++++++++++--------------- modules/core/src/glob.cpp | 15 ++++++------ modules/core/src/system.cpp | 11 ++++----- 3 files changed, 30 insertions(+), 33 deletions(-) diff --git a/modules/contrib/src/inputoutput.cpp b/modules/contrib/src/inputoutput.cpp index e04740faee..d6d514f5b8 100644 --- a/modules/contrib/src/inputoutput.cpp +++ b/modules/contrib/src/inputoutput.cpp @@ -11,7 +11,7 @@ namespace cv { - std::vector Directory::GetListFiles( const std::string& path, const std::string & exten, bool addPath ) + std::vector Directory::GetListFiles( const std::string& path, const std::string & exten, bool addPath ) { std::vector list; list.clear(); @@ -25,10 +25,9 @@ namespace cv HANDLE hFind; #ifdef HAVE_WINRT - size_t size = mbstowcs(NULL, path_f.c_str(), path_f.size()); - Ptr wpath = new wchar_t[size+1]; - wpath[size] = 0; - mbstowcs(wpath, path_f.c_str(), path_f.size()); + wchar_t wpath[MAX_PATH]; + size_t copied = mbstowcs(wpath, path_f.c_str(), MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0); #else hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData); @@ -47,12 +46,12 @@ 
namespace cv FindFileData.dwFileAttributes == FILE_ATTRIBUTE_SYSTEM || FindFileData.dwFileAttributes == FILE_ATTRIBUTE_READONLY) { - cv::Ptr fname; + char* fname; #ifdef HAVE_WINRT - size_t asize = wcstombs(NULL, FindFileData.cFileName, 0); - fname = new char[asize+1]; - fname[asize] = 0; - wcstombs(fname, FindFileData.cFileName, asize); + char fname_tmp[MAX_PATH] = {0}; + size_t copied = wcstombs(fname_tmp, FindFileData.cFileName, MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + fname = fname_tmp; #else fname = FindFileData.cFileName; #endif @@ -109,10 +108,10 @@ namespace cv HANDLE hFind; #ifdef HAVE_WINRT - size_t size = mbstowcs(NULL, path_f.c_str(), path_f.size()); - Ptr wpath = new wchar_t[size+1]; - wpath[size] = 0; - mbstowcs(wpath, path_f.c_str(), path_f.size()); + wchar_t wpath [MAX_PATH]; + size_t copied = mbstowcs(wpath, path_f.c_str(), path_f.size()); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + hFind = FindFirstFileExW(wpath, FindExInfoStandard, &FindFileData, FindExSearchNameMatch, NULL, 0); #else hFind = FindFirstFileA((LPCSTR)path_f.c_str(), &FindFileData); @@ -135,12 +134,12 @@ namespace cv strcmp(FindFileData.cFileName, "..") != 0) #endif { - cv::Ptr fname; + char* fname; #ifdef HAVE_WINRT - size_t asize = wcstombs(NULL, FindFileData.cFileName, 0); - fname = new char[asize+1]; - fname[asize] = 0; - wcstombs(fname, FindFileData.cFileName, asize); + char fname_tmp[MAX_PATH]; + size_t copied = wcstombs(fname, FindFileData.cFileName, MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); + fname = fname_tmp; #else fname = FindFileData.cFileName; #endif diff --git a/modules/core/src/glob.cpp b/modules/core/src/glob.cpp index e39cba0163..208b4e05cf 100644 --- a/modules/core/src/glob.cpp +++ b/modules/core/src/glob.cpp @@ -79,10 +79,9 @@ namespace dir->ent.d_name = 0; #ifdef HAVE_WINRT cv::String full_path = cv::String(path) + "\\*"; - size_t size = mbstowcs(NULL, full_path.c_str(), 
full_path.size()); - cv::Ptr wfull_path = new wchar_t[size+1]; - wfull_path[size] = 0; - mbstowcs(wfull_path, full_path.c_str(), full_path.size()); + wchar_t wfull_path[MAX_PATH]; + size_t copied = mbstowcs(wfull_path, full_path.c_str(), MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); dir->handle = ::FindFirstFileExW(wfull_path, FindExInfoStandard, &dir->data, FindExSearchNameMatch, NULL, 0); #else @@ -106,6 +105,7 @@ namespace return 0; } size_t asize = wcstombs(NULL, dir->data.cFileName, 0); + CV_Assert((asize != 0) && (asize != (size_t)-1)); char* aname = new char[asize+1]; aname[asize] = 0; wcstombs(aname, dir->data.cFileName, asize); @@ -146,10 +146,9 @@ static bool isDir(const cv::String& path, DIR* dir) { WIN32_FILE_ATTRIBUTE_DATA all_attrs; #ifdef HAVE_WINRT - size_t size = mbstowcs(NULL, path.c_str(), path.size()); - cv::Ptr wpath = new wchar_t[size+1]; - wpath[size] = 0; - mbstowcs(wpath, path.c_str(), path.size()); + wchar_t wpath[MAX_PATH]; + size_t copied = mbstowcs(wpath, path.c_str(), MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &all_attrs); #else ::GetFileAttributesExA(path.c_str(), GetFileExInfoStandard, &all_attrs); diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index 7e01ca5ea4..685b5b756d 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -411,15 +411,14 @@ string tempfile( const char* suffix ) temp_file = temp_dir + std::wstring(L"\\") + temp_file; DeleteFileW(temp_file.c_str()); - size_t asize = wcstombs(NULL, temp_file.c_str(), 0); - Ptr aname = new char[asize+1]; - aname[asize] = 0; - wcstombs(aname, temp_file.c_str(), asize); + char aname[MAX_PATH]; + size_t copied = wcstombs(aname, temp_file.c_str(), MAX_PATH); + CV_Assert((copied != MAX_PATH) && (copied != (size_t)-1)); fname = std::string(aname); RoUninitialize(); #else - char temp_dir2[MAX_PATH + 1] = { 0 }; - char temp_file[MAX_PATH 
+ 1] = { 0 }; + char temp_dir2[MAX_PATH] = { 0 }; + char temp_file[MAX_PATH] = { 0 }; if (temp_dir == 0 || temp_dir[0] == 0) { From b15addec7066c055849214c544f0c2f02adc7184 Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Wed, 21 Aug 2013 03:38:20 -0700 Subject: [PATCH 064/139] C4447 warning about main function format suppressed. --- modules/core/perf/perf_main.cpp | 5 +++++ modules/core/src/system.cpp | 6 ++++++ modules/core/test/test_main.cpp | 7 +++++-- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/modules/core/perf/perf_main.cpp b/modules/core/perf/perf_main.cpp index 79c28a645c..7c899c2446 100644 --- a/modules/core/perf/perf_main.cpp +++ b/modules/core/perf/perf_main.cpp @@ -1,3 +1,8 @@ #include "perf_precomp.hpp" +#ifdef _MSC_VER +# if _MSC_VER >= 1700 +# pragma warning(disable:4447) // Disable warning 'main' signature found without threading model +# endif +#endif CV_PERF_TEST_MAIN(core) diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index 7e01ca5ea4..4257665957 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -42,6 +42,12 @@ #include "precomp.hpp" +#ifdef _MSC_VER +# if _MSC_VER >= 1700 +# pragma warning(disable:4447) // Disable warning 'main' signature found without threading model +# endif +#endif + #if defined WIN32 || defined _WIN32 || defined WINCE #ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?) 
#define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx diff --git a/modules/core/test/test_main.cpp b/modules/core/test/test_main.cpp index 3294fab2b0..d5400e20fd 100644 --- a/modules/core/test/test_main.cpp +++ b/modules/core/test/test_main.cpp @@ -1,7 +1,10 @@ -#ifdef HAVE_WINRT - #pragma warning(disable:4447) // Disable warning 'main' signature found without threading model +#ifdef _MSC_VER +# if _MSC_VER >= 1700 +# pragma warning(disable:4447) // Disable warning 'main' signature found without threading model +# endif #endif + #include "test_precomp.hpp" CV_TEST_MAIN("cv") From 748201c3dc117e17fd2c406f93cd659d53149d9b Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Mon, 22 Jul 2013 17:14:08 +0400 Subject: [PATCH 065/139] Added more text/binary/whitespace attributes for files we have. --- .gitattributes | 82 +++++++++++++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 31 deletions(-) diff --git a/.gitattributes b/.gitattributes index cd4359ba34..35df6ca2fc 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,42 +1,58 @@ -.git* export-ignore - * text=auto whitespace=trailing-space,space-before-tab,-indent-with-non-tab,tab-in-indent,tabwidth=4 -*.py text -*.cpp text -*.hpp text -*.cxx text -*.hxx text -*.mm text -*.c text -*.h text -*.i text -*.js text -*.java text -*.scala text -*.cu text -*.cl text -*.css_t text -*.qrc text -*.qss text -*.S text -*.rst text -*.tex text -*.sty text - -*.aidl text -*.mk text +.git* text export-ignore + +*.aidl text +*.appxmanifest text +*.bib text +*.c text +*.cl text +*.conf text +*.cpp text +*.css_t text +*.cu text +*.cxx text +*.def text +*.filelist text +*.h text +*.hpp text +*.htm text +*.html text +*.hxx text +*.i text +*.idl text +*.java text +*.js text +*.mk text +*.mm text +*.plist text +*.properties text +*.py text +*.qrc text +*.qss text +*.S text +*.sbt text +*.scala text +*.sty text +*.tex text +*.txt text +*.xaml text + +# reST underlines/overlines 
can look like conflict markers +*.rst text conflict-marker-size=80 *.cmake text whitespace=tabwidth=2 *.cmakein text whitespace=tabwidth=2 *.in text whitespace=tabwidth=2 CMakeLists.txt text whitespace=tabwidth=2 -*.png binary -*.jpeg binary -*.jpg binary +*.avi binary +*.bmp binary *.exr binary *.ico binary +*.jpeg binary +*.jpg binary +*.png binary *.a binary *.so binary @@ -47,6 +63,7 @@ CMakeLists.txt text whitespace=tabwidth=2 *.pbxproj binary *.vec binary *.doc binary +*.dia binary *.xml -text whitespace=cr-at-eol *.yml -text whitespace=cr-at-eol @@ -55,9 +72,12 @@ CMakeLists.txt text whitespace=tabwidth=2 .cproject -text whitespace=cr-at-eol merge=union org.eclipse.jdt.core.prefs -text whitespace=cr-at-eol merge=union -*.vcproj text eol=crlf merge=union *.bat text eol=crlf *.cmd text eol=crlf *.cmd.tmpl text eol=crlf +*.dsp text eol=crlf -whitespace +*.sln text eol=crlf -whitespace +*.vcproj text eol=crlf -whitespace merge=union +*.vcxproj text eol=crlf -whitespace merge=union -*.sh text eol=lf \ No newline at end of file +*.sh text eol=lf From 0d8cb2e3197646cd40500dfceed5bf46a8961d1d Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Tue, 20 Aug 2013 19:33:23 +0400 Subject: [PATCH 066/139] Disabled whitespace checking for 3rdparty. --- 3rdparty/.gitattributes | 1 + 1 file changed, 1 insertion(+) create mode 100644 3rdparty/.gitattributes diff --git a/3rdparty/.gitattributes b/3rdparty/.gitattributes new file mode 100644 index 0000000000..562b12e16e --- /dev/null +++ b/3rdparty/.gitattributes @@ -0,0 +1 @@ +* -whitespace From f55740da70deb81ad3c80de26fc0cbf7f94e32ee Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Wed, 21 Aug 2013 16:44:09 +0400 Subject: [PATCH 067/139] Deleted all trailing whitespace. 
--- 3rdparty/ffmpeg/readme.txt | 2 +- 3rdparty/readme.txt | 4 +- cmake/OpenCVFindIPP.cmake | 2 +- cmake/OpenCVFindXimea.cmake | 2 +- doc/_static/insertIframe.js | 2 +- doc/packaging.txt | 6 +- .../camera_calibration/camera_calibration.rst | 108 +++--- .../camera_calibration_square_chess.rst | 12 +- .../table_of_content_calib3d.rst | 8 +- .../core/adding_images/adding_images.rst | 16 +- .../basic_linear_transform.rst | 50 +-- .../discrete_fourier_transform.rst | 58 ++-- .../file_input_output_with_xml_yml.rst | 44 +-- .../how_to_scan_images/how_to_scan_images.rst | 38 +-- .../interoperability_with_OpenCV_1.rst | 34 +- .../mat-mask-operations.rst | 24 +- .../mat_the_basic_image_container.rst | 40 +-- .../table_of_content_core.rst | 6 +- doc/tutorials/definitions/tocDefinitions.rst | 2 +- .../detection_of_planar_objects.rst | 10 +- .../table_of_content_features2d.rst | 104 +++--- .../table_of_content_gpu.rst | 4 +- .../table_of_content_highgui.rst | 20 +- doc/tutorials/highgui/trackbar/trackbar.rst | 40 +-- .../video-input-psnr-ssim.rst | 16 +- .../table_of_content_imgproc.rst | 312 +++++++++--------- .../dev_with_OCV_on_Android.rst | 104 +++--- .../introduction/ios_install/ios_install.rst | 6 +- .../linux_eclipse/linux_eclipse.rst | 62 ++-- .../linux_gcc_cmake/linux_gcc_cmake.rst | 8 +- .../linux_install/linux_install.rst | 16 +- .../load_save_image/load_save_image.rst | 20 +- .../windows_install/windows_install.rst | 6 +- .../windows_visual_studio_Opencv.rst | 44 +-- .../windows_visual_studio_image_watch.rst | 4 +- doc/tutorials/ios/hello/hello.rst | 12 +- .../image_manipulation/image_manipulation.rst | 34 +- .../table_of_content_ios.rst | 2 +- .../ios/video_processing/video_processing.rst | 60 ++-- .../introduction_to_svm.rst | 16 +- .../table_of_content_ml.rst | 10 +- .../table_of_content_objdetect.rst | 14 +- .../table_of_content_video.rst | 2 +- doc/user_guide/ug_features2d.rst | 2 +- doc/user_guide/ug_mat.rst | 12 +- doc/user_guide/ug_traincascade.rst | 142 
++++---- ...mera_calibration_and_3d_reconstruction.rst | 2 +- .../contrib/doc/facerec/facerec_changelog.rst | 30 +- .../contrib/doc/facerec/facerec_tutorial.rst | 2 +- .../facerec/tutorial/facerec_save_load.rst | 2 +- .../tutorial/facerec_video_recognition.rst | 2 +- modules/contrib/src/facerec.cpp | 2 +- modules/core/doc/basic_structures.rst | 4 +- modules/core/doc/clustering.rst | 4 +- modules/core/doc/drawing_functions.rst | 4 +- modules/core/doc/intro.rst | 4 +- .../core/include/opencv2/core/operations.hpp | 22 +- modules/core/src/arithm.cpp | 8 +- modules/core/src/stat.cpp | 18 +- modules/core/test/test_io.cpp | 2 +- modules/core/test/test_math.cpp | 2 +- .../features2d/doc/object_categorization.rst | 6 +- modules/features2d/src/bagofwords.cpp | 4 +- modules/features2d/src/evaluation.cpp | 2 +- ...st_approximate_nearest_neighbor_search.rst | 12 +- modules/gpu/CMakeLists.txt | 2 +- ...mera_calibration_and_3d_reconstruction.rst | 2 +- modules/gpu/doc/video.rst | 2 +- .../reading_and_writing_images_and_video.rst | 6 +- modules/highgui/src/cap_ffmpeg.cpp | 4 +- modules/highgui/src/cap_ffmpeg_impl.hpp | 4 +- modules/highgui/src/cap_ios_video_camera.mm | 14 +- modules/highgui/src/cap_libv4l.cpp | 2 +- modules/highgui/src/cap_msmf.cpp | 4 +- modules/highgui/src/cap_qtkit.mm | 8 +- modules/highgui/src/cap_v4l.cpp | 10 +- modules/highgui/src/cap_ximea.cpp | 18 +- modules/highgui/src/files_Qt/Milky/README.txt | 2 +- modules/highgui/src/ios_conversions.mm | 16 +- modules/highgui/src/window_QT.cpp | 2 +- modules/imgproc/doc/histograms.rst | 2 +- .../imgproc/include/opencv2/imgproc/types_c.h | 2 +- modules/imgproc/src/color.cpp | 2 +- modules/imgproc/src/morph.cpp | 42 +-- modules/imgproc/test/test_cvtyuv.cpp | 2 +- modules/java/generator/src/cpp/Mat.cpp | 82 ++--- modules/ml/doc/gradient_boosted_trees.rst | 30 +- modules/ml/doc/k_nearest_neighbors.rst | 2 +- modules/ml/doc/mldata.rst | 62 ++-- modules/ml/src/knearest.cpp | 2 +- modules/nonfree/src/opencl/surf.cl | 20 +- 
modules/nonfree/src/precomp.hpp | 2 +- modules/objdetect/src/haar.cpp | 6 +- modules/objdetect/test/test_cascadeandhog.cpp | 2 +- modules/ocl/doc/introduction.rst | 6 +- modules/ocl/doc/operations_on_matrices.rst | 6 +- modules/ocl/include/opencv2/ocl/ocl.hpp | 16 +- .../ocl/include/opencv2/ocl/private/util.hpp | 2 +- modules/ocl/perf/main.cpp | 2 +- modules/ocl/perf/perf_calib3d.cpp | 4 +- modules/ocl/perf/perf_filters.cpp | 4 +- modules/ocl/perf/perf_gftt.cpp | 4 +- modules/ocl/perf/perf_hog.cpp | 2 +- modules/ocl/perf/perf_imgproc.cpp | 2 +- modules/ocl/perf/perf_norm.cpp | 2 +- modules/ocl/perf/perf_precomp.cpp | 12 +- modules/ocl/perf/perf_split_merge.cpp | 2 +- modules/ocl/src/arithm.cpp | 6 +- modules/ocl/src/filtering.cpp | 16 +- modules/ocl/src/gftt.cpp | 22 +- modules/ocl/src/haar.cpp | 4 +- modules/ocl/src/hog.cpp | 184 +++++------ modules/ocl/src/imgproc.cpp | 6 +- modules/ocl/src/initialization.cpp | 10 +- modules/ocl/src/match_template.cpp | 6 +- modules/ocl/src/matrix_operations.cpp | 2 +- modules/ocl/src/mcwutil.cpp | 4 +- modules/ocl/src/moments.cpp | 2 +- modules/ocl/src/opencl/arithm_absdiff.cl | 20 +- modules/ocl/src/opencl/arithm_addWeighted.cl | 10 +- modules/ocl/src/opencl/arithm_add_scalar.cl | 8 +- .../ocl/src/opencl/arithm_add_scalar_mask.cl | 8 +- .../ocl/src/opencl/arithm_bitwise_binary.cl | 8 +- .../src/opencl/arithm_bitwise_binary_mask.cl | 12 +- .../opencl/arithm_bitwise_binary_scalar.cl | 12 +- .../arithm_bitwise_binary_scalar_mask.cl | 12 +- modules/ocl/src/opencl/arithm_bitwise_not.cl | 8 +- modules/ocl/src/opencl/arithm_compare_eq.cl | 24 +- modules/ocl/src/opencl/arithm_compare_ne.cl | 18 +- modules/ocl/src/opencl/arithm_div.cl | 12 +- modules/ocl/src/opencl/arithm_flip.cl | 8 +- modules/ocl/src/opencl/arithm_mul.cl | 6 +- modules/ocl/src/opencl/brute_force_match.cl | 2 +- modules/ocl/src/opencl/filter_sep_row.cl | 2 +- modules/ocl/src/opencl/filtering_boxFilter.cl | 2 +- modules/ocl/src/opencl/haarobjectdetect.cl | 4 +- 
.../src/opencl/haarobjectdetect_scaled2.cl | 2 +- modules/ocl/src/opencl/imgproc_clahe.cl | 16 +- modules/ocl/src/opencl/imgproc_gftt.cl | 32 +- modules/ocl/src/opencl/imgproc_warpAffine.cl | 4 +- .../ocl/src/opencl/imgproc_warpPerspective.cl | 2 +- .../src/opencl/kernel_radix_sort_by_key.cl | 8 +- modules/ocl/src/opencl/kernel_sort_by_key.cl | 28 +- modules/ocl/src/opencl/objdetect_hog.cl | 84 ++--- modules/ocl/src/opencl/stereobm.cl | 2 +- modules/ocl/src/opencl/stereobp.cl | 14 +- modules/ocl/src/opencl/stereocsbp.cl | 264 +++++++-------- modules/ocl/src/opencl/tvl1flow.cl | 30 +- modules/ocl/src/pyrlk.cpp | 2 +- modules/ocl/src/sort_by_key.cpp | 8 +- modules/ocl/src/stereo_csbp.cpp | 12 +- modules/ocl/src/stereobp.cpp | 2 +- modules/ocl/src/tvl1flow.cpp | 34 +- modules/ocl/test/test_arithm.cpp | 2 +- modules/ocl/test/test_brute_force_matcher.cpp | 16 +- modules/ocl/test/test_filters.cpp | 10 +- modules/ocl/test/test_imgproc.cpp | 2 +- modules/ocl/test/test_kmeans.cpp | 10 +- modules/ocl/test/test_objdetect.cpp | 6 +- modules/ocl/test/test_optflow.cpp | 4 +- modules/ocl/test/test_pyramids.cpp | 2 +- modules/ocl/test/test_sort.cpp | 6 +- modules/ocl/test/utility.cpp | 2 +- modules/photo/doc/photo.rst | 2 +- modules/python/src2/cv2.cpp | 2 +- modules/python/test/test2.py | 14 +- modules/stitching/doc/camera.rst | 2 +- modules/stitching/doc/high_level.rst | 12 +- modules/stitching/doc/seam_estimation.rst | 2 +- modules/stitching/doc/stitching.rst | 2 +- modules/superres/src/btv_l1_ocl.cpp | 6 +- modules/superres/src/opencl/superres_btvl1.cl | 2 +- modules/superres/test/test_superres.cpp | 2 +- modules/world/CMakeLists.txt | 8 +- samples/MacOSX/FaceTracker/README.txt | 18 +- samples/c/build_all.sh | 2 +- samples/c/example_cmake/README.txt | 6 +- samples/gpu/CMakeLists.txt | 4 +- samples/gpu/super_resolution.cpp | 4 +- samples/ocl/clahe.cpp | 2 +- samples/ocl/facedetect.cpp | 2 +- samples/python2/dft.py | 44 +-- samples/python2/grabcut.py | 34 +- 
samples/winrt/ImageManipulations/App.xaml | 4 +- .../winrt/ImageManipulations/MainPage.xaml | 4 +- .../MediaExtensions/Common/AsyncCB.h | 12 +- .../MediaExtensions/Common/CritSec.h | 2 +- .../MediaExtensions/Common/LinkList.h | 28 +- .../OcvTransform/OcvImageManipulations.idl | 2 +- .../OcvTransform/OcvTransform.cpp | 4 +- .../MediaExtensions/OcvTransform/dllmain.cpp | 2 +- .../common/StandardStyles.xaml | 8 +- .../sample-utils/SampleTemplateStyles.xaml | 2 +- 193 files changed, 1685 insertions(+), 1685 deletions(-) diff --git a/3rdparty/ffmpeg/readme.txt b/3rdparty/ffmpeg/readme.txt index 1089ee2a79..1928a53039 100644 --- a/3rdparty/ffmpeg/readme.txt +++ b/3rdparty/ffmpeg/readme.txt @@ -16,7 +16,7 @@ How to update opencv_ffmpeg.dll and opencv_ffmpeg_64.dll when a new version of F 2. Install 64-bit MinGW. http://mingw-w64.sourceforge.net/ Let's assume, it's installed in C:\MSYS64 3. Copy C:\MSYS32\msys to C:\MSYS64\msys. Edit C:\MSYS64\msys\etc\fstab, change C:\MSYS32 to C:\MSYS64. - + 4. Now you have working MSYS32 and MSYS64 environments. Launch, one by one, C:\MSYS32\msys\msys.bat and C:\MSYS64\msys\msys.bat to create your home directories. diff --git a/3rdparty/readme.txt b/3rdparty/readme.txt index 6d2aeeca93..ca46fbd576 100644 --- a/3rdparty/readme.txt +++ b/3rdparty/readme.txt @@ -45,13 +45,13 @@ jasper-1.900.1 - JasPer is a collection of software and manipulation of images. This software can handle image data in a variety of formats. One such format supported by JasPer is the JPEG-2000 format defined in ISO/IEC 15444-1. - + Copyright (c) 1999-2000 Image Power, Inc. Copyright (c) 1999-2000 The University of British Columbia Copyright (c) 2001-2003 Michael David Adams The JasPer license can be found in src/libjasper. - + OpenCV on Windows uses pre-built libjasper library (lib/libjasper*). 
To get the latest source code, please, visit the project homepage: diff --git a/cmake/OpenCVFindIPP.cmake b/cmake/OpenCVFindIPP.cmake index 9f74d941fa..afa0da9793 100644 --- a/cmake/OpenCVFindIPP.cmake +++ b/cmake/OpenCVFindIPP.cmake @@ -140,7 +140,7 @@ endfunction() # ------------------------------------------------------------------------ function(set_ipp_new_libraries _LATEST_VERSION) set(IPP_PREFIX "ipp") - + if(${_LATEST_VERSION} VERSION_LESS "8.0") set(IPP_SUFFIX "_l") # static not threaded libs suffix IPP 7.x else() diff --git a/cmake/OpenCVFindXimea.cmake b/cmake/OpenCVFindXimea.cmake index 27e2a78ad4..20dc9e8a54 100644 --- a/cmake/OpenCVFindXimea.cmake +++ b/cmake/OpenCVFindXimea.cmake @@ -19,7 +19,7 @@ set(XIMEA_LIBRARY_DIR) if(WIN32) # Try to find the XIMEA API path in registry. GET_FILENAME_COMPONENT(XIMEA_PATH "[HKEY_CURRENT_USER\\Software\\XIMEA\\CamSupport\\API;Path]" ABSOLUTE) - + if(EXISTS ${XIMEA_PATH}) set(XIMEA_FOUND 1) # set LIB folders diff --git a/doc/_static/insertIframe.js b/doc/_static/insertIframe.js index 904ff361de..ba464246da 100644 --- a/doc/_static/insertIframe.js +++ b/doc/_static/insertIframe.js @@ -1,4 +1,4 @@ -function insertIframe (elementId, iframeSrc) +function insertIframe (elementId, iframeSrc) { var iframe; if (document.createElement && (iframe = document.createElement('iframe'))) diff --git a/doc/packaging.txt b/doc/packaging.txt index a4d3dc2831..e8dd7699bb 100644 --- a/doc/packaging.txt +++ b/doc/packaging.txt @@ -4,14 +4,14 @@ INSTRUCTIONS TO BUILD WIN32 PACKAGES WITH CMAKE+CPACK - Install NSIS. - Generate OpenCV solutions for MSVC using CMake as usual. -- In cmake-gui: +- In cmake-gui: - Mark BUILD_PACKAGE - Mark BUILD_EXAMPLES (If examples are desired to be shipped as binaries...) - Unmark ENABLE_OPENMP, since this feature seems to have some issues yet... - Mark INSTALL_*_EXAMPLES - Open the OpenCV solution and build ALL in Debug and Release. -- Build PACKAGE, from the Release configuration. 
An NSIS installer package will be +- Build PACKAGE, from the Release configuration. An NSIS installer package will be created with both release and debug LIBs and DLLs. - + Jose Luis Blanco, 2009/JUL/29 diff --git a/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst b/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst index 0e2c764930..2cf00f42a9 100644 --- a/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst +++ b/doc/tutorials/calib3d/camera_calibration/camera_calibration.rst @@ -3,30 +3,30 @@ Camera calibration With OpenCV ****************************** -Cameras have been around for a long-long time. However, with the introduction of the cheap *pinhole* cameras in the late 20th century, they became a common occurrence in our everyday life. Unfortunately, this cheapness comes with its price: significant distortion. Luckily, these are constants and with a calibration and some remapping we can correct this. Furthermore, with calibration you may also determine the relation between the camera's natural units (pixels) and the real world units (for example millimeters). +Cameras have been around for a long-long time. However, with the introduction of the cheap *pinhole* cameras in the late 20th century, they became a common occurrence in our everyday life. Unfortunately, this cheapness comes with its price: significant distortion. Luckily, these are constants and with a calibration and some remapping we can correct this. Furthermore, with calibration you may also determine the relation between the camera's natural units (pixels) and the real world units (for example millimeters). Theory ====== -For the distortion OpenCV takes into account the radial and tangential factors. For the radial factor one uses the following formula: +For the distortion OpenCV takes into account the radial and tangential factors. For the radial factor one uses the following formula: -.. math:: +.. 
math:: x_{corrected} = x( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6) \\ y_{corrected} = y( 1 + k_1 r^2 + k_2 r^4 + k_3 r^6) -So for an old pixel point at :math:`(x,y)` coordinates in the input image, its position on the corrected output image will be :math:`(x_{corrected} y_{corrected})`. The presence of the radial distortion manifests in form of the "barrel" or "fish-eye" effect. +So for an old pixel point at :math:`(x,y)` coordinates in the input image, its position on the corrected output image will be :math:`(x_{corrected} y_{corrected})`. The presence of the radial distortion manifests in form of the "barrel" or "fish-eye" effect. -Tangential distortion occurs because the image taking lenses are not perfectly parallel to the imaging plane. It can be corrected via the formulas: +Tangential distortion occurs because the image taking lenses are not perfectly parallel to the imaging plane. It can be corrected via the formulas: -.. math:: +.. math:: x_{corrected} = x + [ 2p_1xy + p_2(r^2+2x^2)] \\ y_{corrected} = y + [ p_1(r^2+ 2y^2)+ 2p_2xy] -So we have five distortion parameters which in OpenCV are presented as one row matrix with 5 columns: +So we have five distortion parameters which in OpenCV are presented as one row matrix with 5 columns: -.. math:: +.. math:: Distortion_{coefficients}=(k_1 \hspace{10pt} k_2 \hspace{10pt} p_1 \hspace{10pt} p_2 \hspace{10pt} k_3) @@ -38,7 +38,7 @@ Now for the unit conversion we use the following formula: Here the presence of :math:`w` is explained by the use of homography coordinate system (and :math:`w=Z`). The unknown parameters are :math:`f_x` and :math:`f_y` (camera focal lengths) and :math:`(c_x, c_y)` which are the optical centers expressed in pixels coordinates. If for both axes a common focal length is used with a given :math:`a` aspect ratio (usually 1), then :math:`f_y=f_x*a` and in the upper formula we will have a single focal length :math:`f`. The matrix containing these four parameters is referred to as the *camera matrix*. 
While the distortion coefficients are the same regardless of the camera resolutions used, these should be scaled along with the current resolution from the calibrated resolution. -The process of determining these two matrices is the calibration. Calculation of these parameters is done through basic geometrical equations. The equations used depend on the chosen calibrating objects. Currently OpenCV supports three types of objects for calibration: +The process of determining these two matrices is the calibration. Calculation of these parameters is done through basic geometrical equations. The equations used depend on the chosen calibrating objects. Currently OpenCV supports three types of objects for calibration: .. container:: enumeratevisibleitemswithsquare @@ -51,7 +51,7 @@ Basically, you need to take snapshots of these patterns with your camera and let Goal ==== -The sample application will: +The sample application will: .. container:: enumeratevisibleitemswithsquare @@ -67,7 +67,7 @@ Source code You may also find the source code in the :file:`samples/cpp/tutorial_code/calib3d/camera_calibration/` folder of the OpenCV source library or :download:`download it from here <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp>`. The program has a single argument: the name of its configuration file. If none is given then it will try to open the one named "default.xml". :download:`Here's a sample configuration file <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml>` in XML format. In the configuration file you may choose to use camera as an input, a video file or an image list. If you opt for the last one, you will need to create a configuration file where you enumerate the images to use. Here's :download:`an example of this <../../../../samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml>`. 
The important part to remember is that the images need to be specified using the absolute path or the relative one from your application's working directory. You may find all this in the samples directory mentioned above. -The application starts up with reading the settings from the configuration file. Although, this is an important part of it, it has nothing to do with the subject of this tutorial: *camera calibration*. Therefore, I've chosen not to post the code for that part here. Technical background on how to do this you can find in the :ref:`fileInputOutputXMLYAML` tutorial. +The application starts up with reading the settings from the configuration file. Although, this is an important part of it, it has nothing to do with the subject of this tutorial: *camera calibration*. Therefore, I've chosen not to post the code for that part here. Technical background on how to do this you can find in the :ref:`fileInputOutputXMLYAML` tutorial. Explanation =========== @@ -76,15 +76,15 @@ Explanation .. code-block:: cpp - Settings s; + Settings s; const string inputSettingsFile = argc > 1 ? argv[1] : "default.xml"; FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings if (!fs.isOpened()) { - cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl; + cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl; return -1; } - fs["Settings"] >> s; + fs["Settings"] >> s; fs.release(); // close Settings file if (!s.goodInput) @@ -95,7 +95,7 @@ Explanation For this I've used simple OpenCV class input operation. After reading the file I've an additional post-processing function that checks validity of the input. Only if all inputs are good then *goodInput* variable will be true. -#. **Get next input, if it fails or we have enough of them - calibrate**. After this we have a big loop where we do the following operations: get the next image from the image list, camera or video file. 
If this fails or we have enough images then we run the calibration process. In case of image we step out of the loop and otherwise the remaining frames will be undistorted (if the option is set) via changing from *DETECTION* mode to the *CALIBRATED* one. +#. **Get next input, if it fails or we have enough of them - calibrate**. After this we have a big loop where we do the following operations: get the next image from the image list, camera or video file. If this fails or we have enough images then we run the calibration process. In case of image we step out of the loop and otherwise the remaining frames will be undistorted (if the option is set) via changing from *DETECTION* mode to the *CALIBRATED* one. .. code-block:: cpp @@ -123,7 +123,7 @@ Explanation if( s.flipVertical ) flip( view, view, 0 ); } - For some cameras we may need to flip the input image. Here we do this too. + For some cameras we may need to flip the input image. Here we do this too. #. **Find the pattern in the current input**. The formation of the equations I mentioned above aims to finding major patterns in the input: in case of the chessboard this are corners of the squares and for the circles, well, the circles themselves. The position of these will form the result which will be written into the *pointBuf* vector. @@ -146,19 +146,19 @@ Explanation break; } - Depending on the type of the input pattern you use either the :calib3d:`findChessboardCorners ` or the :calib3d:`findCirclesGrid ` function. For both of them you pass the current image and the size of the board and you'll get the positions of the patterns. Furthermore, they return a boolean variable which states if the pattern was found in the input (we only need to take into account those images where this is true!). + Depending on the type of the input pattern you use either the :calib3d:`findChessboardCorners ` or the :calib3d:`findCirclesGrid ` function. 
For both of them you pass the current image and the size of the board and you'll get the positions of the patterns. Furthermore, they return a boolean variable which states if the pattern was found in the input (we only need to take into account those images where this is true!). - Then again in case of cameras we only take camera images when an input delay time is passed. This is done in order to allow user moving the chessboard around and getting different images. Similar images result in similar equations, and similar equations at the calibration step will form an ill-posed problem, so the calibration will fail. For square images the positions of the corners are only approximate. We may improve this by calling the :feature2d:`cornerSubPix ` function. It will produce better calibration result. After this we add a valid inputs result to the *imagePoints* vector to collect all of the equations into a single container. Finally, for visualization feedback purposes we will draw the found points on the input image using :calib3d:`findChessboardCorners ` function. + Then again in case of cameras we only take camera images when an input delay time is passed. This is done in order to allow user moving the chessboard around and getting different images. Similar images result in similar equations, and similar equations at the calibration step will form an ill-posed problem, so the calibration will fail. For square images the positions of the corners are only approximate. We may improve this by calling the :feature2d:`cornerSubPix ` function. It will produce better calibration result. After this we add a valid inputs result to the *imagePoints* vector to collect all of the equations into a single container. Finally, for visualization feedback purposes we will draw the found points on the input image using :calib3d:`findChessboardCorners ` function. .. 
code-block:: cpp - if ( found) // If done with success, + if ( found) // If done with success, { // improve the found corners' coordinate accuracy for chessboard - if( s.calibrationPattern == Settings::CHESSBOARD) + if( s.calibrationPattern == Settings::CHESSBOARD) { Mat viewGray; - cvtColor(view, viewGray, CV_BGR2GRAY); + cvtColor(view, viewGray, CV_BGR2GRAY); cornerSubPix( viewGray, pointBuf, Size(11,11), Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 )); } @@ -171,11 +171,11 @@ Explanation blinkOutput = s.inputCapture.isOpened(); } - // Draw the corners. + // Draw the corners. drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found ); } -#. **Show state and result to the user, plus command line control of the application**. This part shows text output on the image. +#. **Show state and result to the user, plus command line control of the application**. This part shows text output on the image. .. code-block:: cpp @@ -183,7 +183,7 @@ Explanation string msg = (mode == CAPTURING) ? "100/100" : mode == CALIBRATED ? "Calibrated" : "Press 'g' to start"; int baseLine = 0; - Size textSize = getTextSize(msg, 1, 1, 1, &baseLine); + Size textSize = getTextSize(msg, 1, 1, 1, &baseLine); Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10); if( mode == CAPTURING ) @@ -199,7 +199,7 @@ Explanation if( blinkOutput ) bitwise_not(view, view); - If we ran calibration and got camera's matrix with the distortion coefficients we may want to correct the image using :imgproc_geometric:`undistort ` function: + If we ran calibration and got camera's matrix with the distortion coefficients we may want to correct the image using :imgproc_geometric:`undistort ` function: .. code-block:: cpp @@ -229,7 +229,7 @@ Explanation imagePoints.clear(); } -#. **Show the distortion removal for the images too**. When you work with an image list it is not possible to remove the distortion inside the loop. Therefore, you must do this after the loop. 
Taking advantage of this now I'll expand the :imgproc_geometric:`undistort ` function, which is in fact first calls :imgproc_geometric:`initUndistortRectifyMap ` to find transformation matrices and then performs transformation using :imgproc_geometric:`remap ` function. Because, after successful calibration map calculation needs to be done only once, by using this expanded form you may speed up your application: +#. **Show the distortion removal for the images too**. When you work with an image list it is not possible to remove the distortion inside the loop. Therefore, you must do this after the loop. Taking advantage of this now I'll expand the :imgproc_geometric:`undistort ` function, which is in fact first calls :imgproc_geometric:`initUndistortRectifyMap ` to find transformation matrices and then performs transformation using :imgproc_geometric:`remap ` function. Because, after successful calibration map calculation needs to be done only once, by using this expanded form you may speed up your application: .. code-block:: cpp @@ -256,9 +256,9 @@ Explanation The calibration and save ======================== -Because the calibration needs to be done only once per camera, it makes sense to save it after a successful calibration. This way later on you can just load these values into your program. Due to this we first make the calibration, and if it succeeds we save the result into an OpenCV style XML or YAML file, depending on the extension you give in the configuration file. +Because the calibration needs to be done only once per camera, it makes sense to save it after a successful calibration. This way later on you can just load these values into your program. Due to this we first make the calibration, and if it succeeds we save the result into an OpenCV style XML or YAML file, depending on the extension you give in the configuration file. -Therefore in the first function we just split up these two processes. 
Because we want to save many of the calibration variables we'll create these variables here and pass on both of them to the calibration and saving function. Again, I'll not show the saving part as that has little in common with the calibration. Explore the source file in order to find out how and what: +Therefore in the first function we just split up these two processes. Because we want to save many of the calibration variables we'll create these variables here and pass on both of them to the calibration and saving function. Again, I'll not show the saving part as that has little in common with the calibration. Explore the source file in order to find out how and what: .. code-block:: cpp @@ -269,10 +269,10 @@ Therefore in the first function we just split up these two processes. Because we vector reprojErrs; double totalAvgErr = 0; - bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs, + bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs, reprojErrs, totalAvgErr); cout << (ok ? "Calibration succeeded" : "Calibration failed") - << ". avg re projection error = " << totalAvgErr ; + << ". avg re projection error = " << totalAvgErr ; if( ok ) // save only if the calibration was done with success saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs, @@ -280,15 +280,15 @@ Therefore in the first function we just split up these two processes. Because we return ok; } -We do the calibration with the help of the :calib3d:`calibrateCamera ` function. It has the following parameters: +We do the calibration with the help of the :calib3d:`calibrateCamera ` function. It has the following parameters: .. container:: enumeratevisibleitemswithsquare - + The object points. This is a vector of *Point3f* vector that for each input image describes how should the pattern look. If we have a planar pattern (like a chessboard) then we can simply set all Z coordinates to zero. 
This is a collection of the points where these important points are present. Because, we use a single pattern for all the input images we can calculate this just once and multiply it for all the other input views. We calculate the corner points with the *calcBoardCornerPositions* function as: + + The object points. This is a vector of *Point3f* vector that for each input image describes how should the pattern look. If we have a planar pattern (like a chessboard) then we can simply set all Z coordinates to zero. This is a collection of the points where these important points are present. Because, we use a single pattern for all the input images we can calculate this just once and multiply it for all the other input views. We calculate the corner points with the *calcBoardCornerPositions* function as: .. code-block:: cpp - void calcBoardCornerPositions(Size boardSize, float squareSize, vector& corners, + void calcBoardCornerPositions(Size boardSize, float squareSize, vector& corners, Settings::Pattern patternType /*= Settings::CHESSBOARD*/) { corners.clear(); @@ -310,19 +310,19 @@ We do the calibration with the help of the :calib3d:`calibrateCamera > objectPoints(1); calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern); - objectPoints.resize(imagePoints.size(),objectPoints[0]); + objectPoints.resize(imagePoints.size(),objectPoints[0]); - + The image points. This is a vector of *Point2f* vector which for each input image contains coordinates of the important points (corners for chessboard and centers of the circles for the circle pattern). We have already collected this from :calib3d:`findChessboardCorners ` or :calib3d:`findCirclesGrid ` function. We just need to pass it on. + + The image points. This is a vector of *Point2f* vector which for each input image contains coordinates of the important points (corners for chessboard and centers of the circles for the circle pattern). 
We have already collected this from :calib3d:`findChessboardCorners ` or :calib3d:`findCirclesGrid ` function. We just need to pass it on. - + The size of the image acquired from the camera, video file or the images. + + The size of the image acquired from the camera, video file or the images. - + The camera matrix. If we used the fixed aspect ratio option we need to set the :math:`f_x` to zero: + + The camera matrix. If we used the fixed aspect ratio option we need to set the :math:`f_x` to zero: .. code-block:: cpp @@ -330,24 +330,24 @@ We do the calibration with the help of the :calib3d:`calibrateCamera (0,0) = 1.0; - + The distortion coefficient matrix. Initialize with zero. + + The distortion coefficient matrix. Initialize with zero. .. code-block:: cpp distCoeffs = Mat::zeros(8, 1, CV_64F); - + For all the views the function will calculate rotation and translation vectors which transform the object points (given in the model coordinate space) to the image points (given in the world coordinate space). The 7-th and 8-th parameters are the output vector of matrices containing in the i-th position the rotation and translation vector for the i-th object point to the i-th image point. + + For all the views the function will calculate rotation and translation vectors which transform the object points (given in the model coordinate space) to the image points (given in the world coordinate space). The 7-th and 8-th parameters are the output vector of matrices containing in the i-th position the rotation and translation vector for the i-th object point to the i-th image point. - + The final argument is the flag. You need to specify here options like fix the aspect ratio for the focal length, assume zero tangential distortion or to fix the principal point. + + The final argument is the flag. You need to specify here options like fix the aspect ratio for the focal length, assume zero tangential distortion or to fix the principal point. .. 
code-block:: cpp double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5); - + The function returns the average re-projection error. This number gives a good estimation of precision of the found parameters. This should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices we may calculate the error for one view by using the :calib3d:`projectPoints ` to first transform the object point to image point. Then we calculate the absolute norm between what we got with our transformation and the corner/circle finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculated for all the calibration images. + + The function returns the average re-projection error. This number gives a good estimation of precision of the found parameters. This should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices we may calculate the error for one view by using the :calib3d:`projectPoints ` to first transform the object point to image point. Then we calculate the absolute norm between what we got with our transformation and the corner/circle finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculated for all the calibration images. - .. code-block:: cpp + .. code-block:: cpp double computeReprojectionErrors( const vector >& objectPoints, const vector >& imagePoints, @@ -378,7 +378,7 @@ We do the calibration with the help of the :calib3d:`calibrateCamera ` which has a size of 9 X 6. I've used an AXIS IP camera to create a couple of snapshots of the board and saved it into VID5 directory. 
I've put this inside the :file:`images/CameraCalibration` folder of my working directory and created the following :file:`VID5.XML` file that describes which images to use: +Let there be :download:`this input chessboard pattern <../../../pattern.png>` which has a size of 9 X 6. I've used an AXIS IP camera to create a couple of snapshots of the board and saved it into VID5 directory. I've put this inside the :file:`images/CameraCalibration` folder of my working directory and created the following :file:`VID5.XML` file that describes which images to use: .. code-block:: xml @@ -396,25 +396,25 @@ Let there be :download:`this input chessboard pattern <../../../pattern.png>` wh -Then passed :file:`images/CameraCalibration/VID5/VID5.XML` as an input in the configuration file. Here's a chessboard pattern found during the runtime of the application: +Then passed :file:`images/CameraCalibration/VID5/VID5.XML` as an input in the configuration file. Here's a chessboard pattern found during the runtime of the application: -.. image:: images/fileListImage.jpg +.. image:: images/fileListImage.jpg :alt: A found chessboard :align: center -After applying the distortion removal we get: +After applying the distortion removal we get: -.. image:: images/fileListImageUnDist.jpg +.. image:: images/fileListImageUnDist.jpg :alt: Distortion removal for File List :align: center -The same works for :download:`this asymmetrical circle pattern <../../../acircles_pattern.png>` by setting the input width to 4 and height to 11. This time I've used a live camera feed by specifying its ID ("1") for the input. Here's, how a detected pattern should look: +The same works for :download:`this asymmetrical circle pattern <../../../acircles_pattern.png>` by setting the input width to 4 and height to 11. This time I've used a live camera feed by specifying its ID ("1") for the input. Here's, how a detected pattern should look: -.. image:: images/asymetricalPattern.jpg +.. 
image:: images/asymetricalPattern.jpg :alt: Asymmetrical circle detection :align: center -In both cases in the specified output XML/YAML file you'll find the camera and distortion coefficients matrices: +In both cases in the specified output XML/YAML file you'll find the camera and distortion coefficients matrices: .. code-block:: cpp @@ -433,9 +433,9 @@ In both cases in the specified output XML/YAML file you'll find the camera and d -4.1802327176423804e-001 5.0715244063187526e-001 0. 0. -5.7843597214487474e-001 -Add these values as constants to your program, call the :imgproc_geometric:`initUndistortRectifyMap ` and the :imgproc_geometric:`remap ` function to remove distortion and enjoy distortion free inputs for cheap and low quality cameras. +Add these values as constants to your program, call the :imgproc_geometric:`initUndistortRectifyMap ` and the :imgproc_geometric:`remap ` function to remove distortion and enjoy distortion free inputs for cheap and low quality cameras. -You may observe a runtime instance of this on the `YouTube here `_. +You may observe a runtime instance of this on the `YouTube here `_. .. raw:: html diff --git a/doc/tutorials/calib3d/camera_calibration_square_chess/camera_calibration_square_chess.rst b/doc/tutorials/calib3d/camera_calibration_square_chess/camera_calibration_square_chess.rst index ec7354be22..4eff2640ad 100644 --- a/doc/tutorials/calib3d/camera_calibration_square_chess/camera_calibration_square_chess.rst +++ b/doc/tutorials/calib3d/camera_calibration_square_chess/camera_calibration_square_chess.rst @@ -7,16 +7,16 @@ Camera calibration with square chessboard The goal of this tutorial is to learn how to calibrate a camera given a set of chessboard images. -*Test data*: use images in your data/chess folder. +*Test data*: use images in your data/chess folder. #. - Compile opencv with samples by setting ``BUILD_EXAMPLES`` to ``ON`` in cmake configuration. 
+ Compile opencv with samples by setting ``BUILD_EXAMPLES`` to ``ON`` in cmake configuration. #. Go to ``bin`` folder and use ``imagelist_creator`` to create an ``XML/YAML`` list of your images. - + #. - Then, run ``calibration`` sample to get camera parameters. Use square size equal to 3cm. + Then, run ``calibration`` sample to get camera parameters. Use square size equal to 3cm. Pose estimation =============== @@ -57,6 +57,6 @@ Now, let us write a code that detects a chessboard in a new image and finds its distCoeffs, rvec, tvec, false); #. - Calculate reprojection error like it is done in ``calibration`` sample (see ``opencv/samples/cpp/calibration.cpp``, function ``computeReprojectionErrors``). + Calculate reprojection error like it is done in ``calibration`` sample (see ``opencv/samples/cpp/calibration.cpp``, function ``computeReprojectionErrors``). -Question: how to calculate the distance from the camera origin to any of the corners? \ No newline at end of file +Question: how to calculate the distance from the camera origin to any of the corners? \ No newline at end of file diff --git a/doc/tutorials/calib3d/table_of_content_calib3d/table_of_content_calib3d.rst b/doc/tutorials/calib3d/table_of_content_calib3d/table_of_content_calib3d.rst index 3d45664621..91f80b70b9 100644 --- a/doc/tutorials/calib3d/table_of_content_calib3d/table_of_content_calib3d.rst +++ b/doc/tutorials/calib3d/table_of_content_calib3d/table_of_content_calib3d.rst @@ -3,11 +3,11 @@ *calib3d* module. Camera calibration and 3D reconstruction ----------------------------------------------------------- -Although we got most of our images in a 2D format they do come from a 3D world. Here you will learn how to find out from the 2D images information about the 3D world. +Although we got most of our images in a 2D format they do come from a 3D world. Here you will learn how to find out from the 2D images information about the 3D world. -.. include:: ../../definitions/tocDefinitions.rst +.. 
include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -26,7 +26,7 @@ Although we got most of our images in a 2D format they do come from a 3D world. :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv diff --git a/doc/tutorials/core/adding_images/adding_images.rst b/doc/tutorials/core/adding_images/adding_images.rst index 13e4063aa9..e3135693de 100644 --- a/doc/tutorials/core/adding_images/adding_images.rst +++ b/doc/tutorials/core/adding_images/adding_images.rst @@ -18,7 +18,7 @@ Theory .. note:: - The explanation below belongs to the book `Computer Vision: Algorithms and Applications `_ by Richard Szeliski + The explanation below belongs to the book `Computer Vision: Algorithms and Applications `_ by Richard Szeliski From our previous tutorial, we know already a bit of *Pixel operators*. An interesting dyadic (two-input) operator is the *linear blend operator*: @@ -43,7 +43,7 @@ As usual, after the not-so-lengthy explanation, let's go to the code: int main( int argc, char** argv ) { - double alpha = 0.5; double beta; double input; + double alpha = 0.5; double beta; double input; Mat src1, src2, dst; @@ -69,7 +69,7 @@ As usual, after the not-so-lengthy explanation, let's go to the code: beta = ( 1.0 - alpha ); addWeighted( src1, alpha, src2, beta, 0.0, dst); - + imshow( "Linear Blend", dst ); waitKey(0); @@ -99,10 +99,10 @@ Explanation #. Now we need to generate the :math:`g(x)` image. For this, the function :add_weighted:`addWeighted <>` comes quite handy: .. code-block:: cpp - + beta = ( 1.0 - alpha ); addWeighted( src1, alpha, src2, beta, 0.0, dst); - + since :add_weighted:`addWeighted <>` produces: .. math:: @@ -110,12 +110,12 @@ Explanation dst = \alpha \cdot src1 + \beta \cdot src2 + \gamma In this case, :math:`\gamma` is the argument :math:`0.0` in the code above. - -#. Create windows, show the images and wait for the user to end the program. 
+ +#. Create windows, show the images and wait for the user to end the program. Result ======= .. image:: images/Adding_Images_Tutorial_Result_0.jpg :alt: Blending Images Tutorial - Final Result - :align: center + :align: center diff --git a/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst b/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst index 097e79e00b..613f4e1008 100644 --- a/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst +++ b/doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst @@ -10,7 +10,7 @@ In this tutorial you will learn how to: .. container:: enumeratevisibleitemswithsquare - + Access pixel values + + Access pixel values + Initialize a matrix with zeros @@ -20,16 +20,16 @@ In this tutorial you will learn how to: Theory ======= - + .. note:: - The explanation below belongs to the book `Computer Vision: Algorithms and Applications `_ by Richard Szeliski + The explanation below belongs to the book `Computer Vision: Algorithms and Applications `_ by Richard Szeliski Image Processing -------------------- .. container:: enumeratevisibleitemswithsquare - * A general image processing operator is a function that takes one or more input images and produces an output image. + * A general image processing operator is a function that takes one or more input images and produces an output image. * Image transforms can be seen as: @@ -54,18 +54,18 @@ Brightness and contrast adjustments * Two commonly used point processes are *multiplication* and *addition* with a constant: .. math:: - + g(x) = \alpha f(x) + \beta - + * The parameters :math:`\alpha > 0` and :math:`\beta` are often called the *gain* and *bias* parameters; sometimes these parameters are said to control *contrast* and *brightness* respectively. * You can think of :math:`f(x)` as the source image pixels and :math:`g(x)` as the output image pixels. Then, more conveniently we can write the expression as: .. 
math:: - + g(i,j) = \alpha \cdot f(i,j) + \beta - - where :math:`i` and :math:`j` indicates that the pixel is located in the *i-th* row and *j-th* column. + + where :math:`i` and :math:`j` indicates that the pixel is located in the *i-th* row and *j-th* column. Code ===== @@ -91,7 +91,7 @@ Code Mat image = imread( argv[1] ); Mat new_image = Mat::zeros( image.size(), image.type() ); - /// Initialize values + /// Initialize values std::cout<<" Basic Linear Transforms "<>alpha; @@ -102,7 +102,7 @@ Code { for( int x = 0; x < image.cols; x++ ) { for( int c = 0; c < 3; c++ ) { - new_image.at(y,x)[c] = + new_image.at(y,x)[c] = saturate_cast( alpha*( image.at(y,x)[c] ) + beta ); } } @@ -133,41 +133,41 @@ Explanation #. We load an image using :imread:`imread <>` and save it in a Mat object: - + .. code-block:: cpp Mat image = imread( argv[1] ); #. Now, since we will make some transformations to this image, we need a new Mat object to store it. Also, we want this to have the following features: - + .. container:: enumeratevisibleitemswithsquare * Initial pixel values equal to zero * Same size and type as the original image - + .. code-block:: cpp - Mat new_image = Mat::zeros( image.size(), image.type() ); - - We observe that :mat_zeros:`Mat::zeros <>` returns a Matlab-style zero initializer based on *image.size()* and *image.type()* + Mat new_image = Mat::zeros( image.size(), image.type() ); + + We observe that :mat_zeros:`Mat::zeros <>` returns a Matlab-style zero initializer based on *image.size()* and *image.type()* #. Now, to perform the operation :math:`g(i,j) = \alpha \cdot f(i,j) + \beta` we will access to each pixel in image. Since we are operating with RGB images, we will have three values per pixel (R, G and B), so we will also access them separately. Here is the piece of code: .. 
code-block:: cpp - + for( int y = 0; y < image.rows; y++ ) { for( int x = 0; x < image.cols; x++ ) { for( int c = 0; c < 3; c++ ) - { new_image.at(y,x)[c] = + { new_image.at(y,x)[c] = saturate_cast( alpha*( image.at(y,x)[c] ) + beta ); } } } - + Notice the following: .. container:: enumeratevisibleitemswithsquare - * To access each pixel in the images we are using this syntax: *image.at(y,x)[c]* where *y* is the row, *x* is the column and *c* is R, G or B (0, 1 or 2). + * To access each pixel in the images we are using this syntax: *image.at(y,x)[c]* where *y* is the row, *x* is the column and *c* is R, G or B (0, 1 or 2). * Since the operation :math:`\alpha \cdot p(i,j) + \beta` can give values out of range or not integers (if :math:`\alpha` is float), we use :saturate_cast:`saturate_cast <>` to make sure the values are valid. @@ -175,7 +175,7 @@ Explanation #. Finally, we create windows and show the images, the usual way. .. code-block:: cpp - + namedWindow("Original Image", 1); namedWindow("New Image", 1); @@ -185,9 +185,9 @@ Explanation waitKey(0); .. note:: - + Instead of using the **for** loops to access each pixel, we could have simply used this command: - + .. code-block:: cpp image.convertTo(new_image, -1, alpha, beta); @@ -211,4 +211,4 @@ Result .. image:: images/Basic_Linear_Transform_Tutorial_Result_0.jpg :alt: Basic Linear Transform - Final Result - :align: center + :align: center diff --git a/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst b/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst index b5f8e77c8d..b7cf446687 100644 --- a/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst +++ b/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.rst @@ -4,22 +4,22 @@ Discrete Fourier Transform ************************** Goal -==== +==== -We'll seek answers for the following questions: +We'll seek answers for the following questions: .. 
container:: enumeratevisibleitemswithsquare - + What is a Fourier transform and why use it? - + How to do it in OpenCV? + + What is a Fourier transform and why use it? + + How to do it in OpenCV? + Usage of functions such as: :imgprocfilter:`copyMakeBorder() `, :operationsonarrays:`merge() `, :operationsonarrays:`dft() `, :operationsonarrays:`getOptimalDFTSize() `, :operationsonarrays:`log() ` and :operationsonarrays:`normalize() ` . Source code =========== -You can :download:`download this from here <../../../../samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp` of the OpenCV source code library. +You can :download:`download this from here <../../../../samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp` of the OpenCV source code library. -Here's a sample usage of :operationsonarrays:`dft() ` : +Here's a sample usage of :operationsonarrays:`dft() ` : .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp :language: cpp @@ -30,11 +30,11 @@ Here's a sample usage of :operationsonarrays:`dft() ` : Explanation =========== -The Fourier Transform will decompose an image into its sinus and cosines components. In other words, it will transform an image from its spatial domain to its frequency domain. The idea is that any function may be approximated exactly with the sum of infinite sinus and cosines functions. The Fourier Transform is a way how to do this. Mathematically a two dimensional images Fourier transform is: +The Fourier Transform will decompose an image into its sinus and cosines components. In other words, it will transform an image from its spatial domain to its frequency domain. 
The idea is that any function may be approximated exactly with the sum of infinite sine and cosine functions. The Fourier Transform is a way to do this. Mathematically a two dimensional image's Fourier transform is: .. math:: - F(k,l) = \displaystyle\sum\limits_{i=0}^{N-1}\sum\limits_{j=0}^{N-1} f(i,j)e^{-i2\pi(\frac{ki}{N}+\frac{lj}{N})} + F(k,l) = \displaystyle\sum\limits_{i=0}^{N-1}\sum\limits_{j=0}^{N-1} f(i,j)e^{-i2\pi(\frac{ki}{N}+\frac{lj}{N})} e^{ix} = \cos{x} + i\sin {x} @@ -44,65 +44,65 @@ In this sample I'll show how to calculate and show the *magnitude* image of a Fo 1. **Expand the image to an optimal size**. The performance of a DFT is dependent of the image size. It tends to be the fastest for image sizes that are multiple of the numbers two, three and five. Therefore, to achieve maximal performance it is generally a good idea to pad border values to the image to get a size with such traits. The :operationsonarrays:`getOptimalDFTSize() ` returns this optimal size and we can use the :imgprocfilter:`copyMakeBorder() ` function to expand the borders of an image: - .. code-block:: cpp + .. code-block:: cpp Mat padded; //expand input image to optimal size int m = getOptimalDFTSize( I.rows ); int n = getOptimalDFTSize( I.cols ); // on the border add zero pixels copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0)); - The appended pixels are initialized with zero. + The appended pixels are initialized with zero. 2. **Make place for both the complex and the real values**. The result of a Fourier Transform is complex. This implies that for each image value the result is two image values (one per component). Moreover, the frequency domains range is much larger than its spatial counterpart. Therefore, we store these usually at least in a *float* format. Therefore we'll convert our input image to this type and expand it with another channel to hold the complex values: - .. code-block:: cpp + .. 
code-block:: cpp Mat planes[] = {Mat_(padded), Mat::zeros(padded.size(), CV_32F)}; Mat complexI; merge(planes, 2, complexI); // Add to the expanded another plane with zeros -3. **Make the Discrete Fourier Transform**. It's possible an in-place calculation (same input as output): +3. **Make the Discrete Fourier Transform**. It's possible an in-place calculation (same input as output): - .. code-block:: cpp + .. code-block:: cpp dft(complexI, complexI); // this way the result may fit in the source matrix -4. **Transform the real and complex values to magnitude**. A complex number has a real (*Re*) and a complex (imaginary - *Im*) part. The results of a DFT are complex numbers. The magnitude of a DFT is: +4. **Transform the real and complex values to magnitude**. A complex number has a real (*Re*) and a complex (imaginary - *Im*) part. The results of a DFT are complex numbers. The magnitude of a DFT is: .. math:: M = \sqrt[2]{ {Re(DFT(I))}^2 + {Im(DFT(I))}^2} - Translated to OpenCV code: + Translated to OpenCV code: - .. code-block:: cpp + .. code-block:: cpp split(complexI, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) - magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude + magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude Mat magI = planes[0]; -5. **Switch to a logarithmic scale**. It turns out that the dynamic range of the Fourier coefficients is too large to be displayed on the screen. We have some small and some high changing values that we can't observe like this. Therefore the high values will all turn out as white points, while the small ones as black. To use the gray scale values to for visualization we can transform our linear scale to a logarithmic one: +5. **Switch to a logarithmic scale**. It turns out that the dynamic range of the Fourier coefficients is too large to be displayed on the screen. We have some small and some high changing values that we can't observe like this. 
Therefore the high values will all turn out as white points, while the small ones as black. To use the gray scale values for visualization we can transform our linear scale to a logarithmic one: .. math:: M_1 = \log{(1 + M)} - Translated to OpenCV code: + Translated to OpenCV code: - .. code-block:: cpp + .. code-block:: cpp magI += Scalar::all(1); // switch to logarithmic scale log(magI, magI); -6. **Crop and rearrange**. Remember, that at the first step, we expanded the image? Well, it's time to throw away the newly introduced values. For visualization purposes we may also rearrange the quadrants of the result, so that the origin (zero, zero) corresponds with the image center. +6. **Crop and rearrange**. Remember, that at the first step, we expanded the image? Well, it's time to throw away the newly introduced values. For visualization purposes we may also rearrange the quadrants of the result, so that the origin (zero, zero) corresponds with the image center. - .. code-block:: cpp + .. code-block:: cpp magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2)); int cx = magI.cols/2; int cy = magI.rows/2; - Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant + Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant Mat q1(magI, Rect(cx, 0, cx, cy)); // Top-Right Mat q2(magI, Rect(0, cy, cx, cy)); // Bottom-Left Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right @@ -116,25 +116,25 @@ In this sample I'll show how to calculate and show the *magnitude* image of a Fo q2.copyTo(q1); tmp.copyTo(q2); -7. **Normalize**. This is done again for visualization purposes. We now have the magnitudes, however this are still out of our image display range of zero to one. 
We normalize our values to this range using the :operationsonarrays:`normalize() ` function. - .. code-block:: cpp + .. code-block:: cpp - normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a + normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a // viewable image form (float between values 0 and 1). Result ====== -An application idea would be to determine the geometrical orientation present in the image. For example, let us find out if a text is horizontal or not? Looking at some text you'll notice that the text lines sort of form also horizontal lines and the letters form sort of vertical lines. These two main components of a text snippet may be also seen in case of the Fourier transform. Let us use :download:`this horizontal <../../../../samples/cpp/tutorial_code/images/imageTextN.png>` and :download:`this rotated<../../../../samples/cpp/tutorial_code/images/imageTextR.png>` image about a text. +An application idea would be to determine the geometrical orientation present in the image. For example, let us find out if a text is horizontal or not? Looking at some text you'll notice that the text lines sort of form also horizontal lines and the letters form sort of vertical lines. These two main components of a text snippet may be also seen in case of the Fourier transform. Let us use :download:`this horizontal <../../../../samples/cpp/tutorial_code/images/imageTextN.png>` and :download:`this rotated<../../../../samples/cpp/tutorial_code/images/imageTextR.png>` image about a text. -In case of the horizontal text: +In case of the horizontal text: .. image:: images/result_normal.jpg :alt: In case of normal text :align: center -In case of a rotated text: +In case of a rotated text: .. 
image:: images/result_rotated.jpg :alt: In case of rotated text diff --git a/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst b/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst index 44eed2eca4..87166b7cc3 100644 --- a/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst +++ b/doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.rst @@ -4,9 +4,9 @@ File Input and Output using XML and YAML files ********************************************** Goal -==== +==== -You'll find answers for the following questions: +You'll find answers for the following questions: .. container:: enumeratevisibleitemswithsquare @@ -18,7 +18,7 @@ You'll find answers for the following questions: Source code =========== -You can :download:`download this from here <../../../../samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp` of the OpenCV source code library. +You can :download:`download this from here <../../../../samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp` of the OpenCV source code library. Here's a sample code of how to achieve all the stuff enumerated at the goal list. @@ -31,9 +31,9 @@ Here's a sample code of how to achieve all the stuff enumerated at the goal list Explanation =========== -Here we talk only about XML and YAML file inputs. Your output (and its respective input) file may have only one of these extensions and the structure coming from this. They are two kinds of data structures you may serialize: *mappings* (like the STL map) and *element sequence* (like the STL vector>. The difference between these is that in a map every element has a unique name through what you may access it. 
For sequences you need to go through them to query a specific item. +Here we talk only about XML and YAML file inputs. Your output (and its respective input) file may have only one of these extensions and the structure coming from this. They are two kinds of data structures you may serialize: *mappings* (like the STL map) and *element sequence* (like the STL vector>. The difference between these is that in a map every element has a unique name through what you may access it. For sequences you need to go through them to query a specific item. -1. **XML\\YAML File Open and Close.** Before you write any content to such file you need to open it and at the end to close it. The XML\YAML data structure in OpenCV is :xmlymlpers:`FileStorage `. To specify that this structure to which file binds on your hard drive you can use either its constructor or the *open()* function of this: +1. **XML\\YAML File Open and Close.** Before you write any content to such file you need to open it and at the end to close it. The XML\YAML data structure in OpenCV is :xmlymlpers:`FileStorage `. To specify that this structure to which file binds on your hard drive you can use either its constructor or the *open()* function of this: .. code-block:: cpp @@ -42,29 +42,29 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv \\... fs.open(filename, FileStorage::READ); - Either one of this you use the second argument is a constant specifying the type of operations you'll be able to on them: WRITE, READ or APPEND. The extension specified in the file name also determinates the output format that will be used. The output may be even compressed if you specify an extension such as *.xml.gz*. + Either one of this you use the second argument is a constant specifying the type of operations you'll be able to on them: WRITE, READ or APPEND. The extension specified in the file name also determinates the output format that will be used. 
The output may be even compressed if you specify an extension such as *.xml.gz*. + + The file automatically closes when the :xmlymlpers:`FileStorage ` objects is destroyed. However, you may explicitly call for this by using the *release* function: - The file automatically closes when the :xmlymlpers:`FileStorage ` objects is destroyed. However, you may explicitly call for this by using the *release* function: - .. code-block:: cpp fs.release(); // explicit close -#. **Input and Output of text and numbers.** The data structure uses the same << output operator that the STL library. For outputting any type of data structure we need first to specify its name. We do this by just simply printing out the name of this. For basic types you may follow this with the print of the value : +#. **Input and Output of text and numbers.** The data structure uses the same << output operator that the STL library. For outputting any type of data structure we need first to specify its name. We do this by just simply printing out the name of this. For basic types you may follow this with the print of the value : .. code-block:: cpp fs << "iterationNr" << 100; - Reading in is a simple addressing (via the [] operator) and casting operation or a read via the >> operator : + Reading in is a simple addressing (via the [] operator) and casting operation or a read via the >> operator : .. code-block:: cpp - int itNr; + int itNr; fs["iterationNr"] >> itNr; itNr = (int) fs["iterationNr"]; -#. **Input\\Output of OpenCV Data structures.** Well these behave exactly just as the basic C++ types: +#. **Input\\Output of OpenCV Data structures.** Well these behave exactly just as the basic C++ types: .. code-block:: cpp @@ -77,7 +77,7 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv fs["R"] >> R; // Read cv::Mat fs["T"] >> T; -#. **Input\\Output of vectors (arrays) and associative maps.** As I mentioned beforehand we can output maps and sequences (array, vector) too. 
Again we first print the name of the variable and then we have to specify if our output is either a sequence or map. +#. **Input\\Output of vectors (arrays) and associative maps.** As I mentioned beforehand we can output maps and sequences (array, vector) too. Again we first print the name of the variable and then we have to specify if our output is either a sequence or map. For sequence before the first element print the "[" character and after the last one the "]" character: @@ -95,7 +95,7 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv fs << "{" << "One" << 1; fs << "Two" << 2 << "}"; - To read from these we use the :xmlymlpers:`FileNode ` and the :xmlymlpers:`FileNodeIterator ` data structures. The [] operator of the :xmlymlpers:`FileStorage ` class returns a :xmlymlpers:`FileNode ` data type. If the node is sequential we can use the :xmlymlpers:`FileNodeIterator ` to iterate through the items: + To read from these we use the :xmlymlpers:`FileNode ` and the :xmlymlpers:`FileNodeIterator ` data structures. The [] operator of the :xmlymlpers:`FileStorage ` class returns a :xmlymlpers:`FileNode ` data type. If the node is sequential we can use the :xmlymlpers:`FileNodeIterator ` to iterate through the items: .. code-block:: cpp @@ -115,8 +115,8 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv .. code-block:: cpp n = fs["Mapping"]; // Read mappings from a sequence - cout << "Two " << (int)(n["Two"]) << "; "; - cout << "One " << (int)(n["One"]) << endl << endl; + cout << "Two " << (int)(n["Two"]) << "; "; + cout << "One " << (int)(n["One"]) << endl << endl; #. **Read and write your own data structures.** Suppose you have a data structure such as: @@ -148,7 +148,7 @@ Here we talk only about XML and YAML file inputs. 
Your output (and its respectiv id = (string)node["id"]; } - Then you need to add the following functions definitions outside the class: + Then you need to add the following functions definitions outside the class: .. code-block:: cpp @@ -175,17 +175,17 @@ Here we talk only about XML and YAML file inputs. Your output (and its respectiv fs << "MyData" << m; // your own data structures fs["MyData"] >> m; // Read your own structure_ - Or to try out reading a non-existing read: + Or to try out reading a non-existing read: .. code-block:: cpp - fs["NonExisting"] >> m; // Do not add a fs << "NonExisting" << m command for this to work + fs["NonExisting"] >> m; // Do not add a fs << "NonExisting" << m command for this to work cout << endl << "NonExisting = " << endl << m << endl; Result ====== -Well mostly we just print out the defined numbers. On the screen of your console you could see: +Well mostly we just print out the defined numbers. On the screen of your console you could see: .. code-block:: bash @@ -212,7 +212,7 @@ Well mostly we just print out the defined numbers. On the screen of your console Tip: Open up output.xml with a text editor to see the serialized data. -Nevertheless, it's much more interesting what you may see in the output xml file: +Nevertheless, it's much more interesting what you may see in the output xml file: .. code-block:: xml @@ -242,7 +242,7 @@ Nevertheless, it's much more interesting what you may see in the output xml file mydata1234 -Or the YAML file: +Or the YAML file: .. 
code-block:: yaml diff --git a/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst b/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst index eba0cae7c2..ef0f8640ca 100644 --- a/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst +++ b/doc/tutorials/core/how_to_scan_images/how_to_scan_images.rst @@ -4,9 +4,9 @@ How to scan images, lookup tables and time measurement with OpenCV ******************************************************************* Goal -==== +==== -We'll seek answers for the following questions: +We'll seek answers for the following questions: .. container:: enumeratevisibleitemswithsquare @@ -18,11 +18,11 @@ We'll seek answers for the following questions: Our test case ============= -Let us consider a simple color reduction method. Using the unsigned char C and C++ type for matrix item storing a channel of pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with a lot less of them to get the same final result. +Let us consider a simple color reduction method. Using the unsigned char C and C++ type for matrix item storing a channel of pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with a lot less of them to get the same final result. -In this cases it's common that we make a *color space reduction*. This means that we divide the color space current value with a new input value to end up with fewer colors. For instance every value between zero and nine takes the new value zero, every value between ten and nineteen the value ten and so on. 
+In this cases it's common that we make a *color space reduction*. This means that we divide the color space current value with a new input value to end up with fewer colors. For instance every value between zero and nine takes the new value zero, every value between ten and nineteen the value ten and so on. -When you divide an *uchar* (unsigned char - aka values between zero and 255) value with an *int* value the result will be also *char*. These values may only be char values. Therefore, any fraction will be rounded down. Taking advantage of this fact the upper operation in the *uchar* domain may be expressed as: +When you divide an *uchar* (unsigned char - aka values between zero and 255) value with an *int* value the result will be also *char*. These values may only be char values. Therefore, any fraction will be rounded down. Taking advantage of this fact the upper operation in the *uchar* domain may be expressed as: .. math:: @@ -30,11 +30,11 @@ When you divide an *uchar* (unsigned char - aka values between zero and 255) val A simple color space reduction algorithm would consist of just passing through every pixel of an image matrix and applying this formula. It's worth noting that we do a divide and a multiplication operation. These operations are bloody expensive for a system. If possible it's worth avoiding them by using cheaper operations such as a few subtractions, addition or in best case a simple assignment. Furthermore, note that we only have a limited number of input values for the upper operation. In case of the *uchar* system this is 256 to be exact. -Therefore, for larger images it would be wise to calculate all possible values beforehand and during the assignment just make the assignment, by using a lookup table. Lookup tables are simple arrays (having one or more dimensions) that for a given input value variation holds the final output value. Its strength lies that we do not need to make the calculation, we just need to read the result. 
+Therefore, for larger images it would be wise to calculate all possible values beforehand and during the assignment just make the assignment, by using a lookup table. Lookup tables are simple arrays (having one or more dimensions) that for a given input value variation holds the final output value. Its strength lies that we do not need to make the calculation, we just need to read the result. -Our test case program (and the sample presented here) will do the following: read in a console line argument image (that may be either color or gray scale - console line argument too) and apply the reduction with the given console line argument integer value. In OpenCV, at the moment they are three major ways of going through an image pixel by pixel. To make things a little more interesting will make the scanning for each image using all of these methods, and print out how long it took. +Our test case program (and the sample presented here) will do the following: read in a console line argument image (that may be either color or gray scale - console line argument too) and apply the reduction with the given console line argument integer value. In OpenCV, at the moment they are three major ways of going through an image pixel by pixel. To make things a little more interesting will make the scanning for each image using all of these methods, and print out how long it took. -You can download the full source code :download:`here <../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp>` or look it up in the samples directory of OpenCV at the cpp tutorial code for the core section. Its basic usage is: +You can download the full source code :download:`here <../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp>` or look it up in the samples directory of OpenCV at the cpp tutorial code for the core section. Its basic usage is: .. code-block:: bash @@ -45,25 +45,25 @@ The final argument is optional. 
If given the image will be loaded in gray scale .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 - :lines: 48-60 + :lines: 48-60 Here we first use the C++ *stringstream* class to convert the third command line argument from text to an integer format. Then we use a simple look and the upper formula to calculate the lookup table. No OpenCV specific stuff here. -Another issue is how do we measure time? Well OpenCV offers two simple functions to achieve this :UtilitySystemFunctions:`getTickCount() ` and :UtilitySystemFunctions:`getTickFrequency() `. The first returns the number of ticks of your systems CPU from a certain event (like since you booted your system). The second returns how many times your CPU emits a tick during a second. So to measure in seconds the number of time elapsed between two operations is easy as: +Another issue is how do we measure time? Well OpenCV offers two simple functions to achieve this :UtilitySystemFunctions:`getTickCount() ` and :UtilitySystemFunctions:`getTickFrequency() `. The first returns the number of ticks of your systems CPU from a certain event (like since you booted your system). The second returns how many times your CPU emits a tick during a second. So to measure in seconds the number of time elapsed between two operations is easy as: .. code-block:: cpp double t = (double)getTickCount(); // do something ... - t = ((double)getTickCount() - t)/getTickFrequency(); + t = ((double)getTickCount() - t)/getTickFrequency(); cout << "Times passed in seconds: " << t << endl; -.. _How_Image_Stored_Memory: +.. _How_Image_Stored_Memory: How the image matrix is stored in the memory? ============================================= -As you could already read in my :ref:`matTheBasicImageContainer` tutorial the size of the matrix depends of the color system used. More accurately, it depends from the number of channels used. 
In case of a gray scale image we have something like: +As you could already read in my :ref:`matTheBasicImageContainer` tutorial the size of the matrix depends of the color system used. More accurately, it depends from the number of channels used. In case of a gray scale image we have something like: .. math:: @@ -94,14 +94,14 @@ Note that the order of the channels is inverse: BGR instead of RGB. Because in m The efficient way ================= -When it comes to performance you cannot beat the classic C style operator[] (pointer) access. Therefore, the most efficient method we can recommend for making the assignment is: +When it comes to performance you cannot beat the classic C style operator[] (pointer) access. Therefore, the most efficient method we can recommend for making the assignment is: .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 :lines: 125-152 -Here we basically just acquire a pointer to the start of each row and go through it until it ends. In the special case that the matrix is stored in a continues manner we only need to request the pointer a single time and go all the way to the end. We need to look out for color images: we have three channels so we need to pass through three times more items in each row. +Here we basically just acquire a pointer to the start of each row and go through it until it ends. In the special case that the matrix is stored in a continues manner we only need to request the pointer a single time and go all the way to the end. We need to look out for color images: we have three channels so we need to pass through three times more items in each row. There's another way of this. The *data* data member of a *Mat* object returns the pointer to the first row, first column. If this pointer is null you have no valid input in that object. Checking this is the simplest method to check if your image loading was a success. 
In case the storage is continues we can use this to go through the whole data pointer. In case of a gray scale image this would look like: @@ -114,17 +114,17 @@ There's another way of this. The *data* data member of a *Mat* object returns th You would get the same result. However, this code is a lot harder to read later on. It gets even harder if you have some more advanced technique there. Moreover, in practice I've observed you'll get the same performance result (as most of the modern compilers will probably make this small optimization trick automatically for you). -The iterator (safe) method +The iterator (safe) method ========================== -In case of the efficient way making sure that you pass through the right amount of *uchar* fields and to skip the gaps that may occur between the rows was your responsibility. The iterator method is considered a safer way as it takes over these tasks from the user. All you need to do is ask the begin and the end of the image matrix and then just increase the begin iterator until you reach the end. To acquire the value *pointed* by the iterator use the * operator (add it before it). +In case of the efficient way making sure that you pass through the right amount of *uchar* fields and to skip the gaps that may occur between the rows was your responsibility. The iterator method is considered a safer way as it takes over these tasks from the user. All you need to do is ask the begin and the end of the image matrix and then just increase the begin iterator until you reach the end. To acquire the value *pointed* by the iterator use the * operator (add it before it). .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp :language: cpp :tab-width: 4 :lines: 154-182 -In case of color images we have three uchar items per column. This may be considered a short vector of uchar items, that has been baptized in OpenCV with the *Vec3b* name. 
To access the n-th sub column we use simple operator[] access. It's important to remember that OpenCV iterators go through the columns and automatically skip to the next row. Therefore in case of color images if you use a simple *uchar* iterator you'll be able to access only the blue channel values. +In case of color images we have three uchar items per column. This may be considered a short vector of uchar items, that has been baptized in OpenCV with the *Vec3b* name. To access the n-th sub column we use simple operator[] access. It's important to remember that OpenCV iterators go through the columns and automatically skip to the next row. Therefore in case of color images if you use a simple *uchar* iterator you'll be able to access only the blue channel values. On-the-fly address calculation with reference returning ======================================================= @@ -136,7 +136,7 @@ The final method isn't recommended for scanning. It was made to acquire or modif :tab-width: 4 :lines: 184-216 -The functions takes your input type and coordinates and calculates on the fly the address of the queried item. Then returns a reference to that. This may be a constant when you *get* the value and non-constant when you *set* the value. As a safety step in **debug mode only*** there is performed a check that your input coordinates are valid and does exist. If this isn't the case you'll get a nice output message of this on the standard error output stream. Compared to the efficient way in release mode the only difference in using this is that for every element of the image you'll get a new row pointer for what we use the C operator[] to acquire the column element. +The functions takes your input type and coordinates and calculates on the fly the address of the queried item. Then returns a reference to that. This may be a constant when you *get* the value and non-constant when you *set* the value. 
As a safety step in **debug mode only*** there is performed a check that your input coordinates are valid and does exist. If this isn't the case you'll get a nice output message of this on the standard error output stream. Compared to the efficient way in release mode the only difference in using this is that for every element of the image you'll get a new row pointer for what we use the C operator[] to acquire the column element. If you need to multiple lookups using this method for an image it may be troublesome and time consuming to enter the type and the at keyword for each of the accesses. To solve this problem OpenCV has a :basicstructures:`Mat_ ` data type. It's the same as Mat with the extra need that at definition you need to specify the data type through what to look at the data matrix, however in return you can use the operator() for fast access of items. To make things even better this is easily convertible from and to the usual :basicstructures:`Mat ` data type. A sample usage of this you can see in case of the color images of the upper function. Nevertheless, it's important to note that the same operation (with the same runtime speed) could have been done with the :basicstructures:`at() ` function. It's just a less to write for the lazy programmer trick. diff --git a/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst b/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst index 938e504eb2..9340a7c748 100644 --- a/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst +++ b/doc/tutorials/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.rst @@ -6,7 +6,7 @@ Interoperability with OpenCV 1 Goal ==== -For the OpenCV developer team it's important to constantly improve the library. We are constantly thinking about methods that will ease your work process, while still maintain the libraries flexibility. 
The new C++ interface is a development of us that serves this goal. Nevertheless, backward compatibility remains important. We do not want to break your code written for earlier version of the OpenCV library. Therefore, we made sure that we add some functions that deal with this. In the following you'll learn: +For the OpenCV developer team it's important to constantly improve the library. We are constantly thinking about methods that will ease your work process, while still maintain the libraries flexibility. The new C++ interface is a development of us that serves this goal. Nevertheless, backward compatibility remains important. We do not want to break your code written for earlier version of the OpenCV library. Therefore, we made sure that we add some functions that deal with this. In the following you'll learn: .. container:: enumeratevisibleitemswithsquare @@ -17,9 +17,9 @@ For the OpenCV developer team it's important to constantly improve the library. General ======= -When making the switch you first need to learn some about the new data structure for images: :ref:`matTheBasicImageContainer`, this replaces the old *CvMat* and *IplImage* ones. Switching to the new functions is easier. You just need to remember a couple of new things. +When making the switch you first need to learn some about the new data structure for images: :ref:`matTheBasicImageContainer`, this replaces the old *CvMat* and *IplImage* ones. Switching to the new functions is easier. You just need to remember a couple of new things. -OpenCV 2 received reorganization. No longer are all the functions crammed into a single library. We have many modules, each of them containing data structures and functions relevant to certain tasks. This way you do not need to ship a large library if you use just a subset of OpenCV. This means that you should also include only those headers you will use. For example: +OpenCV 2 received reorganization. 
No longer are all the functions crammed into a single library. We have many modules, each of them containing data structures and functions relevant to certain tasks. This way you do not need to ship a large library if you use just a subset of OpenCV. This means that you should also include only those headers you will use. For example: .. code-block:: cpp @@ -28,13 +28,13 @@ OpenCV 2 received reorganization. No longer are all the functions crammed into a #include -All the OpenCV related stuff is put into the *cv* namespace to avoid name conflicts with other libraries data structures and functions. Therefore, either you need to prepend the *cv::* keyword before everything that comes from OpenCV or after the includes, you just add a directive to use this: +All the OpenCV related stuff is put into the *cv* namespace to avoid name conflicts with other libraries data structures and functions. Therefore, either you need to prepend the *cv::* keyword before everything that comes from OpenCV or after the includes, you just add a directive to use this: .. code-block:: cpp using namespace cv; // The new C++ interface API is inside this namespace. Import it. -Because the functions are already in a namespace there is no need for them to contain the *cv* prefix in their name. As such all the new C++ compatible functions don't have this and they follow the camel case naming rule. This means the first letter is small (unless it's a name, like Canny) and the subsequent words start with a capital letter (like *copyMakeBorder*). +Because the functions are already in a namespace there is no need for them to contain the *cv* prefix in their name. As such all the new C++ compatible functions don't have this and they follow the camel case naming rule. This means the first letter is small (unless it's a name, like Canny) and the subsequent words start with a capital letter (like *copyMakeBorder*). 
Now, remember that you need to link to your application all the modules you use, and in case you are on Windows using the *DLL* system you will need to add, again, to the path all the binaries. For more in-depth information if you're on Windows read :ref:`Windows_Visual_Studio_How_To` and for Linux an example usage is explained in :ref:`Linux_Eclipse_Usage`. @@ -42,7 +42,7 @@ Now for converting the *Mat* object you can use either the *IplImage* or the *Cv .. code-block:: cpp - Mat I; + Mat I; IplImage pI = I; CvMat mI = I; @@ -50,9 +50,9 @@ Now if you want pointers the conversion gets just a little more complicated. The .. code-block:: cpp - Mat I; - IplImage* pI = &I.operator IplImage(); - CvMat* mI = &I.operator CvMat(); + Mat I; + IplImage* pI = &I.operator IplImage(); + CvMat* mI = &I.operator CvMat(); One of the biggest complaints of the C interface is that it leaves all the memory management to you. You need to figure out when it is safe to release your unused objects and make sure you do so before the program finishes or you could have troublesome memory leeks. To work around this issue in OpenCV there is introduced a sort of smart pointer. This will automatically release the object when it's no longer in use. To use this declare the pointers as a specialization of the *Ptr* : @@ -60,11 +60,11 @@ One of the biggest complaints of the C interface is that it leaves all the memor Ptr piI = &I.operator IplImage(); -Converting from the C data structures to the *Mat* is done by passing these inside its constructor. For example: +Converting from the C data structures to the *Mat* is done by passing these inside its constructor. For example: .. 
code-block:: cpp - Mat K(piL), L; + Mat K(piL), L; L = Mat(pI); A case study @@ -79,7 +79,7 @@ Now that you have the basics done :download:`here's <../../../../samples/cpp/tut :tab-width: 4 :lines: 1-9, 22-25, 27-44 -Here you can observe that with the new structure we have no pointer problems, although it is possible to use the old functions and in the end just transform the result to a *Mat* object. +Here you can observe that with the new structure we have no pointer problems, although it is possible to use the old functions and in the end just transform the result to a *Mat* object. .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp :language: cpp @@ -87,7 +87,7 @@ Here you can observe that with the new structure we have no pointer problems, al :tab-width: 4 :lines: 46-51 -Because, we want to mess around with the images luma component we first convert from the default RGB to the YUV color space and then split the result up into separate planes. Here the program splits: in the first example it processes each plane using one of the three major image scanning algorithms in OpenCV (C [] operator, iterator, individual element access). In a second variant we add to the image some Gaussian noise and then mix together the channels according to some formula. +Because, we want to mess around with the images luma component we first convert from the default RGB to the YUV color space and then split the result up into separate planes. Here the program splits: in the first example it processes each plane using one of the three major image scanning algorithms in OpenCV (C [] operator, iterator, individual element access). In a second variant we add to the image some Gaussian noise and then mix together the channels according to some formula. 
The scanning version looks like: @@ -97,7 +97,7 @@ The scanning version looks like: :tab-width: 4 :lines: 55-75 -Here you can observe that we may go through all the pixels of an image in three fashions: an iterator, a C pointer and an individual element access style. You can read a more in-depth description of these in the :ref:`howToScanImagesOpenCV` tutorial. Converting from the old function names is easy. Just remove the cv prefix and use the new *Mat* data structure. Here's an example of this by using the weighted addition function: +Here you can observe that we may go through all the pixels of an image in three fashions: an iterator, a C pointer and an individual element access style. You can read a more in-depth description of these in the :ref:`howToScanImagesOpenCV` tutorial. Converting from the old function names is easy. Just remove the cv prefix and use the new *Mat* data structure. Here's an example of this by using the weighted addition function: .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp :language: cpp @@ -105,7 +105,7 @@ Here you can observe that we may go through all the pixels of an image in three :tab-width: 4 :lines: 79-112 -As you may observe the *planes* variable is of type *Mat*. However, converting from *Mat* to *IplImage* is easy and made automatically with a simple assignment operator. +As you may observe the *planes* variable is of type *Mat*. However, converting from *Mat* to *IplImage* is easy and made automatically with a simple assignment operator. .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp :language: cpp @@ -113,14 +113,14 @@ As you may observe the *planes* variable is of type *Mat*. However, converting f :tab-width: 4 :lines: 115-127 -The new *imshow* highgui function accepts both the *Mat* and *IplImage* data structures. 
Compile and run the program and if the first image below is your input you may get either the first or second as output: +The new *imshow* highgui function accepts both the *Mat* and *IplImage* data structures. Compile and run the program and if the first image below is your input you may get either the first or second as output: .. image:: images/outputInteropOpenCV1.jpg :alt: The output of the sample :align: center -You may observe a runtime instance of this on the `YouTube here `_ and you can :download:`download the source code from here <../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp` of the OpenCV source code library. +You may observe a runtime instance of this on the `YouTube here `_ and you can :download:`download the source code from here <../../../../samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp>` or find it in the :file:`samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp` of the OpenCV source code library. .. raw:: html diff --git a/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst b/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst index f5bacbf795..0549a9c12b 100644 --- a/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst +++ b/doc/tutorials/core/mat-mask-operations/mat-mask-operations.rst @@ -8,11 +8,11 @@ Mask operations on matrices are quite simple. The idea is that we recalculate ea Our test case ============= -Let us consider the issue of an image contrast enhancement method. Basically we want to apply for every pixel of the image the following formula: +Let us consider the issue of an image contrast enhancement method. Basically we want to apply for every pixel of the image the following formula: .. 
math:: - I(i,j) = 5*I(i,j) - [ I(i-1,j) + I(i+1,j) + I(i,j-1) + I(i,j+1)] + I(i,j) = 5*I(i,j) - [ I(i-1,j) + I(i+1,j) + I(i,j-1) + I(i,j+1)] \iff I(i,j)*M, \text{where } M = \bordermatrix{ _i\backslash ^j & -1 & 0 & +1 \cr @@ -23,12 +23,12 @@ Let us consider the issue of an image contrast enhancement method. Basically we The first notation is by using a formula, while the second is a compacted version of the first by using a mask. You use the mask by putting the center of the mask matrix (in the upper case noted by the zero-zero index) on the pixel you want to calculate and sum up the pixel values multiplied with the overlapped matrix values. It's the same thing, however in case of large matrices the latter notation is a lot easier to look over. -Now let us see how we can make this happen by using the basic pixel access method or by using the :filtering:`filter2D ` function. +Now let us see how we can make this happen by using the basic pixel access method or by using the :filtering:`filter2D ` function. The Basic Method ================ -Here's a function that will do this: +Here's a function that will do this: .. code-block:: cpp @@ -49,7 +49,7 @@ Here's a function that will do this: for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i) { - *output++ = saturate_cast(5*current[i] + *output++ = saturate_cast(5*current[i] -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]); } } @@ -87,7 +87,7 @@ We'll use the plain C [] operator to access pixels. Because we need to access mu for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i) { - *output++ = saturate_cast(5*current[i] + *output++ = saturate_cast(5*current[i] -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]); } } @@ -96,7 +96,7 @@ On the borders of the image the upper notation results inexistent pixel location .. 
code-block:: cpp - Result.row(0).setTo(Scalar(0)); // The top row + Result.row(0).setTo(Scalar(0)); // The top row Result.row(Result.rows-1).setTo(Scalar(0)); // The bottom row Result.col(0).setTo(Scalar(0)); // The left column Result.col(Result.cols-1).setTo(Scalar(0)); // The right column @@ -108,19 +108,19 @@ Applying such filters are so common in image processing that in OpenCV there exi .. code-block:: cpp - Mat kern = (Mat_(3,3) << 0, -1, 0, + Mat kern = (Mat_(3,3) << 0, -1, 0, -1, 5, -1, 0, -1, 0); -Then call the :filtering:`filter2D ` function specifying the input, the output image and the kernell to use: +Then call the :filtering:`filter2D ` function specifying the input, the output image and the kernel to use: .. code-block:: cpp - filter2D(I, K, I.depth(), kern ); + filter2D(I, K, I.depth(), kern ); The function even has a fifth optional argument to specify the center of the kernel, and a sixth one for determining what to do in the regions where the operation is undefined (borders). Using this function has the advantage that it's shorter, less verbose and because there are some optimization techniques implemented it is usually faster than the *hand-coded method*. For example in my test while the second one took only 13 milliseconds the first took around 31 milliseconds. Quite some difference. -For example: +For example: .. image:: images/resultMatMaskFilter2D.png :alt: A sample output of the program :align: center You can download this source code from :download:`here <../../../../samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp>` or look in the OpenCV source code libraries sample directory at :file:`samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp`. -Check out an instance of running the program on our `YouTube channel `_ . +Check out an instance of running the program on our `YouTube channel `_ . ..
raw:: html diff --git a/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst b/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst index 7b3f8603fc..03d82bbd46 100644 --- a/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst +++ b/doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.rst @@ -19,15 +19,15 @@ For example in the above image you can see that the mirror of the car is nothing OpenCV has been around since 2001. In those days the library was built around a *C* interface and to store the image in the memory they used a C structure called *IplImage*. This is the one you'll see in most of the older tutorials and educational materials. The problem with this is that it brings to the table all the minuses of the C language. The biggest issue is the manual memory management. It builds on the assumption that the user is responsible for taking care of memory allocation and deallocation. While this is not a problem with smaller programs, once your code base grows it will be more of a struggle to handle all this rather than focusing on solving your development goal. -Luckily C++ came around and introduced the concept of classes making easier for the user through automatic memory management (more or less). The good news is that C++ is fully compatible with C so no compatibility issues can arise from making the change. Therefore, OpenCV 2.0 introduced a new C++ interface which offered a new way of doing things which means you do not need to fiddle with memory management, making your code concise (less to write, to achieve more). The main downside of the C++ interface is that many embedded development systems at the moment support only C. Therefore, unless you are targeting embedded platforms, there's no point to using the *old* methods (unless you're a masochist programmer and you're asking for trouble). 
+Luckily C++ came around and introduced the concept of classes making easier for the user through automatic memory management (more or less). The good news is that C++ is fully compatible with C so no compatibility issues can arise from making the change. Therefore, OpenCV 2.0 introduced a new C++ interface which offered a new way of doing things which means you do not need to fiddle with memory management, making your code concise (less to write, to achieve more). The main downside of the C++ interface is that many embedded development systems at the moment support only C. Therefore, unless you are targeting embedded platforms, there's no point to using the *old* methods (unless you're a masochist programmer and you're asking for trouble). The first thing you need to know about *Mat* is that you no longer need to manually allocate its memory and release it as soon as you do not need it. While doing this is still a possibility, most of the OpenCV functions will allocate its output data manually. As a nice bonus if you pass on an already existing *Mat* object, which has already allocated the required space for the matrix, this will be reused. In other words we use at all times only as much memory as we need to perform the task. -*Mat* is basically a class with two data parts: the matrix header (containing information such as the size of the matrix, the method used for storing, at which address is the matrix stored, and so on) and a pointer to the matrix containing the pixel values (taking any dimensionality depending on the method chosen for storing) . The matrix header size is constant, however the size of the matrix itself may vary from image to image and usually is larger by orders of magnitude. 
+*Mat* is basically a class with two data parts: the matrix header (containing information such as the size of the matrix, the method used for storing, at which address is the matrix stored, and so on) and a pointer to the matrix containing the pixel values (taking any dimensionality depending on the method chosen for storing) . The matrix header size is constant, however the size of the matrix itself may vary from image to image and usually is larger by orders of magnitude. OpenCV is an image processing library. It contains a large collection of image processing functions. To solve a computational challenge, most of the time you will end up using multiple functions of the library. Because of this, passing images to functions is a common practice. We should not forget that we are talking about image processing algorithms, which tend to be quite computational heavy. The last thing we want to do is further decrease the speed of your program by making unnecessary copies of potentially *large* images. -To tackle this issue OpenCV uses a reference counting system. The idea is that each *Mat* object has its own header, however the matrix may be shared between two instance of them by having their matrix pointers point to the same address. Moreover, the copy operators **will only copy the headers** and the pointer to the large matrix, not the data itself. +To tackle this issue OpenCV uses a reference counting system. The idea is that each *Mat* object has its own header, however the matrix may be shared between two instance of them by having their matrix pointers point to the same address. Moreover, the copy operators **will only copy the headers** and the pointer to the large matrix, not the data itself. .. code-block:: cpp :linenos: @@ -39,21 +39,21 @@ To tackle this issue OpenCV uses a reference counting system. The idea is that e C = A; // Assignment operator -All the above objects, in the end, point to the same single data matrix. 
Their headers are different, however, and making a modification using any of them will affect all the other ones as well. In practice the different objects just provide different access method to the same underlying data. Nevertheless, their header parts are different. The real interesting part is that you can create headers which refer to only a subsection of the full data. For example, to create a region of interest (*ROI*) in an image you just create a new header with the new boundaries: +All the above objects, in the end, point to the same single data matrix. Their headers are different, however, and making a modification using any of them will affect all the other ones as well. In practice the different objects just provide different access method to the same underlying data. Nevertheless, their header parts are different. The real interesting part is that you can create headers which refer to only a subsection of the full data. For example, to create a region of interest (*ROI*) in an image you just create a new header with the new boundaries: .. code-block:: cpp :linenos: Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle - Mat E = A(Range:all(), Range(1,3)); // using row and column boundaries + Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries Now you may ask if the matrix itself may belong to multiple *Mat* objects who takes responsibility for cleaning it up when it's no longer needed. The short answer is: the last object that used it. This is handled by using a reference counting mechanism. Whenever somebody copies a header of a *Mat* object, a counter is increased for the matrix. Whenever a header is cleaned this counter is decreased. When the counter reaches zero the matrix too is freed. Sometimes you will want to copy the matrix itself too, so OpenCV provides the :basicstructures:`clone() ` and :basicstructures:`copyTo() ` functions. ..
code-block:: cpp :linenos: - Mat F = A.clone(); - Mat G; + Mat F = A.clone(); + Mat G; A.copyTo(G); Now modifying *F* or *G* will not affect the matrix pointed by the *Mat* header. What you need to remember from all this is that: @@ -66,19 +66,19 @@ Now modifying *F* or *G* will not affect the matrix pointed by the *Mat* header. * The underlying matrix of an image may be copied using the :basicstructures:`clone()` and :basicstructures:`copyTo() ` functions. *Storing* methods -================= +================= -This is about how you store the pixel values. You can select the color space and the data type used. The color space refers to how we combine color components in order to code a given color. The simplest one is the gray scale where the colors at our disposal are black and white. The combination of these allows us to create many shades of gray. +This is about how you store the pixel values. You can select the color space and the data type used. The color space refers to how we combine color components in order to code a given color. The simplest one is the gray scale where the colors at our disposal are black and white. The combination of these allows us to create many shades of gray. -For *colorful* ways we have a lot more methods to choose from. Each of them breaks it down to three or four basic components and we can use the combination of these to create the others. The most popular one is RGB, mainly because this is also how our eye builds up colors. Its base colors are red, green and blue. To code the transparency of a color sometimes a fourth element: alpha (A) is added. +For *colorful* ways we have a lot more methods to choose from. Each of them breaks it down to three or four basic components and we can use the combination of these to create the others. The most popular one is RGB, mainly because this is also how our eye builds up colors. Its base colors are red, green and blue. 
To code the transparency of a color sometimes a fourth element: alpha (A) is added. There are, however, many other color systems each with their own advantages: .. container:: enumeratevisibleitemswithsquare * RGB is the most common as our eyes use something similar, our display systems also compose colors using these. - * The HSV and HLS decompose colors into their hue, saturation and value/luminance components, which is a more natural way for us to describe colors. You might, for example, dismiss the last component, making your algorithm less sensible to the light conditions of the input image. - * YCrCb is used by the popular JPEG image format. + * The HSV and HLS decompose colors into their hue, saturation and value/luminance components, which is a more natural way for us to describe colors. You might, for example, dismiss the last component, making your algorithm less sensible to the light conditions of the input image. + * YCrCb is used by the popular JPEG image format. * CIE L*a*b* is a perceptually uniform color space, which comes handy if you need to measure the *distance* of a given color to another color. Each of the building components has their own valid domains. This leads to the data type used. How we store a component defines the control we have over its domain. The smallest data type possible is *char*, which means one byte or 8 bits. This may be unsigned (so can store values from 0 to 255) or signed (values from -127 to +127). Although in case of three components this already gives 16 million possible colors to represent (like in case of RGB) we may acquire an even finer control by using the float (4 byte = 32 bit) or double (8 byte = 64 bit) data types for each component. Nevertheless, remember that increasing the size of a component also increases the size of the whole picture in the memory. @@ -86,13 +86,13 @@ Each of the building components has their own valid domains. 
This leads to the d Creating a *Mat* object explicitly ================================== -In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readWriteImageVideo:` imwrite() ` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices. +In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readWriteImageVideo:` imwrite() ` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices. Although *Mat* works really well as an image container, it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. You can create a Mat object in multiple ways: .. container:: enumeratevisibleitemswithsquare - + :basicstructures:`Mat() ` Constructor + + :basicstructures:`Mat() ` Constructor .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp :language: cpp @@ -105,7 +105,7 @@ Although *Mat* works really well as an image container, it is also a general mat For two dimensional and multichannel images we first define their size: row and column count wise. - Then we need to specify the data type to use for storing the elements and the number of channels per matrix point. To do this we have multiple definitions constructed according to the following convention: + Then we need to specify the data type to use for storing the elements and the number of channels per matrix point. To do this we have multiple definitions constructed according to the following convention: .. 
code-block:: cpp @@ -176,7 +176,7 @@ Although *Mat* works really well as an image container, it is also a general mat :alt: Demo image of the matrix output :align: center -.. note:: +.. note:: You can fill out a matrix with random values using the :operationsOnArrays:`randu() ` function. You need to give the lower and upper value for the random values: @@ -189,11 +189,11 @@ Although *Mat* works really well as an image container, it is also a general mat Output formatting ================= -In the above examples you could see the default formatting option. OpenCV, however, allows you to format your matrix output: +In the above examples you could see the default formatting option. OpenCV, however, allows you to format your matrix output: .. container:: enumeratevisibleitemswithsquare - + Default + + Default .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp :language: cpp @@ -215,7 +215,7 @@ In the above examples you could see the default formatting option. OpenCV, howev :alt: Default Output :align: center - + Comma separated values (CSV) + + Comma separated values (CSV) .. literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp :language: cpp @@ -255,7 +255,7 @@ OpenCV offers support for output of other common OpenCV data structures too via .. container:: enumeratevisibleitemswithsquare - + 2D Point + + 2D Point .. 
literalinclude:: ../../../../samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp :language: cpp diff --git a/doc/tutorials/core/table_of_content_core/table_of_content_core.rst b/doc/tutorials/core/table_of_content_core/table_of_content_core.rst index ddd8ef201d..b50d97635f 100644 --- a/doc/tutorials/core/table_of_content_core/table_of_content_core.rst +++ b/doc/tutorials/core/table_of_content_core/table_of_content_core.rst @@ -44,7 +44,7 @@ Here you will learn the about the basic building blocks of the library. A must r .. |HowScanImag| image:: images/howToScanImages.jpg :height: 90pt :width: 90pt - + + .. tabularcolumns:: m{100pt} m{300pt} @@ -193,7 +193,7 @@ Here you will learn the about the basic building blocks of the library. A must r *Author:* |Author_BernatG| Did you used OpenCV before its 2.0 version? Do you wanna know what happened with your library with 2.0? Don't you know how to convert your old OpenCV programs to the new C++ interface? Look here to shed light on all this questions. - + =============== ====================================================== .. |InterOOpenCV1| image:: images/interopOpenCV1.png @@ -208,7 +208,7 @@ Here you will learn the about the basic building blocks of the library. A must r .. toctree:: :hidden: - + ../mat_the_basic_image_container/mat_the_basic_image_container ../how_to_scan_images/how_to_scan_images ../mat-mask-operations/mat-mask-operations diff --git a/doc/tutorials/definitions/tocDefinitions.rst b/doc/tutorials/definitions/tocDefinitions.rst index 946dbb0a47..4695623cca 100644 --- a/doc/tutorials/definitions/tocDefinitions.rst +++ b/doc/tutorials/definitions/tocDefinitions.rst @@ -3,7 +3,7 @@ .. |Author_AndreyK| unicode:: Andrey U+0020 Kamaev .. |Author_LeonidBLB| unicode:: Leonid U+0020 Beynenson .. |Author_VsevolodG| unicode:: Vsevolod U+0020 Glumov -.. |Author_VictorE| unicode:: Victor U+0020 Eruhimov +.. |Author_VictorE| unicode:: Victor U+0020 Eruhimov .. 
|Author_ArtemM| unicode:: Artem U+0020 Myagkov .. |Author_FernandoI| unicode:: Fernando U+0020 Iglesias U+0020 Garc U+00ED a .. |Author_EduardF| unicode:: Eduard U+0020 Feicho diff --git a/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst b/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst index 48d93ce0f8..009d537d51 100644 --- a/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst +++ b/doc/tutorials/features2d/detection_of_planar_objects/detection_of_planar_objects.rst @@ -5,9 +5,9 @@ Detection of planar objects .. highlight:: cpp -The goal of this tutorial is to learn how to use *features2d* and *calib3d* modules for detecting known planar objects in scenes. +The goal of this tutorial is to learn how to use *features2d* and *calib3d* modules for detecting known planar objects in scenes. -*Test data*: use images in your data folder, for instance, ``box.png`` and ``box_in_scene.png``. +*Test data*: use images in your data folder, for instance, ``box.png`` and ``box_in_scene.png``. #. Create a new console project. Read two input images. :: @@ -22,7 +22,7 @@ The goal of this tutorial is to learn how to use *features2d* and *calib3d* modu FastFeatureDetector detector(15); vector keypoints1; detector.detect(img1, keypoints1); - + ... // do the same for the second image #. @@ -32,7 +32,7 @@ The goal of this tutorial is to learn how to use *features2d* and *calib3d* modu SurfDescriptorExtractor extractor; Mat descriptors1; extractor.compute(img1, keypoints1, descriptors1); - + ... // process keypoints from the second image as well #. @@ -69,4 +69,4 @@ The goal of this tutorial is to learn how to use *features2d* and *calib3d* modu perspectiveTransform(Mat(points1), points1Projected, H); #. - Use ``drawMatches`` for drawing inliers. + Use ``drawMatches`` for drawing inliers. 
diff --git a/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst b/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst index be0f515b55..cc90082b85 100644 --- a/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst +++ b/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.rst @@ -5,166 +5,166 @@ Learn about how to use the feature points detectors, descriptors and matching framework found inside OpenCV. -.. include:: ../../definitions/tocDefinitions.rst +.. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |Harris| **Title:** :ref:`harris_detector` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Why is it a good idea to track corners? We learn to use the Harris method to detect corners - + ===================== ============================================== - + .. |Harris| image:: images/trackingmotion/Harris_Detector_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |ShiTomasi| **Title:** :ref:`good_features_to_track` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we use an improved method to detect corners more accuratelyI - + ===================== ============================================== - + .. |ShiTomasi| image:: images/trackingmotion/Shi_Tomasi_Detector_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv ===================== ============================================== |GenericCorner| **Title:** :ref:`generic_corner_detector` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Here you will learn how to use OpenCV functions to make your personalized corner detector! - + ===================== ============================================== - + .. |GenericCorner| image:: images/trackingmotion/Generic_Corner_Detector_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |Subpixel| **Title:** :ref:`corner_subpixeles` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Is pixel resolution enough? Here we learn a simple method to improve our accuracy. - + ===================== ============================================== - + .. |Subpixel| image:: images/trackingmotion/Corner_Subpixeles_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |FeatureDetect| **Title:** :ref:`feature_detection` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + In this tutorial, you will use *features2d* to detect interest points. - + ===================== ============================================== - + .. |FeatureDetect| image:: images/Feature_Detection_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |FeatureDescript| **Title:** :ref:`feature_description` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + In this tutorial, you will use *features2d* to calculate feature vectors. - + ===================== ============================================== - + .. 
|FeatureDescript| image:: images/Feature_Description_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |FeatureFlann| **Title:** :ref:`feature_flann_matcher` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + In this tutorial, you will use the FLANN library to make a fast matching. - + ===================== ============================================== - + .. |FeatureFlann| image:: images/Feature_Flann_Matcher_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |FeatureHomo| **Title:** :ref:`feature_homography` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + In this tutorial, you will use *features2d* and *calib3d* to detect an object in a scene. - + ===================== ============================================== - + .. |FeatureHomo| image:: images/Feature_Homography_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -175,7 +175,7 @@ Learn about how to use the feature points detectors, descriptors and matching f *Author:* |Author_VictorE| - You will use *features2d* and *calib3d* modules for detecting known planar objects in scenes. + You will use *features2d* and *calib3d* modules for detecting known planar objects in scenes. 
===================== ============================================== diff --git a/doc/tutorials/gpu/table_of_content_gpu/table_of_content_gpu.rst b/doc/tutorials/gpu/table_of_content_gpu/table_of_content_gpu.rst index a3fef43282..91b25833bc 100644 --- a/doc/tutorials/gpu/table_of_content_gpu/table_of_content_gpu.rst +++ b/doc/tutorials/gpu/table_of_content_gpu/table_of_content_gpu.rst @@ -7,7 +7,7 @@ Squeeze out every little computation power from your system by using the power o .. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -18,7 +18,7 @@ Squeeze out every little computation power from your system by using the power o *Author:* |Author_BernatG| - This will give a good grasp on how to approach coding on the GPU module, once you already know how to handle the other modules. As a test case it will port the similarity methods from the tutorial :ref:`videoInputPSNRMSSIM` to the GPU. + This will give a good grasp on how to approach coding on the GPU module, once you already know how to handle the other modules. As a test case it will port the similarity methods from the tutorial :ref:`videoInputPSNRMSSIM` to the GPU. =============== ====================================================== diff --git a/doc/tutorials/highgui/table_of_content_highgui/table_of_content_highgui.rst b/doc/tutorials/highgui/table_of_content_highgui/table_of_content_highgui.rst index 36fbc92da0..0ba7c323cb 100644 --- a/doc/tutorials/highgui/table_of_content_highgui/table_of_content_highgui.rst +++ b/doc/tutorials/highgui/table_of_content_highgui/table_of_content_highgui.rst @@ -3,30 +3,30 @@ *highgui* module. High Level GUI and Media ------------------------------------------ -This section contains valuable tutorials about how to read/save your image/video files and how to use the built-in graphical user interface of the library. 
+This section contains valuable tutorials about how to read/save your image/video files and how to use the built-in graphical user interface of the library. .. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + =============== ====================================================== |Beginners_5| *Title:* :ref:`Adding_Trackbars` - + *Compatibility:* > OpenCV 2.0 *Author:* |Author_AnaH| - + We will learn how to add a Trackbar to our applications - + =============== ====================================================== - + .. |Beginners_5| image:: images/Adding_Trackbars_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -34,7 +34,7 @@ This section contains valuable tutorials about how to read/save your image/video |hVideoInput| *Title:* :ref:`videoInputPSNRMSSIM` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_BernatG| You will learn how to read video streams, and how to calculate similarity values such as PSNR or SSIM. @@ -45,7 +45,7 @@ This section contains valuable tutorials about how to read/save your image/video :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv diff --git a/doc/tutorials/highgui/trackbar/trackbar.rst b/doc/tutorials/highgui/trackbar/trackbar.rst index 21f8b3687c..d6f7202ce1 100644 --- a/doc/tutorials/highgui/trackbar/trackbar.rst +++ b/doc/tutorials/highgui/trackbar/trackbar.rst @@ -5,11 +5,11 @@ Adding a Trackbar to our applications! * In the previous tutorials (about *linear blending* and the *brightness and contrast adjustments*) you might have noted that we needed to give some **input** to our programs, such as :math:`\alpha` and :math:`beta`. We accomplished that by entering this data using the Terminal -* Well, it is time to use some fancy GUI tools. OpenCV provides some GUI utilities (*highgui.h*) for you. 
An example of this is a **Trackbar** +* Well, it is time to use some fancy GUI tools. OpenCV provides some GUI utilities (*highgui.h*) for you. An example of this is a **Trackbar** .. image:: images/Adding_Trackbars_Tutorial_Trackbar.png :alt: Trackbar example - :align: center + :align: center * In this tutorial we will just modify our two previous programs so that they get the input information from the trackbar. @@ -19,7 +19,7 @@ Goals In this tutorial you will learn how to: -* Add a Trackbar in an OpenCV window by using :create_trackbar:`createTrackbar <>` +* Add a Trackbar in an OpenCV window by using :create_trackbar:`createTrackbar <>` Code ===== @@ -33,13 +33,13 @@ Let's modify the program made in the tutorial :ref:`Adding_Images`. We will let using namespace cv; - /// Global Variables + /// Global Variables const int alpha_slider_max = 100; - int alpha_slider; + int alpha_slider; double alpha; - double beta; + double beta; - /// Matrices to store images + /// Matrices to store images Mat src1; Mat src2; Mat dst; @@ -49,12 +49,12 @@ Let's modify the program made in the tutorial :ref:`Adding_Images`. We will let * @brief Callback for trackbar */ void on_trackbar( int, void* ) - { + { alpha = (double) alpha_slider/alpha_slider_max ; beta = ( 1.0 - alpha ); addWeighted( src1, alpha, src2, beta, 0.0, dst); - + imshow( "Linear Blend", dst ); } @@ -67,7 +67,7 @@ Let's modify the program made in the tutorial :ref:`Adding_Images`. We will let if( !src1.data ) { printf("Error loading src1 \n"); return -1; } if( !src2.data ) { printf("Error loading src2 \n"); return -1; } - /// Initialize values + /// Initialize values alpha_slider = 0; /// Create Windows @@ -75,13 +75,13 @@ Let's modify the program made in the tutorial :ref:`Adding_Images`. 
We will let /// Create Trackbars char TrackbarName[50]; - sprintf( TrackbarName, "Alpha x %d", alpha_slider_max ); + sprintf( TrackbarName, "Alpha x %d", alpha_slider_max ); createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar ); /// Show some stuff on_trackbar( alpha_slider, 0 ); - + /// Wait until user press some key waitKey(0); return 0; @@ -113,7 +113,7 @@ We only analyze the code that is related to Trackbar: createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar ); Note the following: - + * Our Trackbar has a label **TrackbarName** * The Trackbar is located in the window named **"Linear Blend"** * The Trackbar values will be in the range from :math:`0` to **alpha_slider_max** (the minimum limit is always **zero**). @@ -125,21 +125,21 @@ We only analyze the code that is related to Trackbar: .. code-block:: cpp void on_trackbar( int, void* ) - { + { alpha = (double) alpha_slider/alpha_slider_max ; beta = ( 1.0 - alpha ); addWeighted( src1, alpha, src2, beta, 0.0, dst); - + imshow( "Linear Blend", dst ); } Note that: - - * We use the value of **alpha_slider** (integer) to get a double value for **alpha**. + + * We use the value of **alpha_slider** (integer) to get a double value for **alpha**. * **alpha_slider** is updated each time the trackbar is displaced by the user. * We define *src1*, *src2*, *dist*, *alpha*, *alpha_slider* and *beta* as global variables, so they can be used everywhere. - + Result ======= @@ -147,13 +147,13 @@ Result .. image:: images/Adding_Trackbars_Tutorial_Result_0.jpg :alt: Adding Trackbars - Windows Linux - :align: center + :align: center * As a manner of practice, you can also add 02 trackbars for the program made in :ref:`Basic_Linear_Transform`. One trackbar to set :math:`\alpha` and another for :math:`\beta`. The output might look like: .. 
image:: images/Adding_Trackbars_Tutorial_Result_1.jpg :alt: Adding Trackbars - Lena - :align: center + :align: center diff --git a/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst b/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst index b9c1201b8f..6f5476cf05 100644 --- a/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst +++ b/doc/tutorials/highgui/video-input-psnr-ssim/video-input-psnr-ssim.rst @@ -64,7 +64,7 @@ Closing the video is automatic when the objects destructor is called. However, i captRefrnc >> frameReference; captUndTst.open(frameUnderTest); -The upper read operations will leave empty the *Mat* objects if no frame could be acquired (either cause the video stream was closed or you got to the end of the video file). We can check this with a simple if: +The upper read operations will leave empty the *Mat* objects if no frame could be acquired (either cause the video stream was closed or you got to the end of the video file). We can check this with a simple if: .. code-block:: cpp @@ -111,7 +111,7 @@ Then the PSNR is expressed as: PSNR = 10 \cdot \log_{10} \left( \frac{MAX_I^2}{MSE} \right) -Here the :math:`MAX_I^2` is the maximum valid value for a pixel. In case of the simple single byte image per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in an invalid divide by zero operation in the PSNR formula. In this case the PSNR is undefined and as we'll need to handle this case separately. The transition to a logarithmic scale is made because the pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks like: +Here the :math:`MAX_I^2` is the maximum valid value for a pixel. In case of the simple single byte image per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in an invalid divide by zero operation in the PSNR formula. 
In this case the PSNR is undefined and as we'll need to handle this case separately. The transition to a logarithmic scale is made because the pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks like: .. code-block:: cpp @@ -136,13 +136,13 @@ Here the :math:`MAX_I^2` is the maximum valid value for a pixel. In case of the } } -Typically result values are anywhere between 30 and 50 for video compression, where higher is better. If the images significantly differ you'll get much lower ones like 15 and so. This similarity check is easy and fast to calculate, however in practice it may turn out somewhat inconsistent with human eye perception. The **structural similarity** algorithm aims to correct this. +Typically result values are anywhere between 30 and 50 for video compression, where higher is better. If the images significantly differ you'll get much lower ones like 15 and so. This similarity check is easy and fast to calculate, however in practice it may turn out somewhat inconsistent with human eye perception. The **structural similarity** algorithm aims to correct this. -Describing the methods goes well beyond the purpose of this tutorial. For that I invite you to read the article introducing it. Nevertheless, you can get a good image of it by looking at the OpenCV implementation below. +Describing the methods goes well beyond the purpose of this tutorial. For that I invite you to read the article introducing it. Nevertheless, you can get a good image of it by looking at the OpenCV implementation below. .. seealso:: - SSIM is described more in-depth in the: "Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality assessment: From error visibility to structural similarity," IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004." article. + SSIM is described more in-depth in the: "Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. 
Simoncelli, "Image quality assessment: From error visibility to structural similarity," IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004." article. .. code-block:: cpp @@ -162,7 +162,7 @@ Describing the methods goes well beyond the purpose of this tutorial. For that I /***********************PRELIMINARY COMPUTING ******************************/ - Mat mu1, mu2; // + Mat mu1, mu2; // GaussianBlur(I1, mu1, Size(11, 11), 1.5); GaussianBlur(I2, mu2, Size(11, 11), 1.5); @@ -199,7 +199,7 @@ Describing the methods goes well beyond the purpose of this tutorial. For that I return mssim; } -This will return a similarity index for each channel of the image. This value is between zero and one, where one corresponds to perfect fit. Unfortunately, the many Gaussian blurring is quite costly, so while the PSNR may work in a real time like environment (24 frame per second) this will take significantly more than to accomplish similar performance results. +This will return a similarity index for each channel of the image. This value is between zero and one, where one corresponds to perfect fit. Unfortunately, the many Gaussian blurring is quite costly, so while the PSNR may work in a real time like environment (24 frame per second) this will take significantly more than to accomplish similar performance results. Therefore, the source code presented at the start of the tutorial will perform the PSNR measurement for each frame, and the SSIM only for the frames where the PSNR falls below an input value. For visualization purpose we show both images in an OpenCV window and print the PSNR and MSSIM values to the console. Expect to see something like: @@ -207,7 +207,7 @@ Therefore, the source code presented at the start of the tutorial will perform t :alt: A sample output :align: center -You may observe a runtime instance of this on the `YouTube here `_. +You may observe a runtime instance of this on the `YouTube here `_. .. 
raw:: html diff --git a/doc/tutorials/imgproc/table_of_content_imgproc/table_of_content_imgproc.rst b/doc/tutorials/imgproc/table_of_content_imgproc/table_of_content_imgproc.rst index 011dedd363..1be239cfb1 100644 --- a/doc/tutorials/imgproc/table_of_content_imgproc/table_of_content_imgproc.rst +++ b/doc/tutorials/imgproc/table_of_content_imgproc/table_of_content_imgproc.rst @@ -7,502 +7,502 @@ In this section you will learn about the image processing (manipulation) functio .. include:: ../../definitions/tocDefinitions.rst - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |ImageProcessing_1| **Title:** :ref:`Smoothing` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Let's take a look at some basic linear filters! - + ===================== ============================================== - + .. |ImageProcessing_1| image:: images/Smoothing_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |ImageProcessing_2| **Title:** :ref:`Morphology_1` - + *Compatibility:* > OpenCV 2.0 - + Author: |Author_AnaH| - + Let's *change* the shape of objects! - + ===================== ============================================== - + .. |ImageProcessing_2| image:: images/Morphology_1_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ================= ================================================== |Morphology_2| **Title:** :ref:`Morphology_2` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Here we investigate different morphology operators - + ================= ================================================== - + .. |Morphology_2| image:: images/Morphology_2_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. 
tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |Pyramids| **Title:** :ref:`Pyramids` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + What if I need a bigger/smaller image? - + ===================== ============================================== - + .. |Pyramids| image:: images/Pyramids_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |Threshold| **Title:** :ref:`Basic_Threshold` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + After so much processing, it is time to decide which pixels stay! - + ===================== ============================================== - + .. |Threshold| image:: images/Threshold_Tutorial_Cover.jpg :height: 90pt :width: 90pt - + .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - -+ + ++ ===================== ============================================== |Filter_2D| **Title:** :ref:`filter_2d` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn to design our own filters by using OpenCV functions - + ===================== ============================================== - + .. |Filter_2D| image:: images/imgtrans/Filter_2D_Tutorial_Cover.jpg :height: 90pt :width: 90pt - + .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - -+ + ++ ===================== ============================================== |CopyMakeBorder| **Title:** :ref:`copyMakeBorderTutorial` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to pad our images! - + ===================== ============================================== - + .. |CopyMakeBorder| image:: images/imgtrans/CopyMakeBorder_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv - + ===================== ============================================== |SobelDerivatives| **Title:** :ref:`sobel_derivatives` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to calculate gradients and use them to detect edges! - + ===================== ============================================== - + .. |SobelDerivatives| image:: images/imgtrans/Sobel_Derivatives_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |LaplaceOperator| **Title:** :ref:`laplace_operator` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn about the *Laplace* operator and how to detect edges with it. - + ===================== ============================================== - + .. |LaplaceOperator| image:: images/imgtrans/Laplace_Operator_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |CannyDetector| **Title:** :ref:`canny_detector` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn a sophisticated alternative to detect edges. - + ===================== ============================================== - + .. |CannyDetector| image:: images/imgtrans/Canny_Detector_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |HoughLines| **Title:** :ref:`hough_lines` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to detect lines - + ===================== ============================================== - + .. |HoughLines| image:: images/imgtrans/Hough_Lines_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. 
tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |HoughCircle| **Title:** :ref:`hough_circle` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to detect circles - + ===================== ============================================== - + .. |HoughCircle| image:: images/imgtrans/Hough_Circle_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |Remap| **Title:** :ref:`remap` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to manipulate pixels locations - + ===================== ============================================== - + .. |Remap| image:: images/imgtrans/Remap_Tutorial_Cover.jpg :height: 90pt :width: 90pt - -+ + ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |WarpAffine| **Title:** :ref:`warp_affine` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Where we learn how to rotate, translate and scale our images - + ===================== ============================================== - + .. |WarpAffine| image:: images/imgtrans/Warp_Affine_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |HistEqualization| **Title:** :ref:`histogram_equalization` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to improve the contrast in our images ===================== ============================================== - + .. |HistEqualization| image:: images/histograms/Histogram_Equalization_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. 
cssclass:: toctableopencv - + ===================== ============================================== |HistCalculation| **Title:** :ref:`histogram_calculation` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to create and generate histograms ===================== ============================================== - + .. |HistCalculation| image:: images/histograms/Histogram_Calculation_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |HistComparison| **Title:** :ref:`histogram_comparison` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn to calculate metrics between histograms ===================== ============================================== - + .. |HistComparison| image:: images/histograms/Histogram_Comparison_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |BackProjection| **Title:** :ref:`back_projection` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to use histograms to find similar objects in images ===================== ============================================== - + .. |BackProjection| image:: images/histograms/Back_Projection_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |TemplateMatching| **Title:** :ref:`template_matching` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to match templates in an image ===================== ============================================== - + .. |TemplateMatching| image:: images/histograms/Template_Matching_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. 
tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |FindContours| **Title:** :ref:`find_contours` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to find contours of objects in our image ===================== ============================================== - + .. |FindContours| image:: images/shapedescriptors/Find_Contours_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |Hull| **Title:** :ref:`hull` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to get hull contours and draw them! ===================== ============================================== - + .. |Hull| image:: images/shapedescriptors/Hull_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - + ===================== ============================================== |BRC| **Title:** :ref:`bounding_rects_circles` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to obtain bounding boxes and circles for our contours. ===================== ============================================== - + .. |BRC| image:: images/shapedescriptors/Bounding_Rects_Circles_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - - + + ===================== ============================================== |BRE| **Title:** :ref:`bounding_rotated_ellipses` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to obtain rotated bounding boxes and ellipses for our contours. ===================== ============================================== - + .. |BRE| image:: images/shapedescriptors/Bounding_Rotated_Ellipses_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. 
tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - - + + ===================== ============================================== |MU| **Title:** :ref:`moments` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn to calculate the moments of an image ===================== ============================================== - + .. |MU| image:: images/shapedescriptors/Moments_Tutorial_Cover.jpg :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv - - + + ===================== ============================================== |PPT| **Title:** :ref:`point_polygon_test` *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| Where we learn how to calculate distances from the image to contours ===================== ============================================== - + .. |PPT| image:: images/shapedescriptors/Point_Polygon_Test_Tutorial_Cover.jpg :height: 90pt :width: 90pt diff --git a/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst b/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst index 368c3b2b76..243dc35dd8 100644 --- a/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst +++ b/doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst @@ -6,7 +6,7 @@ Android Development with OpenCV This tutorial has been created to help you use OpenCV library within your Android project. -This guide was written with Windows 7 in mind, though it should work with any other OS supported by +This guide was written with Windows 7 in mind, though it should work with any other OS supported by OpenCV4Android SDK. This tutorial assumes you have the following installed and configured: @@ -23,22 +23,22 @@ This tutorial assumes you have the following installed and configured: If you need help with anything of the above, you may refer to our :ref:`android_dev_intro` guide. 
-This tutorial also assumes you have OpenCV4Android SDK already installed on your development -machine and OpenCV Manager on your testing device correspondingly. If you need help with any of +This tutorial also assumes you have OpenCV4Android SDK already installed on your development +machine and OpenCV Manager on your testing device correspondingly. If you need help with any of these, you may consult our :ref:`O4A_SDK` tutorial. -If you encounter any error after thoroughly following these steps, feel free to contact us via -`OpenCV4Android `_ discussion group or OpenCV +If you encounter any error after thoroughly following these steps, feel free to contact us via +`OpenCV4Android `_ discussion group or OpenCV `Q&A forum `_ . We'll do our best to help you out. Using OpenCV Library Within Your Android Project ================================================ -In this section we will explain how to make some existing project to use OpenCV. -Starting with 2.4.2 release for Android, *OpenCV Manager* is used to provide apps with the best -available version of OpenCV. -You can get more information here: :ref:`Android_OpenCV_Manager` and in these +In this section we will explain how to make some existing project to use OpenCV. +Starting with 2.4.2 release for Android, *OpenCV Manager* is used to provide apps with the best +available version of OpenCV. +You can get more information here: :ref:`Android_OpenCV_Manager` and in these `slides `_. @@ -48,31 +48,31 @@ Java Application Development with Async Initialization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Using async initialization is a **recommended** way for application development. It uses the OpenCV +Using async initialization is a **recommended** way for application development. It uses the OpenCV Manager to access OpenCV libraries externally installed in the target system. -#. Add OpenCV library project to your workspace. Use menu +#. Add OpenCV library project to your workspace. 
Use menu :guilabel:`File -> Import -> Existing project in your workspace`. - Press :guilabel:`Browse` button and locate OpenCV4Android SDK + Press :guilabel:`Browse` button and locate OpenCV4Android SDK (:file:`OpenCV-2.4.6-android-sdk/sdk`). .. image:: images/eclipse_opencv_dependency0.png :alt: Add dependency from OpenCV library :align: center -#. In application project add a reference to the OpenCV Java SDK in +#. In application project add a reference to the OpenCV Java SDK in :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.6``. .. image:: images/eclipse_opencv_dependency1.png :alt: Add dependency from OpenCV library :align: center -In most cases OpenCV Manager may be installed automatically from Google Play. For the case, when -Google Play is not available, i.e. emulator, developer board, etc, you can install it manually +In most cases OpenCV Manager may be installed automatically from Google Play. For the case, when +Google Play is not available, i.e. emulator, developer board, etc, you can install it manually using adb tool. See :ref:`manager_selection` for details. -There is a very base code snippet implementing the async initialization. It shows basic principles. +There is a very base code snippet implementing the async initialization. It shows basic principles. See the "15-puzzle" OpenCV sample for details. .. code-block:: java @@ -107,47 +107,47 @@ See the "15-puzzle" OpenCV sample for details. ... } -It this case application works with OpenCV Manager in asynchronous fashion. ``OnManagerConnected`` -callback will be called in UI thread, when initialization finishes. Please note, that it is not -allowed to use OpenCV calls or load OpenCV-dependent native libs before invoking this callback. -Load your own native libraries that depend on OpenCV after the successful OpenCV initialization. 
-Default ``BaseLoaderCallback`` implementation treat application context as Activity and calls -``Activity.finish()`` method to exit in case of initialization failure. To override this behavior -you need to override ``finish()`` method of ``BaseLoaderCallback`` class and implement your own +It this case application works with OpenCV Manager in asynchronous fashion. ``OnManagerConnected`` +callback will be called in UI thread, when initialization finishes. Please note, that it is not +allowed to use OpenCV calls or load OpenCV-dependent native libs before invoking this callback. +Load your own native libraries that depend on OpenCV after the successful OpenCV initialization. +Default ``BaseLoaderCallback`` implementation treat application context as Activity and calls +``Activity.finish()`` method to exit in case of initialization failure. To override this behavior +you need to override ``finish()`` method of ``BaseLoaderCallback`` class and implement your own finalization method. Application Development with Static Initialization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -According to this approach all OpenCV binaries are included into your application package. It is -designed mostly for development purposes. This approach is deprecated for the production code, -release package is recommended to communicate with OpenCV Manager via the async initialization +According to this approach all OpenCV binaries are included into your application package. It is +designed mostly for development purposes. This approach is deprecated for the production code, +release package is recommended to communicate with OpenCV Manager via the async initialization described above. -#. Add the OpenCV library project to your workspace the same way as for the async initialization - above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`, - press :guilabel:`Browse` button and select OpenCV SDK path +#. 
Add the OpenCV library project to your workspace the same way as for the async initialization + above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`, + press :guilabel:`Browse` button and select OpenCV SDK path (:file:`OpenCV-2.4.6-android-sdk/sdk`). .. image:: images/eclipse_opencv_dependency0.png :alt: Add dependency from OpenCV library :align: center -#. In the application project add a reference to the OpenCV4Android SDK in +#. In the application project add a reference to the OpenCV4Android SDK in :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.6``; .. image:: images/eclipse_opencv_dependency1.png :alt: Add dependency from OpenCV library :align: center -#. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV +#. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV native libs from :file:`/sdk/native/libs/` to your project directory to folder :file:`libs/`. - In case of the application project **with a JNI part**, instead of manual libraries copying you + In case of the application project **with a JNI part**, instead of manual libraries copying you need to modify your ``Android.mk`` file: - add the following two code lines after the ``"include $(CLEAR_VARS)"`` and before + add the following two code lines after the ``"include $(CLEAR_VARS)"`` and before ``"include path_to_OpenCV-2.4.6-android-sdk/sdk/native/jni/OpenCV.mk"`` .. code-block:: make @@ -168,13 +168,13 @@ described above. OPENCV_INSTALL_MODULES:=on include ../../sdk/native/jni/OpenCV.mk - After that the OpenCV libraries will be copied to your application :file:`libs` folder during + After that the OpenCV libraries will be copied to your application :file:`libs` folder during the JNI build.v - Eclipse will automatically include all the libraries from the :file:`libs` folder to the - application package (APK). 
+ Eclipse will automatically include all the libraries from the :file:`libs` folder to the + application package (APK). -#. The last step of enabling OpenCV in your application is Java initialization code before calling +#. The last step of enabling OpenCV in your application is Java initialization code before calling OpenCV API. It can be done, for example, in the static section of the ``Activity`` class: .. code-block:: java @@ -186,7 +186,7 @@ described above. } } - If you application includes other OpenCV-dependent native libraries you should load them + If you application includes other OpenCV-dependent native libraries you should load them **after** OpenCV initialization: .. code-block:: java @@ -205,16 +205,16 @@ described above. Native/C++ ---------- -To build your own Android application, using OpenCV as native part, the following steps should be +To build your own Android application, using OpenCV as native part, the following steps should be taken: -#. You can use an environment variable to specify the location of OpenCV package or just hardcode +#. You can use an environment variable to specify the location of OpenCV package or just hardcode absolute or relative path in the :file:`jni/Android.mk` of your projects. -#. The file :file:`jni/Android.mk` should be written for the current application using the common +#. The file :file:`jni/Android.mk` should be written for the current application using the common rules for this file. - For detailed information see the Android NDK documentation from the Android NDK archive, in the + For detailed information see the Android NDK documentation from the Android NDK archive, in the file :file:`/docs/ANDROID-MK.html`. #. The following line: @@ -229,7 +229,7 @@ taken: include $(CLEAR_VARS) -#. Several variables can be used to customize OpenCV stuff, but you **don't need** to use them when +#. 
Several variables can be used to customize OpenCV stuff, but you **don't need** to use them when your application uses the `async initialization` via the `OpenCV Manager` API. .. note:: These variables should be set **before** the ``"include .../OpenCV.mk"`` line: @@ -238,7 +238,7 @@ taken: OPENCV_INSTALL_MODULES:=on - Copies necessary OpenCV dynamic libs to the project ``libs`` folder in order to include them + Copies necessary OpenCV dynamic libs to the project ``libs`` folder in order to include them into the APK. .. code-block:: make @@ -251,7 +251,7 @@ taken: OPENCV_LIB_TYPE:=STATIC - Perform static linking with OpenCV. By default dynamic link is used and the project JNI lib + Perform static linking with OpenCV. By default dynamic link is used and the project JNI lib depends on ``libopencv_java.so``. #. The file :file:`Application.mk` should exist and should contain lines: @@ -269,8 +269,8 @@ taken: Should specify the application target platforms. - In some cases a linkage error (like ``"In function 'cv::toUtf16(std::basic_string<...>... - undefined reference to 'mbstowcs'"``) happens when building an application JNI library, + In some cases a linkage error (like ``"In function 'cv::toUtf16(std::basic_string<...>... + undefined reference to 'mbstowcs'"``) happens when building an application JNI library, depending on OpenCV. The following line in the :file:`Application.mk` usually fixes it: .. code-block:: make @@ -278,19 +278,19 @@ taken: APP_PLATFORM := android-9 -#. Either use :ref:`manual ` ``ndk-build`` invocation or - :ref:`setup Eclipse CDT Builder ` to build native JNI lib before (re)building the Java +#. Either use :ref:`manual ` ``ndk-build`` invocation or + :ref:`setup Eclipse CDT Builder ` to build native JNI lib before (re)building the Java part and creating an APK. Hello OpenCV Sample =================== -Here are basic steps to guide you trough the process of creating a simple OpenCV-centric -application. 
It will be capable of accessing camera output, processing it and displaying the +Here are basic steps to guide you trough the process of creating a simple OpenCV-centric +application. It will be capable of accessing camera output, processing it and displaying the result. -#. Open Eclipse IDE, create a new clean workspace, create a new Android project +#. Open Eclipse IDE, create a new clean workspace, create a new Android project :menuselection:`File --> New --> Android Project` #. Set name, target, package and ``minSDKVersion`` accordingly. The minimal SDK version for build diff --git a/doc/tutorials/introduction/ios_install/ios_install.rst b/doc/tutorials/introduction/ios_install/ios_install.rst index 8d117a0b42..2973b7ec2b 100644 --- a/doc/tutorials/introduction/ios_install/ios_install.rst +++ b/doc/tutorials/introduction/ios_install/ios_install.rst @@ -20,7 +20,7 @@ In MacOS it can be done using the following command in Terminal: cd ~/ git clone https://github.com/Itseez/opencv.git - + Building OpenCV from Source, using CMake and Command Line ========================================================= @@ -28,10 +28,10 @@ Building OpenCV from Source, using CMake and Command Line #. Make symbolic link for Xcode to let OpenCV build scripts find the compiler, header files etc. .. code-block:: bash - + cd / sudo ln -s /Applications/Xcode.app/Contents/Developer Developer - + #. Build OpenCV framework: .. code-block:: bash diff --git a/doc/tutorials/introduction/linux_eclipse/linux_eclipse.rst b/doc/tutorials/introduction/linux_eclipse/linux_eclipse.rst index 41c161ce32..dc684451e7 100644 --- a/doc/tutorials/introduction/linux_eclipse/linux_eclipse.rst +++ b/doc/tutorials/introduction/linux_eclipse/linux_eclipse.rst @@ -11,7 +11,7 @@ Prerequisites 1. Having installed `Eclipse `_ in your workstation (only the CDT plugin for C/C++ is needed). 
You can follow the following steps: - * Go to the Eclipse site + * Go to the Eclipse site * Download `Eclipse IDE for C/C++ Developers `_ . Choose the link according to your workstation. @@ -20,7 +20,7 @@ Prerequisites Making a project ================= -1. Start Eclipse. Just run the executable that comes in the folder. +1. Start Eclipse. Just run the executable that comes in the folder. #. Go to **File -> New -> C/C++ Project** @@ -28,13 +28,13 @@ Making a project :alt: Eclipse Tutorial Screenshot 0 :align: center -#. Choose a name for your project (i.e. DisplayImage). An **Empty Project** should be okay for this example. +#. Choose a name for your project (i.e. DisplayImage). An **Empty Project** should be okay for this example. .. image:: images/a1.png :alt: Eclipse Tutorial Screenshot 1 :align: center -#. Leave everything else by default. Press **Finish**. +#. Leave everything else by default. Press **Finish**. #. Your project (in this case DisplayImage) should appear in the **Project Navigator** (usually at the left side of your window). @@ -45,7 +45,7 @@ Making a project #. Now, let's add a source file using OpenCV: - * Right click on **DisplayImage** (in the Navigator). **New -> Folder** . + * Right click on **DisplayImage** (in the Navigator). **New -> Folder** . .. image:: images/a4.png :alt: Eclipse Tutorial Screenshot 4 @@ -76,9 +76,9 @@ Making a project image = imread( argv[1], 1 ); if( argc != 2 || !image.data ) - { + { printf( "No image data \n" ); - return -1; + return -1; } namedWindow( "Display Image", CV_WINDOW_AUTOSIZE ); @@ -102,7 +102,7 @@ Making a project :align: center .. note:: - If you do not know where your opencv files are, open the **Terminal** and type: + If you do not know where your opencv files are, open the **Terminal** and type: .. code-block:: bash @@ -112,56 +112,56 @@ Making a project .. code-block:: bash - -I/usr/local/include/opencv -I/usr/local/include + -I/usr/local/include/opencv -I/usr/local/include b. 
Now go to **GCC C++ Linker**,there you have to fill two spaces: First in **Library search path (-L)** you have to write the path to where the opencv libraries reside, in my case the path is: :: - + /usr/local/lib - + Then in **Libraries(-l)** add the OpenCV libraries that you may need. Usually just the 3 first on the list below are enough (for simple applications) . In my case, I am putting all of them since I plan to use the whole bunch: - opencv_core - opencv_imgproc + opencv_core + opencv_imgproc opencv_highgui - opencv_ml - opencv_video + opencv_ml + opencv_video opencv_features2d - opencv_calib3d - opencv_objdetect + opencv_calib3d + opencv_objdetect opencv_contrib - opencv_legacy + opencv_legacy opencv_flann .. image:: images/a10.png :alt: Eclipse Tutorial Screenshot 10 - :align: center - + :align: center + If you don't know where your libraries are (or you are just psychotic and want to make sure the path is fine), type in **Terminal**: .. code-block:: bash - + pkg-config --libs opencv My output (in case you want to check) was: .. code-block:: bash - - -L/usr/local/lib -lopencv_core -lopencv_imgproc -lopencv_highgui -lopencv_ml -lopencv_video -lopencv_features2d -lopencv_calib3d -lopencv_objdetect -lopencv_contrib -lopencv_legacy -lopencv_flann + + -L/usr/local/lib -lopencv_core -lopencv_imgproc -lopencv_highgui -lopencv_ml -lopencv_video -lopencv_features2d -lopencv_calib3d -lopencv_objdetect -lopencv_contrib -lopencv_legacy -lopencv_flann Now you are done. Click **OK** - * Your project should be ready to be built. For this, go to **Project->Build all** + * Your project should be ready to be built. For this, go to **Project->Build all** - In the Console you should get something like + In the Console you should get something like .. image:: images/a12.png :alt: Eclipse Tutorial Screenshot 12 - :align: center + :align: center If you check in your folder, there should be an executable there. @@ -179,21 +179,21 @@ So, now we have an executable ready to run. 
If we were to use the Terminal, we w Assuming that the image to use as the argument would be located in /images/HappyLittleFish.png. We can still do this, but let's do it from Eclipse: -#. Go to **Run->Run Configurations** +#. Go to **Run->Run Configurations** -#. Under C/C++ Application you will see the name of your executable + Debug (if not, click over C/C++ Application a couple of times). Select the name (in this case **DisplayImage Debug**). +#. Under C/C++ Application you will see the name of your executable + Debug (if not, click over C/C++ Application a couple of times). Select the name (in this case **DisplayImage Debug**). #. Now, in the right side of the window, choose the **Arguments** Tab. Write the path of the image file we want to open (path relative to the workspace/DisplayImage folder). Let's use **HappyLittleFish.png**: .. image:: images/a14.png :alt: Eclipse Tutorial Screenshot 14 - :align: center + :align: center #. Click on the **Apply** button and then in Run. An OpenCV window should pop up with the fish image (or whatever you used). .. image:: images/a15.jpg :alt: Eclipse Tutorial Screenshot 15 - :align: center + :align: center #. Congratulations! You are ready to have fun with OpenCV using Eclipse. @@ -236,7 +236,7 @@ Say you have or create a new file, *helloworld.cpp* in a directory called *foo*: ADD_EXECUTABLE( helloworld helloworld.cxx ) TARGET_LINK_LIBRARIES( helloworld ${OpenCV_LIBS} ) -#. Run: ``cmake-gui ..`` and make sure you fill in where opencv was built. +#. Run: ``cmake-gui ..`` and make sure you fill in where opencv was built. #. Then click ``configure`` and then ``generate``. 
If it's OK, **quit cmake-gui** diff --git a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst index e603e5c289..f582d32086 100644 --- a/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst +++ b/doc/tutorials/introduction/linux_gcc_cmake/linux_gcc_cmake.rst @@ -11,7 +11,7 @@ Using OpenCV with gcc and CMake * The easiest way of using OpenCV in your code is to use `CMake `_. A few advantages (taken from the Wiki): #. No need to change anything when porting between Linux and Windows - #. Can easily be combined with other tools by CMake( i.e. Qt, ITK and VTK ) + #. Can easily be combined with other tools by CMake( i.e. Qt, ITK and VTK ) * If you are not familiar with CMake, checkout the `tutorial `_ on its website. @@ -21,7 +21,7 @@ Steps Create a program using OpenCV ------------------------------- -Let's use a simple program such as DisplayImage.cpp shown below. +Let's use a simple program such as DisplayImage.cpp shown below. .. code-block:: cpp @@ -36,9 +36,9 @@ Let's use a simple program such as DisplayImage.cpp shown below. image = imread( argv[1], 1 ); if( argc != 2 || !image.data ) - { + { printf( "No image data \n" ); - return -1; + return -1; } namedWindow( "Display Image", CV_WINDOW_AUTOSIZE ); diff --git a/doc/tutorials/introduction/linux_install/linux_install.rst b/doc/tutorials/introduction/linux_install/linux_install.rst index e3039ca07f..e8b96dab73 100644 --- a/doc/tutorials/introduction/linux_install/linux_install.rst +++ b/doc/tutorials/introduction/linux_install/linux_install.rst @@ -11,8 +11,8 @@ Required Packages .. 
code-block:: bash - sudo apt-get install build-essential - + sudo apt-get install build-essential + * CMake 2.6 or higher; * Git; * GTK+2.x or higher, including headers (libgtk2.0-dev); @@ -48,7 +48,7 @@ In Linux it can be achieved with the following command in Terminal: cd ~/ git clone https://github.com/Itseez/opencv.git - + Building OpenCV from Source Using CMake, Using the Command Line =============================================================== @@ -58,26 +58,26 @@ Building OpenCV from Source Using CMake, Using the Command Line #. Enter the and type .. code-block:: bash - + cmake [] For example .. code-block:: bash - + cd ~/opencv mkdir release cd release cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local .. - + #. Enter the created temporary directory () and proceed with: .. code-block:: bash - + make sudo make install .. note:: - + If the size of the created library is a critical issue (like in case of an Android build) you can use the ``install/strip`` command to get the smallest size as possible. The *stripped* version appears to be twice as small. However, we do not recommend using this unless those extra megabytes do really matter. diff --git a/doc/tutorials/introduction/load_save_image/load_save_image.rst b/doc/tutorials/introduction/load_save_image/load_save_image.rst index 1a757cfabc..50fb9ea37f 100644 --- a/doc/tutorials/introduction/load_save_image/load_save_image.rst +++ b/doc/tutorials/introduction/load_save_image/load_save_image.rst @@ -5,8 +5,8 @@ Load, Modify, and Save an Image .. note:: - We assume that by now you know how to load an image using :imread:`imread <>` and to display it in a window (using :imshow:`imshow <>`). Read the :ref:`Display_Image` tutorial otherwise. - + We assume that by now you know how to load an image using :imread:`imread <>` and to display it in a window (using :imshow:`imshow <>`). Read the :ref:`Display_Image` tutorial otherwise. 
+ Goals ====== @@ -35,9 +35,9 @@ Here it is: { char* imageName = argv[1]; - Mat image; + Mat image; image = imread( imageName, 1 ); - + if( argc != 2 || !image.data ) { printf( " No image data \n " ); @@ -53,7 +53,7 @@ Here it is: namedWindow( "Gray image", CV_WINDOW_AUTOSIZE ); imshow( imageName, image ); - imshow( "Gray image", gray_image ); + imshow( "Gray image", gray_image ); waitKey(0); @@ -67,18 +67,18 @@ Explanation * Creating a Mat object to store the image information * Load an image using :imread:`imread <>`, located in the path given by *imageName*. Fort this example, assume you are loading a RGB image. - -#. Now we are going to convert our image from BGR to Grayscale format. OpenCV has a really nice function to do this kind of transformations: + +#. Now we are going to convert our image from BGR to Grayscale format. OpenCV has a really nice function to do this kind of transformations: .. code-block:: cpp - + cvtColor( image, gray_image, CV_BGR2GRAY ); As you can see, :cvt_color:`cvtColor <>` takes as arguments: .. container:: enumeratevisibleitemswithsquare - * a source image (*image*) + * a source image (*image*) * a destination image (*gray_image*), in which we will save the converted image. * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :imread:`imread <>` has BGR default channel order in case of color images). @@ -86,7 +86,7 @@ Explanation .. code-block:: cpp - imwrite( "../../images/Gray_Image.jpg", gray_image ); + imwrite( "../../images/Gray_Image.jpg", gray_image ); Which will save our *gray_image* as *Gray_Image.jpg* in the folder *images* located two levels up of my current location. 
diff --git a/doc/tutorials/introduction/windows_install/windows_install.rst b/doc/tutorials/introduction/windows_install/windows_install.rst index 8ed84197df..c29c13aede 100644 --- a/doc/tutorials/introduction/windows_install/windows_install.rst +++ b/doc/tutorials/introduction/windows_install/windows_install.rst @@ -126,7 +126,7 @@ Building the library #. Install |TortoiseGit|_. Choose the 32 or 64 bit version according to the type of OS you work in. While installing, locate your msysgit (if it doesn't do that automatically). Follow the wizard -- the default options are OK for the most part. -#. Choose a directory in your file system, where you will download the OpenCV libraries to. I recommend creating a new one that has short path and no special charachters in it, for example :file:`D:/OpenCV`. For this tutorial I'll suggest you do so. If you use your own path and know, what you're doing -- it's OK. +#. Choose a directory in your file system, where you will download the OpenCV libraries to. I recommend creating a new one that has short path and no special charachters in it, for example :file:`D:/OpenCV`. For this tutorial I'll suggest you do so. If you use your own path and know, what you're doing -- it's OK. a) Clone the repository to the selected directory. After clicking *Clone* button, a window will appear where you can select from what repository you want to download source files (https://github.com/Itseez/opencv.git) and to what directory (:file:`D:/OpenCV`). @@ -314,10 +314,10 @@ First we set an enviroment variable to make easier our work. 
This will hold the setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc10 (suggested for Visual Studio 2010 - 32 bit Windows) setx -m OPENCV_DIR D:\OpenCV\Build\x64\vc10 (suggested for Visual Studio 2010 - 64 bit Windows) - + setx -m OPENCV_DIR D:\OpenCV\Build\x86\vc11 (suggested for Visual Studio 2012 - 32 bit Windows) setx -m OPENCV_DIR D:\OpenCV\Build\x64\vc11 (suggested for Visual Studio 2012 - 64 bit Windows) - + Here the directory is where you have your OpenCV binaries (*extracted* or *built*). You can have different platform (e.g. x64 instead of x86) or compiler type, so substitute appropriate value. Inside this you should have two folders called *lib* and *bin*. The -m should be added if you wish to make the settings computer wise, instead of user wise. If you built static libraries then you are done. Otherwise, you need to add the *bin* folders path to the systems path. This is cause you will use the OpenCV library in form of *\"Dynamic-link libraries\"* (also known as **DLL**). Inside these are stored all the algorithms and information the OpenCV library contains. The operating system will load them only on demand, during runtime. However, to do this he needs to know where they are. The systems **PATH** contains a list of folders where DLLs can be found. Add the OpenCV library path to this and the OS will know where to look if he ever needs the OpenCV binaries. Otherwise, you will need to copy the used DLLs right beside the applications executable file (*exe*) for the OS to find it, which is highly unpleasent if you work on many projects. 
To do this start up again the |PathEditor|_ and add the following new entry (right click in the application to bring up the menu): diff --git a/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst b/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst index ccf35eb054..f3058a74d2 100644 --- a/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst +++ b/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.rst @@ -10,16 +10,16 @@ I start out from the assumption that you have read and completed with success th :alt: You should have a folder looking like this. :align: center -The OpenCV libraries, distributed by us, on the Microsoft Windows operating system are in a **D**\ ynamic **L**\ inked **L**\ ibraries (*DLL*). These have the advantage that all the content of the library are loaded only at runtime, on demand, and that countless programs may use the same library file. This means that if you have ten applications using the OpenCV library, no need to have around a version for each one of them. Of course you need to have the *dll* of the OpenCV on all systems where you want to run your application. +The OpenCV libraries, distributed by us, on the Microsoft Windows operating system are in a **D**\ ynamic **L**\ inked **L**\ ibraries (*DLL*). These have the advantage that all the content of the library are loaded only at runtime, on demand, and that countless programs may use the same library file. This means that if you have ten applications using the OpenCV library, no need to have around a version for each one of them. Of course you need to have the *dll* of the OpenCV on all systems where you want to run your application. Another approach is to use static libraries that have *lib* extensions. You may build these by using our source files as described in the :ref:`Windows_Installation` tutorial. 
When you use this the library will be built-in inside your *exe* file. So there is no chance that the user deletes them, for some reason. As a drawback your application will be larger one and as, it will take more time to load it during its startup. -To build an application with OpenCV you need to do two things: +To build an application with OpenCV you need to do two things: .. container:: enumeratevisibleitemswithsquare - + *Tell* to the compiler how the OpenCV library *looks*. You do this by *showing* it the header files. - + *Tell* to the linker from where to get the functions or data structures of OpenCV, when they are needed. + + *Tell* to the compiler how the OpenCV library *looks*. You do this by *showing* it the header files. + + *Tell* to the linker from where to get the functions or data structures of OpenCV, when they are needed. If you use the *lib* system you must set the path where the library files are and specify in which one of them to look. During the build the linker will look into these libraries and add the definitions and implementation of all *used* functions and data structures to the executable file. @@ -27,7 +27,7 @@ To build an application with OpenCV you need to do two things: To pass on all this information to the Visual Studio IDE you can either do it globally (so all your future projects will get these information) or locally (so only for you current project). The advantage of the global one is that you only need to do it once; however, it may be undesirable to clump all your projects all the time with all these information. In case of the global one how you do it depends on the Microsoft Visual Studio you use. There is a **2008 and previous versions** and a **2010 way** of doing it. Inside the global section of this tutorial I'll show what the main differences are. -The base item of a project in Visual Studio is a solution. A solution may contain multiple projects. Projects are the building blocks of an application. 
Every project will realize something and you will have a main project in which you can put together this project puzzle. In case of the many simple applications (like many of the tutorials will be) you do not need to break down the application into modules. In these cases your main project will be the only existing one. Now go create a new solution inside Visual studio by going through the :menuselection:`File --> New --> Project` menu selection. Choose *Win32 Console Application* as type. Enter its name and select the path where to create it. Then in the upcoming dialog make sure you create an empty project. +The base item of a project in Visual Studio is a solution. A solution may contain multiple projects. Projects are the building blocks of an application. Every project will realize something and you will have a main project in which you can put together this project puzzle. In case of the many simple applications (like many of the tutorials will be) you do not need to break down the application into modules. In these cases your main project will be the only existing one. Now go create a new solution inside Visual studio by going through the :menuselection:`File --> New --> Project` menu selection. Choose *Win32 Console Application* as type. Enter its name and select the path where to create it. Then in the upcoming dialog make sure you create an empty project. .. image:: images/NewProjectVisualStudio.jpg :alt: Which options to select @@ -36,7 +36,7 @@ The base item of a project in Visual Studio is a solution. A solution may contai The *local* method ================== -Every project is built separately from the others. Due to this every project has its own rule package. Inside this rule packages are stored all the information the *IDE* needs to know to build your project. For any application there are at least two build modes: a *Release* and a *Debug* one. 
The *Debug* has many features that exist so you can find and resolve easier bugs inside your application. In contrast the *Release* is an optimized version, where the goal is to make the application run as fast as possible or to be as small as possible. You may figure that these modes also require different rules to use during build. Therefore, there exist different rule packages for each of your build modes. These rule packages are called inside the IDE as *project properties* and you can view and modify them by using the *Property Manger*. You can bring up this with :menuselection:`View --> Property Pages`. Expand it and you can see the existing rule packages (called *Proporty Sheets*). +Every project is built separately from the others. Due to this every project has its own rule package. Inside this rule packages are stored all the information the *IDE* needs to know to build your project. For any application there are at least two build modes: a *Release* and a *Debug* one. The *Debug* has many features that exist so you can find and resolve easier bugs inside your application. In contrast the *Release* is an optimized version, where the goal is to make the application run as fast as possible or to be as small as possible. You may figure that these modes also require different rules to use during build. Therefore, there exist different rule packages for each of your build modes. These rule packages are called inside the IDE as *project properties* and you can view and modify them by using the *Property Manger*. You can bring up this with :menuselection:`View --> Property Pages`. Expand it and you can see the existing rule packages (called *Proporty Sheets*). .. image:: images/PropertyPageExample.jpg :alt: An example of Property Sheet @@ -55,10 +55,10 @@ Use for example the *OpenCV_Debug* name. Then by selecting the sheet :menuselect $(OPENCV_DIR)\..\..\include .. image:: images/PropertySheetOpenCVInclude.jpg - :alt: Add the include dir like this. 
+ :alt: Add the include dir like this. :align: center -When adding third party libraries settings it is generally a good idea to use the power behind the environment variables. The full location of the OpenCV library may change on each system. Moreover, you may even end up yourself with moving the install directory for some reason. If you would give explicit paths inside your property sheet your project will end up not working when you pass it further to someone else who has a different OpenCV install path. Moreover, fixing this would require to manually modifying every explicit path. A more elegant solution is to use the environment variables. Anything that you put inside a parenthesis started with a dollar sign will be replaced at runtime with the current environment variables value. Here comes in play the environment variable setting we already made in our :ref:`previous tutorial `. +When adding third party libraries settings it is generally a good idea to use the power behind the environment variables. The full location of the OpenCV library may change on each system. Moreover, you may even end up yourself with moving the install directory for some reason. If you would give explicit paths inside your property sheet your project will end up not working when you pass it further to someone else who has a different OpenCV install path. Moreover, fixing this would require to manually modifying every explicit path. A more elegant solution is to use the environment variables. Anything that you put inside a parenthesis started with a dollar sign will be replaced at runtime with the current environment variables value. Here comes in play the environment variable setting we already made in our :ref:`previous tutorial `. Next go to the :menuselection:`Linker --> General` and under the *"Additional Library Directories"* add the libs directory: @@ -67,7 +67,7 @@ Next go to the :menuselection:`Linker --> General` and under the *"Additional Li $(OPENCV_DIR)\lib .. 
image:: images/PropertySheetOpenCVLib.jpg - :alt: Add the library folder like this. + :alt: Add the library folder like this. :align: center Then you need to specify the libraries in which the linker should look into. To do this go to the :menuselection:`Linker --> Input` and under the *"Additional Dependencies"* entry add the name of all modules which you want to use: @@ -77,7 +77,7 @@ Then you need to specify the libraries in which the linker should look into. To :align: center .. image:: images/PropertySheetOpenCVLibrariesDebug.jpg - :alt: Like this. + :alt: Like this. :align: center The names of the libraries are as follow: @@ -105,33 +105,33 @@ A full list, for the latest version would contain: The letter *d* at the end just indicates that these are the libraries required for the debug. Now click ok to save and do the same with a new property inside the Release rule section. Make sure to omit the *d* letters from the library names and to save the property sheets with the save icon above them. .. image:: images/PropertySheetOpenCVLibrariesRelease.jpg - :alt: And the release ones. + :alt: And the release ones. :align: center -You can find your property sheets inside your projects directory. At this point it is a wise decision to back them up into some special directory, to always have them at hand in the future, whenever you create an OpenCV project. Note that for Visual Studio 2010 the file extension is *props*, while for 2008 this is *vsprops*. +You can find your property sheets inside your projects directory. At this point it is a wise decision to back them up into some special directory, to always have them at hand in the future, whenever you create an OpenCV project. Note that for Visual Studio 2010 the file extension is *props*, while for 2008 this is *vsprops*. .. image:: images/PropertySheetInsideFolder.jpg - :alt: And the release ones. + :alt: And the release ones. 
:align: center -Next time when you make a new OpenCV project just use the "Add Existing Property Sheet..." menu entry inside the Property Manager to easily add the OpenCV build rules. +Next time when you make a new OpenCV project just use the "Add Existing Property Sheet..." menu entry inside the Property Manager to easily add the OpenCV build rules. .. image:: images/PropertyPageAddExisting.jpg - :alt: Use this option. + :alt: Use this option. :align: center The *global* method =================== -In case you find to troublesome to add the property pages to each and every one of your projects you can also add this rules to a *"global property page"*. However, this applies only to the additional include and library directories. The name of the libraries to use you still need to specify manually by using for instance: a Property page. +In case you find to troublesome to add the property pages to each and every one of your projects you can also add this rules to a *"global property page"*. However, this applies only to the additional include and library directories. The name of the libraries to use you still need to specify manually by using for instance: a Property page. -In Visual Studio 2008 you can find this under the: :menuselection:`Tools --> Options --> Projects and Solutions --> VC++ Directories`. +In Visual Studio 2008 you can find this under the: :menuselection:`Tools --> Options --> Projects and Solutions --> VC++ Directories`. .. image:: images/VCDirectories2008.jpg :alt: VC++ Directories in VS 2008. :align: center -In Visual Studio 2010 this has been moved to a global property sheet which is automatically added to every project you create: +In Visual Studio 2010 this has been moved to a global property sheet which is automatically added to every project you create: .. image:: images/VCDirectories2010.jpg :alt: VC++ Directories in VS 2010. @@ -153,10 +153,10 @@ You can start a Visual Studio build from two places. Either inside from the *IDE .. 
|voila| unicode:: voil U+00E1 -This is important to remember when you code inside the code open and save commands. You're resources will be saved ( and queried for at opening!!!) relatively to your working directory. This is unless you give a full, explicit path as parameter for the I/O functions. In the code above we open :download:`this OpenCV logo<../../../../samples/cpp/tutorial_code/images/opencv-logo.png>`. Before starting up the application make sure you place the image file in your current working directory. Modify the image file name inside the code to try it out on other images too. Run it and |voila|: +This is important to remember when you code inside the code open and save commands. You're resources will be saved ( and queried for at opening!!!) relatively to your working directory. This is unless you give a full, explicit path as parameter for the I/O functions. In the code above we open :download:`this OpenCV logo<../../../../samples/cpp/tutorial_code/images/opencv-logo.png>`. Before starting up the application make sure you place the image file in your current working directory. Modify the image file name inside the code to try it out on other images too. Run it and |voila|: .. image:: images/SuccessVisualStudioWindows.jpg - :alt: You should have this. + :alt: You should have this. :align: center Command line arguments with Visual Studio @@ -167,11 +167,11 @@ Throughout some of our future tutorials you'll see that the programs main input .. code-block:: bash :linenos: - D: + D: CD OpenCV\MySolutionName\Release MySolutionName.exe exampleImage.jpg -Here I first changed my drive (if your project isn't on the OS local drive), navigated to my project and start it with an example image argument. While under Linux system it is common to fiddle around with the console window on the Microsoft Windows many people come to use it almost never. 
Besides, adding the same argument again and again while you are testing your application is, somewhat, a cumbersome task. Luckily, in the Visual Studio there is a menu to automate all this: +Here I first changed my drive (if your project isn't on the OS local drive), navigated to my project and start it with an example image argument. While under Linux system it is common to fiddle around with the console window on the Microsoft Windows many people come to use it almost never. Besides, adding the same argument again and again while you are testing your application is, somewhat, a cumbersome task. Luckily, in the Visual Studio there is a menu to automate all this: .. image:: images/VisualStudioCommandLineArguments.jpg :alt: Visual Studio Command Line Arguments diff --git a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst index 7b201b977f..e15a43be28 100644 --- a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst +++ b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst @@ -114,7 +114,7 @@ Now assume you want to do a visual sanity check of the *cv::Canny()* implementat .. image:: images/edges_zoom.png :height: 160pt - + Right-click on the *Image Viewer* to bring up the view context menu and enable :menuselection:`Link Views` (a check box next to the menu item indicates whether the option is enabled). .. image:: images/viewer_context_menu.png @@ -124,7 +124,7 @@ The :menuselection:`Link Views` feature keeps the view region fixed when flippin .. image:: images/input_zoom.png :height: 160pt - + You may also switch back and forth between viewing input and edges with your up/down cursor keys. That way you can easily verify that the detected edges line up nicely with the data in the input image. More ... 
diff --git a/doc/tutorials/ios/hello/hello.rst b/doc/tutorials/ios/hello/hello.rst index 8e6ddb88ac..8deda33ef0 100644 --- a/doc/tutorials/ios/hello/hello.rst +++ b/doc/tutorials/ios/hello/hello.rst @@ -19,7 +19,7 @@ Follow this step by step guide to link OpenCV to iOS. 1. Create a new XCode project. -2. Now we need to link *opencv2.framework* with Xcode. Select the project Navigator in the left hand panel and click on project name. +2. Now we need to link *opencv2.framework* with Xcode. Select the project Navigator in the left hand panel and click on project name. 3. Under the TARGETS click on Build Phases. Expand Link Binary With Libraries option. @@ -29,10 +29,10 @@ Follow this step by step guide to link OpenCV to iOS. .. image:: images/linking_opencv_ios.png :alt: OpenCV iOS in Xcode - :align: center + :align: center *Hello OpenCV iOS Application* -=============================== +=============================== Now we will learn how to write a simple Hello World Application in Xcode using OpenCV. @@ -49,7 +49,7 @@ Now we will learn how to write a simple Hello World Application in Xcode using O .. image:: images/header_directive.png :alt: header - :align: center + :align: center .. container:: enumeratevisibleitemswithsquare @@ -61,7 +61,7 @@ Now we will learn how to write a simple Hello World Application in Xcode using O .. image:: images/view_did_load.png :alt: view did load - :align: center + :align: center .. container:: enumeratevisibleitemswithsquare @@ -73,4 +73,4 @@ Now we will learn how to write a simple Hello World Application in Xcode using O .. 
image:: images/output.png :alt: output :align: center - + diff --git a/doc/tutorials/ios/image_manipulation/image_manipulation.rst b/doc/tutorials/ios/image_manipulation/image_manipulation.rst index e8d4aad5bb..fd2d9c6e3f 100644 --- a/doc/tutorials/ios/image_manipulation/image_manipulation.rst +++ b/doc/tutorials/ios/image_manipulation/image_manipulation.rst @@ -21,9 +21,9 @@ In *OpenCV* all the image processing operations are done on *Mat*. iOS uses UIIm CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage); CGFloat cols = image.size.width; CGFloat rows = image.size.height; - + cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels - + CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data cols, // Width of bitmap rows, // Height of bitmap @@ -32,11 +32,11 @@ In *OpenCV* all the image processing operations are done on *Mat*. iOS uses UIIm colorSpace, // Colorspace kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault); // Bitmap info flags - + CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage); CGContextRelease(contextRef); CGColorSpaceRelease(colorSpace); - + return cvMat; } @@ -47,9 +47,9 @@ In *OpenCV* all the image processing operations are done on *Mat*. iOS uses UIIm CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage); CGFloat cols = image.size.width; CGFloat rows = image.size.height; - + cv::Mat cvMat(rows, cols, CV_8UC1); // 8 bits per component, 1 channels - + CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data cols, // Width of bitmap rows, // Height of bitmap @@ -58,11 +58,11 @@ In *OpenCV* all the image processing operations are done on *Mat*. 
iOS uses UIIm colorSpace, // Colorspace kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault); // Bitmap info flags - + CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage); CGContextRelease(contextRef); CGColorSpaceRelease(colorSpace); - + return cvMat; } @@ -81,15 +81,15 @@ After the processing we need to convert it back to UIImage. { NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()]; CGColorSpaceRef colorSpace; - + if (cvMat.elemSize() == 1) { colorSpace = CGColorSpaceCreateDeviceGray(); } else { colorSpace = CGColorSpaceCreateDeviceRGB(); } - + CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data); - + // Creating CGImage from cv::Mat CGImageRef imageRef = CGImageCreate(cvMat.cols, //width cvMat.rows, //height @@ -103,15 +103,15 @@ After the processing we need to convert it back to UIImage. false, //should interpolate kCGRenderingIntentDefault //intent ); - - + + // Getting UIImage from CGImage UIImage *finalImage = [UIImage imageWithCGImage:imageRef]; CGImageRelease(imageRef); CGDataProviderRelease(provider); CGColorSpaceRelease(colorSpace); - - return finalImage; + + return finalImage; } *Output* @@ -119,9 +119,9 @@ After the processing we need to convert it back to UIImage. .. image:: images/output.jpg :alt: header - :align: center + :align: center -Check out an instance of running code with more Image Effects on `YouTube `_ . +Check out an instance of running code with more Image Effects on `YouTube `_ . .. raw:: html diff --git a/doc/tutorials/ios/table_of_content_ios/table_of_content_ios.rst b/doc/tutorials/ios/table_of_content_ios/table_of_content_ios.rst index 5ecda41c26..377446dee6 100644 --- a/doc/tutorials/ios/table_of_content_ios/table_of_content_ios.rst +++ b/doc/tutorials/ios/table_of_content_ios/table_of_content_ios.rst @@ -69,7 +69,7 @@ .. 
toctree:: :hidden: - + ../hello/hello ../image_manipulation/image_manipulation ../video_processing/video_processing diff --git a/doc/tutorials/ios/video_processing/video_processing.rst b/doc/tutorials/ios/video_processing/video_processing.rst index 6143f7717e..4d8116a4c5 100644 --- a/doc/tutorials/ios/video_processing/video_processing.rst +++ b/doc/tutorials/ios/video_processing/video_processing.rst @@ -18,34 +18,34 @@ Including OpenCV library in your iOS project The OpenCV library comes as a so-called framework, which you can directly drag-and-drop into your XCode project. Download the latest binary from . Alternatively follow this guide :ref:`iOS-Installation` to compile the framework manually. Once you have the framework, just drag-and-drop into XCode: .. image:: images/xcode_hello_ios_framework_drag_and_drop.png - - + + Also you have to locate the prefix header that is used for all header files in the project. The file is typically located at "ProjectName/Supporting Files/ProjectName-Prefix.pch". There, you have add an include statement to import the opencv library. However, make sure you include opencv before you include UIKit and Foundation, because else you will get some weird compile errors that some macros like min and max are defined multiple times. For example the prefix header could look like the following: .. code-block:: objc :linenos: - + // // Prefix header for all source files of the 'VideoFilters' target in the 'VideoFilters' project // - + #import - + #ifndef __IPHONE_4_0 #warning "This project uses features only available in iOS SDK 4.0 and later." #endif - + #ifdef __cplusplus #import #endif - + #ifdef __OBJC__ #import #import #endif - - - + + + Example video frame processing project -------------------------------------- User Interface @@ -60,18 +60,18 @@ Make sure to add and connect the IBOutlets and IBActions to the corresponding Vi .. 
code-block:: objc :linenos: - + @interface ViewController : UIViewController { IBOutlet UIImageView* imageView; IBOutlet UIButton* button; } - + - (IBAction)actionStart:(id)sender; - + @end - - + + Adding the Camera ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,21 +79,21 @@ We add a camera controller to the view controller and initialize it when the vie .. code-block:: objc :linenos: - + #import using namespace cv; - - + + @interface ViewController : UIViewController { - ... + ... CvVideoCamera* videoCamera; } ... @property (nonatomic, retain) CvVideoCamera* videoCamera; - + @end - + .. code-block:: objc :linenos: @@ -101,7 +101,7 @@ We add a camera controller to the view controller and initialize it when the vie { [super viewDidLoad]; // Do any additional setup after loading the view, typically from a nib. - + self.videoCamera = [[CvVideoCamera alloc] initWithParentView:imageView]; self.videoCamera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront; self.videoCamera.defaultAVCaptureSessionPreset = AVCaptureSessionPreset352x288; @@ -109,7 +109,7 @@ We add a camera controller to the view controller and initialize it when the vie self.videoCamera.defaultFPS = 30; self.videoCamera.grayscale = NO; } - + In this case, we initialize the camera and provide the imageView as a target for rendering each frame. CvVideoCamera is basically a wrapper around AVFoundation, so we provie as properties some of the AVFoundation camera options. For example we want to use the front camera, set the video size to 352x288 and a video orientation (the video camera normally outputs in landscape mode, which results in transposed data when you design a portrait application). The property defaultFPS sets the FPS of the camera. If the processing is less fast than the desired FPS, frames are automatically dropped. @@ -153,14 +153,14 @@ We follow the delegation pattern, which is very common in iOS, to provide access .. 
code-block:: objc :linenos: - + @interface ViewController : UIViewController - + .. code-block:: objc :linenos: - + - (void)viewDidLoad { ... @@ -194,13 +194,13 @@ From here you can start processing video frames. For example the following snipp .. code-block:: objc :linenos: - + - (void)processImage:(Mat&)image; { // Do some OpenCV stuff with the image Mat image_copy; cvtColor(image, image_copy, CV_BGRA2BGR); - + // invert image bitwise_not(image_copy, image_copy); cvtColor(image_copy, image, CV_BGR2BGRA); @@ -214,9 +214,9 @@ Finally, we have to tell the camera to actually start/stop working. The followin .. code-block:: objc :linenos: - + #pragma mark - UI Actions - + - (IBAction)actionStart:(id)sender; { [self.videoCamera start]; diff --git a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst index 6cd66e52b9..1b3abd660a 100644 --- a/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst +++ b/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst @@ -10,7 +10,7 @@ In this tutorial you will learn how to: .. container:: enumeratevisibleitemswithsquare - + Use the OpenCV functions :svms:`CvSVM::train ` to build a classifier based on SVMs and :svms:`CvSVM::predict ` to test its performance. + + Use the OpenCV functions :svms:`CvSVM::train ` to build a classifier based on SVMs and :svms:`CvSVM::predict ` to test its performance. What is a SVM? ============== @@ -36,14 +36,14 @@ Then, the operation of the SVM algorithm is based on finding the hyperplane that .. image:: images/optimal-hyperplane.png :alt: The Optimal hyperplane - :align: center + :align: center How is the optimal hyperplane computed? ======================================= Let's introduce the notation used to define formally a hyperplane: -.. math:: +.. math:: f(x) = \beta_{0} + \beta^{T} x, where :math:`\beta` is known as the *weight vector* and :math:`\beta_{0}` as the *bias*. @@ -106,7 +106,7 @@ Explanation .. 
code-block:: cpp Mat trainingDataMat(3, 2, CV_32FC1, trainingData); - Mat labelsMat (3, 1, CV_32FC1, labels); + Mat labelsMat (3, 1, CV_32FC1, labels); 2. **Set up SVM's parameters** @@ -143,7 +143,7 @@ Explanation .. code-block:: cpp Vec3b green(0,255,0), blue (255,0,0); - + for (int i = 0; i < image.rows; ++i) for (int j = 0; j < image.cols; ++j) { @@ -152,8 +152,8 @@ Explanation if (response == 1) image.at(j, i) = green; - else - if (response == -1) + else + if (response == -1) image.at(j, i) = blue; } @@ -184,5 +184,5 @@ Results .. image:: images/result.png :alt: The seperated planes - :align: center + :align: center diff --git a/doc/tutorials/ml/table_of_content_ml/table_of_content_ml.rst b/doc/tutorials/ml/table_of_content_ml/table_of_content_ml.rst index 452b896364..4691756a96 100644 --- a/doc/tutorials/ml/table_of_content_ml/table_of_content_ml.rst +++ b/doc/tutorials/ml/table_of_content_ml/table_of_content_ml.rst @@ -5,9 +5,9 @@ Use the powerfull machine learning classes for statistical classification, regression and clustering of data. -.. include:: ../../definitions/tocDefinitions.rst +.. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -18,7 +18,7 @@ Use the powerfull machine learning classes for statistical classification, regre *Author:* |Author_FernandoI| - Learn what a Suport Vector Machine is. + Learn what a Support Vector Machine is. ============ ============================================== @@ -26,7 +26,7 @@ Use the powerfull machine learning classes for statistical classification, regre :height: 90pt :width: 90pt -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv @@ -51,6 +51,6 @@ Use the powerfull machine learning classes for statistical classification, regre ..
toctree:: :hidden: - + ../introduction_to_svm/introduction_to_svm ../non_linear_svms/non_linear_svms diff --git a/doc/tutorials/objdetect/table_of_content_objdetect/table_of_content_objdetect.rst b/doc/tutorials/objdetect/table_of_content_objdetect/table_of_content_objdetect.rst index 64ed109450..c9df3eb1dc 100644 --- a/doc/tutorials/objdetect/table_of_content_objdetect/table_of_content_objdetect.rst +++ b/doc/tutorials/objdetect/table_of_content_objdetect/table_of_content_objdetect.rst @@ -5,23 +5,23 @@ Ever wondered how your digital camera detects peoples and faces? Look here to find out! -.. include:: ../../definitions/tocDefinitions.rst +.. include:: ../../definitions/tocDefinitions.rst -+ ++ .. tabularcolumns:: m{100pt} m{300pt} .. cssclass:: toctableopencv ===================== ============================================== |CascadeClassif| **Title:** :ref:`cascade_classifier` - + *Compatibility:* > OpenCV 2.0 - + *Author:* |Author_AnaH| - + Here we learn how to use *objdetect* to find objects in our images or videos - + ===================== ============================================== - + .. |CascadeClassif| image:: images/Cascade_Classifier_Tutorial_Cover.jpg :height: 90pt :width: 90pt diff --git a/doc/tutorials/video/table_of_content_video/table_of_content_video.rst b/doc/tutorials/video/table_of_content_video/table_of_content_video.rst index 778bc5cdbe..a2521d6956 100644 --- a/doc/tutorials/video/table_of_content_video/table_of_content_video.rst +++ b/doc/tutorials/video/table_of_content_video/table_of_content_video.rst @@ -3,7 +3,7 @@ *video* module. Video analysis ----------------------------------------------------------- -Look here in order to find use on your video stream algoritms like: motion extraction, feature tracking and foreground extractions. +Look here in order to find use on your video stream algorithms like: motion extraction, feature tracking and foreground extractions. ..
include:: ../../definitions/noContent.rst diff --git a/doc/user_guide/ug_features2d.rst b/doc/user_guide/ug_features2d.rst index ac56336716..e3ef302dc3 100644 --- a/doc/user_guide/ug_features2d.rst +++ b/doc/user_guide/ug_features2d.rst @@ -78,7 +78,7 @@ First, we create an instance of a keypoint detector. All detectors inherit the a extractor.compute(img1, keypoints1, descriptors1); extractor.compute(img2, keypoints2, descriptors2); -We create an instance of descriptor extractor. The most of OpenCV descriptors inherit ``DescriptorExtractor`` abstract interface. Then we compute descriptors for each of the keypoints. The output ``Mat`` of the ``DescriptorExtractor::compute`` method contains a descriptor in a row *i* for each *i*-th keypoint. Note that the method can modify the keypoints vector by removing the keypoints such that a descriptor for them is not defined (usually these are the keypoints near image border). The method makes sure that the ouptut keypoints and descriptors are consistent with each other (so that the number of keypoints is equal to the descriptors row count). :: +We create an instance of descriptor extractor. The most of OpenCV descriptors inherit ``DescriptorExtractor`` abstract interface. Then we compute descriptors for each of the keypoints. The output ``Mat`` of the ``DescriptorExtractor::compute`` method contains a descriptor in a row *i* for each *i*-th keypoint. Note that the method can modify the keypoints vector by removing the keypoints such that a descriptor for them is not defined (usually these are the keypoints near image border). The method makes sure that the output keypoints and descriptors are consistent with each other (so that the number of keypoints is equal to the descriptors row count).
:: // matching descriptors BruteForceMatcher > matcher; diff --git a/doc/user_guide/ug_mat.rst b/doc/user_guide/ug_mat.rst index 5fd163053f..d4cef8f230 100644 --- a/doc/user_guide/ug_mat.rst +++ b/doc/user_guide/ug_mat.rst @@ -13,7 +13,7 @@ Images Load an image from a file: :: Mat img = imread(filename) - + If you read a jpg file, a 3 channel image is created by default. If you need a grayscale image, use: :: Mat img = imread(filename, 0); @@ -23,14 +23,14 @@ If you read a jpg file, a 3 channel image is created by default. If you need a g Save an image to a file: :: imwrite(filename, img); - + .. note:: format of the file is determined by its extension. .. note:: use ``imdecode`` and ``imencode`` to read and write image from/to memory rather than a file. XML/YAML -------- - + TBD Basic operations with images @@ -71,7 +71,7 @@ There are functions in OpenCV, especially from calib3d module, such as ``project //... fill the array Mat pointsMat = Mat(points); -One can access a point in this matrix using the same method ``Mat::at`` : +One can access a point in this matrix using the same method ``Mat::at`` : :: @@ -87,7 +87,7 @@ Memory management and reference counting // .. fill the array Mat pointsMat = Mat(points).reshape(1); -As a result we get a 32FC1 matrix with 3 columns instead of 32FC3 matrix with 1 column. ``pointsMat`` uses data from ``points`` and will not deallocate the memory when destroyed. In this particular instance, however, developer has to make sure that lifetime of ``points`` is longer than of ``pointsMat``. +As a result we get a 32FC1 matrix with 3 columns instead of 32FC3 matrix with 1 column. ``pointsMat`` uses data from ``points`` and will not deallocate the memory when destroyed. In this particular instance, however, developer has to make sure that lifetime of ``points`` is longer than of ``pointsMat``. 
If we need to copy the data, this is done using, for example, ``Mat::copyTo`` or ``Mat::clone``: :: Mat img = imread("image.jpg"); @@ -117,7 +117,7 @@ A convertion from ``Mat`` to C API data structures: :: IplImage img1 = img; CvMat m = img; -Note that there is no data copying here. +Note that there is no data copying here. Conversion from color to grey scale: :: diff --git a/doc/user_guide/ug_traincascade.rst b/doc/user_guide/ug_traincascade.rst index cb5190bc48..601f504382 100644 --- a/doc/user_guide/ug_traincascade.rst +++ b/doc/user_guide/ug_traincascade.rst @@ -6,7 +6,7 @@ Cascade Classifier Training Introduction ============ -The work with a cascade classifier inlcudes two major stages: training and detection. +The work with a cascade classifier inlcudes two major stages: training and detection. Detection stage is described in a documentation of ``objdetect`` module of general OpenCV documentation. Documentation gives some basic information about cascade classifier. Current guide is describing how to train a cascade classifier: preparation of a training data and running the training application. @@ -18,10 +18,10 @@ There are two applications in OpenCV to train cascade classifier: ``opencv_haart Note that ``opencv_traincascade`` application can use TBB for multi-threading. To use it in multicore mode OpenCV must be built with TBB. -Also there are some auxilary utilities related to the training. +Also there are some auxilary utilities related to the training. * ``opencv_createsamples`` is used to prepare a training dataset of positive and test samples. ``opencv_createsamples`` produces dataset of positive samples in a format that is supported by both ``opencv_haartraining`` and ``opencv_traincascade`` applications. The output is a file with \*.vec extension, it is a binary format which contains images. - + * ``opencv_performance`` may be used to evaluate the quality of classifiers, but for trained by ``opencv_haartraining`` only. 
It takes a collection of marked up images, runs the classifier and reports the performance, i.e. number of found objects, number of missed objects, number of false alarms and other information. Since ``opencv_haartraining`` is an obsolete application, only ``opencv_traincascade`` will be described futher. ``opencv_createsamples`` utility is needed to prepare a training data for ``opencv_traincascade``, so it will be described too. @@ -36,7 +36,7 @@ Negative Samples Negative samples are taken from arbitrary images. These images must not contain detected objects. Negative samples are enumerated in a special file. It is a text file in which each line contains an image filename (relative to the directory of the description file) of negative sample image. This file must be created manually. Note that negative samples and sample images are also called background samples or background samples images, and are used interchangeably in this document. Described images may be of different sizes. But each image should be (but not nessesarily) larger then a training window size, because these images are used to subsample negative image to the training size. An example of description file: - + Directory structure: .. code-block:: text @@ -45,14 +45,14 @@ Directory structure: img1.jpg img2.jpg bg.txt - + File bg.txt: .. code-block:: text img/img1.jpg img/img2.jpg - + Positive Samples ---------------- Positive samples are created by ``opencv_createsamples`` utility. They may be created from a single image with object or from a collection of previously marked up images. @@ -66,37 +66,37 @@ Command line arguments: * ``-vec `` Name of the output file containing the positive samples for training. - + * ``-img `` Source object image (e.g., a company logo). - + * ``-bg `` Background description file; contains a list of images which are used as a background for randomly distorted versions of the object. * ``-num `` - + Number of positive samples to generate. 
- + * ``-bgcolor `` Background color (currently grayscale images are assumed); the background color denotes the transparent color. Since there might be compression artifacts, the amount of color tolerance can be specified by ``-bgthresh``. All pixels withing ``bgcolor-bgthresh`` and ``bgcolor+bgthresh`` range are interpreted as transparent. - + * ``-bgthresh `` * ``-inv`` - + If specified, colors will be inverted. - + * ``-randinv`` If specified, colors will be inverted randomly. - + * ``-maxidev `` - + Maximal intensity deviation of pixels in foreground samples. - + * ``-maxxangle `` * ``-maxyangle `` @@ -104,15 +104,15 @@ Command line arguments: * ``-maxzangle `` Maximum rotation angles must be given in radians. - + * ``-show`` Useful debugging option. If specified, each sample will be shown. Pressing ``Esc`` will continue the samples creation process without. - + * ``-w `` Width (in pixels) of the output samples. - + * ``-h `` Height (in pixels) of the output samples. @@ -123,7 +123,7 @@ The source image is rotated randomly around all three axes. The chosen angle is Positive samples also may be obtained from a collection of previously marked up images. This collection is described by a text file similar to background description file. Each line of this file corresponds to an image. The first element of the line is the filename. It is followed by the number of object instances. The following numbers are the coordinates of objects bounding rectangles (x, y, width, height). An example of description file: - + Directory structure: .. code-block:: text @@ -132,27 +132,27 @@ Directory structure: img1.jpg img2.jpg info.dat - + File info.dat: .. code-block:: text - + img/img1.jpg 1 140 100 45 45 img/img2.jpg 2 100 200 50 50 50 30 25 25 - + Image img1.jpg contains single object instance with the following coordinates of bounding rectangle: (140, 100, 45, 45). Image img2.jpg contains two object instances. 
- + In order to create positive samples from such collection, ``-info`` argument should be specified instead of ``-img``: * ``-info `` Description file of marked up images collection. - + The scheme of samples creation in this case is as follows. The object instances are taken from images. Then they are resized to target samples size and stored in output vec-file. No distortion is applied, so the only affecting arguments are ``-w``, ``-h``, ``-show`` and ``-num``. - + ``opencv_createsamples`` utility may be used for examining samples stored in positive samples file. In order to do this only ``-vec``, ``-w`` and ``-h`` parameters should be specified. - -Note that for training, it does not matter how vec-files with positive samples are generated. But ``opencv_createsamples`` utility is the only one way to collect/create a vector file of positive samples, provided by OpenCV. + +Note that for training, it does not matter how vec-files with positive samples are generated. But ``opencv_createsamples`` utility is the only one way to collect/create a vector file of positive samples, provided by OpenCV. Example of vec-file is available here ``opencv/data/vec_files/trainingfaces_24-24.vec``. It can be used to train a face detector with the following window size: ``-w 24 -h 24``. @@ -165,99 +165,99 @@ Command line arguments of ``opencv_traincascade`` application grouped by purpose #. Common arguments: - + * ``-data `` - + Where the trained classifier should be stored. - + * ``-vec `` - + vec-file with positive samples (created by ``opencv_createsamples`` utility). - + * ``-bg `` - + Background description file. - + * ``-numPos `` - + * ``-numNeg `` - + Number of positive/negative samples used in training for every classifier stage. - + * ``-numStages `` - + Number of cascade stages to be trained. - + * ``-precalcValBufSize `` - + Size of buffer for precalculated feature values (in Mb). - + * ``-precalcIdxBufSize `` - + Size of buffer for precalculated feature indices (in Mb). 
The more memory you have the faster the training process. - + * ``-baseFormatSave`` - + This argument is actual in case of Haar-like features. If it is specified, the cascade will be saved in the old format. - + #. Cascade parameters: * ``-stageType `` - + Type of stages. Only boosted classifier are supported as a stage type at the moment. - + * ``-featureType<{HAAR(default), LBP}>`` - + Type of features: ``HAAR`` - Haar-like features, ``LBP`` - local binary patterns. - + * ``-w `` - + * ``-h `` - + Size of training samples (in pixels). Must have exactly the same values as used during training samples creation (``opencv_createsamples`` utility). - + #. Boosted classifer parameters: - + * ``-bt <{DAB, RAB, LB, GAB(default)}>`` - + Type of boosted classifiers: ``DAB`` - Discrete AdaBoost, ``RAB`` - Real AdaBoost, ``LB`` - LogitBoost, ``GAB`` - Gentle AdaBoost. - + * ``-minHitRate `` - + Minimal desired hit rate for each stage of the classifier. Overall hit rate may be estimated as (min_hit_rate^number_of_stages). - + * ``-maxFalseAlarmRate `` - + Maximal desired false alarm rate for each stage of the classifier. Overall false alarm rate may be estimated as (max_false_alarm_rate^number_of_stages). - + * ``-weightTrimRate `` - + Specifies whether trimming should be used and its weight. A decent choice is 0.95. - + * ``-maxDepth `` - + Maximal depth of a weak tree. A decent choice is 1, that is case of stumps. - + * ``-maxWeakCount `` - + Maximal count of weak trees for every cascade stage. The boosted classifier (stage) will have so many weak trees (``<=maxWeakCount``), as needed to achieve the given ``-maxFalseAlarmRate``. - + #. Haar-like feature parameters: - + * ``-mode `` - + Selects the type of Haar features set used in training. ``BASIC`` use only upright features, while ``ALL`` uses the full set of upright and 45 degree rotated feature set. See [Rainer2002]_ for more details. - -#. + +#. 
Local Binary Patterns parameters: - + Local Binary Patterns don't have parameters. After the ``opencv_traincascade`` application has finished its work, the trained cascade will be saved in cascade.xml file in the folder, which was passed as ``-data`` parameter. Other files in this folder are created for the case of interrupted training, so you may delete them after completion of training. diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst index 11a0802d0d..37159b016b 100644 --- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst @@ -1481,7 +1481,7 @@ Reconstructs points by triangulation. :param points4D: 4xN array of reconstructed points in homogeneous coordinates. -The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their observations with a stereo camera. Projections matrices can be obtained from :ocv:func:`stereoRectify`. +The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their observations with a stereo camera. Projections matrices can be obtained from :ocv:func:`stereoRectify`. .. seealso:: diff --git a/modules/contrib/doc/facerec/facerec_changelog.rst b/modules/contrib/doc/facerec/facerec_changelog.rst index fc8b1aded2..1071358183 100644 --- a/modules/contrib/doc/facerec/facerec_changelog.rst +++ b/modules/contrib/doc/facerec/facerec_changelog.rst @@ -4,19 +4,19 @@ Changelog Release 0.05 ------------ -This library is now included in the official OpenCV distribution (from 2.4 on). +This library is now included in the official OpenCV distribution (from 2.4 on). The :ocv:class`FaceRecognizer` is now an :ocv:class:`Algorithm`, which better fits into the overall -OpenCV API. +OpenCV API. 
-To reduce the confusion on user side and minimize my work, libfacerec and OpenCV -have been synchronized and are now based on the same interfaces and implementation. +To reduce the confusion on user side and minimize my work, libfacerec and OpenCV +have been synchronized and are now based on the same interfaces and implementation. The library now has an extensive documentation: * The API is explained in detail and with a lot of code examples. -* The face recognition guide I had written for Python and GNU Octave/MATLAB has been adapted to the new OpenCV C++ ``cv::FaceRecognizer``. +* The face recognition guide I had written for Python and GNU Octave/MATLAB has been adapted to the new OpenCV C++ ``cv::FaceRecognizer``. * A tutorial for gender classification with Fisherfaces. -* A tutorial for face recognition in videos (e.g. webcam). +* A tutorial for face recognition in videos (e.g. webcam). Release highlights @@ -27,8 +27,8 @@ Release highlights Release 0.04 ------------ -This version is fully Windows-compatible and works with OpenCV 2.3.1. Several -bugfixes, but none influenced the recognition rate. +This version is fully Windows-compatible and works with OpenCV 2.3.1. Several +bugfixes, but none influenced the recognition rate. Release highlights ++++++++++++++++++ @@ -40,9 +40,9 @@ Release highlights Release 0.03 ------------ -Reworked the library to provide separate implementations in cpp files, because -it's the preferred way of contributing OpenCV libraries. This means the library -is not header-only anymore. Slight API changes were done, please see the +Reworked the library to provide separate implementations in cpp files, because +it's the preferred way of contributing OpenCV libraries. This means the library +is not header-only anymore. Slight API changes were done, please see the documentation for details. 
Release highlights @@ -55,9 +55,9 @@ Release highlights Release 0.02 ------------ -Reworked the library to provide separate implementations in cpp files, because -it's the preferred way of contributing OpenCV libraries. This means the library -is not header-only anymore. Slight API changes were done, please see the +Reworked the library to provide separate implementations in cpp files, because +it's the preferred way of contributing OpenCV libraries. This means the library +is not header-only anymore. Slight API changes were done, please see the documentation for details. Release highlights @@ -80,7 +80,7 @@ Release highlights * Eigenfaces [TP91]_ * Fisherfaces [BHK97]_ * Local Binary Patterns Histograms [AHP04]_ - + * Added persistence facilities to store the models with a common API. * Unit Tests (using `gtest `_). * Providing a CMakeLists.txt to enable easy cross-platform building. diff --git a/modules/contrib/doc/facerec/facerec_tutorial.rst b/modules/contrib/doc/facerec/facerec_tutorial.rst index 61cd882dad..16b425d7ee 100644 --- a/modules/contrib/doc/facerec/facerec_tutorial.rst +++ b/modules/contrib/doc/facerec/facerec_tutorial.rst @@ -201,7 +201,7 @@ For the first source code example, I'll go through it with you. I am first givin .. literalinclude:: src/facerec_eigenfaces.cpp :language: cpp :linenos: - + The source code for this demo application is also available in the ``src`` folder coming with this documentation: * :download:`src/facerec_eigenfaces.cpp ` diff --git a/modules/contrib/doc/facerec/tutorial/facerec_save_load.rst b/modules/contrib/doc/facerec/tutorial/facerec_save_load.rst index faa77df7c7..7948bcd73a 100644 --- a/modules/contrib/doc/facerec/tutorial/facerec_save_load.rst +++ b/modules/contrib/doc/facerec/tutorial/facerec_save_load.rst @@ -6,7 +6,7 @@ Introduction Saving and loading a :ocv:class:`FaceRecognizer` is very important. 
Training a FaceRecognizer can be a very time-intense task, plus it's often impossible to ship the whole face database to the user of your product. The task of saving and loading a FaceRecognizer is easy with :ocv:class:`FaceRecognizer`. You only have to call :ocv:func:`FaceRecognizer::load` for loading and :ocv:func:`FaceRecognizer::save` for saving a :ocv:class:`FaceRecognizer`. -I'll adapt the Eigenfaces example from the :doc:`../facerec_tutorial`: Imagine we want to learn the Eigenfaces of the `AT&T Facedatabase `_, store the model to a YAML file and then load it again. +I'll adapt the Eigenfaces example from the :doc:`../facerec_tutorial`: Imagine we want to learn the Eigenfaces of the `AT&T Facedatabase `_, store the model to a YAML file and then load it again. From the loaded model, we'll get a prediction, show the mean, Eigenfaces and the image reconstruction. diff --git a/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.rst b/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.rst index ecb979d1a8..b692fe5137 100644 --- a/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.rst +++ b/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.rst @@ -111,7 +111,7 @@ An example. If the haar-cascade is at ``C:/opencv/data/haarcascades/haarcascade_ facerec_video.exe C:/opencv/data/haarcascades/haarcascade_frontalface_default.xml C:/facerec/data/celebrities.txt 1 -That's it. +That's it. 
Results ------- diff --git a/modules/contrib/src/facerec.cpp b/modules/contrib/src/facerec.cpp index fbf124a486..bc41a86a08 100644 --- a/modules/contrib/src/facerec.cpp +++ b/modules/contrib/src/facerec.cpp @@ -306,7 +306,7 @@ void FaceRecognizer::update(InputArrayOfArrays src, InputArray labels ) { dynamic_cast(this)->update( src, labels ); return; } - + string error_msg = format("This FaceRecognizer (%s) does not support updating, you have to use FaceRecognizer::train to update it.", this->name().c_str()); CV_Error(CV_StsNotImplemented, error_msg); } diff --git a/modules/core/doc/basic_structures.rst b/modules/core/doc/basic_structures.rst index 66ea59b6b8..7631ee3654 100644 --- a/modules/core/doc/basic_structures.rst +++ b/modules/core/doc/basic_structures.rst @@ -553,7 +553,7 @@ Range ----- .. ocv:class:: Range -Template class specifying a continuous subsequence (slice) of a sequence. +Template class specifying a continuous subsequence (slice) of a sequence. :: @@ -773,7 +773,7 @@ Mat --- .. ocv:class:: Mat -OpenCV C++ n-dimensional dense array class +OpenCV C++ n-dimensional dense array class :: class CV_EXPORTS Mat diff --git a/modules/core/doc/clustering.rst b/modules/core/doc/clustering.rst index 090a75d31a..0f9fa6cf89 100644 --- a/modules/core/doc/clustering.rst +++ b/modules/core/doc/clustering.rst @@ -80,8 +80,8 @@ Splits an element set into equivalency classes. :param vec: Set of elements stored as a vector. - :param labels: Output vector of labels. It contains as many elements as ``vec``. Each label ``labels[i]`` is a 0-based cluster index of ``vec[i]`` . - + :param labels: Output vector of labels. It contains as many elements as ``vec``. Each label ``labels[i]`` is a 0-based cluster index of ``vec[i]`` . + :param predicate: Equivalence predicate (pointer to a boolean function of two arguments or an instance of the class that has the method ``bool operator()(const _Tp& a, const _Tp& b)`` ). 
The predicate returns ``true`` when the elements are certainly in the same class, and returns ``false`` if they may or may not be in the same class. The generic function ``partition`` implements an diff --git a/modules/core/doc/drawing_functions.rst b/modules/core/doc/drawing_functions.rst index 472ebfd51f..f1b62fb8e2 100644 --- a/modules/core/doc/drawing_functions.rst +++ b/modules/core/doc/drawing_functions.rst @@ -416,8 +416,8 @@ The number of pixels along the line is stored in ``LineIterator::count`` . The m for(int i = 0; i < it.count; i++, ++it) buf[i] = *(const Vec3b)*it; - - // alternative way of iterating through the line + + // alternative way of iterating through the line for(int i = 0; i < it2.count; i++, ++it2) { Vec3b val = img.at(it2.pos()); diff --git a/modules/core/doc/intro.rst b/modules/core/doc/intro.rst index 0f8a3b0d52..ae95b57ad9 100644 --- a/modules/core/doc/intro.rst +++ b/modules/core/doc/intro.rst @@ -91,8 +91,8 @@ you can use:: Ptr ptr = new T(...); -That is, ``Ptr ptr`` encapsulates a pointer to a ``T`` instance and a reference counter associated with the pointer. See the -:ocv:class:`Ptr` +That is, ``Ptr ptr`` encapsulates a pointer to a ``T`` instance and a reference counter associated with the pointer. See the +:ocv:class:`Ptr` description for details. .. _AutomaticAllocation: diff --git a/modules/core/include/opencv2/core/operations.hpp b/modules/core/include/opencv2/core/operations.hpp index 7b2c94007a..d3b80a0040 100644 --- a/modules/core/include/opencv2/core/operations.hpp +++ b/modules/core/include/opencv2/core/operations.hpp @@ -3002,55 +3002,55 @@ static inline void read(const FileNode& node, string& value, const string& defau } template static inline void read(const FileNode& node, Point_<_Tp>& value, const Point_<_Tp>& default_value) -{ +{ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; value = temp.size() != 2 ? 
default_value : Point_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); } template static inline void read(const FileNode& node, Point3_<_Tp>& value, const Point3_<_Tp>& default_value) -{ +{ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; value = temp.size() != 3 ? default_value : Point3_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), saturate_cast<_Tp>(temp[2])); } template static inline void read(const FileNode& node, Size_<_Tp>& value, const Size_<_Tp>& default_value) -{ +{ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; value = temp.size() != 2 ? default_value : Size_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); } template static inline void read(const FileNode& node, Complex<_Tp>& value, const Complex<_Tp>& default_value) -{ +{ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; value = temp.size() != 2 ? default_value : Complex<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); } template static inline void read(const FileNode& node, Rect_<_Tp>& value, const Rect_<_Tp>& default_value) -{ +{ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; - value = temp.size() != 4 ? default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + value = temp.size() != 4 ? default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); } template static inline void read(const FileNode& node, Vec<_Tp, cn>& value, const Vec<_Tp, cn>& default_value) -{ +{ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; value = temp.size() != cn ? default_value : Vec<_Tp, cn>(&temp[0]); } template static inline void read(const FileNode& node, Scalar_<_Tp>& value, const Scalar_<_Tp>& default_value) -{ +{ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; value = temp.size() != 4 ? 
default_value : Scalar_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); } static inline void read(const FileNode& node, Range& value, const Range& default_value) -{ - Point2i temp(value.start, value.end); const Point2i default_temp = Point2i(default_value.start, default_value.end); +{ + Point2i temp(value.start, value.end); const Point2i default_temp = Point2i(default_value.start, default_value.end); read(node, temp, default_temp); - value.start = temp.x; value.end = temp.y; + value.start = temp.x; value.end = temp.y; } CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() ); diff --git a/modules/core/src/arithm.cpp b/modules/core/src/arithm.cpp index 99390402e0..0db4c62bf2 100644 --- a/modules/core/src/arithm.cpp +++ b/modules/core/src/arithm.cpp @@ -1252,14 +1252,14 @@ static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst, Mat src1 = _src1.getMat(), src2 = _src2.getMat(); bool haveMask = !_mask.empty(); bool reallocate = false; - - bool src1Scalar = checkScalar(src1, src2.type(), kind1, kind2); - bool src2Scalar = checkScalar(src2, src1.type(), kind2, kind1); + + bool src1Scalar = checkScalar(src1, src2.type(), kind1, kind2); + bool src2Scalar = checkScalar(src2, src1.type(), kind2, kind1); if( (kind1 == kind2 || src1.channels() == 1) && src1.dims <= 2 && src2.dims <= 2 && src1.size() == src2.size() && src1.type() == src2.type() && !haveMask && ((!_dst.fixedType() && (dtype < 0 || CV_MAT_DEPTH(dtype) == src1.depth())) || - (_dst.fixedType() && _dst.type() == _src1.type())) && + (_dst.fixedType() && _dst.type() == _src1.type())) && ((src1Scalar && src2Scalar) || (!src1Scalar && !src2Scalar)) ) { _dst.create(src1.size(), src1.type()); diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index 1509c9218f..d8ce658f08 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -453,7 +453,7 @@ cv::Scalar cv::sum( 
InputArray _src ) { Mat src = _src.getMat(); int k, cn = src.channels(), depth = src.depth(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) size_t total_size = src.total(); int rows = src.size[0], cols = (int)(total_size/rows); @@ -462,7 +462,7 @@ cv::Scalar cv::sum( InputArray _src ) IppiSize sz = { cols, rows }; int type = src.type(); typedef IppStatus (CV_STDCALL* ippiSumFunc)(const void*, int, IppiSize, double *, int); - ippiSumFunc ippFunc = + ippiSumFunc ippFunc = type == CV_8UC1 ? (ippiSumFunc)ippiSum_8u_C1R : type == CV_8UC3 ? (ippiSumFunc)ippiSum_8u_C3R : type == CV_8UC4 ? (ippiSumFunc)ippiSum_8u_C4R : @@ -490,8 +490,8 @@ cv::Scalar cv::sum( InputArray _src ) } } } -#endif - +#endif + SumFunc func = getSumFunc(depth); CV_Assert( cn <= 4 && func != 0 ); @@ -565,7 +565,7 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask ) CV_Assert( mask.empty() || mask.type() == CV_8U ); int k, cn = src.channels(), depth = src.depth(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) size_t total_size = src.total(); int rows = src.size[0], cols = (int)(total_size/rows); @@ -576,7 +576,7 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask ) if( !mask.empty() ) { typedef IppStatus (CV_STDCALL* ippiMaskMeanFuncC1)(const void *, int, void *, int, IppiSize, Ipp64f *); - ippiMaskMeanFuncC1 ippFuncC1 = + ippiMaskMeanFuncC1 ippFuncC1 = type == CV_8UC1 ? (ippiMaskMeanFuncC1)ippiMean_8u_C1MR : type == CV_16UC1 ? (ippiMaskMeanFuncC1)ippiMean_16u_C1MR : type == CV_32FC1 ? (ippiMaskMeanFuncC1)ippiMean_32f_C1MR : @@ -590,7 +590,7 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask ) } } typedef IppStatus (CV_STDCALL* ippiMaskMeanFuncC3)(const void *, int, void *, int, IppiSize, int, Ipp64f *); - ippiMaskMeanFuncC3 ippFuncC3 = + ippiMaskMeanFuncC3 ippFuncC3 = type == CV_8UC3 ? (ippiMaskMeanFuncC3)ippiMean_8u_C3CMR : type == CV_16UC3 ? (ippiMaskMeanFuncC3)ippiMean_16u_C3CMR : type == CV_32FC3 ? 
(ippiMaskMeanFuncC3)ippiMean_32f_C3CMR : @@ -609,7 +609,7 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask ) else { typedef IppStatus (CV_STDCALL* ippiMeanFunc)(const void*, int, IppiSize, double *, int); - ippiMeanFunc ippFunc = + ippiMeanFunc ippFunc = type == CV_8UC1 ? (ippiMeanFunc)ippiMean_8u_C1R : type == CV_8UC3 ? (ippiMeanFunc)ippiMean_8u_C3R : type == CV_8UC4 ? (ippiMeanFunc)ippiMean_8u_C4R : @@ -639,7 +639,7 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask ) } } #endif - + SumFunc func = getSumFunc(depth); CV_Assert( cn <= 4 && func != 0 ); diff --git a/modules/core/test/test_io.cpp b/modules/core/test/test_io.cpp index 58c8817a4d..09a45fd6a2 100644 --- a/modules/core/test/test_io.cpp +++ b/modules/core/test/test_io.cpp @@ -405,7 +405,7 @@ protected: Vec v1(15, 16, 17, 18, 19), ov1; Scalar sc1(20.0, 21.1, 22.2, 23.3), osc1; Range g1(7, 8), og1; - + FileStorage fs(fname, FileStorage::WRITE); fs << "mi" << mi; fs << "mv" << mv; diff --git a/modules/core/test/test_math.cpp b/modules/core/test/test_math.cpp index c3d88bda6b..cc91a6b863 100644 --- a/modules/core/test/test_math.cpp +++ b/modules/core/test/test_math.cpp @@ -2457,7 +2457,7 @@ TEST(Core_Invert, small) { cv::Mat a = (cv::Mat_(3,3) << 2.42104644730331, 1.81444796521479, -3.98072565304758, 0, 7.08389214348967e-3, 5.55326770986007e-3, 0,0, 7.44556154284261e-3); //cv::randu(a, -1, 1); - + cv::Mat b = a.t()*a; cv::Mat c, i = Mat_::eye(3, 3); cv::invert(b, c, cv::DECOMP_LU); //std::cout << b*c << std::endl; diff --git a/modules/features2d/doc/object_categorization.rst b/modules/features2d/doc/object_categorization.rst index cf10460f69..4361c3022c 100644 --- a/modules/features2d/doc/object_categorization.rst +++ b/modules/features2d/doc/object_categorization.rst @@ -40,7 +40,7 @@ Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. :: BOWTrainer::add ------------------- -Adds descriptors to a training set. +Adds descriptors to a training set. .. 
ocv:function:: void BOWTrainer::add( const Mat& descriptors ) @@ -66,7 +66,7 @@ Returns the count of all descriptors stored in the training set. BOWTrainer::cluster ----------------------- -Clusters train descriptors. +Clusters train descriptors. .. ocv:function:: Mat BOWTrainer::cluster() const @@ -116,7 +116,7 @@ Class to compute an image descriptor using the *bag of visual words*. Such a com #. Compute descriptors for a given image and its keypoints set. #. Find the nearest visual words from the vocabulary for each keypoint descriptor. #. Compute the bag-of-words image descriptor as is a normalized histogram of vocabulary words encountered in the image. The ``i``-th bin of the histogram is a frequency of ``i``-th word of the vocabulary in the given image. - + The class declaration is the following: :: class BOWImgDescriptorExtractor diff --git a/modules/features2d/src/bagofwords.cpp b/modules/features2d/src/bagofwords.cpp index 898c6342dd..83d9df7b3d 100644 --- a/modules/features2d/src/bagofwords.cpp +++ b/modules/features2d/src/bagofwords.cpp @@ -178,10 +178,10 @@ void BOWImgDescriptorExtractor::compute( const Mat& image, vector& key // Normalize image descriptor. 
imgDescriptor /= descriptors.rows; - + // Add the descriptors of image keypoints if (_descriptors) { - *_descriptors = descriptors.clone(); + *_descriptors = descriptors.clone(); } } diff --git a/modules/features2d/src/evaluation.cpp b/modules/features2d/src/evaluation.cpp index 1724b0176a..44151c03cf 100644 --- a/modules/features2d/src/evaluation.cpp +++ b/modules/features2d/src/evaluation.cpp @@ -258,7 +258,7 @@ struct IntersectAreaCounter { CV_Assert( miny < maxy ); CV_Assert( dr > FLT_EPSILON ); - + int temp_bua = bua, temp_bna = bna; for( int i = range.begin(); i != range.end(); i++ ) { diff --git a/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.rst b/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.rst index ee44b301ed..07b0b2a467 100644 --- a/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.rst +++ b/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.rst @@ -68,11 +68,11 @@ The method constructs a fast search structure from a set of features using the s * **branching** The branching factor to use for the hierarchical k-means tree - * **iterations** The maximum number of iterations to use in the k-means clustering stage when building the k-means tree. A value of -1 used here means that the k-means clustering should be iterated until convergence + * **iterations** The maximum number of iterations to use in the k-means clustering stage when building the k-means tree. A value of -1 used here means that the k-means clustering should be iterated until convergence - * **centers_init** The algorithm to use for selecting the initial centers when performing a k-means clustering step. 
The possible values are ``CENTERS_RANDOM`` (picks the initial cluster centers randomly), ``CENTERS_GONZALES`` (picks the initial centers using Gonzales' algorithm) and ``CENTERS_KMEANSPP`` (picks the initial centers using the algorithm suggested in arthur_kmeanspp_2007 ) + * **centers_init** The algorithm to use for selecting the initial centers when performing a k-means clustering step. The possible values are ``CENTERS_RANDOM`` (picks the initial cluster centers randomly), ``CENTERS_GONZALES`` (picks the initial centers using Gonzales' algorithm) and ``CENTERS_KMEANSPP`` (picks the initial centers using the algorithm suggested in arthur_kmeanspp_2007 ) - * **cb_index** This parameter (cluster boundary index) influences the way exploration is performed in the hierarchical kmeans tree. When ``cb_index`` is zero the next kmeans domain to be explored is chosen to be the one with the closest center. A value greater then zero also takes into account the size of the domain. + * **cb_index** This parameter (cluster boundary index) influences the way exploration is performed in the hierarchical kmeans tree. When ``cb_index`` is zero the next kmeans domain to be explored is chosen to be the one with the closest center. A value greater then zero also takes into account the size of the domain. * **CompositeIndexParams** When using a parameters object of this type the index created combines the randomized kd-trees and the hierarchical k-means tree. :: @@ -122,16 +122,16 @@ The method constructs a fast search structure from a set of features using the s .. - * **target_precision** Is a number between 0 and 1 specifying the percentage of the approximate nearest-neighbor searches that return the exact nearest-neighbor. Using a higher value for this parameter gives more accurate results, but the search takes longer. The optimum value usually depends on the application. 
+ * **target_precision** Is a number between 0 and 1 specifying the percentage of the approximate nearest-neighbor searches that return the exact nearest-neighbor. Using a higher value for this parameter gives more accurate results, but the search takes longer. The optimum value usually depends on the application. - * **build_weight** Specifies the importance of the index build time raported to the nearest-neighbor search time. In some applications it's acceptable for the index build step to take a long time if the subsequent searches in the index can be performed very fast. In other applications it's required that the index be build as fast as possible even if that leads to slightly longer search times. + * **build_weight** Specifies the importance of the index build time raported to the nearest-neighbor search time. In some applications it's acceptable for the index build step to take a long time if the subsequent searches in the index can be performed very fast. In other applications it's required that the index be build as fast as possible even if that leads to slightly longer search times. * **memory_weight** Is used to specify the tradeoff between time (index build time and search time) and memory used by the index. A value less than 1 gives more importance to the time spent and a value greater than 1 gives more importance to the memory usage. - * **sample_fraction** Is a number between 0 and 1 indicating what fraction of the dataset to use in the automatic parameter configuration algorithm. Running the algorithm on the full dataset gives the most accurate results, but for very large datasets can take longer than desired. In such case using just a fraction of the data helps speeding up this algorithm while still giving good approximations of the optimum parameters. + * **sample_fraction** Is a number between 0 and 1 indicating what fraction of the dataset to use in the automatic parameter configuration algorithm. 
Running the algorithm on the full dataset gives the most accurate results, but for very large datasets can take longer than desired. In such case using just a fraction of the data helps speeding up this algorithm while still giving good approximations of the optimum parameters. * **SavedIndexParams** This object type is used for loading a previously saved index from the disk. :: diff --git a/modules/gpu/CMakeLists.txt b/modules/gpu/CMakeLists.txt index 44b5072683..8650b3f091 100644 --- a/modules/gpu/CMakeLists.txt +++ b/modules/gpu/CMakeLists.txt @@ -43,7 +43,7 @@ if(HAVE_CUDA) ocv_cuda_compile(cuda_objs ${lib_cuda} ${ncv_cuda}) set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY}) - + if(HAVE_CUFFT) set(cuda_link_libs ${cuda_link_libs} ${CUDA_cufft_LIBRARY}) endif() diff --git a/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst b/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst index d99def207c..aafbf74584 100644 --- a/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst @@ -49,7 +49,7 @@ This means that the input left image is low textured. * A basic stereo matching example can be found at opencv_source_code/samples/gpu/stereo_match.cpp * A stereo matching example using several GPU's can be found at opencv_source_code/samples/gpu/stereo_multi.cpp * A stereo matching example using several GPU's and driver API can be found at opencv_source_code/samples/gpu/driver_api_stereo_multi.cpp - + gpu::StereoBM_GPU::StereoBM_GPU ----------------------------------- Enables :ocv:class:`gpu::StereoBM_GPU` constructors. 
diff --git a/modules/gpu/doc/video.rst b/modules/gpu/doc/video.rst index 7b8dde6016..ea15a062a3 100644 --- a/modules/gpu/doc/video.rst +++ b/modules/gpu/doc/video.rst @@ -7,7 +7,7 @@ Video Analysis * A general optical flow example can be found at opencv_source_code/samples/gpu/optical_flow.cpp * A general optical flow example using the Nvidia API can be found at opencv_source_code/samples/gpu/opticalflow_nvidia_api.cpp - + gpu::BroxOpticalFlow -------------------- .. ocv:class:: gpu::BroxOpticalFlow diff --git a/modules/highgui/doc/reading_and_writing_images_and_video.rst b/modules/highgui/doc/reading_and_writing_images_and_video.rst index 811f3da06c..bc901821f5 100644 --- a/modules/highgui/doc/reading_and_writing_images_and_video.rst +++ b/modules/highgui/doc/reading_and_writing_images_and_video.rst @@ -20,7 +20,7 @@ Reads an image from a buffer in memory. :param buf: Input array or vector of bytes. :param flags: The same flags as in :ocv:func:`imread` . - + :param dst: The optional output placeholder for the decoded matrix. It can save the image reallocations when the function is called repeatedly for images of the same size. The function reads an image from the specified buffer in the memory. @@ -74,9 +74,9 @@ Loads an image from a file. :param filename: Name of file to be loaded. :param flags: Flags specifying the color type of a loaded image: - + * CV_LOAD_IMAGE_ANYDEPTH - If set, return 16-bit/32-bit image when the input has the corresponding depth, otherwise convert it to 8-bit. 
- + * CV_LOAD_IMAGE_COLOR - If set, always convert image to the color one * CV_LOAD_IMAGE_GRAYSCALE - If set, always convert image to the grayscale one diff --git a/modules/highgui/src/cap_ffmpeg.cpp b/modules/highgui/src/cap_ffmpeg.cpp index 00f0494d89..54a8529bad 100644 --- a/modules/highgui/src/cap_ffmpeg.cpp +++ b/modules/highgui/src/cap_ffmpeg.cpp @@ -160,7 +160,7 @@ private: }; -class CvCapture_FFMPEG_proxy : +class CvCapture_FFMPEG_proxy : public CvCapture { public: @@ -224,7 +224,7 @@ CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char * filename) return 0; } -class CvVideoWriter_FFMPEG_proxy : +class CvVideoWriter_FFMPEG_proxy : public CvVideoWriter { public: diff --git a/modules/highgui/src/cap_ffmpeg_impl.hpp b/modules/highgui/src/cap_ffmpeg_impl.hpp index 21acc54c33..ee6ba84a12 100644 --- a/modules/highgui/src/cap_ffmpeg_impl.hpp +++ b/modules/highgui/src/cap_ffmpeg_impl.hpp @@ -345,7 +345,7 @@ class ImplMutex public: ImplMutex() { init(); } ~ImplMutex() { destroy(); } - + void init(); void destroy(); @@ -450,7 +450,7 @@ void ImplMutex::init() impl = (Impl*)malloc(sizeof(Impl)); impl->init(); } -void ImplMutex::destroy() +void ImplMutex::destroy() { impl->destroy(); free(impl); diff --git a/modules/highgui/src/cap_ios_video_camera.mm b/modules/highgui/src/cap_ios_video_camera.mm index ac85f79ee5..f4d30a7618 100644 --- a/modules/highgui/src/cap_ios_video_camera.mm +++ b/modules/highgui/src/cap_ios_video_camera.mm @@ -388,7 +388,7 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;}; - (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image { - + CGSize frameSize = CGSizeMake(CGImageGetWidth(image), CGImageGetHeight(image)); NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithBool:NO], kCVPixelBufferCGImageCompatibilityKey, @@ -399,23 +399,23 @@ static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;}; frameSize.height, kCVPixelFormatType_32ARGB, 
(CFDictionaryRef) CFBridgingRetain(options), &pxbuffer); NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL); - + CVPixelBufferLockBaseAddress(pxbuffer, 0); void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer); - - + + CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB(); CGContextRef context = CGBitmapContextCreate(pxdata, frameSize.width, frameSize.height, 8, 4*frameSize.width, rgbColorSpace, kCGImageAlphaPremultipliedFirst); - + CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image), CGImageGetHeight(image)), image); CGColorSpaceRelease(rgbColorSpace); CGContextRelease(context); - + CVPixelBufferUnlockBaseAddress(pxbuffer, 0); - + return pxbuffer; } diff --git a/modules/highgui/src/cap_libv4l.cpp b/modules/highgui/src/cap_libv4l.cpp index 6027caec26..3fd6dd59ad 100644 --- a/modules/highgui/src/cap_libv4l.cpp +++ b/modules/highgui/src/cap_libv4l.cpp @@ -14,7 +14,7 @@ It has been tested with the motempl sample program First Patch: August 24, 2004 Travis Wood TravisOCV@tkwood.com For Release: OpenCV-Linux Beta4 opencv-0.9.6 Tested On: LMLBT44 with 8 video inputs -Problems? Post your questions at answers.opencv.org, +Problems? 
Post your questions at answers.opencv.org, Report bugs at code.opencv.org, Submit your fixes at https://github.com/Itseez/opencv/ Patched Comments: diff --git a/modules/highgui/src/cap_msmf.cpp b/modules/highgui/src/cap_msmf.cpp index 76b9a215c5..9daad64206 100644 --- a/modules/highgui/src/cap_msmf.cpp +++ b/modules/highgui/src/cap_msmf.cpp @@ -3026,7 +3026,7 @@ double CvCaptureFile_MSMF::getProperty(int property_id) return ((double)captureFormats[captureFormatIndex].MF_MT_FRAME_RATE_NUMERATOR) / ((double)captureFormats[captureFormatIndex].MF_MT_FRAME_RATE_DENOMINATOR); } - + return -1; } @@ -3062,7 +3062,7 @@ IplImage* CvCaptureFile_MSMF::retrieveFrame(int) if(RIOut && size == RIOut->getSize()) { - videoInput::processPixels(RIOut->getpPixels(), (unsigned char*)frame->imageData, width, + videoInput::processPixels(RIOut->getpPixels(), (unsigned char*)frame->imageData, width, height, bytes, false, verticalFlip); } diff --git a/modules/highgui/src/cap_qtkit.mm b/modules/highgui/src/cap_qtkit.mm index 258d63777e..275deae0d2 100644 --- a/modules/highgui/src/cap_qtkit.mm +++ b/modules/highgui/src/cap_qtkit.mm @@ -413,9 +413,9 @@ int CvCaptureCAM::startCaptureDevice(int cameraNum) { void CvCaptureCAM::setWidthHeight() { NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init]; - + [mCaptureSession stopRunning]; - + NSDictionary* pixelBufferOptions = [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithDouble:1.0*width], (id)kCVPixelBufferWidthKey, [NSNumber numberWithDouble:1.0*height], (id)kCVPixelBufferHeightKey, @@ -424,9 +424,9 @@ void CvCaptureCAM::setWidthHeight() { nil]; [mCaptureDecompressedVideoOutput setPixelBufferAttributes:pixelBufferOptions]; - + [mCaptureSession startRunning]; - + grabFrame(60); [localpool drain]; } diff --git a/modules/highgui/src/cap_v4l.cpp b/modules/highgui/src/cap_v4l.cpp index 23497a5958..045c6f889c 100644 --- a/modules/highgui/src/cap_v4l.cpp +++ b/modules/highgui/src/cap_v4l.cpp @@ -14,7 +14,7 @@ It has been tested 
with the motempl sample program First Patch: August 24, 2004 Travis Wood TravisOCV@tkwood.com For Release: OpenCV-Linux Beta4 opencv-0.9.6 Tested On: LMLBT44 with 8 video inputs -Problems? Post your questions at answers.opencv.org, +Problems? Post your questions at answers.opencv.org, Report bugs at code.opencv.org, Submit your fixes at https://github.com/Itseez/opencv/ Patched Comments: @@ -157,7 +157,7 @@ the symptoms were damaged image and 'Corrupt JPEG data: premature end of data se prevents bad images in the first place 11th patch: April 2, 2013, Forrest Reiling forrest.reiling@gmail.com -Added v4l2 support for getting capture property CV_CAP_PROP_POS_MSEC. +Added v4l2 support for getting capture property CV_CAP_PROP_POS_MSEC. Returns the millisecond timestamp of the last frame grabbed or 0 if no frames have been grabbed Used to successfully synchonize 2 Logitech C310 USB webcams to within 16 ms of one another @@ -1233,8 +1233,8 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) { if (-1 == ioctl (capture->deviceHandle, VIDIOC_QBUF, &buf)) perror ("VIDIOC_QBUF"); - //set timestamp in capture struct to be timestamp of most recent frame - capture->timestamp = buf.timestamp; + //set timestamp in capture struct to be timestamp of most recent frame + capture->timestamp = buf.timestamp; return 1; } @@ -2327,7 +2327,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture, if (capture->FirstCapture) { return 0; } else { - return 1000 * capture->timestamp.tv_sec + ((double) capture->timestamp.tv_usec) / 1000; + return 1000 * capture->timestamp.tv_sec + ((double) capture->timestamp.tv_usec) / 1000; } break; case CV_CAP_PROP_BRIGHTNESS: diff --git a/modules/highgui/src/cap_ximea.cpp b/modules/highgui/src/cap_ximea.cpp index 98279e05b5..7292727b76 100644 --- a/modules/highgui/src/cap_ximea.cpp +++ b/modules/highgui/src/cap_ximea.cpp @@ -138,7 +138,7 @@ void CvCaptureCAM_XIMEA::close() { if(frame) cvReleaseImage(&frame); - + if(hmv) { 
xiStopAcquisition(hmv); @@ -176,11 +176,11 @@ IplImage* CvCaptureCAM_XIMEA::retrieveFrame(int) { // update cvImage after format has changed resetCvImage(); - + // copy pixel data switch( image.frm) { - case XI_MONO8 : + case XI_MONO8 : case XI_RAW8 : memcpy( frame->imageData, image.bp, image.width*image.height); break; case XI_MONO16 : case XI_RAW16 : memcpy( frame->imageData, image.bp, image.width*image.height*sizeof(WORD)); break; @@ -210,15 +210,15 @@ void CvCaptureCAM_XIMEA::resetCvImage() { case XI_MONO8 : case XI_RAW8 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 1); break; - case XI_MONO16 : + case XI_MONO16 : case XI_RAW16 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_16U, 1); break; - case XI_RGB24 : + case XI_RGB24 : case XI_RGB_PLANAR : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 3); break; case XI_RGB32 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 4); break; default : return; } - } + } cvZero(frame); } /**********************************************************************************/ @@ -338,9 +338,9 @@ int CvCaptureCAM_XIMEA::getBpp() { case XI_MONO8 : case XI_RAW8 : return 1; - case XI_MONO16 : + case XI_MONO16 : case XI_RAW16 : return 2; - case XI_RGB24 : + case XI_RGB24 : case XI_RGB_PLANAR : return 3; case XI_RGB32 : return 4; default : @@ -348,4 +348,4 @@ int CvCaptureCAM_XIMEA::getBpp() } } -/**********************************************************************************/ \ No newline at end of file +/**********************************************************************************/ diff --git a/modules/highgui/src/files_Qt/Milky/README.txt b/modules/highgui/src/files_Qt/Milky/README.txt index a257e5a2ca..df2a2e83f4 100644 --- a/modules/highgui/src/files_Qt/Milky/README.txt +++ b/modules/highgui/src/files_Qt/Milky/README.txt @@ -16,4 +16,4 @@ The license does not permit the following uses: You may not use, or allow anyone else to use 
the icons to create pornographic, libelous, obscene, or defamatory material. -All icon files are provided "as is". You agree not to hold IconEden.com liable for any damages that may occur due to use, or inability to use, icons or image data from IconEden.com. \ No newline at end of file +All icon files are provided "as is". You agree not to hold IconEden.com liable for any damages that may occur due to use, or inability to use, icons or image data from IconEden.com. \ No newline at end of file diff --git a/modules/highgui/src/ios_conversions.mm b/modules/highgui/src/ios_conversions.mm index 7295743c58..4300acd9bf 100644 --- a/modules/highgui/src/ios_conversions.mm +++ b/modules/highgui/src/ios_conversions.mm @@ -44,21 +44,21 @@ #include "precomp.hpp" UIImage* MatToUIImage(const cv::Mat& image) { - + NSData *data = [NSData dataWithBytes:image.data length:image.elemSize()*image.total()]; - + CGColorSpaceRef colorSpace; - + if (image.elemSize() == 1) { colorSpace = CGColorSpaceCreateDeviceGray(); } else { colorSpace = CGColorSpaceCreateDeviceRGB(); } - + CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data); - + // Creating CGImage from cv::Mat CGImageRef imageRef = CGImageCreate(image.cols, image.rows, @@ -73,14 +73,14 @@ UIImage* MatToUIImage(const cv::Mat& image) { false, kCGRenderingIntentDefault ); - - + + // Getting UIImage from CGImage UIImage *finalImage = [UIImage imageWithCGImage:imageRef]; CGImageRelease(imageRef); CGDataProviderRelease(provider); CGColorSpaceRelease(colorSpace); - + return finalImage; } diff --git a/modules/highgui/src/window_QT.cpp b/modules/highgui/src/window_QT.cpp index 64d57ab269..eb9cfe37dd 100644 --- a/modules/highgui/src/window_QT.cpp +++ b/modules/highgui/src/window_QT.cpp @@ -2474,7 +2474,7 @@ void DefaultViewPort::saveView() if (!fileName.isEmpty()) //save the picture { QString extension = fileName.right(3); - + // Create a new pixmap to render the viewport into QPixmap 
viewportPixmap(viewport()->size()); viewport()->render(&viewportPixmap); diff --git a/modules/imgproc/doc/histograms.rst b/modules/imgproc/doc/histograms.rst index bf6c98fd6b..e36ce2d536 100644 --- a/modules/imgproc/doc/histograms.rst +++ b/modules/imgproc/doc/histograms.rst @@ -179,7 +179,7 @@ Compares two histograms. * **CV_COMP_INTERSECT** Intersection * **CV_COMP_BHATTACHARYYA** Bhattacharyya distance - + * **CV_COMP_HELLINGER** Synonym for ``CV_COMP_BHATTACHARYYA`` The functions ``compareHist`` compare two dense or two sparse histograms using the specified method: diff --git a/modules/imgproc/include/opencv2/imgproc/types_c.h b/modules/imgproc/include/opencv2/imgproc/types_c.h index ba25af9f91..4aba0a8748 100644 --- a/modules/imgproc/include/opencv2/imgproc/types_c.h +++ b/modules/imgproc/include/opencv2/imgproc/types_c.h @@ -309,7 +309,7 @@ enum // alpha premultiplication CV_RGBA2mRGBA = 125, CV_mRGBA2RGBA = 126, - + CV_RGB2YUV_I420 = 127, CV_BGR2YUV_I420 = 128, CV_RGB2YUV_IYUV = CV_RGB2YUV_I420, diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index fed7b73133..2e130e042d 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -3896,7 +3896,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) CV_Error( CV_StsBadArg, "Unsupported image depth" ); } } - break; + break; default: CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" ); } diff --git a/modules/imgproc/src/morph.cpp b/modules/imgproc/src/morph.cpp index fb901b7268..40be8cd3d2 100644 --- a/modules/imgproc/src/morph.cpp +++ b/modules/imgproc/src/morph.cpp @@ -1149,11 +1149,11 @@ static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kerne } //DEPRECATED. Allocates and initializes morphology state structure for erosion or dilation operation. 
typedef IppStatus (CV_STDCALL* ippiMorphologyInitAllocFunc)(int, const void*, IppiSize, IppiPoint, IppiMorphState **); - ippiMorphologyInitAllocFunc ippInitAllocFunc = - type == CV_8UC1 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_8u_C1R : - type == CV_8UC3 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_8u_C3R : - type == CV_8UC4 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_8u_C4R : - type == CV_32FC1 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_32f_C1R : + ippiMorphologyInitAllocFunc ippInitAllocFunc = + type == CV_8UC1 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_8u_C1R : + type == CV_8UC3 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_8u_C3R : + type == CV_8UC4 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_8u_C4R : + type == CV_32FC1 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_32f_C1R : type == CV_32FC3 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_32f_C3R : type == CV_32FC4 ? (ippiMorphologyInitAllocFunc)ippiMorphologyInitAlloc_32f_C4R : 0; @@ -1163,25 +1163,25 @@ static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kerne { case MORPH_DILATE: { - ippFunc = - type == CV_8UC1 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_8u_C1R : - type == CV_8UC3 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_8u_C3R : - type == CV_8UC4 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_8u_C4R : - type == CV_32FC1 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_32f_C1R : - type == CV_32FC3 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_32f_C3R : - type == CV_32FC4 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_32f_C4R : + ippFunc = + type == CV_8UC1 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_8u_C1R : + type == CV_8UC3 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_8u_C3R : + type == CV_8UC4 ? 
(ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_8u_C4R : + type == CV_32FC1 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_32f_C1R : + type == CV_32FC3 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_32f_C3R : + type == CV_32FC4 ? (ippiMorphologyBorderReplicateFunc)ippiDilateBorderReplicate_32f_C4R : 0; break; } case MORPH_ERODE: { - ippFunc = - type == CV_8UC1 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_8u_C1R : - type == CV_8UC3 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_8u_C3R : - type == CV_8UC4 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_8u_C4R : - type == CV_32FC1 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_32f_C1R : - type == CV_32FC3 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_32f_C3R : - type == CV_32FC4 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_32f_C4R : + ippFunc = + type == CV_8UC1 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_8u_C1R : + type == CV_8UC3 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_8u_C3R : + type == CV_8UC4 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_8u_C4R : + type == CV_32FC1 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_32f_C1R : + type == CV_32FC3 ? (ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_32f_C3R : + type == CV_32FC4 ? 
(ippiMorphologyBorderReplicateFunc)ippiErodeBorderReplicate_32f_C4R : 0; break; } @@ -1207,8 +1207,8 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst, int borderType, const Scalar &borderValue) { Mat src = _src.getMat(), kernel = _kernel.getMat(); - if( !( src.depth() == CV_8U || src.depth() == CV_32F ) || ( iterations > 1 ) || - !( borderType == cv::BORDER_REPLICATE || (borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue()) ) + if( !( src.depth() == CV_8U || src.depth() == CV_32F ) || ( iterations > 1 ) || + !( borderType == cv::BORDER_REPLICATE || (borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue()) ) || !( op == MORPH_DILATE || op == MORPH_ERODE) ) return false; if( borderType == cv::BORDER_CONSTANT ) diff --git a/modules/imgproc/test/test_cvtyuv.cpp b/modules/imgproc/test/test_cvtyuv.cpp index 61bc9bd8d4..bd8d95dc76 100644 --- a/modules/imgproc/test/test_cvtyuv.cpp +++ b/modules/imgproc/test/test_cvtyuv.cpp @@ -349,7 +349,7 @@ public: int r = rgb[0]; int g = rgb[1]; int b = rgb[2]; - + uchar y = saturate_cast((int)( 0.257f*r + 0.504f*g + 0.098f*b + 0.5f) + 16); uchar u = saturate_cast((int)(-0.148f*r - 0.291f*g + 0.439f*b + 0.5f) + 128); uchar v = saturate_cast((int)( 0.439f*r - 0.368f*g - 0.071f*b + 0.5f) + 128); diff --git a/modules/java/generator/src/cpp/Mat.cpp b/modules/java/generator/src/cpp/Mat.cpp index b651b4d705..fb546ede15 100644 --- a/modules/java/generator/src/cpp/Mat.cpp +++ b/modules/java/generator/src/cpp/Mat.cpp @@ -11,10 +11,10 @@ using namespace cv; static void throwJavaException(JNIEnv *env, const std::exception *e, const char *method) { std::string what = "unknown exception"; jclass je = 0; - + if(e) { std::string exception_type = "std::exception"; - + if(dynamic_cast(e)) { exception_type = "cv::Exception"; je = env->FindClass("org/opencv/core/CvException"); @@ -22,16 +22,16 @@ static void throwJavaException(JNIEnv *env, const std::exception *e, const char what 
= exception_type + ": " + e->what(); } - + if(!je) je = env->FindClass("java/lang/Exception"); env->ThrowNew(je, what.c_str()); - + LOGE("%s caught %s", method, what.c_str()); (void)method; // avoid "unused" warning } extern "C" { - + // // MatXXX::MatXXX() @@ -69,7 +69,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1Mat__III } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -95,7 +95,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1Mat__DDI } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -122,7 +122,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1Mat__IIIDDDD } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -149,7 +149,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1Mat__DDIDDDD } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -176,7 +176,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1Mat__JIIII } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -198,7 +198,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1Mat__JII } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -224,7 +224,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1adjustROI } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -293,7 +293,7 @@ JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_n_1channels } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -319,7 +319,7 @@ JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_n_1checkVector__JIIZ } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -341,7 +341,7 @@ JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_n_1checkVector__JII } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -363,7 +363,7 @@ JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_n_1checkVector__JI } catch (...) 
{ throwJavaException(env, 0, method_name); } - + return 0; } @@ -391,7 +391,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1clone } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -418,7 +418,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1col } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -445,7 +445,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1colRange } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -454,7 +454,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1colRange // // int Mat::dims() // - + JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_n_1dims (JNIEnv* env, jclass, jlong self); @@ -471,7 +471,7 @@ JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_n_1dims } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -497,7 +497,7 @@ JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_n_1cols } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -690,7 +690,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1cross } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -733,7 +733,7 @@ JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_n_1depth } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -760,7 +760,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1diag__JI } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -787,7 +787,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1diag__J } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -814,7 +814,7 @@ JNIEXPORT jdouble JNICALL Java_org_opencv_core_Mat_n_1dot } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -840,7 +840,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1elemSize } catch (...) 
{ throwJavaException(env, 0, method_name); } - + return 0; } @@ -866,7 +866,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1elemSize1 } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -892,7 +892,7 @@ JNIEXPORT jboolean JNICALL Java_org_opencv_core_Mat_n_1empty } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -918,7 +918,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1eye__III } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -945,7 +945,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1eye__DDI } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -972,7 +972,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1inv__JI } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -994,7 +994,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1inv__J } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -1020,7 +1020,7 @@ JNIEXPORT jboolean JNICALL Java_org_opencv_core_Mat_n_1isContinuous } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -1046,7 +1046,7 @@ JNIEXPORT jboolean JNICALL Java_org_opencv_core_Mat_n_1isSubmatrix } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -1101,7 +1101,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1mul__JJD } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -1253,7 +1253,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1reshape__JII } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -1276,7 +1276,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1reshape__JI } catch (...) { throwJavaException(env, 0, method_name); } - + return 0; } @@ -1303,7 +1303,7 @@ JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1row } catch (...) 
{ throwJavaException(env, 0, method_name); } - + return 0; } @@ -1487,7 +1487,7 @@ JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Mat_n_1size LOGD("%s", method_name); Mat* me = (Mat*) self; //TODO: check for NULL Size _retval_ = me->size( ); - jdoubleArray _da_retval_ = env->NewDoubleArray(2); + jdoubleArray _da_retval_ = env->NewDoubleArray(2); jdouble _tmp_retval_[2] = {_retval_.width, _retval_.height}; env->SetDoubleArrayRegion(_da_retval_, 0, 2, _tmp_retval_); return _da_retval_; diff --git a/modules/ml/doc/gradient_boosted_trees.rst b/modules/ml/doc/gradient_boosted_trees.rst index 4bace302ad..93d0d82aee 100644 --- a/modules/ml/doc/gradient_boosted_trees.rst +++ b/modules/ml/doc/gradient_boosted_trees.rst @@ -67,7 +67,7 @@ The following loss functions are implemented for regression problems: :math:`L(y,f(x)) = \left\{ \begin{array}{lr} \delta\cdot\left(|y-f(x)|-\dfrac{\delta}{2}\right) & : |y-f(x)|>\delta\\ \dfrac{1}{2}\cdot(y-f(x))^2 & : |y-f(x)|\leq\delta \end{array} \right.`, - + where :math:`\delta` is the :math:`\alpha`-quantile estimation of the :math:`|y-f(x)|`. In the current implementation :math:`\alpha=0.2`. @@ -129,9 +129,9 @@ CvGBTreesParams::CvGBTreesParams :param weak_count: Count of boosting algorithm iterations. ``weak_count*K`` is the total count of trees in the GBT model, where ``K`` is the output classes count (equal to one in case of a regression). - + :param shrinkage: Regularization parameter (see :ref:`Training GBT`). - + :param subsample_portion: Portion of the whole training set used for each algorithm iteration. Subset is generated randomly. For more information see http://www.salfordsystems.com/doc/StochasticBoostingSS.pdf. @@ -139,7 +139,7 @@ CvGBTreesParams::CvGBTreesParams :param max_depth: Maximal depth of each decision tree in the ensemble (see :ocv:class:`CvDTree`). :param use_surrogates: If ``true``, surrogate splits are built (see :ocv:class:`CvDTree`). - + By default the following constructor is used: .. 
code-block:: cpp @@ -178,7 +178,7 @@ Trains a Gradient boosted tree model. .. ocv:function:: bool CvGBTrees::train(CvMLData* data, CvGBTreesParams params=CvGBTreesParams(), bool update=false) .. ocv:pyfunction:: cv2.GBTrees.train(trainData, tflag, responses[, varIdx[, sampleIdx[, varType[, missingDataMask[, params[, update]]]]]]) -> retval - + The first train method follows the common template (see :ocv:func:`CvStatModel::train`). Both ``tflag`` values (``CV_ROW_SAMPLE``, ``CV_COL_SAMPLE``) are supported. ``trainData`` must be of the ``CV_32F`` type. ``responses`` must be a matrix of type @@ -188,7 +188,7 @@ list of indices (``CV_32S``) or a mask (``CV_8U`` or ``CV_8S``). ``update`` is a dummy parameter. The second form of :ocv:func:`CvGBTrees::train` function uses :ocv:class:`CvMLData` as a -data set container. ``update`` is still a dummy parameter. +data set container. ``update`` is still a dummy parameter. All parameters specific to the GBT model are passed into the training function as a :ocv:class:`CvGBTreesParams` structure. @@ -207,42 +207,42 @@ Predicts a response for an input sample. :param sample: Input feature vector that has the same format as every training set element. If not all the variables were actually used during training, ``sample`` contains forged values at the appropriate places. - + :param missing: Missing values mask, which is a dimensional matrix of the same size as ``sample`` having the ``CV_8U`` type. ``1`` corresponds to the missing value in the same position in the ``sample`` vector. If there are no missing values in the feature vector, an empty matrix can be passed instead of the missing mask. - + :param weakResponses: Matrix used to obtain predictions of all the trees. The matrix has :math:`K` rows, where :math:`K` is the count of output classes (1 for the regression case). The matrix has as many columns as the ``slice`` length. - + :param slice: Parameter defining the part of the ensemble used for prediction. 
If ``slice = Range::all()``, all trees are used. Use this parameter to get predictions of the GBT models with different ensemble sizes learning only one model. - + :param k: Number of tree ensembles built in case of the classification problem (see :ref:`Training GBT`). Use this parameter to change the output to sum of the trees' predictions in the ``k``-th ensemble only. To get the total GBT model prediction, ``k`` value must be -1. For regression problems, ``k`` is also equal to -1. - + The method predicts the response corresponding to the given sample (see :ref:`Predicting with GBT`). The result is either the class label or the estimated function value. The :ocv:func:`CvGBTrees::predict` method enables using the parallel version of the GBT model prediction if the OpenCV is built with the TBB library. In this case, predictions -of single trees are computed in a parallel fashion. +of single trees are computed in a parallel fashion. + - CvGBTrees::clear ---------------- Clears the model. .. ocv:function:: void CvGBTrees::clear() - + .. ocv:pyfunction:: cv2.GBTrees.clear() -> None The function deletes the data set information and all the weak models and sets all internal @@ -257,7 +257,7 @@ Calculates a training or testing error. .. ocv:function:: float CvGBTrees::calc_error( CvMLData* _data, int type, std::vector *resp = 0 ) :param _data: Data set. - + :param type: Parameter defining the error that should be computed: train (``CV_TRAIN_ERROR``) or test (``CV_TEST_ERROR``). diff --git a/modules/ml/doc/k_nearest_neighbors.rst b/modules/ml/doc/k_nearest_neighbors.rst index 6692417189..171be3dc1c 100644 --- a/modules/ml/doc/k_nearest_neighbors.rst +++ b/modules/ml/doc/k_nearest_neighbors.rst @@ -45,7 +45,7 @@ Trains the model. :param updateBase: Specifies whether the model is trained from scratch (``update_base=false``), or it is updated using the new training data (``update_base=true``). 
In the latter case, the parameter ``maxK`` must not be larger than the original value. -The method trains the K-Nearest model. It follows the conventions of the generic :ocv:func:`CvStatModel::train` approach with the following limitations: +The method trains the K-Nearest model. It follows the conventions of the generic :ocv:func:`CvStatModel::train` approach with the following limitations: * Only ``CV_ROW_SAMPLE`` data layout is supported. * Input variables are all ordered. diff --git a/modules/ml/doc/mldata.rst b/modules/ml/doc/mldata.rst index 9b5e805e8f..79815291ba 100644 --- a/modules/ml/doc/mldata.rst +++ b/modules/ml/doc/mldata.rst @@ -9,7 +9,7 @@ CvMLData -------- .. ocv:class:: CvMLData -Class for loading the data from a ``.csv`` file. +Class for loading the data from a ``.csv`` file. :: class CV_EXPORTS CvMLData @@ -27,42 +27,42 @@ Class for loading the data from a ``.csv`` file. void set_response_idx( int idx ); int get_response_idx() const; - + void set_train_test_split( const CvTrainTestSplit * spl); const CvMat* get_train_sample_idx() const; const CvMat* get_test_sample_idx() const; void mix_train_and_test_idx(); - + const CvMat* get_var_idx(); void change_var_idx( int vi, bool state ); const CvMat* get_var_types(); void set_var_types( const char* str ); - + int get_var_type( int var_idx ) const; void change_var_type( int var_idx, int type); - + void set_delimiter( char ch ); char get_delimiter() const; void set_miss_ch( char ch ); char get_miss_ch() const; - + const std::map& get_class_labels_map() const; - - protected: - ... + + protected: + ... }; CvMLData::read_csv ------------------ -Reads the data set from a ``.csv``-like ``filename`` file and stores all read values in a matrix. +Reads the data set from a ``.csv``-like ``filename`` file and stores all read values in a matrix. .. 
ocv:function:: int CvMLData::read_csv(const char* filename) :param filename: The input file name -While reading the data, the method tries to define the type of variables (predictors and responses): ordered or categorical. If a value of the variable is not numerical (except for the label for a missing value), the type of the variable is set to ``CV_VAR_CATEGORICAL``. If all existing values of the variable are numerical, the type of the variable is set to ``CV_VAR_ORDERED``. So, the default definition of variables types works correctly for all cases except the case of a categorical variable with numerical class labels. In this case, the type ``CV_VAR_ORDERED`` is set. You should change the type to ``CV_VAR_CATEGORICAL`` using the method :ocv:func:`CvMLData::change_var_type`. For categorical variables, a common map is built to convert a string class label to the numerical class label. Use :ocv:func:`CvMLData::get_class_labels_map` to obtain this map. +While reading the data, the method tries to define the type of variables (predictors and responses): ordered or categorical. If a value of the variable is not numerical (except for the label for a missing value), the type of the variable is set to ``CV_VAR_CATEGORICAL``. If all existing values of the variable are numerical, the type of the variable is set to ``CV_VAR_ORDERED``. So, the default definition of variables types works correctly for all cases except the case of a categorical variable with numerical class labels. In this case, the type ``CV_VAR_ORDERED`` is set. You should change the type to ``CV_VAR_CATEGORICAL`` using the method :ocv:func:`CvMLData::change_var_type`. For categorical variables, a common map is built to convert a string class label to the numerical class label. Use :ocv:func:`CvMLData::get_class_labels_map` to obtain this map. Also, when reading the data, the method constructs the mask of missing values. For example, values are equal to `'?'`. 
@@ -72,7 +72,7 @@ Returns a pointer to the matrix of predictors and response values .. ocv:function:: const CvMat* CvMLData::get_values() const -The method returns a pointer to the matrix of predictor and response ``values`` or ``0`` if the data has not been loaded from the file yet. +The method returns a pointer to the matrix of predictor and response ``values`` or ``0`` if the data has not been loaded from the file yet. The row count of this matrix equals the sample count. The column count equals predictors ``+ 1`` for the response (if exists) count. This means that each row of the matrix contains values of one sample predictor and response. The matrix type is ``CV_32FC1``. @@ -82,7 +82,7 @@ Returns a pointer to the matrix of response values .. ocv:function:: const CvMat* CvMLData::get_responses() -The method returns a pointer to the matrix of response values or throws an exception if the data has not been loaded from the file yet. +The method returns a pointer to the matrix of response values or throws an exception if the data has not been loaded from the file yet. This is a single-column matrix of the type ``CV_32FC1``. Its row count is equal to the sample count, one column and . @@ -92,7 +92,7 @@ Returns a pointer to the mask matrix of missing values .. ocv:function:: const CvMat* CvMLData::get_missing() const -The method returns a pointer to the mask matrix of missing values or throws an exception if the data has not been loaded from the file yet. +The method returns a pointer to the mask matrix of missing values or throws an exception if the data has not been loaded from the file yet. This matrix has the same size as the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) and the type ``CV_8UC1``. @@ -102,7 +102,7 @@ Specifies index of response column in the data matrix .. 
ocv:function:: void CvMLData::set_response_idx( int idx ) -The method sets the index of a response column in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) or throws an exception if the data has not been loaded from the file yet. +The method sets the index of a response column in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) or throws an exception if the data has not been loaded from the file yet. The old response columns become predictors. If ``idx < 0``, there is no response. @@ -115,15 +115,15 @@ Returns index of the response column in the loaded data matrix The method returns the index of a response column in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) or throws an exception if the data has not been loaded from the file yet. If ``idx < 0``, there is no response. - + CvMLData::set_train_test_split ------------------------------ -Divides the read data set into two disjoint training and test subsets. +Divides the read data set into two disjoint training and test subsets. .. ocv:function:: void CvMLData::set_train_test_split( const CvTrainTestSplit * spl ) -This method sets parameters for such a split using ``spl`` (see :ocv:class:`CvTrainTestSplit`) or throws an exception if the data has not been loaded from the file yet. +This method sets parameters for such a split using ``spl`` (see :ocv:class:`CvTrainTestSplit`) or throws an exception if the data has not been loaded from the file yet. CvMLData::get_train_sample_idx ------------------------------ @@ -139,13 +139,13 @@ Returns the matrix of sample indices for a testing subset .. ocv:function:: const CvMat* CvMLData::get_test_sample_idx() const - + CvMLData::mix_train_and_test_idx -------------------------------- Mixes the indices of training and test samples .. 
ocv:function:: void CvMLData::mix_train_and_test_idx() - + The method shuffles the indices of training and test samples preserving sizes of training and test subsets if the data split is set by :ocv:func:`CvMLData::get_values`. If the data has not been loaded from the file yet, an exception is thrown. CvMLData::get_var_idx @@ -153,8 +153,8 @@ CvMLData::get_var_idx Returns the indices of the active variables in the data matrix .. ocv:function:: const CvMat* CvMLData::get_var_idx() - -The method returns the indices of variables (columns) used in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`). + +The method returns the indices of variables (columns) used in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`). It returns ``0`` if the used subset is not set. It throws an exception if the data has not been loaded from the file yet. Returned matrix is a single-row matrix of the type ``CV_32SC1``. Its column count is equal to the size of the used variable subset. @@ -165,22 +165,22 @@ Enables or disables particular variable in the loaded data .. ocv:function:: void CvMLData::change_var_idx( int vi, bool state ) By default, after reading the data set all variables in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) are used. But you may want to use only a subset of variables and include/exclude (depending on ``state`` value) a variable with the ``vi`` index from the used subset. If the data has not been loaded from the file yet, an exception is thrown. - + CvMLData::get_var_types ----------------------- -Returns a matrix of the variable types. +Returns a matrix of the variable types. .. ocv:function:: const CvMat* CvMLData::get_var_types() - + The function returns a single-row matrix of the type ``CV_8UC1``, where each element is set to either ``CV_VAR_ORDERED`` or ``CV_VAR_CATEGORICAL``. The number of columns is equal to the number of variables. If data has not been loaded from file yet an exception is thrown. 
- + CvMLData::set_var_types ----------------------- Sets the variables types in the loaded data. .. ocv:function:: void CvMLData::set_var_types( const char* str ) -In the string, a variable type is followed by a list of variables indices. For example: ``"ord[0-17],cat[18]"``, ``"ord[0,2,4,10-12], cat[1,3,5-9,13,14]"``, ``"cat"`` (all variables are categorical), ``"ord"`` (all variables are ordered). +In the string, a variable type is followed by a list of variables indices. For example: ``"ord[0-17],cat[18]"``, ``"ord[0,2,4,10-12], cat[1,3,5-9,13,14]"``, ``"cat"`` (all variables are categorical), ``"ord"`` (all variables are ordered). CvMLData::get_var_type ---------------------- @@ -189,15 +189,15 @@ Returns type of the specified variable .. ocv:function:: int CvMLData::get_var_type( int var_idx ) const The method returns the type of a variable by the index ``var_idx`` ( ``CV_VAR_ORDERED`` or ``CV_VAR_CATEGORICAL``). - + CvMLData::change_var_type ------------------------- Changes type of the specified variable .. ocv:function:: void CvMLData::change_var_type( int var_idx, int type) - + The method changes type of variable with index ``var_idx`` from existing type to ``type`` ( ``CV_VAR_ORDERED`` or ``CV_VAR_CATEGORICAL``). - + CvMLData::set_delimiter ----------------------- Sets the delimiter in the file used to separate input numbers @@ -260,6 +260,6 @@ Structure setting the split of a data set read by :ocv:class:`CvMLData`. There are two ways to construct a split: -* Set the training sample count (subset size) ``train_sample_count``. Other existing samples are located in a test subset. +* Set the training sample count (subset size) ``train_sample_count``. Other existing samples are located in a test subset. * Set a training sample portion in ``[0,..1]``. The flag ``mix`` is used to mix training and test samples indices when the split is set. 
Otherwise, the data set is split in the storing order: the first part of samples of a given size is a training subset, the second part is a test subset. diff --git a/modules/ml/src/knearest.cpp b/modules/ml/src/knearest.cpp index 6b6f5e6afa..78012baa0a 100644 --- a/modules/ml/src/knearest.cpp +++ b/modules/ml/src/knearest.cpp @@ -116,7 +116,7 @@ bool CvKNearest::train( const CvMat* _train_data, const CvMat* _responses, if( !responses ) CV_ERROR( CV_StsNoMem, "Could not allocate memory for responses" ); - + if( _update_base && _dims != var_count ) CV_ERROR( CV_StsBadArg, "The newly added data have different dimensionality" ); diff --git a/modules/nonfree/src/opencl/surf.cl b/modules/nonfree/src/opencl/surf.cl index 3dced5ea10..aace143d53 100644 --- a/modules/nonfree/src/opencl/surf.cl +++ b/modules/nonfree/src/opencl/surf.cl @@ -125,7 +125,7 @@ float icvCalcHaarPatternSum_2( t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy1.x), rows, cols, elemPerRow ); t += read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy2.x), rows, cols, elemPerRow ); d += t * src[4].x / ((dx2.x - dx1.x) * (dy2.x - dy1.x)); - + t = 0; t += read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy1.y), rows, cols, elemPerRow ); t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy2.y), rows, cols, elemPerRow ); @@ -161,14 +161,14 @@ float icvCalcHaarPatternSum_3( t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy1.x), rows, cols, elemPerRow ); t += read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy2.x), rows, cols, elemPerRow ); d += t * src[4].x / ((dx2.x - dx1.x) * (dy2.x - dy1.x)); - + t = 0; t += read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy1.y), rows, cols, elemPerRow ); t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy2.y), rows, cols, elemPerRow ); t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy1.y), rows, cols, elemPerRow ); t += read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy2.y), rows, cols, elemPerRow ); d += t * 
src[4].y / ((dx2.y - dx1.y) * (dy2.y - dy1.y)); - + t = 0; t += read_sumTex( sumTex, sampler, (int2)(x + dx1.z, y + dy1.z), rows, cols, elemPerRow ); t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.z, y + dy2.z), rows, cols, elemPerRow ); @@ -204,21 +204,21 @@ float icvCalcHaarPatternSum_4( t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy1.x), rows, cols, elemPerRow ); t += read_sumTex( sumTex, sampler, (int2)(x + dx2.x, y + dy2.x), rows, cols, elemPerRow ); d += t * src[4].x / ((dx2.x - dx1.x) * (dy2.x - dy1.x)); - + t = 0; t += read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy1.y), rows, cols, elemPerRow ); t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.y, y + dy2.y), rows, cols, elemPerRow ); t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy1.y), rows, cols, elemPerRow ); t += read_sumTex( sumTex, sampler, (int2)(x + dx2.y, y + dy2.y), rows, cols, elemPerRow ); d += t * src[4].y / ((dx2.y - dx1.y) * (dy2.y - dy1.y)); - + t = 0; t += read_sumTex( sumTex, sampler, (int2)(x + dx1.z, y + dy1.z), rows, cols, elemPerRow ); t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.z, y + dy2.z), rows, cols, elemPerRow ); t -= read_sumTex( sumTex, sampler, (int2)(x + dx2.z, y + dy1.z), rows, cols, elemPerRow ); t += read_sumTex( sumTex, sampler, (int2)(x + dx2.z, y + dy2.z), rows, cols, elemPerRow ); d += t * src[4].z / ((dx2.z - dx1.z) * (dy2.z - dy1.z)); - + t = 0; t += read_sumTex( sumTex, sampler, (int2)(x + dx1.w, y + dy1.w), rows, cols, elemPerRow ); t -= read_sumTex( sumTex, sampler, (int2)(x + dx1.w, y + dy2.w), rows, cols, elemPerRow ); @@ -1231,7 +1231,7 @@ void compute_descriptors64( barrier(CLK_LOCAL_MEM_FENCE); reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid); - + barrier(CLK_LOCAL_MEM_FENCE); if (tid < 25) { @@ -1361,14 +1361,14 @@ void reduce_sum128(volatile __local float* smem, int tid) #if WAVE_SIZE < 64 } barrier(CLK_LOCAL_MEM_FENCE); - if (tid < 32) + if (tid < 32) { #endif smem[tid] += smem[tid + 32]; #if WAVE_SIZE < 32 } 
barrier(CLK_LOCAL_MEM_FENCE); - if (tid < 16) + if (tid < 16) { #endif smem[tid] += smem[tid + 16]; @@ -1415,7 +1415,7 @@ void reduce_sum64(volatile __local float* smem, int tid) #if WAVE_SIZE < 32 } barrier(CLK_LOCAL_MEM_FENCE); - if (tid < 16) + if (tid < 16) { #endif smem[tid] += smem[tid + 16]; diff --git a/modules/nonfree/src/precomp.hpp b/modules/nonfree/src/precomp.hpp index 2abe6038ac..5fbe446af8 100644 --- a/modules/nonfree/src/precomp.hpp +++ b/modules/nonfree/src/precomp.hpp @@ -53,7 +53,7 @@ #if defined(HAVE_OPENCV_GPU) #include "opencv2/nonfree/gpu.hpp" - + #if defined(HAVE_CUDA) #include "opencv2/gpu/stream_accessor.hpp" #include "opencv2/gpu/device/common.hpp" diff --git a/modules/objdetect/src/haar.cpp b/modules/objdetect/src/haar.cpp index 0a5f8873a8..6bde067560 100644 --- a/modules/objdetect/src/haar.cpp +++ b/modules/objdetect/src/haar.cpp @@ -1964,10 +1964,10 @@ cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size ) size += (n+1)*sizeof(char*); const char** input_cascade = (const char**)cvAlloc( size ); - + if( !input_cascade ) CV_Error( CV_StsNoMem, "Could not allocate memory for input_cascade" ); - + char* ptr = (char*)(input_cascade + n + 1); for( int i = 0; i < n; i++ ) @@ -1988,7 +1988,7 @@ cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size ) } input_cascade[n] = 0; - + CvHaarClassifierCascade* cascade = icvLoadCascadeCART( input_cascade, n, orig_window_size ); if( input_cascade ) diff --git a/modules/objdetect/test/test_cascadeandhog.cpp b/modules/objdetect/test/test_cascadeandhog.cpp index 41ecb3fdeb..dd0c439d03 100644 --- a/modules/objdetect/test/test_cascadeandhog.cpp +++ b/modules/objdetect/test/test_cascadeandhog.cpp @@ -439,7 +439,7 @@ int CV_CascadeDetectorTest::detectMultiScale_C( const string& filename, CvMat c_gray = grayImg; CvSeq* rs = cvHaarDetectObjects(&c_gray, c_cascade, storage, 1.1, 3, flags[di] ); - + objects.clear(); for( int i = 0; i < rs->total; i++ ) { diff --git 
a/modules/ocl/doc/introduction.rst b/modules/ocl/doc/introduction.rst index 5da848f1e6..7dda96396f 100644 --- a/modules/ocl/doc/introduction.rst +++ b/modules/ocl/doc/introduction.rst @@ -6,14 +6,14 @@ OpenCL Module Introduction General Information ------------------- -The OpenCV OCL module contains a set of classes and functions that implement and accelerate select openCV functionality on OpenCL compatible devices. OpenCL is a Khronos standard, implemented by a variety of devices (CPUs, GPUs, FPGAs, ARM), abstracting the exact hardware details, while enabling vendors to provide native implementation for maximal acceleration on their hardware. The standard enjoys wide industry support, and the end user of the module will enjoy the data parallelism benefits that the specific platform/hardware may be capable of, in a platform/hardware independent manner. +The OpenCV OCL module contains a set of classes and functions that implement and accelerate select openCV functionality on OpenCL compatible devices. OpenCL is a Khronos standard, implemented by a variety of devices (CPUs, GPUs, FPGAs, ARM), abstracting the exact hardware details, while enabling vendors to provide native implementation for maximal acceleration on their hardware. The standard enjoys wide industry support, and the end user of the module will enjoy the data parallelism benefits that the specific platform/hardware may be capable of, in a platform/hardware independent manner. -While in the future we hope to validate (and enable) the OCL module in all OpenCL capable devices, we currently develop and test on GPU devices only. This includes both discrete GPUs (NVidia, AMD), as well as integrated chips(AMD APU and intel HD devices). Performance of any particular algorithm will depend on the particular platform characteristics and capabilities. 
However, currently (as of 2.4.4), accuracy and mathematical correctness has been verified to be identical to that of the pure CPU implementation on all tested GPU devices and platforms (both windows and linux). +While in the future we hope to validate (and enable) the OCL module in all OpenCL capable devices, we currently develop and test on GPU devices only. This includes both discrete GPUs (NVidia, AMD), as well as integrated chips(AMD APU and intel HD devices). Performance of any particular algorithm will depend on the particular platform characteristics and capabilities. However, currently (as of 2.4.4), accuracy and mathematical correctness has been verified to be identical to that of the pure CPU implementation on all tested GPU devices and platforms (both windows and linux). The OpenCV OCL module includes utility functions, low-level vision primitives, and high-level algorithms. The utility functions and low-level primitives provide a powerful infrastructure for developing fast vision algorithms taking advangtage of OCL whereas the high-level functionality (samples)includes some state-of-the-art algorithms (including LK Optical flow, and Face detection) ready to be used by the application developers. The module is also accompanied by an extensive performance and accuracy test suite. -The OpenCV OCL module is designed for ease of use and does not require any knowledge of OpenCL. At a minimuml level, it can be viewed as a set of accelerators, that can take advantage of the high compute throughput that GPU/APU devices can provide. However, it can also be viewed as a starting point to really integratethe built-in functionality with your own custom OpenCL kernels, with or without modifying the source of OpenCV-OCL. Of course, knowledge of OpenCL will certainly help, however we hope that OpenCV-OCL module, and the kernels it contains in source code, can be very useful as a means of actually learning openCL. 
Such a knowledge would be necessary to further fine-tune any of the existing OpenCL kernels, or for extending the framework with new kernels. As of OpenCV 2.4.4, we introduce interoperability with OpenCL, enabling easy use of custom OpenCL kernels within the OpenCV framework. +The OpenCV OCL module is designed for ease of use and does not require any knowledge of OpenCL. At a minimuml level, it can be viewed as a set of accelerators, that can take advantage of the high compute throughput that GPU/APU devices can provide. However, it can also be viewed as a starting point to really integratethe built-in functionality with your own custom OpenCL kernels, with or without modifying the source of OpenCV-OCL. Of course, knowledge of OpenCL will certainly help, however we hope that OpenCV-OCL module, and the kernels it contains in source code, can be very useful as a means of actually learning openCL. Such a knowledge would be necessary to further fine-tune any of the existing OpenCL kernels, or for extending the framework with new kernels. As of OpenCV 2.4.4, we introduce interoperability with OpenCL, enabling easy use of custom OpenCL kernels within the OpenCV framework. To use the OCL module, you need to make sure that you have the OpenCL SDK provided with your device vendor. To correctly run the OCL module, you need to have the OpenCL runtime provide by the device vendor, typically the device driver. diff --git a/modules/ocl/doc/operations_on_matrices.rst b/modules/ocl/doc/operations_on_matrices.rst index 1500ea52ad..e47e720922 100644 --- a/modules/ocl/doc/operations_on_matrices.rst +++ b/modules/ocl/doc/operations_on_matrices.rst @@ -500,13 +500,13 @@ Returns void * **SORT_SELECTION** selection sort, currently cannot sort duplicate keys * **SORT_MERGE** merge sort * **SORT_RADIX** radix sort, only support signed int/float keys(``CV_32S``/``CV_32F``) - + Returns the sorted result of all the elements in values based on equivalent keys. 
-The element unit in the values to be sorted is determined from the data type, +The element unit in the values to be sorted is determined from the data type, i.e., a ``CV_32FC2`` input ``{a1a2, b1b2}`` will be considered as two elements, regardless its matrix dimension. -Both keys and values will be sorted inplace. +Both keys and values will be sorted inplace. Keys needs to be a **single** channel `oclMat`. diff --git a/modules/ocl/include/opencv2/ocl/ocl.hpp b/modules/ocl/include/opencv2/ocl/ocl.hpp index aa0283fbeb..5b3642d034 100644 --- a/modules/ocl/include/opencv2/ocl/ocl.hpp +++ b/modules/ocl/include/opencv2/ocl/ocl.hpp @@ -119,7 +119,7 @@ namespace cv CV_EXPORTS void setDevice(Info &oclinfo, int devnum = 0); //The two functions below enable other opencl program to use ocl module's cl_context and cl_command_queue - //returns cl_context * + //returns cl_context * CV_EXPORTS void* getoclContext(); //returns cl_command_queue * CV_EXPORTS void* getoclCommandQueue(); @@ -183,8 +183,8 @@ namespace cv //! Enable or disable OpenCL program binary caching onto local disk // After a program (*.cl files in opencl/ folder) is built at runtime, we allow the - // compiled OpenCL program to be cached to the path automatically as "path/*.clb" - // binary file, which will be reused when the OpenCV executable is started again. + // compiled OpenCL program to be cached to the path automatically as "path/*.clb" + // binary file, which will be reused when the OpenCV executable is started again. // // Caching mode is controlled by the following enums // Notes @@ -201,7 +201,7 @@ namespace cv }; CV_EXPORTS void setBinaryDiskCache(int mode = CACHE_RELEASE, cv::String path = "./"); - //! set where binary cache to be saved to + //! set where binary cache to be saved to CV_EXPORTS void setBinpath(const char *path); class CV_EXPORTS oclMatExpr; @@ -514,10 +514,10 @@ namespace cv CV_EXPORTS void calcHist(const oclMat &mat_src, oclMat &mat_hist); //! 
only 8UC1 and 256 bins is supported now CV_EXPORTS void equalizeHist(const oclMat &mat_src, oclMat &mat_dst); - + //! only 8UC1 is supported now CV_EXPORTS Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); - + //! bilateralFilter // supports 8UC1 8UC4 CV_EXPORTS void bilateralFilter(const oclMat& src, oclMat& dst, int d, double sigmaColor, double sigmaSpave, int borderType=BORDER_DEFAULT); @@ -840,7 +840,7 @@ namespace cv // supports CV_32FC1/CV_32FC2/CV_32FC4 data type CV_EXPORTS void distanceToCenters(oclMat &dists, oclMat &labels, const oclMat &src, const oclMat ¢ers); - //!Does k-means procedure on GPU + //!Does k-means procedure on GPU // supports CV_32FC1/CV_32FC2/CV_32FC4 data type CV_EXPORTS double kmeans(const oclMat &src, int K, oclMat &bestLabels, TermCriteria criteria, int attemps, int flags, oclMat ¢ers); @@ -1683,7 +1683,7 @@ namespace cv }; //! Returns the sorted result of all the elements in input based on equivalent keys. // - // The element unit in the values to be sorted is determined from the data type, + // The element unit in the values to be sorted is determined from the data type, // i.e., a CV_32FC2 input {a1a2, b1b2} will be considered as two elements, regardless its // matrix dimension. 
// both keys and values will be sorted inplace diff --git a/modules/ocl/include/opencv2/ocl/private/util.hpp b/modules/ocl/include/opencv2/ocl/private/util.hpp index 634f2f2b15..3176a68951 100644 --- a/modules/ocl/include/opencv2/ocl/private/util.hpp +++ b/modules/ocl/include/opencv2/ocl/private/util.hpp @@ -130,7 +130,7 @@ namespace cv { openCLFree(tex_); } - operator cl_mem() + operator cl_mem() { return tex_; } diff --git a/modules/ocl/perf/main.cpp b/modules/ocl/perf/main.cpp index 0875903413..1d7e2cffa1 100644 --- a/modules/ocl/perf/main.cpp +++ b/modules/ocl/perf/main.cpp @@ -99,7 +99,7 @@ int main(int argc, const char *argv[]) // set this to overwrite binary cache every time the test starts ocl::setBinaryDiskCache(ocl::CACHE_UPDATE); - + if (cmd.get("verify")) { TestSystem::instance().setNumIters(1); diff --git a/modules/ocl/perf/perf_calib3d.cpp b/modules/ocl/perf/perf_calib3d.cpp index e0622aa56b..8485278c13 100644 --- a/modules/ocl/perf/perf_calib3d.cpp +++ b/modules/ocl/perf/perf_calib3d.cpp @@ -87,7 +87,7 @@ PERFTEST(StereoMatchBM) d_bm(d_left, d_right, d_disp); d_disp.download(disp); GPU_FULL_OFF; - + TestSystem::instance().setAccurate(-1, 0.); } @@ -98,4 +98,4 @@ PERFTEST(StereoMatchBM) - \ No newline at end of file + diff --git a/modules/ocl/perf/perf_filters.cpp b/modules/ocl/perf/perf_filters.cpp index be288b444b..e4204cbbd1 100644 --- a/modules/ocl/perf/perf_filters.cpp +++ b/modules/ocl/perf/perf_filters.cpp @@ -284,7 +284,7 @@ PERFTEST(GaussianBlur) Mat src, dst, ocl_dst; int all_type[] = {CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4}; std::string type_name[] = {"CV_8UC1", "CV_8UC4", "CV_32FC1", "CV_32FC4"}; - const int ksize = 7; + const int ksize = 7; for (int size = Min_Size; size <= Max_Size; size *= Multiple) { @@ -374,4 +374,4 @@ PERFTEST(filter2D) } -} \ No newline at end of file +} diff --git a/modules/ocl/perf/perf_gftt.cpp b/modules/ocl/perf/perf_gftt.cpp index 9e809e4435..430d441430 100644 --- a/modules/ocl/perf/perf_gftt.cpp +++ 
b/modules/ocl/perf/perf_gftt.cpp @@ -55,14 +55,14 @@ PERFTEST(GoodFeaturesToTrack) double qualityLevel = 0.01; std::string images[] = { "rubberwhale1.png", "aloeL.jpg" }; - + std::vector pts_gold, pts_ocl; for(size_t imgIdx = 0; imgIdx < (sizeof(images)/sizeof(std::string)); ++imgIdx) { Mat frame = imread(abspath(images[imgIdx]), IMREAD_GRAYSCALE); CV_Assert(!frame.empty()); - + for(float minDistance = 0; minDistance < 4; minDistance += 3.0) { SUBTEST << "image = " << images[imgIdx] << "; "; diff --git a/modules/ocl/perf/perf_hog.cpp b/modules/ocl/perf/perf_hog.cpp index 0f05581295..610a168df2 100644 --- a/modules/ocl/perf/perf_hog.cpp +++ b/modules/ocl/perf/perf_hog.cpp @@ -77,7 +77,7 @@ PERFTEST(HOG) WARMUP_ON; ocl_hog.detectMultiScale(d_src, d_found_locations); WARMUP_OFF; - + if(d_found_locations.size() == found_locations.size()) TestSystem::instance().setAccurate(1, 0); else diff --git a/modules/ocl/perf/perf_imgproc.cpp b/modules/ocl/perf/perf_imgproc.cpp index 582853d05b..2ed64ce325 100644 --- a/modules/ocl/perf/perf_imgproc.cpp +++ b/modules/ocl/perf/perf_imgproc.cpp @@ -852,7 +852,7 @@ PERFTEST(meanShiftProc) GPU_FULL_OFF; vector eps(2, 0.); - TestSystem::instance().ExpectMatsNear(dst, ocl_dst, eps); + TestSystem::instance().ExpectMatsNear(dst, ocl_dst, eps); } } diff --git a/modules/ocl/perf/perf_norm.cpp b/modules/ocl/perf/perf_norm.cpp index fec8d73791..268fe79e46 100644 --- a/modules/ocl/perf/perf_norm.cpp +++ b/modules/ocl/perf/perf_norm.cpp @@ -72,7 +72,7 @@ PERFTEST(norm) WARMUP_OFF; d_src1.download(ocl_src1); - TestSystem::instance().ExpectedMatNear(src1, ocl_src1, .5); + TestSystem::instance().ExpectedMatNear(src1, ocl_src1, .5); GPU_ON; ocl::norm(d_src1, d_src2, NORM_INF); diff --git a/modules/ocl/perf/perf_precomp.cpp b/modules/ocl/perf/perf_precomp.cpp index 2a49eb20e8..439930da2d 100644 --- a/modules/ocl/perf/perf_precomp.cpp +++ b/modules/ocl/perf/perf_precomp.cpp @@ -301,8 +301,8 @@ static const char* GetAnsiColorCode(GTestColor color) { 
static void printMetricsUti(double cpu_time, double gpu_time, double gpu_full_time, double speedup, double fullspeedup, std::stringstream& stream, std::stringstream& cur_subtest_description) { - //cout < -void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, +void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, string kernelName, const char **kernelString, void *_scalar, int op_type = 0) { if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F) @@ -187,12 +187,12 @@ void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, depth); } } -static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, +static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, string kernelName, const char **kernelString, int op_type = 0) { arithmetic_run(src1, src2, dst, kernelName, kernelString, (void *)NULL, op_type); } -static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask, +static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask, string kernelName, const char **kernelString, int op_type = 0) { if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F) diff --git a/modules/ocl/src/filtering.cpp b/modules/ocl/src/filtering.cpp index 6e858d1d6b..a08f0ed2bd 100644 --- a/modules/ocl/src/filtering.cpp +++ b/modules/ocl/src/filtering.cpp @@ -218,7 +218,7 @@ public: **Extend this if necessary later. **Note that the kernel need to be further refined. 
*/ -static void GPUErode(const oclMat &src, oclMat &dst, oclMat &mat_kernel, +static void GPUErode(const oclMat &src, oclMat &dst, oclMat &mat_kernel, Size &ksize, const Point anchor, bool rectKernel) { //Normalize the result by default @@ -275,8 +275,8 @@ static void GPUErode(const oclMat &src, oclMat &dst, oclMat &mat_kernel, } char compile_option[128]; - sprintf(compile_option, "-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D ERODE %s %s", - anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1], + sprintf(compile_option, "-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D ERODE %s %s", + anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1], s, rectKernel?"-D RECTKERNEL":""); vector< pair > args; args.push_back(make_pair(sizeof(cl_mem), (void *)&src.data)); @@ -296,7 +296,7 @@ static void GPUErode(const oclMat &src, oclMat &dst, oclMat &mat_kernel, //! data type supported: CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4 -static void GPUDilate(const oclMat &src, oclMat &dst, oclMat &mat_kernel, +static void GPUDilate(const oclMat &src, oclMat &dst, oclMat &mat_kernel, Size &ksize, const Point anchor, bool rectKernel) { //Normalize the result by default @@ -316,7 +316,7 @@ static void GPUDilate(const oclMat &src, oclMat &dst, oclMat &mat_kernel, Context *clCxt = src.clCxt; string kernelName; size_t localThreads[3] = {16, 16, 1}; - size_t globalThreads[3] = {(src.cols + localThreads[0] - 1) / localThreads[0] *localThreads[0], + size_t globalThreads[3] = {(src.cols + localThreads[0] - 1) / localThreads[0] *localThreads[0], (src.rows + localThreads[1] - 1) / localThreads[1] *localThreads[1], 1}; if (src.type() == CV_8UC1) @@ -354,8 +354,8 @@ static void GPUDilate(const oclMat &src, oclMat &dst, oclMat &mat_kernel, } char compile_option[128]; - sprintf(compile_option, "-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D DILATE %s %s", - anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1], + sprintf(compile_option, "-D RADIUSX=%d 
-D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D DILATE %s %s", + anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1], s, rectKernel?"-D RECTKERNEL":""); vector< pair > args; args.push_back(make_pair(sizeof(cl_mem), (void *)&src.data)); @@ -622,7 +622,7 @@ static void GPUFilter2D(const oclMat &src, oclMat &dst, const oclMat &mat_kernel int cn = src.oclchannels(); int src_step = (int)(src.step/src.elemSize()); int dst_step = (int)(dst.step/src.elemSize()); - + int localWidth = localThreads[0] + paddingPixels; int localHeight = localThreads[1] + paddingPixels; diff --git a/modules/ocl/src/gftt.cpp b/modules/ocl/src/gftt.cpp index 06844177a9..2219e7763d 100644 --- a/modules/ocl/src/gftt.cpp +++ b/modules/ocl/src/gftt.cpp @@ -147,7 +147,7 @@ struct Sorter static void sortCorners_caller(const EigType& eig_tex, oclMat& corners, const int count) { Context * cxt = Context::getContext(); - + size_t globalThreads[3] = {count, 1, 1}; size_t localThreads[3] = {GROUP_SIZE, 1, 1}; @@ -170,7 +170,7 @@ struct Sorter }; int findCorners_caller( - const TextureCL& eig, + const TextureCL& eig, const float threshold, const oclMat& mask, oclMat& corners, @@ -254,7 +254,7 @@ void cv::ocl::GoodFeaturesToTrackDetector_OCL::operator ()(const oclMat& image, Sorter::sortCorners_caller(*eig_tex, tmpCorners_, total); } } - + if (minDistance < 1) { Rect roi_range(0, 0, maxCorners > 0 ? 
std::min(maxCorners, total) : total, 1); @@ -338,14 +338,14 @@ void cv::ocl::GoodFeaturesToTrackDetector_OCL::downloadPoints(const oclMat &poin CV_DbgAssert(points.type() == CV_32FC2); points_v.resize(points.cols); openCLSafeCall(clEnqueueReadBuffer( - *reinterpret_cast(getoclCommandQueue()), - reinterpret_cast(points.data), - CL_TRUE, - 0, - points.cols * sizeof(Point2f), - &points_v[0], - 0, - NULL, + *reinterpret_cast(getoclCommandQueue()), + reinterpret_cast(points.data), + CL_TRUE, + 0, + points.cols * sizeof(Point2f), + &points_v[0], + 0, + NULL, NULL)); } diff --git a/modules/ocl/src/haar.cpp b/modules/ocl/src/haar.cpp index 934957e593..212fd2c444 100644 --- a/modules/ocl/src/haar.cpp +++ b/modules/ocl/src/haar.cpp @@ -1458,7 +1458,7 @@ void cv::ocl::OclCascadeClassifierBuf::CreateFactorRelatedBufs( gimg1.release(); gsum.release(); gsqsum.release(); - } + } else if (!(m_flags & CV_HAAR_SCALE_IMAGE) && (flags & CV_HAAR_SCALE_IMAGE)) { openCLSafeCall(clReleaseMemObject(((OclBuffers *)buffers)->newnodebuffer)); @@ -1476,7 +1476,7 @@ void cv::ocl::OclCascadeClassifierBuf::CreateFactorRelatedBufs( { return; } - } + } else { if (fabs(m_scaleFactor - scaleFactor) < 1e-6 diff --git a/modules/ocl/src/hog.cpp b/modules/ocl/src/hog.cpp index c7ac4098f5..e532d31fd7 100644 --- a/modules/ocl/src/hog.cpp +++ b/modules/ocl/src/hog.cpp @@ -89,33 +89,33 @@ namespace cv void compute_hists(int nbins, int block_stride_x, int blovck_stride_y, int height, int width, const cv::ocl::oclMat &grad, - const cv::ocl::oclMat &qangle, + const cv::ocl::oclMat &qangle, const cv::ocl::oclMat &gauss_w_lut, cv::ocl::oclMat &block_hists); void normalize_hists(int nbins, int block_stride_x, int block_stride_y, - int height, int width, cv::ocl::oclMat &block_hists, + int height, int width, cv::ocl::oclMat &block_hists, float threshold); void classify_hists(int win_height, int win_width, int block_stride_y, - int block_stride_x, int win_stride_y, int win_stride_x, - int height, int width, const 
cv::ocl::oclMat &block_hists, + int block_stride_x, int win_stride_y, int win_stride_x, + int height, int width, const cv::ocl::oclMat &block_hists, const cv::ocl::oclMat &coefs, float free_coef, float threshold, cv::ocl::oclMat &labels); - void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, - int block_stride_x, int win_stride_y, int win_stride_x, + void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, + int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, const cv::ocl::oclMat &block_hists, cv::ocl::oclMat &descriptors); - void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, - int block_stride_x, int win_stride_y, int win_stride_x, + void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, + int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, const cv::ocl::oclMat &block_hists, cv::ocl::oclMat &descriptors); void compute_gradients_8UC1(int height, int width, const cv::ocl::oclMat &img, - float angle_scale, cv::ocl::oclMat &grad, + float angle_scale, cv::ocl::oclMat &grad, cv::ocl::oclMat &qangle, bool correct_gamma); void compute_gradients_8UC4(int height, int width, const cv::ocl::oclMat &img, - float angle_scale, cv::ocl::oclMat &grad, + float angle_scale, cv::ocl::oclMat &grad, cv::ocl::oclMat &qangle, bool correct_gamma); } } @@ -129,8 +129,8 @@ static inline int divUp(int total, int grain) return (total + grain - 1) / grain; } -cv::ocl::HOGDescriptor::HOGDescriptor(Size win_size_, Size block_size_, Size block_stride_, - Size cell_size_, int nbins_, double win_sigma_, +cv::ocl::HOGDescriptor::HOGDescriptor(Size win_size_, Size block_size_, Size block_stride_, + Size cell_size_, int nbins_, double win_sigma_, double threshold_L2hys_, bool gamma_correction_, int nlevels_) : win_size(win_size_), block_size(block_size_), @@ -145,19 +145,19 @@ cv::ocl::HOGDescriptor::HOGDescriptor(Size win_size_, Size block_size_, 
Size blo CV_Assert((win_size.width - block_size.width ) % block_stride.width == 0 && (win_size.height - block_size.height) % block_stride.height == 0); - CV_Assert(block_size.width % cell_size.width == 0 && + CV_Assert(block_size.width % cell_size.width == 0 && block_size.height % cell_size.height == 0); CV_Assert(block_stride == cell_size); CV_Assert(cell_size == Size(8, 8)); - Size cells_per_block(block_size.width / cell_size.width, + Size cells_per_block(block_size.width / cell_size.width, block_size.height / cell_size.height); CV_Assert(cells_per_block == Size(2, 2)); cv::Size blocks_per_win = numPartsWithin(win_size, block_size, block_stride); - hog::set_up_constants(nbins, block_stride.width, block_stride.height, + hog::set_up_constants(nbins, block_stride.width, block_stride.height, blocks_per_win.width, blocks_per_win.height); effect_size = Size(0, 0); @@ -175,7 +175,7 @@ size_t cv::ocl::HOGDescriptor::getDescriptorSize() const size_t cv::ocl::HOGDescriptor::getBlockHistogramSize() const { - Size cells_per_block = Size(block_size.width / cell_size.width, + Size cells_per_block = Size(block_size.width / cell_size.width, block_size.height / cell_size.height); return (size_t)(nbins * cells_per_block.area()); } @@ -189,7 +189,7 @@ bool cv::ocl::HOGDescriptor::checkDetectorSize() const { size_t detector_size = detector.rows * detector.cols; size_t descriptor_size = getDescriptorSize(); - return detector_size == 0 || detector_size == descriptor_size || + return detector_size == 0 || detector_size == descriptor_size || detector_size == descriptor_size + 1; } @@ -230,7 +230,7 @@ void cv::ocl::HOGDescriptor::init_buffer(const oclMat &img, Size win_stride) const size_t block_hist_size = getBlockHistogramSize(); const Size blocks_per_img = numPartsWithin(img.size(), block_size, block_stride); - block_hists.create(1, + block_hists.create(1, static_cast(block_hist_size * blocks_per_img.area()) + 256, CV_32F); Size wins_per_img = numPartsWithin(img.size(), win_size, 
win_stride); @@ -258,11 +258,11 @@ void cv::ocl::HOGDescriptor::computeGradient(const oclMat &img, oclMat &grad, oc switch (img.type()) { case CV_8UC1: - hog::compute_gradients_8UC1(effect_size.height, effect_size.width, img, + hog::compute_gradients_8UC1(effect_size.height, effect_size.width, img, angleScale, grad, qangle, gamma_correction); break; case CV_8UC4: - hog::compute_gradients_8UC4(effect_size.height, effect_size.width, img, + hog::compute_gradients_8UC4(effect_size.height, effect_size.width, img, angleScale, grad, qangle, gamma_correction); break; } @@ -273,18 +273,18 @@ void cv::ocl::HOGDescriptor::computeBlockHistograms(const oclMat &img) { computeGradient(img, this->grad, this->qangle); - hog::compute_hists(nbins, block_stride.width, block_stride.height, effect_size.height, + hog::compute_hists(nbins, block_stride.width, block_stride.height, effect_size.height, effect_size.width, grad, qangle, gauss_w_lut, block_hists); - hog::normalize_hists(nbins, block_stride.width, block_stride.height, effect_size.height, + hog::normalize_hists(nbins, block_stride.width, block_stride.height, effect_size.height, effect_size.width, block_hists, (float)threshold_L2hys); } -void cv::ocl::HOGDescriptor::getDescriptors(const oclMat &img, Size win_stride, +void cv::ocl::HOGDescriptor::getDescriptors(const oclMat &img, Size win_stride, oclMat &descriptors, int descr_format) { - CV_Assert(win_stride.width % block_stride.width == 0 && + CV_Assert(win_stride.width % block_stride.width == 0 && win_stride.height % block_stride.height == 0); init_buffer(img, win_stride); @@ -295,19 +295,19 @@ void cv::ocl::HOGDescriptor::getDescriptors(const oclMat &img, Size win_stride, Size blocks_per_win = numPartsWithin(win_size, block_size, block_stride); Size wins_per_img = numPartsWithin(effect_size, win_size, win_stride); - descriptors.create(wins_per_img.area(), + descriptors.create(wins_per_img.area(), static_cast(blocks_per_win.area() * block_hist_size), CV_32F); switch 
(descr_format) { case DESCR_FORMAT_ROW_BY_ROW: - hog::extract_descrs_by_rows(win_size.height, win_size.width, - block_stride.height, block_stride.width, win_stride.height, win_stride.width, + hog::extract_descrs_by_rows(win_size.height, win_size.width, + block_stride.height, block_stride.width, win_stride.height, win_stride.width, effect_size.height, effect_size.width, block_hists, descriptors); break; case DESCR_FORMAT_COL_BY_COL: - hog::extract_descrs_by_cols(win_size.height, win_size.width, - block_stride.height, block_stride.width, win_stride.height, win_stride.width, + hog::extract_descrs_by_cols(win_size.height, win_size.width, + block_stride.height, block_stride.width, win_stride.height, win_stride.width, effect_size.height, effect_size.width, block_hists, descriptors); break; default: @@ -316,7 +316,7 @@ void cv::ocl::HOGDescriptor::getDescriptors(const oclMat &img, Size win_stride, } -void cv::ocl::HOGDescriptor::detect(const oclMat &img, vector &hits, +void cv::ocl::HOGDescriptor::detect(const oclMat &img, vector &hits, double hit_threshold, Size win_stride, Size padding) { CV_Assert(img.type() == CV_8UC1 || img.type() == CV_8UC4); @@ -329,15 +329,15 @@ void cv::ocl::HOGDescriptor::detect(const oclMat &img, vector &hits, if (win_stride == Size()) win_stride = block_stride; else - CV_Assert(win_stride.width % block_stride.width == 0 && + CV_Assert(win_stride.width % block_stride.width == 0 && win_stride.height % block_stride.height == 0); init_buffer(img, win_stride); computeBlockHistograms(img); - hog::classify_hists(win_size.height, win_size.width, block_stride.height, - block_stride.width, win_stride.height, win_stride.width, - effect_size.height, effect_size.width, block_hists, detector, + hog::classify_hists(win_size.height, win_size.width, block_stride.height, + block_stride.width, win_stride.height, win_stride.width, + effect_size.height, effect_size.width, block_hists, detector, (float)free_coef, (float)hit_threshold, labels); 
labels.download(labels_host); @@ -354,8 +354,8 @@ void cv::ocl::HOGDescriptor::detect(const oclMat &img, vector &hits, -void cv::ocl::HOGDescriptor::detectMultiScale(const oclMat &img, vector &found_locations, - double hit_threshold, Size win_stride, Size padding, +void cv::ocl::HOGDescriptor::detectMultiScale(const oclMat &img, vector &found_locations, + double hit_threshold, Size win_stride, Size padding, double scale0, int group_threshold) { CV_Assert(img.type() == CV_8UC1 || img.type() == CV_8UC4); @@ -382,7 +382,7 @@ void cv::ocl::HOGDescriptor::detectMultiScale(const oclMat &img, vector &f if (win_stride == Size()) win_stride = block_stride; else - CV_Assert(win_stride.width % block_stride.width == 0 && + CV_Assert(win_stride.width % block_stride.width == 0 && win_stride.height % block_stride.height == 0); init_buffer(img, win_stride); image_scale.create(img.size(), img.type()); @@ -400,10 +400,10 @@ void cv::ocl::HOGDescriptor::detectMultiScale(const oclMat &img, vector &f resize(img, image_scale, effect_size); detect(image_scale, locations, hit_threshold, win_stride, padding); } - Size scaled_win_size(cvRound(win_size.width * scale), + Size scaled_win_size(cvRound(win_size.width * scale), cvRound(win_size.height * scale)); for (size_t j = 0; j < locations.size(); j++) - all_candidates.push_back(Rect(Point2d((CvPoint)locations[j]) * scale, + all_candidates.push_back(Rect(Point2d((CvPoint)locations[j]) * scale, scaled_win_size)); } @@ -416,10 +416,10 @@ int cv::ocl::HOGDescriptor::numPartsWithin(int size, int part_size, int stride) return (size - part_size + stride) / stride; } -cv::Size cv::ocl::HOGDescriptor::numPartsWithin(cv::Size size, cv::Size part_size, +cv::Size cv::ocl::HOGDescriptor::numPartsWithin(cv::Size size, cv::Size part_size, cv::Size stride) { - return Size(numPartsWithin(size.width, part_size.width, stride.width), + return Size(numPartsWithin(size.width, part_size.width, stride.width), numPartsWithin(size.height, part_size.height, 
stride.height)); } @@ -1601,8 +1601,8 @@ static int power_2up(unsigned int n) return -1; // Input is too big } -void cv::ocl::device::hog::set_up_constants(int nbins, - int block_stride_x, int block_stride_y, +void cv::ocl::device::hog::set_up_constants(int nbins, + int block_stride_x, int block_stride_y, int nblocks_win_x, int nblocks_win_y) { cnbins = nbins; @@ -1622,21 +1622,21 @@ void cv::ocl::device::hog::set_up_constants(int nbins, cdescr_size = descr_size; } -void cv::ocl::device::hog::compute_hists(int nbins, +void cv::ocl::device::hog::compute_hists(int nbins, int block_stride_x, int block_stride_y, - int height, int width, - const cv::ocl::oclMat &grad, - const cv::ocl::oclMat &qangle, - const cv::ocl::oclMat &gauss_w_lut, + int height, int width, + const cv::ocl::oclMat &grad, + const cv::ocl::oclMat &qangle, + const cv::ocl::oclMat &gauss_w_lut, cv::ocl::oclMat &block_hists) { Context *clCxt = Context::getContext(); vector< pair > args; string kernelName = "compute_hists_lut_kernel"; - int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) + int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x; - int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) + int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) / block_stride_y; int blocks_total = img_block_width * img_block_height; @@ -1645,7 +1645,7 @@ void cv::ocl::device::hog::compute_hists(int nbins, int blocks_in_group = 4; size_t localThreads[3] = { blocks_in_group * 24, 2, 1 }; - size_t globalThreads[3] = { + size_t globalThreads[3] = { divUp(img_block_width * img_block_height, blocks_in_group) * localThreads[0], 2, 1 }; int hists_size = (nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * 12) * sizeof(float); @@ -1669,7 +1669,7 @@ void cv::ocl::device::hog::compute_hists(int nbins, if(hog_device_cpu) { - openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + 
openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1, "-D CPU"); }else { @@ -1677,15 +1677,15 @@ void cv::ocl::device::hog::compute_hists(int nbins, int wave_size = queryDeviceInfo(kernel); char opt[32] = {0}; sprintf(opt, "-D WAVE_SIZE=%d", wave_size); - openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1, opt); } } -void cv::ocl::device::hog::normalize_hists(int nbins, +void cv::ocl::device::hog::normalize_hists(int nbins, int block_stride_x, int block_stride_y, - int height, int width, - cv::ocl::oclMat &block_hists, + int height, int width, + cv::ocl::oclMat &block_hists, float threshold) { Context *clCxt = Context::getContext(); @@ -1693,14 +1693,14 @@ void cv::ocl::device::hog::normalize_hists(int nbins, string kernelName; int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y; - int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) + int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x; - int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) + int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) / block_stride_y; int nthreads; size_t globalThreads[3] = { 1, 1, 1 }; size_t localThreads[3] = { 1, 1, 1 }; - + if ( nbins == 9 ) { /* optimized for the case of 9 bins */ @@ -1720,7 +1720,7 @@ void cv::ocl::device::hog::normalize_hists(int nbins, localThreads[0] = nthreads; if ((nthreads < 32) || (nthreads > 512) ) - cv::ocl::error("normalize_hists: histogram's size is too small or too big", + cv::ocl::error("normalize_hists: histogram's size is too small or too big", __FILE__, __LINE__, "normalize_hists"); args.push_back( make_pair( sizeof(cl_int), (void *)&nthreads)); @@ -1733,7 +1733,7 @@ void cv::ocl::device::hog::normalize_hists(int nbins, args.push_back( 
make_pair( nthreads * sizeof(float), (void *)NULL)); if(hog_device_cpu) - openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1, "-D CPU"); else { @@ -1741,18 +1741,18 @@ void cv::ocl::device::hog::normalize_hists(int nbins, int wave_size = queryDeviceInfo(kernel); char opt[32] = {0}; sprintf(opt, "-D WAVE_SIZE=%d", wave_size); - openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1, opt); } } -void cv::ocl::device::hog::classify_hists(int win_height, int win_width, - int block_stride_y, int block_stride_x, - int win_stride_y, int win_stride_x, - int height, int width, - const cv::ocl::oclMat &block_hists, - const cv::ocl::oclMat &coefs, - float free_coef, float threshold, +void cv::ocl::device::hog::classify_hists(int win_height, int win_width, + int block_stride_y, int block_stride_x, + int win_stride_y, int win_stride_x, + int height, int width, + const cv::ocl::oclMat &block_hists, + const cv::ocl::oclMat &coefs, + float free_coef, float threshold, cv::ocl::oclMat &labels) { Context *clCxt = Context::getContext(); @@ -1785,7 +1785,7 @@ void cv::ocl::device::hog::classify_hists(int win_height, int win_width, int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; - int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / + int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x; size_t globalThreads[3] = { img_win_width * nthreads, img_win_height, 1 }; @@ -1802,7 +1802,7 @@ void cv::ocl::device::hog::classify_hists(int win_height, int win_width, args.push_back( make_pair( sizeof(cl_mem), (void *)&labels.data)); if(hog_device_cpu) 
- openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1, "-D CPU"); else { @@ -1810,16 +1810,16 @@ void cv::ocl::device::hog::classify_hists(int win_height, int win_width, int wave_size = queryDeviceInfo(kernel); char opt[32] = {0}; sprintf(opt, "-D WAVE_SIZE=%d", wave_size); - openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1, opt); } } -void cv::ocl::device::hog::extract_descrs_by_rows(int win_height, int win_width, +void cv::ocl::device::hog::extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x, - int win_stride_y, int win_stride_x, + int win_stride_y, int win_stride_x, int height, int width, - const cv::ocl::oclMat &block_hists, + const cv::ocl::oclMat &block_hists, cv::ocl::oclMat &descriptors) { Context *clCxt = Context::getContext(); @@ -1830,7 +1830,7 @@ void cv::ocl::device::hog::extract_descrs_by_rows(int win_height, int win_width, int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; - int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / + int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x; int descriptors_quadstep = descriptors.step >> 2; @@ -1847,15 +1847,15 @@ void cv::ocl::device::hog::extract_descrs_by_rows(int win_height, int win_width, args.push_back( make_pair( sizeof(cl_mem), (void *)&block_hists.data)); args.push_back( make_pair( sizeof(cl_mem), (void *)&descriptors.data)); - openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1); } -void 
cv::ocl::device::hog::extract_descrs_by_cols(int win_height, int win_width, +void cv::ocl::device::hog::extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x, - int win_stride_y, int win_stride_x, + int win_stride_y, int win_stride_x, int height, int width, - const cv::ocl::oclMat &block_hists, + const cv::ocl::oclMat &block_hists, cv::ocl::oclMat &descriptors) { Context *clCxt = Context::getContext(); @@ -1866,7 +1866,7 @@ void cv::ocl::device::hog::extract_descrs_by_cols(int win_height, int win_width, int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; - int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / + int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x; int descriptors_quadstep = descriptors.step >> 2; @@ -1884,15 +1884,15 @@ void cv::ocl::device::hog::extract_descrs_by_cols(int win_height, int win_width, args.push_back( make_pair( sizeof(cl_mem), (void *)&block_hists.data)); args.push_back( make_pair( sizeof(cl_mem), (void *)&descriptors.data)); - openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1); } -void cv::ocl::device::hog::compute_gradients_8UC1(int height, int width, +void cv::ocl::device::hog::compute_gradients_8UC1(int height, int width, const cv::ocl::oclMat &img, - float angle_scale, - cv::ocl::oclMat &grad, - cv::ocl::oclMat &qangle, + float angle_scale, + cv::ocl::oclMat &grad, + cv::ocl::oclMat &qangle, bool correct_gamma) { Context *clCxt = Context::getContext(); @@ -1918,15 +1918,15 @@ void cv::ocl::device::hog::compute_gradients_8UC1(int height, int width, args.push_back( make_pair( sizeof(cl_char), (void *)&correctGamma)); args.push_back( make_pair( sizeof(cl_int), 
(void *)&cnbins)); - openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1); } -void cv::ocl::device::hog::compute_gradients_8UC4(int height, int width, +void cv::ocl::device::hog::compute_gradients_8UC4(int height, int width, const cv::ocl::oclMat &img, - float angle_scale, - cv::ocl::oclMat &grad, - cv::ocl::oclMat &qangle, + float angle_scale, + cv::ocl::oclMat &grad, + cv::ocl::oclMat &qangle, bool correct_gamma) { Context *clCxt = Context::getContext(); @@ -1953,6 +1953,6 @@ void cv::ocl::device::hog::compute_gradients_8UC4(int height, int width, args.push_back( make_pair( sizeof(cl_char), (void *)&correctGamma)); args.push_back( make_pair( sizeof(cl_int), (void *)&cnbins)); - openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, + openCLExecuteKernel(clCxt, &objdetect_hog, kernelName, globalThreads, localThreads, args, -1, -1); } \ No newline at end of file diff --git a/modules/ocl/src/imgproc.cpp b/modules/ocl/src/imgproc.cpp index 15c1539c0e..ff509fb110 100644 --- a/modules/ocl/src/imgproc.cpp +++ b/modules/ocl/src/imgproc.cpp @@ -291,7 +291,7 @@ namespace cv args.push_back( make_pair(sizeof(cl_int), (void *)&map1.cols)); args.push_back( make_pair(sizeof(cl_int), (void *)&map1.rows)); args.push_back( make_pair(sizeof(cl_int), (void *)&cols)); - + if(src.clCxt->supportsFeature(Context::CL_DOUBLE)) { args.push_back( make_pair(sizeof(cl_double4), (void *)&borderValue)); @@ -1115,7 +1115,7 @@ namespace cv args.push_back( make_pair( sizeof(cl_int) , (void *)&sum.step)); args.push_back( make_pair( sizeof(cl_int) , (void *)&sum_offset)); size_t gt2[3] = {t_sum.cols * 32, 1, 1}, lt2[3] = {256, 1, 1}; - openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_rows", gt2, lt2, args, -1, depth); + openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_rows", gt2, lt2, args, -1, depth); } /////////////////////// 
corner ////////////////////////////// @@ -1230,7 +1230,7 @@ namespace cv oclMat dx, dy; cornerMinEigenVal_dxdy(src, dst, dx, dy, blockSize, ksize, borderType); } - + void cornerMinEigenVal_dxdy(const oclMat &src, oclMat &dst, oclMat &dx, oclMat &dy, int blockSize, int ksize, int borderType) { if(!src.clCxt->supportsFeature(Context::CL_DOUBLE) && src.depth() == CV_64F) diff --git a/modules/ocl/src/initialization.cpp b/modules/ocl/src/initialization.cpp index 5d81517959..b990e09fe0 100644 --- a/modules/ocl/src/initialization.cpp +++ b/modules/ocl/src/initialization.cpp @@ -168,7 +168,7 @@ namespace cv }; // global variables to hold binary cache properties - static int enable_disk_cache = + static int enable_disk_cache = #ifdef _DEBUG false; #else @@ -514,8 +514,8 @@ namespace cv return; } update_disk_cache |= (mode & CACHE_UPDATE) == CACHE_UPDATE; - enable_disk_cache |= -#ifdef _DEBUG + enable_disk_cache |= +#ifdef _DEBUG (mode & CACHE_DEBUG) == CACHE_DEBUG; #else (mode & CACHE_RELEASE) == CACHE_RELEASE; @@ -950,8 +950,8 @@ namespace cv bool initialized() { - return *((volatile int*)&Context::val) != 0 && - Context::clCxt->impl->clCmdQueue != NULL&& + return *((volatile int*)&Context::val) != 0 && + Context::clCxt->impl->clCmdQueue != NULL&& Context::clCxt->impl->oclcontext != NULL; } diff --git a/modules/ocl/src/match_template.cpp b/modules/ocl/src/match_template.cpp index 1f76d633dc..7c0a7ac5db 100644 --- a/modules/ocl/src/match_template.cpp +++ b/modules/ocl/src/match_template.cpp @@ -103,7 +103,7 @@ namespace cv { // FIXME! 
// always use naive until convolve is imported - return true; + return true; } ////////////////////////////////////////////////////////////////////// @@ -120,7 +120,7 @@ namespace cv else { buf.image_sqsums.resize(1); - + // TODO, add double support for ocl::integral // use CPU integral temporarily Mat sums, sqsums; @@ -360,7 +360,7 @@ namespace cv } else { - + split(image, buf.images); templ_sum = sum(templ) / templ.size().area(); buf.image_sums.resize(buf.images.size()); diff --git a/modules/ocl/src/matrix_operations.cpp b/modules/ocl/src/matrix_operations.cpp index 1ff963a5cd..82189b71e5 100644 --- a/modules/ocl/src/matrix_operations.cpp +++ b/modules/ocl/src/matrix_operations.cpp @@ -627,7 +627,7 @@ static void set_to_withoutmask_run(const oclMat &dst, const Scalar &scalar, stri if(Context::getContext()->supportsFeature(Context::CL_VER_1_2) && dst.offset == 0 && dst.cols == dst.wholecols) { - clEnqueueFillBuffer((cl_command_queue)dst.clCxt->oclCommandQueue(), + clEnqueueFillBuffer((cl_command_queue)dst.clCxt->oclCommandQueue(), (cl_mem)dst.data, args[0].second, args[0].first, 0, dst.step * dst.rows, 0, NULL, NULL); } else diff --git a/modules/ocl/src/mcwutil.cpp b/modules/ocl/src/mcwutil.cpp index 4292a1f877..2966d53dba 100644 --- a/modules/ocl/src/mcwutil.cpp +++ b/modules/ocl/src/mcwutil.cpp @@ -197,7 +197,7 @@ namespace cv desc.buffer = NULL; desc.num_mip_levels = 0; desc.num_samples = 0; - texture = clCreateImage((cl_context)mat.clCxt->oclContext(), CL_MEM_READ_WRITE, &format, &desc, NULL, &err); + texture = clCreateImage((cl_context)mat.clCxt->oclContext(), CL_MEM_READ_WRITE, &format, &desc, NULL, &err); } else #endif @@ -223,7 +223,7 @@ namespace cv const size_t regin[3] = {mat.cols * mat.elemSize(), mat.rows, 1}; clEnqueueCopyBufferRect((cl_command_queue)mat.clCxt->oclCommandQueue(), (cl_mem)mat.data, devData, origin, origin, regin, mat.step, 0, mat.cols * mat.elemSize(), 0, 0, NULL, NULL); - clFlush((cl_command_queue)mat.clCxt->oclCommandQueue()); + 
clFlush((cl_command_queue)mat.clCxt->oclCommandQueue()); } else { diff --git a/modules/ocl/src/moments.cpp b/modules/ocl/src/moments.cpp index cb16fb136d..8dfc664577 100644 --- a/modules/ocl/src/moments.cpp +++ b/modules/ocl/src/moments.cpp @@ -143,7 +143,7 @@ static void icvContourMoments( CvSeq* contour, CvMoments* mom ) args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step )); openCLExecuteKernel(dst_a.clCxt, &moments, "icvContourMoments", globalThreads, localThreads, args, -1, -1); - + cv::Mat dst(dst_a); a00 = a10 = a01 = a20 = a11 = a02 = a30 = a21 = a12 = a03 = 0.0; if (!cv::ocl::Context::getContext()->supportsFeature(Context::CL_DOUBLE)) diff --git a/modules/ocl/src/opencl/arithm_absdiff.cl b/modules/ocl/src/opencl/arithm_absdiff.cl index 6ae869d61c..341a0048ff 100644 --- a/modules/ocl/src/opencl/arithm_absdiff.cl +++ b/modules/ocl/src/opencl/arithm_absdiff.cl @@ -66,7 +66,7 @@ __kernel void arithm_absdiff_D0 (__global uchar *src1, int src1_step, int src1_o if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -117,7 +117,7 @@ __kernel void arithm_absdiff_D2 (__global ushort *src1, int src1_step, int src1_ if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -154,7 +154,7 @@ __kernel void arithm_absdiff_D3 (__global short *src1, int src1_step, int src1_o if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -261,7 +261,7 @@ __kernel void arithm_s_absdiff_C1_D0 (__global uchar *src1, int src1_step, int if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -303,7 +303,7 @@ __kernel void arithm_s_absdiff_C1_D2 (__global ushort *src1, int src1_step, in if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -337,7 +337,7 @@ __kernel void arithm_s_absdiff_C1_D3 (__global short *src1, int src1_step, int if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ 
-443,7 +443,7 @@ __kernel void arithm_s_absdiff_C2_D0 (__global uchar *src1, int src1_step, int if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -588,7 +588,7 @@ __kernel void arithm_s_absdiff_C3_D0 (__global uchar *src1, int src1_step, int if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -645,7 +645,7 @@ __kernel void arithm_s_absdiff_C3_D2 (__global ushort *src1, int src1_step, in if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -698,7 +698,7 @@ __kernel void arithm_s_absdiff_C3_D3 (__global short *src1, int src1_step, int if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_addWeighted.cl b/modules/ocl/src/opencl/arithm_addWeighted.cl index d3a002625d..e7ed289281 100644 --- a/modules/ocl/src/opencl/arithm_addWeighted.cl +++ b/modules/ocl/src/opencl/arithm_addWeighted.cl @@ -128,7 +128,7 @@ __kernel void addWeighted_D2 (__global ushort *src1, int src1_step,int src1_offs { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -191,7 +191,7 @@ __kernel void addWeighted_D3 (__global short *src1, int src1_step,int src1_offse { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -255,7 +255,7 @@ __kernel void addWeighted_D4 (__global int *src1, int src1_step,int src1_offset, x = x << 2; #define bitOfInt (sizeof(int)== 4 ? 
2: 3) - + #ifdef dst_align #undef dst_align #endif @@ -319,7 +319,7 @@ __kernel void addWeighted_D5 (__global float *src1,int src1_step,int src1_offset { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -384,7 +384,7 @@ __kernel void addWeighted_D6 (__global double *src1, int src1_step,int src1_offs { x = x << 2; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_add_scalar.cl b/modules/ocl/src/opencl/arithm_add_scalar.cl index 0552fc8a7b..cdb79f37ed 100644 --- a/modules/ocl/src/opencl/arithm_add_scalar.cl +++ b/modules/ocl/src/opencl/arithm_add_scalar.cl @@ -67,7 +67,7 @@ __kernel void arithm_s_add_C1_D0 (__global uchar *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -110,7 +110,7 @@ __kernel void arithm_s_add_C1_D2 (__global ushort *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -145,7 +145,7 @@ __kernel void arithm_s_add_C1_D3 (__global short *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -250,7 +250,7 @@ __kernel void arithm_s_add_C2_D0 (__global uchar *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_add_scalar_mask.cl b/modules/ocl/src/opencl/arithm_add_scalar_mask.cl index 3dbd376ecf..a0cb7dacb4 100644 --- a/modules/ocl/src/opencl/arithm_add_scalar_mask.cl +++ b/modules/ocl/src/opencl/arithm_add_scalar_mask.cl @@ -69,7 +69,7 @@ __kernel void arithm_s_add_with_mask_C1_D0 (__global uchar *src1, int src1_ste if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -122,7 +122,7 @@ __kernel void arithm_s_add_with_mask_C1_D2 (__global ushort *src1, int src1_st if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -160,7 +160,7 @@ __kernel 
void arithm_s_add_with_mask_C1_D3 (__global short *src1, int src1_ste if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -284,7 +284,7 @@ __kernel void arithm_s_add_with_mask_C2_D0 (__global uchar *src1, int src1_ste if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_bitwise_binary.cl b/modules/ocl/src/opencl/arithm_bitwise_binary.cl index b1f8545d08..8bdd23c177 100644 --- a/modules/ocl/src/opencl/arithm_bitwise_binary.cl +++ b/modules/ocl/src/opencl/arithm_bitwise_binary.cl @@ -72,7 +72,7 @@ __kernel void arithm_bitwise_binary_D0 (__global uchar *src1, int src1_step, int if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -125,7 +125,7 @@ __kernel void arithm_bitwise_binary_D1 (__global char *src1, int src1_step, int if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -179,7 +179,7 @@ __kernel void arithm_bitwise_binary_D2 (__global ushort *src1, int src1_step, in if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -234,7 +234,7 @@ __kernel void arithm_bitwise_binary_D3 (__global short *src1, int src1_step, int if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_bitwise_binary_mask.cl b/modules/ocl/src/opencl/arithm_bitwise_binary_mask.cl index 7e4a884f8c..60cd188203 100644 --- a/modules/ocl/src/opencl/arithm_bitwise_binary_mask.cl +++ b/modules/ocl/src/opencl/arithm_bitwise_binary_mask.cl @@ -73,7 +73,7 @@ __kernel void arithm_bitwise_binary_with_mask_C1_D0 ( if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -118,7 +118,7 @@ __kernel void arithm_bitwise_binary_with_mask_C1_D1 ( if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -163,7 +163,7 @@ __kernel void arithm_bitwise_binary_with_mask_C1_D2 ( 
if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -206,7 +206,7 @@ __kernel void arithm_bitwise_binary_with_mask_C1_D3 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -349,7 +349,7 @@ __kernel void arithm_bitwise_binary_with_mask_C2_D0 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -391,7 +391,7 @@ __kernel void arithm_bitwise_binary_with_mask_C2_D1 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_bitwise_binary_scalar.cl b/modules/ocl/src/opencl/arithm_bitwise_binary_scalar.cl index ce870b3f18..5fa25004d5 100644 --- a/modules/ocl/src/opencl/arithm_bitwise_binary_scalar.cl +++ b/modules/ocl/src/opencl/arithm_bitwise_binary_scalar.cl @@ -70,7 +70,7 @@ __kernel void arithm_s_bitwise_binary_C1_D0 ( if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -108,7 +108,7 @@ __kernel void arithm_s_bitwise_binary_C1_D1 ( if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -146,7 +146,7 @@ __kernel void arithm_s_bitwise_binary_C1_D2 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -181,7 +181,7 @@ __kernel void arithm_s_bitwise_binary_C1_D3 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -293,7 +293,7 @@ __kernel void arithm_s_bitwise_binary_C2_D0 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -331,7 +331,7 @@ __kernel void arithm_s_bitwise_binary_C2_D1 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_bitwise_binary_scalar_mask.cl b/modules/ocl/src/opencl/arithm_bitwise_binary_scalar_mask.cl index 9f5bac5b64..9c6475cf29 100644 --- a/modules/ocl/src/opencl/arithm_bitwise_binary_scalar_mask.cl +++ 
b/modules/ocl/src/opencl/arithm_bitwise_binary_scalar_mask.cl @@ -71,7 +71,7 @@ __kernel void arithm_s_bitwise_binary_with_mask_C1_D0 ( if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -113,7 +113,7 @@ __kernel void arithm_s_bitwise_binary_with_mask_C1_D1 ( if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -154,7 +154,7 @@ __kernel void arithm_s_bitwise_binary_with_mask_C1_D2 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -192,7 +192,7 @@ __kernel void arithm_s_bitwise_binary_with_mask_C1_D3 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -318,7 +318,7 @@ __kernel void arithm_s_bitwise_binary_with_mask_C2_D0 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif @@ -358,7 +358,7 @@ __kernel void arithm_s_bitwise_binary_with_mask_C2_D1 ( if (x < cols && y < rows) { x = x << 1; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_bitwise_not.cl b/modules/ocl/src/opencl/arithm_bitwise_not.cl index 8eb9ece75d..9905130013 100644 --- a/modules/ocl/src/opencl/arithm_bitwise_not.cl +++ b/modules/ocl/src/opencl/arithm_bitwise_not.cl @@ -63,7 +63,7 @@ __kernel void arithm_bitwise_not_D0 (__global uchar *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -106,7 +106,7 @@ __kernel void arithm_bitwise_not_D1 (__global char *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -143,7 +143,7 @@ __kernel void arithm_bitwise_not_D2 (__global ushort *src1, int src1_step, int s if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -181,7 +181,7 @@ __kernel void arithm_bitwise_not_D3 (__global short *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef 
dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_compare_eq.cl b/modules/ocl/src/opencl/arithm_compare_eq.cl index a660d41727..0681c6f100 100644 --- a/modules/ocl/src/opencl/arithm_compare_eq.cl +++ b/modules/ocl/src/opencl/arithm_compare_eq.cl @@ -65,7 +65,7 @@ __kernel void arithm_compare_eq_D0 (__global uchar *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -120,7 +120,7 @@ __kernel void arithm_compare_ne_D2 (__global ushort *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -174,7 +174,7 @@ __kernel void arithm_compare_eq_D3 (__global short *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -380,7 +380,7 @@ __kernel void arithm_compare_gt_D0 (__global uchar *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -434,7 +434,7 @@ __kernel void arithm_compare_gt_D2 (__global ushort *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -490,7 +490,7 @@ __kernel void arithm_compare_gt_D3 (__global short *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -700,7 +700,7 @@ __kernel void arithm_compare_ge_D0 (__global uchar *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -757,7 +757,7 @@ __kernel void arithm_compare_ge_D2 (__global ushort *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -815,7 +815,7 @@ __kernel void arithm_compare_ge_D3 (__global short *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -869,7 +869,7 @@ __kernel void arithm_compare_ge_D4 (__global int 
*src1, int src1_step, int src1_ if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -921,7 +921,7 @@ __kernel void arithm_compare_ge_D5 (__global float *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -975,7 +975,7 @@ __kernel void arithm_compare_ge_D6 (__global double *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_compare_ne.cl b/modules/ocl/src/opencl/arithm_compare_ne.cl index f0128846b8..d0e862d721 100644 --- a/modules/ocl/src/opencl/arithm_compare_ne.cl +++ b/modules/ocl/src/opencl/arithm_compare_ne.cl @@ -61,7 +61,7 @@ __kernel void arithm_compare_ne_D0 (__global uchar *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -116,7 +116,7 @@ __kernel void arithm_compare_ne_D2 (__global ushort *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -171,7 +171,7 @@ __kernel void arithm_compare_ne_D3 (__global short *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -380,7 +380,7 @@ __kernel void arithm_compare_lt_D0 (__global uchar *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -435,7 +435,7 @@ __kernel void arithm_compare_lt_D2 (__global ushort *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -491,7 +491,7 @@ __kernel void arithm_compare_lt_D3 (__global short *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -704,7 +704,7 @@ __kernel void arithm_compare_le_D0 (__global uchar *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef 
dst_align #undef dst_align #endif @@ -760,7 +760,7 @@ __kernel void arithm_compare_le_D2 (__global ushort *src1, int src1_step, int sr if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -816,7 +816,7 @@ __kernel void arithm_compare_le_D3 (__global short *src1, int src1_step, int src if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_div.cl b/modules/ocl/src/opencl/arithm_div.cl index 896277cf58..b79da976b7 100644 --- a/modules/ocl/src/opencl/arithm_div.cl +++ b/modules/ocl/src/opencl/arithm_div.cl @@ -93,7 +93,7 @@ __kernel void arithm_div_D0 (__global uchar *src1, int src1_step, int src1_offse if (coor.x < cols && coor.y < rows) { coor.x = coor.x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -137,7 +137,7 @@ __kernel void arithm_div_D2 (__global ushort *src1, int src1_step, int src1_offs if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -180,7 +180,7 @@ __kernel void arithm_div_D3 (__global short *src1, int src1_step, int src1_offse if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -298,7 +298,7 @@ __kernel void arithm_s_div_D0 (__global uchar *src, int src_step, int src_offset if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -337,7 +337,7 @@ __kernel void arithm_s_div_D2 (__global ushort *src, int src_step, int src_offse if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -375,7 +375,7 @@ __kernel void arithm_s_div_D3 (__global short *src, int src_step, int src_offset if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_flip.cl b/modules/ocl/src/opencl/arithm_flip.cl index d0e6782cbb..49242d07c7 100644 --- a/modules/ocl/src/opencl/arithm_flip.cl +++ b/modules/ocl/src/opencl/arithm_flip.cl @@ -64,7 +64,7 @@ __kernel void 
arithm_flip_rows_D0 (__global uchar *src, int src_step, int src_of if (x < cols && y < thread_rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -122,7 +122,7 @@ __kernel void arithm_flip_rows_D1 (__global char *src, int src_step, int src_off if (x < cols && y < thread_rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -167,7 +167,7 @@ __kernel void arithm_flip_rows_D2 (__global ushort *src, int src_step, int src_o if (x < cols && y < thread_rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -212,7 +212,7 @@ __kernel void arithm_flip_rows_D3 (__global short *src, int src_step, int src_of if (x < cols && y < thread_rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/arithm_mul.cl b/modules/ocl/src/opencl/arithm_mul.cl index 40988f5fed..f853629436 100644 --- a/modules/ocl/src/opencl/arithm_mul.cl +++ b/modules/ocl/src/opencl/arithm_mul.cl @@ -90,7 +90,7 @@ __kernel void arithm_mul_D0 (__global uchar *src1, int src1_step, int src1_offse if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -138,7 +138,7 @@ __kernel void arithm_mul_D2 (__global ushort *src1, int src1_step, int src1_offs if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif @@ -177,7 +177,7 @@ __kernel void arithm_mul_D3 (__global short *src1, int src1_step, int src1_offse if (x < cols && y < rows) { x = x << 2; - + #ifdef dst_align #undef dst_align #endif diff --git a/modules/ocl/src/opencl/brute_force_match.cl b/modules/ocl/src/opencl/brute_force_match.cl index 8dcb9d2070..a05c98ee03 100644 --- a/modules/ocl/src/opencl/brute_force_match.cl +++ b/modules/ocl/src/opencl/brute_force_match.cl @@ -425,7 +425,7 @@ __kernel void BruteForceMatch_RadiusMatch( barrier(CLK_LOCAL_MEM_FENCE); } - if (queryIdx < query_rows && trainIdx < train_rows && + if (queryIdx < query_rows && trainIdx < train_rows && convert_float(result) < maxDistance/* && 
mask(queryIdx, trainIdx)*/) { unsigned int ind = atom_inc(nMatches + queryIdx); diff --git a/modules/ocl/src/opencl/filter_sep_row.cl b/modules/ocl/src/opencl/filter_sep_row.cl index 5524041fc3..30d65c59a7 100644 --- a/modules/ocl/src/opencl/filter_sep_row.cl +++ b/modules/ocl/src/opencl/filter_sep_row.cl @@ -465,7 +465,7 @@ __kernel __attribute__((reqd_work_group_size(LSIZE0,LSIZE1,1))) void row_filter_ start_addr = mad24(y,dst_step_in_pixel,x); dst[start_addr] = sum; } - + } diff --git a/modules/ocl/src/opencl/filtering_boxFilter.cl b/modules/ocl/src/opencl/filtering_boxFilter.cl index 512e32997d..d163ebe76a 100644 --- a/modules/ocl/src/opencl/filtering_boxFilter.cl +++ b/modules/ocl/src/opencl/filtering_boxFilter.cl @@ -231,7 +231,7 @@ __kernel void boxFilter_C1_D0(__global const uchar * restrict src, __global ucha { tmp_sum += (data[i]); } - + int index = dst_startY * dst_step + dst_startX + (col-anX)*4; temp[0][col] = tmp_sum + (data[0]); diff --git a/modules/ocl/src/opencl/haarobjectdetect.cl b/modules/ocl/src/opencl/haarobjectdetect.cl index 4873298af0..003505ec64 100644 --- a/modules/ocl/src/opencl/haarobjectdetect.cl +++ b/modules/ocl/src/opencl/haarobjectdetect.cl @@ -46,7 +46,7 @@ typedef int sumtype; typedef float sqsumtype; -#ifndef STUMP_BASED +#ifndef STUMP_BASED #define STUMP_BASED 1 #endif @@ -323,7 +323,7 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa int root_offset = 0; for(int lcl_loop=0; lcl_loopp[0][0])); diff --git a/modules/ocl/src/opencl/haarobjectdetect_scaled2.cl b/modules/ocl/src/opencl/haarobjectdetect_scaled2.cl index 8507972ff2..23ef7230fe 100644 --- a/modules/ocl/src/opencl/haarobjectdetect_scaled2.cl +++ b/modules/ocl/src/opencl/haarobjectdetect_scaled2.cl @@ -207,7 +207,7 @@ __kernel void gpuRunHaarClassifierCascade_scaled2( - sum[clamp(mad24(info3.y, step, info3.z), 0, max_idx)] - sum[clamp(mad24(info3.w, step, info3.x), 0, max_idx)] + sum[clamp(mad24(info3.w, step, info3.z), 0, max_idx)]) * 
w.z; - + bool passThres = classsum >= nodethreshold; #if STUMP_BASED diff --git a/modules/ocl/src/opencl/imgproc_clahe.cl b/modules/ocl/src/opencl/imgproc_clahe.cl index 0d010f7a5b..49c7096927 100644 --- a/modules/ocl/src/opencl/imgproc_clahe.cl +++ b/modules/ocl/src/opencl/imgproc_clahe.cl @@ -71,15 +71,15 @@ void reduce(volatile __local int* smem, int val, int tid) barrier(CLK_LOCAL_MEM_FENCE); if (tid < 128) - { + { smem[tid] = val += smem[tid + 128]; - } + } barrier(CLK_LOCAL_MEM_FENCE); if (tid < 64) - { + { smem[tid] = val += smem[tid + 64]; - } + } barrier(CLK_LOCAL_MEM_FENCE); if (tid < 32) @@ -125,15 +125,15 @@ void reduce(__local volatile int* smem, int val, int tid) barrier(CLK_LOCAL_MEM_FENCE); if (tid < 128) - { + { smem[tid] = val += smem[tid + 128]; - } + } barrier(CLK_LOCAL_MEM_FENCE); if (tid < 64) - { + { smem[tid] = val += smem[tid + 64]; - } + } barrier(CLK_LOCAL_MEM_FENCE); if (tid < 32) diff --git a/modules/ocl/src/opencl/imgproc_gftt.cl b/modules/ocl/src/opencl/imgproc_gftt.cl index 5fa27ffc1b..4f5075d69f 100644 --- a/modules/ocl/src/opencl/imgproc_gftt.cl +++ b/modules/ocl/src/opencl/imgproc_gftt.cl @@ -49,12 +49,12 @@ __constant sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST; -inline float ELEM_INT2(image2d_t _eig, int _x, int _y) +inline float ELEM_INT2(image2d_t _eig, int _x, int _y) { return read_imagef(_eig, sampler, (int2)(_x, _y)).x; } -inline float ELEM_FLT2(image2d_t _eig, float2 pt) +inline float ELEM_FLT2(image2d_t _eig, float2 pt) { return read_imagef(_eig, sampler, pt).x; } @@ -132,7 +132,7 @@ __kernel const int pairDistance = 1 << (stage - passOfStage); const int blockWidth = 2 * pairDistance; - const int leftId = min( (threadId % pairDistance) + const int leftId = min( (threadId % pairDistance) + (threadId / pairDistance) * blockWidth, count ); const int rightId = min( leftId + pairDistance, count ); @@ -147,7 +147,7 @@ __kernel float2 greater = compareResult ? 
leftPt:rightPt; float2 lesser = compareResult ? rightPt:leftPt; - + corners[leftId] = sortOrder ? lesser : greater; corners[rightId] = sortOrder ? greater : lesser; } @@ -195,20 +195,20 @@ __kernel { pt2 = scratch[j]; val2 = ELEM_FLT2(eig, pt2); - if(val2 > val1) + if(val2 > val1) pos++;//calculate the rank of this element in this work group - else + else { if(val1 > val2) continue; - else + else { // val1 and val2 are same same++; } } } - for (int j=0; j< same; j++) + for (int j=0; j< same; j++) corners[pos + j] = pt1; } __kernel @@ -240,15 +240,15 @@ __kernel for(int k=0; k val2) break; else { - //Increment only if the value is not the same. + //Increment only if the value is not the same. if( val2 > val1 ) pos++; - else + else same++; } } @@ -257,20 +257,20 @@ __kernel for(int k=0; k val2) break; else { - //Don't increment if the value is the same. + //Don't increment if the value is the same. //Two elements are same if (*userComp)(jData, iData) and (*userComp)(iData, jData) are both false if(val2 > val1) pos++; - else + else same++; } - } - for (int j=0; j< same; j++) + } + for (int j=0; j< same; j++) corners[pos + j] = pt1; } diff --git a/modules/ocl/src/opencl/imgproc_warpAffine.cl b/modules/ocl/src/opencl/imgproc_warpAffine.cl index 6eee8d3fa7..16971e252b 100644 --- a/modules/ocl/src/opencl/imgproc_warpAffine.cl +++ b/modules/ocl/src/opencl/imgproc_warpAffine.cl @@ -183,7 +183,7 @@ __kernel void warpAffineLinear_C1_D0(__global const uchar * restrict src, __glob spos1 = src_offset + sy * srcStep + sx + 1; spos2 = src_offset + (sy+1) * srcStep + sx; spos3 = src_offset + (sy+1) * srcStep + sx + 1; - + v0.s0 = scon0.s0 ? src[spos0.s0] : 0; v1.s0 = scon1.s0 ? src[spos1.s0] : 0; v2.s0 = scon2.s0 ? src[spos2.s0] : 0; @@ -203,7 +203,7 @@ __kernel void warpAffineLinear_C1_D0(__global const uchar * restrict src, __glob v1.s3 = scon1.s3 ? src[spos1.s3] : 0; v2.s3 = scon2.s3 ? src[spos2.s3] : 0; v3.s3 = scon3.s3 ? 
src[spos3.s3] : 0; - + short4 itab0, itab1, itab2, itab3; float4 taby, tabx; taby = INTER_SCALE * convert_float4(ay); diff --git a/modules/ocl/src/opencl/imgproc_warpPerspective.cl b/modules/ocl/src/opencl/imgproc_warpPerspective.cl index edbe42c4a7..f00e61cc16 100644 --- a/modules/ocl/src/opencl/imgproc_warpPerspective.cl +++ b/modules/ocl/src/opencl/imgproc_warpPerspective.cl @@ -116,7 +116,7 @@ __kernel void warpPerspectiveNN_C1_D0(__global uchar const * restrict src, __glo sval.s1 = scon.s1 ? src[spos.s1] : 0; sval.s2 = scon.s2 ? src[spos.s2] : 0; sval.s3 = scon.s3 ? src[spos.s3] : 0; - dval = convert_uchar4(dcon) != (uchar4)(0,0,0,0) ? sval : dval; + dval = convert_uchar4(dcon) != (uchar4)(0,0,0,0) ? sval : dval; *d = dval; } } diff --git a/modules/ocl/src/opencl/kernel_radix_sort_by_key.cl b/modules/ocl/src/opencl/kernel_radix_sort_by_key.cl index fdb440aeea..3c3eb98c80 100644 --- a/modules/ocl/src/opencl/kernel_radix_sort_by_key.cl +++ b/modules/ocl/src/opencl/kernel_radix_sort_by_key.cl @@ -43,7 +43,7 @@ // //M*/ -#pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable +#pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable #ifndef N // number of radices #define N 4 @@ -71,14 +71,14 @@ __inline uint convertKey(uint converted_key) converted_key ^= mask; #elif defined(K_INT) const uint SIGN_MASK = 1u << ((sizeof(int) * 8) - 1); - converted_key ^= SIGN_MASK; + converted_key ^= SIGN_MASK; #else #endif return converted_key; } -//FIXME(pengx17): +//FIXME(pengx17): // exclusive scan, need to be optimized as this is too naive... 
kernel void naiveScanAddition( @@ -108,7 +108,7 @@ kernel { const int RADIX_T = N; const int RADICES_T = (1 << RADIX_T); - const int NUM_OF_ELEMENTS_PER_WORK_ITEM_T = RADICES_T; + const int NUM_OF_ELEMENTS_PER_WORK_ITEM_T = RADICES_T; const int MASK_T = (1 << RADIX_T) - 1; int localBuckets[16] = {0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0}; diff --git a/modules/ocl/src/opencl/kernel_sort_by_key.cl b/modules/ocl/src/opencl/kernel_sort_by_key.cl index 18e9d419aa..2e85e5a888 100644 --- a/modules/ocl/src/opencl/kernel_sort_by_key.cl +++ b/modules/ocl/src/opencl/kernel_sort_by_key.cl @@ -62,7 +62,7 @@ #endif /////////////////////// Bitonic sort //////////////////////////// -// ported from +// ported from // https://github.com/HSA-Libraries/Bolt/blob/master/include/bolt/cl/sort_by_key_kernels.cl __kernel void bitonicSort @@ -82,7 +82,7 @@ __kernel const int pairDistance = 1 << (stage - passOfStage); const int blockWidth = 2 * pairDistance; - int leftId = min( (threadId % pairDistance) + int leftId = min( (threadId % pairDistance) + (threadId / pairDistance) * blockWidth, count ); int rightId = min( leftId + pairDistance, count ); @@ -90,7 +90,7 @@ __kernel int temp; const V_T lval = vals[leftId]; - const V_T rval = vals[rightId]; + const V_T rval = vals[rightId]; const K_T lkey = keys[leftId]; const K_T rkey = keys[rightId]; @@ -142,7 +142,7 @@ __kernel int offset = groupID * wg; int same = 0; - + vals += offset; keys += offset; n = (groupID == (numOfGroups-1))? 
(count - wg*(numOfGroups-1)) : wg; @@ -163,13 +163,13 @@ __kernel for (int j=0;j= 512) { if (tid < 256) smem[tid] = sum = sum + smem[tid + 256]; + if (size >= 512) { if (tid < 256) smem[tid] = sum = sum + smem[tid + 256]; barrier(CLK_LOCAL_MEM_FENCE); } - if (size >= 256) { if (tid < 128) smem[tid] = sum = sum + smem[tid + 128]; + if (size >= 256) { if (tid < 128) smem[tid] = sum = sum + smem[tid + 128]; barrier(CLK_LOCAL_MEM_FENCE); } - if (size >= 128) { if (tid < 64) smem[tid] = sum = sum + smem[tid + 64]; + if (size >= 128) { if (tid < 64) smem[tid] = sum = sum + smem[tid + 64]; barrier(CLK_LOCAL_MEM_FENCE); } #ifdef CPU - if (size >= 64) { if (tid < 32) smem[tid] = sum = sum + smem[tid + 32]; + if (size >= 64) { if (tid < 32) smem[tid] = sum = sum + smem[tid + 32]; barrier(CLK_LOCAL_MEM_FENCE); } - if (size >= 32) { if (tid < 16) smem[tid] = sum = sum + smem[tid + 16]; - barrier(CLK_LOCAL_MEM_FENCE); } - if (size >= 16) { if (tid < 8) smem[tid] = sum = sum + smem[tid + 8]; + if (size >= 32) { if (tid < 16) smem[tid] = sum = sum + smem[tid + 16]; barrier(CLK_LOCAL_MEM_FENCE); } - if (size >= 8) { if (tid < 4) smem[tid] = sum = sum + smem[tid + 4]; + if (size >= 16) { if (tid < 8) smem[tid] = sum = sum + smem[tid + 8]; barrier(CLK_LOCAL_MEM_FENCE); } - if (size >= 4) { if (tid < 2) smem[tid] = sum = sum + smem[tid + 2]; - barrier(CLK_LOCAL_MEM_FENCE); } - if (size >= 2) { if (tid < 1) smem[tid] = sum = sum + smem[tid + 1]; + if (size >= 8) { if (tid < 4) smem[tid] = sum = sum + smem[tid + 4]; + barrier(CLK_LOCAL_MEM_FENCE); } + if (size >= 4) { if (tid < 2) smem[tid] = sum = sum + smem[tid + 2]; + barrier(CLK_LOCAL_MEM_FENCE); } + if (size >= 2) { if (tid < 1) smem[tid] = sum = sum + smem[tid + 1]; barrier(CLK_LOCAL_MEM_FENCE); } #else if (tid < 32) @@ -251,7 +251,7 @@ __kernel void normalize_hists_kernel( const int gidX = get_group_id(0); const int gidY = get_group_id(1); - __global float* hist = block_hists + (gidY * img_block_width + gidX) * + __global float* 
hist = block_hists + (gidY * img_block_width + gidX) * block_hist_size + tid; float elem = 0.f; @@ -292,14 +292,14 @@ __kernel void classify_hists_180_kernel( const int gidX = get_group_id(0); const int gidY = get_group_id(1); - __global const float* hist = block_hists + (gidY * win_block_stride_y * + __global const float* hist = block_hists + (gidY * win_block_stride_y * img_block_width + gidX * win_block_stride_x) * cblock_hist_size; float product = 0.f; for (int i = 0; i < cdescr_height; i++) { - product += coefs[i * cdescr_width + tid] * + product += coefs[i * cdescr_width + tid] * hist[i * img_block_width * cblock_hist_size + tid]; } @@ -365,14 +365,14 @@ __kernel void classify_hists_252_kernel( const int gidX = get_group_id(0); const int gidY = get_group_id(1); - __global const float* hist = block_hists + (gidY * win_block_stride_y * + __global const float* hist = block_hists + (gidY * win_block_stride_y * img_block_width + gidX * win_block_stride_x) * cblock_hist_size; float product = 0.f; if (tid < cdescr_width) { for (int i = 0; i < cdescr_height; i++) - product += coefs[i * cdescr_width + tid] * + product += coefs[i * cdescr_width + tid] * hist[i * img_block_width * cblock_hist_size + tid]; } @@ -402,7 +402,7 @@ __kernel void classify_hists_252_kernel( barrier(CLK_LOCAL_MEM_FENCE); #else if (tid < 32) - { + { smem[tid] = product = product + smem[tid + 32]; #if WAVE_SIZE < 32 } barrier(CLK_LOCAL_MEM_FENCE); @@ -434,7 +434,7 @@ __kernel void classify_hists_kernel( const int gidX = get_group_id(0); const int gidY = get_group_id(1); - __global const float* hist = block_hists + (gidY * win_block_stride_y * + __global const float* hist = block_hists + (gidY * win_block_stride_y * img_block_width + gidX * win_block_stride_x) * cblock_hist_size; float product = 0.f; @@ -442,7 +442,7 @@ __kernel void classify_hists_kernel( { int offset_y = i / cdescr_width; int offset_x = i - offset_y * cdescr_width; - product += coefs[i] * + product += coefs[i] * hist[offset_y * 
img_block_width * cblock_hist_size + offset_x]; } @@ -472,7 +472,7 @@ __kernel void classify_hists_kernel( barrier(CLK_LOCAL_MEM_FENCE); #else if (tid < 32) - { + { smem[tid] = product = product + smem[tid + 32]; #if WAVE_SIZE < 32 } barrier(CLK_LOCAL_MEM_FENCE); @@ -494,8 +494,8 @@ __kernel void classify_hists_kernel( // Extract descriptors __kernel void extract_descrs_by_rows_kernel( - const int cblock_hist_size, const int descriptors_quadstep, - const int cdescr_size, const int cdescr_width, const int img_block_width, + const int cblock_hist_size, const int descriptors_quadstep, + const int cdescr_size, const int cdescr_width, const int img_block_width, const int win_block_stride_x, const int win_block_stride_y, __global const float* block_hists, __global float* descriptors) { @@ -504,11 +504,11 @@ __kernel void extract_descrs_by_rows_kernel( int gidY = get_group_id(1); // Get left top corner of the window in src - __global const float* hist = block_hists + (gidY * win_block_stride_y * + __global const float* hist = block_hists + (gidY * win_block_stride_y * img_block_width + gidX * win_block_stride_x) * cblock_hist_size; // Get left top corner of the window in dst - __global float* descriptor = descriptors + + __global float* descriptor = descriptors + (gidY * get_num_groups(0) + gidX) * descriptors_quadstep; // Copy elements from src to dst @@ -522,8 +522,8 @@ __kernel void extract_descrs_by_rows_kernel( __kernel void extract_descrs_by_cols_kernel( const int cblock_hist_size, const int descriptors_quadstep, const int cdescr_size, - const int cnblocks_win_x, const int cnblocks_win_y, const int img_block_width, - const int win_block_stride_x, const int win_block_stride_y, + const int cnblocks_win_x, const int cnblocks_win_y, const int img_block_width, + const int win_block_stride_x, const int win_block_stride_y, __global const float* block_hists, __global float* descriptors) { int tid = get_local_id(0); @@ -531,11 +531,11 @@ __kernel void 
extract_descrs_by_cols_kernel( int gidY = get_group_id(1); // Get left top corner of the window in src - __global const float* hist = block_hists + (gidY * win_block_stride_y * + __global const float* hist = block_hists + (gidY * win_block_stride_y * img_block_width + gidX * win_block_stride_x) * cblock_hist_size; // Get left top corner of the window in dst - __global float* descriptor = descriptors + + __global float* descriptor = descriptors + (gidY * get_num_groups(0) + gidX) * descriptors_quadstep; // Copy elements from src to dst @@ -547,7 +547,7 @@ __kernel void extract_descrs_by_cols_kernel( int y = block_idx / cnblocks_win_x; int x = block_idx - y * cnblocks_win_x; - descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block] = + descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block] = hist[(y * img_block_width + x) * cblock_hist_size + idx_in_block]; } } @@ -556,7 +556,7 @@ __kernel void extract_descrs_by_cols_kernel( // Gradients computation __kernel void compute_gradients_8UC4_kernel( - const int height, const int width, + const int height, const int width, const int img_step, const int grad_quadstep, const int qangle_step, const __global uchar4 * img, __global float * grad, __global uchar * qangle, const float angle_scale, const char correct_gamma, const int cnbins) @@ -600,9 +600,9 @@ __kernel void compute_gradients_8UC4_kernel( barrier(CLK_LOCAL_MEM_FENCE); if (x < width) { - float3 a = (float3) (sh_row[tid], sh_row[tid + (NTHREADS + 2)], + float3 a = (float3) (sh_row[tid], sh_row[tid + (NTHREADS + 2)], sh_row[tid + 2 * (NTHREADS + 2)]); - float3 b = (float3) (sh_row[tid + 2], sh_row[tid + 2 + (NTHREADS + 2)], + float3 b = (float3) (sh_row[tid + 2], sh_row[tid + 2 + (NTHREADS + 2)], sh_row[tid + 2 + 2 * (NTHREADS + 2)]); float3 dx; @@ -659,7 +659,7 @@ __kernel void compute_gradients_8UC4_kernel( } __kernel void compute_gradients_8UC1_kernel( - const int height, const int width, + const int height, const int width, const int 
img_step, const int grad_quadstep, const int qangle_step, __global const uchar * img, __global float * grad, __global uchar * qangle, const float angle_scale, const char correct_gamma, const int cnbins) @@ -717,4 +717,4 @@ __kernel void compute_gradients_8UC1_kernel( grad[ (gidY * grad_quadstep + x) << 1 ] = mag * (1.f - ang); grad[ ((gidY * grad_quadstep + x) << 1) + 1 ] = mag * ang; } -} \ No newline at end of file +} diff --git a/modules/ocl/src/opencl/stereobm.cl b/modules/ocl/src/opencl/stereobm.cl index f1b958812f..56f445e429 100644 --- a/modules/ocl/src/opencl/stereobm.cl +++ b/modules/ocl/src/opencl/stereobm.cl @@ -190,7 +190,7 @@ __kernel void stereoKernel(__global unsigned char *left, __global unsigned char { int idx1 = y_tex * img_step + x_tex; int idx2 = min(y_tex + ((radius << 1) + 1), cheight - 1) * img_step + x_tex; - + barrier(CLK_LOCAL_MEM_FENCE); StepDown(idx1, idx2, left, right, d, col_ssd); diff --git a/modules/ocl/src/opencl/stereobp.cl b/modules/ocl/src/opencl/stereobp.cl index 8a71629f87..1d523e7885 100644 --- a/modules/ocl/src/opencl/stereobp.cl +++ b/modules/ocl/src/opencl/stereobp.cl @@ -67,7 +67,7 @@ ///////////////////////////////////////////////////////////// T saturate_cast(float v){ #ifdef T_SHORT - return convert_short_sat_rte(v); + return convert_short_sat_rte(v); #else return v; #endif @@ -75,7 +75,7 @@ T saturate_cast(float v){ T4 saturate_cast4(float4 v){ #ifdef T_SHORT - return convert_short4_sat_rte(v); + return convert_short4_sat_rte(v); #else return v; #endif @@ -96,7 +96,7 @@ typedef struct inline float pix_diff_1(const uchar4 l, __global const uchar *rs) { - return abs((int)(l.x) - *rs); + return abs((int)(l.x) - *rs); } float pix_diff_4(const uchar4 l, __global const uchar *rs) @@ -174,8 +174,8 @@ __kernel void comp_data(__global uchar *left, int left_rows, int left_cols, i /////////////////////////////////////////////////////////////// //////////////////////// data step down /////////////////////// 
/////////////////////////////////////////////////////////////// -__kernel void data_step_down(__global T *src, int src_rows, - __global T *dst, int dst_rows, int dst_cols, +__kernel void data_step_down(__global T *src, int src_rows, + __global T *dst, int dst_rows, int dst_cols, int src_step, int dst_step, int cndisp) { @@ -289,7 +289,7 @@ void message(__global T *us_, __global T *ds_, __global T *ls_, __global T *rs_, } minimum += cmax_disc_term; - + float4 sum = 0; prev = convert_float4(t_dst[CNDISP - 1]); for (int disp = CNDISP - 2; disp >= 0; disp--) @@ -322,7 +322,7 @@ void message(__global T *us_, __global T *ds_, __global T *ls_, __global T *rs_, __kernel void one_iteration(__global T *u, int u_step, __global T *data, int data_step, __global T *d, __global T *l, __global T *r, - int t, int cols, int rows, + int t, int cols, int rows, float cmax_disc_term, float cdisc_single_jump) { const int y = get_global_id(1); diff --git a/modules/ocl/src/opencl/stereocsbp.cl b/modules/ocl/src/opencl/stereocsbp.cl index ea7af62b21..89f2bb8dc8 100644 --- a/modules/ocl/src/opencl/stereocsbp.cl +++ b/modules/ocl/src/opencl/stereocsbp.cl @@ -129,7 +129,7 @@ __kernel void get_first_k_initial_global_1(__global float *data_cost_selected_, //////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////get_first_k_initial_local//////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////// -__kernel void get_first_k_initial_local_0(__global short *data_cost_selected_, __global short *selected_disp_pyr, +__kernel void get_first_k_initial_local_0(__global short *data_cost_selected_, __global short *selected_disp_pyr, __global short *ctemp,int h, int w, int nr_plane, int cmsg_step1, int cdisp_step1, int cndisp) { @@ -187,7 +187,7 @@ __kernel void get_first_k_initial_local_0(__global short *data_cost_selected_, } } -__kernel 
void get_first_k_initial_local_1(__global float *data_cost_selected_, __global float *selected_disp_pyr, +__kernel void get_first_k_initial_local_1(__global float *data_cost_selected_, __global float *selected_disp_pyr, __global float *ctemp,int h, int w, int nr_plane, int cmsg_step1, int cdisp_step1, int cndisp) { @@ -257,20 +257,20 @@ float compute_3(__global uchar* left, __global uchar* right, return fmin(cdata_weight * (tr + tg + tb), cdata_weight * cmax_data_term); } -float compute_1(__global uchar* left, __global uchar* right, +float compute_1(__global uchar* left, __global uchar* right, float cdata_weight, float cmax_data_term) { return fmin(cdata_weight * abs((int)*left - (int)*right), cdata_weight * cmax_data_term); } short round_short(float v){ - return convert_short_sat_rte(v); + return convert_short_sat_rte(v); } /////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////init_data_cost/////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////// -__kernel void init_data_cost_0(__global short *ctemp, __global uchar *cleft, __global uchar *cright, +__kernel void init_data_cost_0(__global short *ctemp, __global uchar *cleft, __global uchar *cright, int h, int w, int level, int channels, - int cmsg_step1, float cdata_weight, float cmax_data_term, int cdisp_step1, + int cmsg_step1, float cdata_weight, float cmax_data_term, int cdisp_step1, int cth, int cimg_step, int cndisp) { int x = get_global_id(0); @@ -312,9 +312,9 @@ __kernel void init_data_cost_0(__global short *ctemp, __global uchar *cleft, __g } } } -__kernel void init_data_cost_1(__global float *ctemp, __global uchar *cleft, __global uchar *cright, +__kernel void init_data_cost_1(__global float *ctemp, __global uchar *cleft, __global uchar *cright, int h, int w, int level, int channels, - int cmsg_step1, float cdata_weight, float cmax_data_term, 
int cdisp_step1, + int cmsg_step1, float cdata_weight, float cmax_data_term, int cdisp_step1, int cth, int cimg_step, int cndisp) { int x = get_global_id(0); @@ -361,13 +361,13 @@ __kernel void init_data_cost_1(__global float *ctemp, __global uchar *cleft, __g ////////////////////////////////////////////////////////////////////////////////////////////////////////// __kernel void init_data_cost_reduce_0(__global short *ctemp, __global uchar *cleft, __global uchar *cright, __local float *smem, int level, int rows, int cols, int h, int winsz, int channels, - int cndisp,int cimg_step, float cdata_weight, float cmax_data_term, int cth, + int cndisp,int cimg_step, float cdata_weight, float cmax_data_term, int cth, int cdisp_step1, int cmsg_step1) { int x_out = get_group_id(0); int y_out = get_group_id(1) % h; //int d = (blockIdx.y / h) * blockDim.z + threadIdx.z; - int d = (get_group_id(1) / h ) * get_local_size(2) + get_local_id(2); + int d = (get_group_id(1) / h ) * get_local_size(2) + get_local_id(2); int tid = get_local_id(0); @@ -411,39 +411,39 @@ __kernel void init_data_cost_reduce_0(__global short *ctemp, __global uchar *cle if(d < cndisp) { __local float* dline = smem + winsz * get_local_id(2); - if (winsz >= 256) + if (winsz >= 256) { - if (tid < 128) - dline[tid] += dline[tid + 128]; + if (tid < 128) + dline[tid] += dline[tid + 128]; } } - barrier(CLK_LOCAL_MEM_FENCE); + barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local float* dline = smem + winsz * get_local_id(2); - if (winsz >= 128) + if (winsz >= 128) { - if (tid < 64) - dline[tid] += dline[tid + 64]; + if (tid < 64) + dline[tid] += dline[tid + 64]; } } - barrier(CLK_LOCAL_MEM_FENCE); + barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 64) - if (tid < 32) + if (winsz >= 64) + if (tid < 32) vdline[tid] += vdline[tid + 32]; } - barrier(CLK_LOCAL_MEM_FENCE); + barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local volatile float* 
vdline = smem + winsz * get_local_id(2); - if (winsz >= 32) - if (tid < 16) + if (winsz >= 32) + if (tid < 16) vdline[tid] += vdline[tid + 16]; } barrier(CLK_LOCAL_MEM_FENCE); @@ -452,7 +452,7 @@ __kernel void init_data_cost_reduce_0(__global short *ctemp, __global uchar *cle { __local volatile float* vdline = smem + winsz * get_local_id(2); if (winsz >= 16) - if (tid < 8) + if (tid < 8) vdline[tid] += vdline[tid + 8]; } barrier(CLK_LOCAL_MEM_FENCE); @@ -461,7 +461,7 @@ __kernel void init_data_cost_reduce_0(__global short *ctemp, __global uchar *cle { __local volatile float* vdline = smem + winsz * get_local_id(2); if (winsz >= 8) - if (tid < 4) + if (tid < 4) vdline[tid] += vdline[tid + 4]; } barrier(CLK_LOCAL_MEM_FENCE); @@ -470,7 +470,7 @@ __kernel void init_data_cost_reduce_0(__global short *ctemp, __global uchar *cle { __local volatile float* vdline = smem + winsz * get_local_id(2); if (winsz >= 4) - if (tid < 2) + if (tid < 2) vdline[tid] += vdline[tid + 2]; } barrier(CLK_LOCAL_MEM_FENCE); @@ -479,7 +479,7 @@ __kernel void init_data_cost_reduce_0(__global short *ctemp, __global uchar *cle { __local volatile float* vdline = smem + winsz * get_local_id(2); if (winsz >= 2) - if (tid < 1) + if (tid < 1) vdline[tid] += vdline[tid + 1]; } barrier(CLK_LOCAL_MEM_FENCE); @@ -500,7 +500,7 @@ __kernel void init_data_cost_reduce_1(__global float *ctemp, __global uchar *cle { int x_out = get_group_id(0); int y_out = get_group_id(1) % h; - int d = (get_group_id(1) / h ) * get_local_size(2) + get_local_id(2); + int d = (get_group_id(1) / h ) * get_local_size(2) + get_local_id(2); int tid = get_local_id(0); @@ -545,74 +545,74 @@ __kernel void init_data_cost_reduce_1(__global float *ctemp, __global uchar *cle if(d < cndisp) { __local float* dline = smem + winsz * get_local_id(2); - if (winsz >= 256) - if (tid < 128) - dline[tid] += dline[tid + 128]; + if (winsz >= 256) + if (tid < 128) + dline[tid] += dline[tid + 128]; } - barrier(CLK_LOCAL_MEM_FENCE); + 
barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local float* dline = smem + winsz * get_local_id(2); - if (winsz >= 128) - if (tid < 64) - dline[tid] += dline[tid + 64]; + if (winsz >= 128) + if (tid < 64) + dline[tid] += dline[tid + 64]; } - barrier(CLK_LOCAL_MEM_FENCE); + barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 64) - if (tid < 32) - vdline[tid] += vdline[tid + 32]; + if (winsz >= 64) + if (tid < 32) + vdline[tid] += vdline[tid + 32]; } - barrier(CLK_LOCAL_MEM_FENCE); + barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 32) - if (tid < 16) - vdline[tid] += vdline[tid + 16]; + if (winsz >= 32) + if (tid < 16) + vdline[tid] += vdline[tid + 16]; } - barrier(CLK_LOCAL_MEM_FENCE); + barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 16) - if (tid < 8) - vdline[tid] += vdline[tid + 8]; + if (winsz >= 16) + if (tid < 8) + vdline[tid] += vdline[tid + 8]; } - barrier(CLK_LOCAL_MEM_FENCE); + barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 8) - if (tid < 4) - vdline[tid] += vdline[tid + 4]; + if (winsz >= 8) + if (tid < 4) + vdline[tid] += vdline[tid + 4]; } - barrier(CLK_LOCAL_MEM_FENCE); + barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 4) - if (tid < 2) - vdline[tid] += vdline[tid + 2]; + if (winsz >= 4) + if (tid < 2) + vdline[tid] += vdline[tid + 2]; } - barrier(CLK_LOCAL_MEM_FENCE); + barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 2) - if (tid < 1) - vdline[tid] += vdline[tid + 1]; + if (winsz >= 2) + if (tid < 1) + vdline[tid] += vdline[tid + 1]; } - barrier(CLK_LOCAL_MEM_FENCE); + 
barrier(CLK_LOCAL_MEM_FENCE); if(d < cndisp) { @@ -626,10 +626,10 @@ __kernel void init_data_cost_reduce_1(__global float *ctemp, __global uchar *cle /////////////////////////////////////////////////////////////// ////////////////////// compute data cost ////////////////////// /////////////////////////////////////////////////////////////// -__kernel void compute_data_cost_0(__global const short *selected_disp_pyr, __global short *data_cost_, +__kernel void compute_data_cost_0(__global const short *selected_disp_pyr, __global short *data_cost_, __global uchar *cleft, __global uchar *cright, int h, int w, int level, int nr_plane, int channels, - int cmsg_step1, int cmsg_step2, int cdisp_step1, int cdisp_step2, float cdata_weight, + int cmsg_step1, int cmsg_step2, int cdisp_step1, int cdisp_step2, float cdata_weight, float cmax_data_term, int cimg_step, int cth) { @@ -676,10 +676,10 @@ __kernel void compute_data_cost_0(__global const short *selected_disp_pyr, __glo } } } -__kernel void compute_data_cost_1(__global const float *selected_disp_pyr, __global float *data_cost_, +__kernel void compute_data_cost_1(__global const float *selected_disp_pyr, __global float *data_cost_, __global uchar *cleft, __global uchar *cright, int h, int w, int level, int nr_plane, int channels, - int cmsg_step1, int cmsg_step2, int cdisp_step1, int cdisp_step2, float cdata_weight, + int cmsg_step1, int cmsg_step2, int cdisp_step1, int cdisp_step2, float cdata_weight, float cmax_data_term, int cimg_step, int cth) { @@ -728,11 +728,11 @@ __kernel void compute_data_cost_1(__global const float *selected_disp_pyr, __glo //////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////compute_data_cost_reduce////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////// -__kernel void compute_data_cost_reduce_0(__global const short* 
selected_disp_pyr, __global short* data_cost_, +__kernel void compute_data_cost_reduce_0(__global const short* selected_disp_pyr, __global short* data_cost_, __global uchar *cleft, __global uchar *cright,__local float *smem, - int level, int rows, int cols, int h, int nr_plane, + int level, int rows, int cols, int h, int nr_plane, int channels, int winsz, - int cmsg_step1, int cmsg_step2, int cdisp_step1, int cdisp_step2, + int cmsg_step1, int cmsg_step2, int cdisp_step1, int cdisp_step2, float cdata_weight, float cmax_data_term, int cimg_step,int cth) { @@ -788,9 +788,9 @@ __kernel void compute_data_cost_reduce_0(__global const short* selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 64) + if (winsz >= 64) { - if (tid < 32) + if (tid < 32) vdline[tid] += vdline[tid + 32]; } } @@ -799,9 +799,9 @@ __kernel void compute_data_cost_reduce_0(__global const short* selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 32) + if (winsz >= 32) { - if (tid < 16) + if (tid < 16) vdline[tid] += vdline[tid + 16]; } } @@ -810,9 +810,9 @@ __kernel void compute_data_cost_reduce_0(__global const short* selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 16) + if (winsz >= 16) { - if (tid < 8) + if (tid < 8) vdline[tid] += vdline[tid + 8]; } } @@ -821,9 +821,9 @@ __kernel void compute_data_cost_reduce_0(__global const short* selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 8) + if (winsz >= 8) { - if (tid < 4) + if (tid < 4) vdline[tid] += vdline[tid + 4]; } } @@ -832,9 +832,9 @@ __kernel void compute_data_cost_reduce_0(__global const short* selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 4) + if (winsz >= 4) { - if (tid < 2) + if (tid < 2) vdline[tid] += 
vdline[tid + 2]; } } @@ -843,9 +843,9 @@ __kernel void compute_data_cost_reduce_0(__global const short* selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 2) + if (winsz >= 2) { - if (tid < 1) + if (tid < 1) vdline[tid] += vdline[tid + 1]; } } @@ -859,11 +859,11 @@ __kernel void compute_data_cost_reduce_0(__global const short* selected_disp_pyr } } -__kernel void compute_data_cost_reduce_1(__global const float *selected_disp_pyr, __global float *data_cost_, +__kernel void compute_data_cost_reduce_1(__global const float *selected_disp_pyr, __global float *data_cost_, __global uchar *cleft, __global uchar *cright, __local float *smem, - int level, int rows, int cols, int h, int nr_plane, + int level, int rows, int cols, int h, int nr_plane, int channels, int winsz, - int cmsg_step1, int cmsg_step2, int cdisp_step1,int cdisp_step2, float cdata_weight, + int cmsg_step1, int cmsg_step2, int cdisp_step1,int cdisp_step2, float cdata_weight, float cmax_data_term, int cimg_step, int cth) { @@ -918,21 +918,21 @@ __kernel void compute_data_cost_reduce_1(__global const float *selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 64) + if (winsz >= 64) { - if (tid < 32) + if (tid < 32) vdline[tid] += vdline[tid + 32]; } } barrier(CLK_LOCAL_MEM_FENCE); - if(d < nr_plane) + if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 32) + if (winsz >= 32) { - if (tid < 16) + if (tid < 16) vdline[tid] += vdline[tid + 16]; } } @@ -941,9 +941,9 @@ __kernel void compute_data_cost_reduce_1(__global const float *selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 16) + if (winsz >= 16) { - if (tid < 8) + if (tid < 8) vdline[tid] += vdline[tid + 8]; } } @@ -952,9 +952,9 @@ __kernel void compute_data_cost_reduce_1(__global const float *selected_disp_pyr if(d 
< nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 8) + if (winsz >= 8) { - if (tid < 4) + if (tid < 4) vdline[tid] += vdline[tid + 4]; } } @@ -963,9 +963,9 @@ __kernel void compute_data_cost_reduce_1(__global const float *selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 4) + if (winsz >= 4) { - if (tid < 2) + if (tid < 2) vdline[tid] += vdline[tid + 2]; } } @@ -974,9 +974,9 @@ __kernel void compute_data_cost_reduce_1(__global const float *selected_disp_pyr if(d < nr_plane) { __local volatile float* vdline = smem + winsz * get_local_id(2); - if (winsz >= 2) + if (winsz >= 2) { - if (tid < 1) + if (tid < 1) vdline[tid] += vdline[tid + 1]; } } @@ -993,11 +993,11 @@ __kernel void compute_data_cost_reduce_1(__global const float *selected_disp_pyr /////////////////////////////////////////////////////////////// //////////////////////// init message ///////////////////////// /////////////////////////////////////////////////////////////// -void get_first_k_element_increase_0(__global short* u_new, __global short *d_new, __global short *l_new, - __global short *r_new, __global const short *u_cur, __global const short *d_cur, - __global const short *l_cur, __global const short *r_cur, - __global short *data_cost_selected, __global short *disparity_selected_new, - __global short *data_cost_new, __global const short* data_cost_cur, +void get_first_k_element_increase_0(__global short* u_new, __global short *d_new, __global short *l_new, + __global short *r_new, __global const short *u_cur, __global const short *d_cur, + __global const short *l_cur, __global const short *r_cur, + __global short *data_cost_selected, __global short *disparity_selected_new, + __global short *data_cost_new, __global const short* data_cost_cur, __global const short *disparity_selected_cur, int nr_plane, int nr_plane2, int cdisp_step1, int cdisp_step2) @@ -1027,11 +1027,11 @@ void 
get_first_k_element_increase_0(__global short* u_new, __global short *d_new data_cost_new[id * cdisp_step1] = SHRT_MAX; } } -void get_first_k_element_increase_1(__global float *u_new, __global float *d_new, __global float *l_new, - __global float *r_new, __global const float *u_cur, __global const float *d_cur, +void get_first_k_element_increase_1(__global float *u_new, __global float *d_new, __global float *l_new, + __global float *r_new, __global const float *u_cur, __global const float *d_cur, __global const float *l_cur, __global const float *r_cur, - __global float *data_cost_selected, __global float *disparity_selected_new, - __global float *data_cost_new, __global const float *data_cost_cur, + __global float *data_cost_selected, __global float *disparity_selected_new, + __global float *data_cost_new, __global const float *data_cost_cur, __global const float *disparity_selected_cur, int nr_plane, int nr_plane2, int cdisp_step1, int cdisp_step2) @@ -1057,13 +1057,13 @@ void get_first_k_element_increase_1(__global float *u_new, __global float *d_new u_new[i * cdisp_step1] = u_cur[id * cdisp_step2]; d_new[i * cdisp_step1] = d_cur[id * cdisp_step2]; l_new[i * cdisp_step1] = l_cur[id * cdisp_step2]; - r_new[i * cdisp_step1] = r_cur[id * cdisp_step2]; + r_new[i * cdisp_step1] = r_cur[id * cdisp_step2]; data_cost_new[id * cdisp_step1] = FLT_MAX; } } __kernel void init_message_0(__global short *u_new_, __global short *d_new_, __global short *l_new_, - __global short *r_new_, __global short *u_cur_, __global const short *d_cur_, + __global short *r_new_, __global short *u_cur_, __global const short *d_cur_, __global const short *l_cur_, __global const short *r_cur_, __global short *ctemp, __global short *selected_disp_pyr_new, __global const short *selected_disp_pyr_cur, __global short *data_cost_selected_, __global const short *data_cost_, @@ -1113,7 +1113,7 @@ __kernel void init_message_0(__global short *u_new_, __global short *d_new_, __g } } __kernel void 
init_message_1(__global float *u_new_, __global float *d_new_, __global float *l_new_, - __global float *r_new_, __global const float *u_cur_, __global const float *d_cur_, + __global float *r_new_, __global const float *u_cur_, __global const float *d_cur_, __global const float *l_cur_, __global const float *r_cur_, __global float *ctemp, __global float *selected_disp_pyr_new, __global const float *selected_disp_pyr_cur, __global float *data_cost_selected_, __global const float *data_cost_, @@ -1176,28 +1176,28 @@ __kernel void init_message_1(__global float *u_new_, __global float *d_new_, __g id = j; } } - data_cost_selected[i * cdisp_step1] = data_cost[id * cdisp_step1]; + data_cost_selected[i * cdisp_step1] = data_cost[id * cdisp_step1]; disparity_selected_new[i * cdisp_step1] = disparity_selected_cur[id * cdisp_step2]; u_new[i * cdisp_step1] = u_cur[id * cdisp_step2]; d_new[i * cdisp_step1] = d_cur[id * cdisp_step2]; l_new[i * cdisp_step1] = l_cur[id * cdisp_step2]; - r_new[i * cdisp_step1] = r_cur[id * cdisp_step2]; + r_new[i * cdisp_step1] = r_cur[id * cdisp_step2]; data_cost_new[id * cdisp_step1] = FLT_MAX; - } + } } } /////////////////////////////////////////////////////////////// //////////////////// calc all iterations ///////////////////// /////////////////////////////////////////////////////////////// -void message_per_pixel_0(__global const short *data, __global short *msg_dst, __global const short *msg1, +void message_per_pixel_0(__global const short *data, __global short *msg_dst, __global const short *msg1, __global const short *msg2, __global const short *msg3, - __global const short *dst_disp, __global const short *src_disp, + __global const short *dst_disp, __global const short *src_disp, int nr_plane, __global short *temp, float cmax_disc_term, int cdisp_step1, float cdisc_single_jump) { short minimum = SHRT_MAX; - for(int d = 0; d < nr_plane; d++) + for(int d = 0; d < nr_plane; d++) { int idx = d * cdisp_step1; short val = data[idx] + 
msg1[idx] + msg2[idx] + msg3[idx]; @@ -1215,7 +1215,7 @@ void message_per_pixel_0(__global const short *data, __global short *msg_dst, __ short src_disp_reg = src_disp[d * cdisp_step1]; for(int d2 = 0; d2 < nr_plane; d2++) - cost_min = fmin(cost_min, (msg_dst[d2 * cdisp_step1] + + cost_min = fmin(cost_min, (msg_dst[d2 * cdisp_step1] + cdisc_single_jump * abs(dst_disp[d2 * cdisp_step1] - src_disp_reg))); temp[d * cdisp_step1] = convert_short_sat_rte(cost_min); @@ -1226,14 +1226,14 @@ void message_per_pixel_0(__global const short *data, __global short *msg_dst, __ for(int d = 0; d < nr_plane; d++) msg_dst[d * cdisp_step1] = convert_short_sat_rte(temp[d * cdisp_step1] - sum); } -void message_per_pixel_1(__global const float *data, __global float *msg_dst, __global const float *msg1, +void message_per_pixel_1(__global const float *data, __global float *msg_dst, __global const float *msg1, __global const float *msg2, __global const float *msg3, - __global const float *dst_disp, __global const float *src_disp, + __global const float *dst_disp, __global const float *src_disp, int nr_plane, __global float *temp, float cmax_disc_term, int cdisp_step1, float cdisc_single_jump) { float minimum = FLT_MAX; - for(int d = 0; d < nr_plane; d++) + for(int d = 0; d < nr_plane; d++) { int idx = d * cdisp_step1; float val = data[idx] + msg1[idx] + msg2[idx] + msg3[idx]; @@ -1251,7 +1251,7 @@ void message_per_pixel_1(__global const float *data, __global float *msg_dst, __ float src_disp_reg = src_disp[d * cdisp_step1]; for(int d2 = 0; d2 < nr_plane; d2++) - cost_min = fmin(cost_min, (msg_dst[d2 * cdisp_step1] + + cost_min = fmin(cost_min, (msg_dst[d2 * cdisp_step1] + cdisc_single_jump * fabs(dst_disp[d2 * cdisp_step1] - src_disp_reg))); temp[d * cdisp_step1] = cost_min; @@ -1262,9 +1262,9 @@ void message_per_pixel_1(__global const float *data, __global float *msg_dst, __ for(int d = 0; d < nr_plane; d++) msg_dst[d * cdisp_step1] = temp[d * cdisp_step1] - sum; } -__kernel void 
compute_message_0(__global short *u_, __global short *d_, __global short *l_, __global short *r_, - __global const short *data_cost_selected, __global const short *selected_disp_pyr_cur, - __global short *ctemp, int h, int w, int nr_plane, int i, +__kernel void compute_message_0(__global short *u_, __global short *d_, __global short *l_, __global short *r_, + __global const short *data_cost_selected, __global const short *selected_disp_pyr_cur, + __global short *ctemp, int h, int w, int nr_plane, int i, float cmax_disc_term, int cdisp_step1, int cmsg_step1, float cdisc_single_jump) { int y = get_global_id(1); @@ -1283,7 +1283,7 @@ __kernel void compute_message_0(__global short *u_, __global short *d_, __global __global short *temp = ctemp + y * cmsg_step1 + x; - message_per_pixel_0(data, u, r - 1, u + cmsg_step1, l + 1, disp, disp - cmsg_step1, nr_plane, temp, + message_per_pixel_0(data, u, r - 1, u + cmsg_step1, l + 1, disp, disp - cmsg_step1, nr_plane, temp, cmax_disc_term, cdisp_step1, cdisc_single_jump); message_per_pixel_0(data, d, d - cmsg_step1, r - 1, l + 1, disp, disp + cmsg_step1, nr_plane, temp, cmax_disc_term, cdisp_step1, cdisc_single_jump); @@ -1293,9 +1293,9 @@ __kernel void compute_message_0(__global short *u_, __global short *d_, __global cmax_disc_term, cdisp_step1, cdisc_single_jump); } } -__kernel void compute_message_1(__global float *u_, __global float *d_, __global float *l_, __global float *r_, - __global const float *data_cost_selected, __global const float *selected_disp_pyr_cur, - __global float *ctemp, int h, int w, int nr_plane, int i, +__kernel void compute_message_1(__global float *u_, __global float *d_, __global float *l_, __global float *r_, + __global const float *data_cost_selected, __global const float *selected_disp_pyr_cur, + __global float *ctemp, int h, int w, int nr_plane, int i, float cmax_disc_term, int cdisp_step1, int cmsg_step1, float cdisc_single_jump) { int y = get_global_id(1); @@ -1313,7 +1313,7 @@ __kernel void 
compute_message_1(__global float *u_, __global float *d_, __global __global const float *disp = selected_disp_pyr_cur + y * cmsg_step1 + x; __global float *temp = ctemp + y * cmsg_step1 + x; - message_per_pixel_1(data, u, r - 1, u + cmsg_step1, l + 1, disp, disp - cmsg_step1, nr_plane, temp, + message_per_pixel_1(data, u, r - 1, u + cmsg_step1, l + 1, disp, disp - cmsg_step1, nr_plane, temp, cmax_disc_term, cdisp_step1, cdisc_single_jump); message_per_pixel_1(data, d, d - cmsg_step1, r - 1, l + 1, disp, disp + cmsg_step1, nr_plane, temp, cmax_disc_term, cdisp_step1, cdisc_single_jump); @@ -1327,10 +1327,10 @@ __kernel void compute_message_1(__global float *u_, __global float *d_, __global /////////////////////////////////////////////////////////////// /////////////////////////// output //////////////////////////// /////////////////////////////////////////////////////////////// -__kernel void compute_disp_0(__global const short *u_, __global const short *d_, __global const short *l_, - __global const short *r_, __global const short * data_cost_selected, +__kernel void compute_disp_0(__global const short *u_, __global const short *d_, __global const short *l_, + __global const short *r_, __global const short * data_cost_selected, __global const short *disp_selected_pyr, - __global short* disp, + __global short* disp, int res_step, int cols, int rows, int nr_plane, int cmsg_step1, int cdisp_step1) { @@ -1364,10 +1364,10 @@ __kernel void compute_disp_0(__global const short *u_, __global const short *d_, disp[res_step * y + x] = best; } } -__kernel void compute_disp_1(__global const float *u_, __global const float *d_, __global const float *l_, - __global const float *r_, __global const float *data_cost_selected, +__kernel void compute_disp_1(__global const float *u_, __global const float *d_, __global const float *l_, + __global const float *r_, __global const float *data_cost_selected, __global const float *disp_selected_pyr, - __global short *disp, + __global short 
*disp, int res_step, int cols, int rows, int nr_plane, int cmsg_step1, int cdisp_step1) { diff --git a/modules/ocl/src/opencl/tvl1flow.cl b/modules/ocl/src/opencl/tvl1flow.cl index e0ff7307b1..4d410327a3 100644 --- a/modules/ocl/src/opencl/tvl1flow.cl +++ b/modules/ocl/src/opencl/tvl1flow.cl @@ -43,7 +43,7 @@ // //M*/ -__kernel void centeredGradientKernel(__global const float* src, int src_col, int src_row, int src_step, +__kernel void centeredGradientKernel(__global const float* src, int src_col, int src_row, int src_step, __global float* dx, __global float* dy, int dx_step) { int x = get_global_id(0); @@ -53,7 +53,7 @@ __global float* dx, __global float* dy, int dx_step) { int src_x1 = (x + 1) < (src_col -1)? (x + 1) : (src_col - 1); int src_x2 = (x - 1) > 0 ? (x -1) : 0; - + //if(src[y * src_step + src_x1] == src[y * src_step+ src_x2]) //{ // printf("y = %d\n", y); @@ -61,7 +61,7 @@ __global float* dx, __global float* dy, int dx_step) // printf("src_x2 = %d\n", src_x2); //} dx[y * dx_step+ x] = 0.5f * (src[y * src_step + src_x1] - src[y * src_step+ src_x2]); - + int src_y1 = (y+1) < (src_row - 1) ? (y + 1) : (src_row - 1); int src_y2 = (y - 1) > 0 ? 
(y - 1) : 0; dy[y * dx_step+ x] = 0.5f * (src[src_y1 * src_step + x] - src[src_y2 * src_step+ x]); @@ -89,8 +89,8 @@ float bicubicCoeff(float x_) } __kernel void warpBackwardKernel(__global const float* I0, int I0_step, int I0_col, int I0_row, - image2d_t tex_I1, image2d_t tex_I1x, image2d_t tex_I1y, - __global const float* u1, int u1_step, + image2d_t tex_I1, image2d_t tex_I1x, image2d_t tex_I1y, + __global const float* u1, int u1_step, __global const float* u2, __global float* I1w, __global float* I1wx, /*int I1wx_step,*/ @@ -181,8 +181,8 @@ float readImage(__global const float *image, const int x, const int y, const } __kernel void warpBackwardKernelNoImage2d(__global const float* I0, int I0_step, int I0_col, int I0_row, - __global const float* tex_I1, __global const float* tex_I1x, __global const float* tex_I1y, - __global const float* u1, int u1_step, + __global const float* tex_I1, __global const float* tex_I1x, __global const float* tex_I1y, + __global const float* u1, int u1_step, __global const float* u2, __global float* I1w, __global float* I1wx, /*int I1wx_step,*/ @@ -256,12 +256,12 @@ __kernel void warpBackwardKernelNoImage2d(__global const float* I0, int I0_step, } -__kernel void estimateDualVariablesKernel(__global const float* u1, int u1_col, int u1_row, int u1_step, - __global const float* u2, - __global float* p11, int p11_step, +__kernel void estimateDualVariablesKernel(__global const float* u1, int u1_col, int u1_row, int u1_step, + __global const float* u2, + __global float* p11, int p11_step, __global float* p12, __global float* p21, - __global float* p22, + __global float* p22, const float taut, int u2_step, int u1_offset_x, @@ -279,7 +279,7 @@ __kernel void estimateDualVariablesKernel(__global const float* u1, int u1_col, { int src_x1 = (x + 1) < (u1_col - 1) ? 
(x + 1) : (u1_col - 1); const float u1x = u1[(y + u1_offset_y) * u1_step + src_x1 + u1_offset_x] - u1[(y + u1_offset_y) * u1_step + x + u1_offset_x]; - + int src_y1 = (y + 1) < (u1_row - 1) ? (y + 1) : (u1_row - 1); const float u1y = u1[(src_y1 + u1_offset_y) * u1_step + x + u1_offset_x] - u1[(y + u1_offset_y) * u1_step + x + u1_offset_x]; @@ -329,14 +329,14 @@ float divergence(__global const float* v1, __global const float* v2, int y, int __kernel void estimateUKernel(__global const float* I1wx, int I1wx_col, int I1wx_row, int I1wx_step, __global const float* I1wy, /*int I1wy_step,*/ - __global const float* grad, /*int grad_step,*/ + __global const float* grad, /*int grad_step,*/ __global const float* rho_c, /*int rho_c_step,*/ __global const float* p11, /*int p11_step,*/ __global const float* p12, /*int p12_step,*/ __global const float* p21, /*int p21_step,*/ __global const float* p22, /*int p22_step,*/ - __global float* u1, int u1_step, - __global float* u2, + __global float* u1, int u1_step, + __global float* u2, __global float* error, const float l_t, const float theta, int u2_step, int u1_offset_x, int u1_offset_y, diff --git a/modules/ocl/src/pyrlk.cpp b/modules/ocl/src/pyrlk.cpp index 8e9420480c..cdcc8f231f 100644 --- a/modules/ocl/src/pyrlk.cpp +++ b/modules/ocl/src/pyrlk.cpp @@ -145,7 +145,7 @@ static void lkSparse_run(oclMat &I, oclMat &J, static char opt[32] = {0}; sprintf(opt, " -D WAVE_SIZE=%d", wave_size); - openCLExecuteKernel(clCxt, &pyrlk, kernelName, globalThreads, localThreads, + openCLExecuteKernel(clCxt, &pyrlk, kernelName, globalThreads, localThreads, args, I.oclchannels(), I.depth(), opt); releaseTexture(ITex); releaseTexture(JTex); diff --git a/modules/ocl/src/sort_by_key.cpp b/modules/ocl/src/sort_by_key.cpp index 32af2a2fef..0025f0d911 100644 --- a/modules/ocl/src/sort_by_key.cpp +++ b/modules/ocl/src/sort_by_key.cpp @@ -160,7 +160,7 @@ static void sortByKey(oclMat& keys, oclMat& vals, size_t vecSize, bool isGreater namespace radix_sort { 
-//FIXME(pengx17): +//FIXME(pengx17): // exclusive scan, need to be optimized as this is too naive... //void naive_scan_addition(oclMat& input, oclMat& output) //{ @@ -247,8 +247,8 @@ static void sortByKey(oclMat& keys, oclMat& vals, size_t origVecSize, bool isGre } } ocl::copyMakeBorder( - keys(Rect(0,0,origVecSize,1)), buffer_keys, - 0, 0, 0, vecSize - origVecSize, + keys(Rect(0,0,origVecSize,1)), buffer_keys, + 0, 0, 0, vecSize - origVecSize, BORDER_CONSTANT, padding_value); vals(Rect(0,0,origVecSize,1)).copyTo(roi_buffer_vals); newBuffer = true; @@ -274,7 +274,7 @@ static void sortByKey(oclMat& keys, oclMat& vals, size_t origVecSize, bool isGre genSortBuildOption(keys, vals, isGreaterThan, build_opt_buf); //additional build option for radix sort - sprintf(build_opt_buf + strlen(build_opt_buf), " -D K_%s", isKeyFloat?"FLT":"INT"); + sprintf(build_opt_buf + strlen(build_opt_buf), " -D K_%s", isKeyFloat?"FLT":"INT"); String kernelnames[2] = {String("histogramRadixN"), String("permuteRadixN")}; diff --git a/modules/ocl/src/stereo_csbp.cpp b/modules/ocl/src/stereo_csbp.cpp index 44662ea049..1ae70c07da 100644 --- a/modules/ocl/src/stereo_csbp.cpp +++ b/modules/ocl/src/stereo_csbp.cpp @@ -624,11 +624,11 @@ static void csbp_operator(StereoConstantSpaceBP &rthis, oclMat u[2], oclMat d[2] const Scalar zero = Scalar::all(0); ////////////////////////////////////Init/////////////////////////////////////////////////// - int rows = left.rows; - int cols = left.cols; + int rows = left.rows; + int cols = left.cols; rthis.levels = min(rthis.levels, int(log((double)rthis.ndisp) / log(2.0))); - int levels = rthis.levels; + int levels = rthis.levels; AutoBuffer buf(levels * 4); @@ -639,10 +639,10 @@ static void csbp_operator(StereoConstantSpaceBP &rthis, oclMat u[2], oclMat d[2] cols_pyr[0] = cols; rows_pyr[0] = rows; - nr_plane_pyr[0] = rthis.nr_plane; + nr_plane_pyr[0] = rthis.nr_plane; const int n = 64; - step_pyr[0] = alignSize(cols * sizeof(T), n) / sizeof(T); + step_pyr[0] = 
alignSize(cols * sizeof(T), n) / sizeof(T); for (int i = 1; i < levels; i++) { cols_pyr[i] = cols_pyr[i - 1] / 2; @@ -688,7 +688,7 @@ static void csbp_operator(StereoConstantSpaceBP &rthis, oclMat u[2], oclMat d[2] d[0] = zero; r[0] = zero; u[0] = zero; - disp_selected_pyr[0] = zero; + disp_selected_pyr[0] = zero; l[1] = zero; d[1] = zero; diff --git a/modules/ocl/src/stereobp.cpp b/modules/ocl/src/stereobp.cpp index cca1db3499..1a10b07e20 100644 --- a/modules/ocl/src/stereobp.cpp +++ b/modules/ocl/src/stereobp.cpp @@ -136,7 +136,7 @@ namespace cv const int OPT_SIZE = 50; char cn_opt [OPT_SIZE] = ""; - sprintf( cn_opt, "%s -D CN=%d", + sprintf( cn_opt, "%s -D CN=%d", (data_type == CV_16S ? "-D T_SHORT":"-D T_FLOAT"), channels ); diff --git a/modules/ocl/src/tvl1flow.cpp b/modules/ocl/src/tvl1flow.cpp index a322f62a4e..49780553ed 100644 --- a/modules/ocl/src/tvl1flow.cpp +++ b/modules/ocl/src/tvl1flow.cpp @@ -165,16 +165,16 @@ namespace ocl_tvl1flow { void centeredGradient(const oclMat &src, oclMat &dx, oclMat &dy); - void warpBackward(const oclMat &I0, const oclMat &I1, oclMat &I1x, oclMat &I1y, - oclMat &u1, oclMat &u2, oclMat &I1w, oclMat &I1wx, oclMat &I1wy, + void warpBackward(const oclMat &I0, const oclMat &I1, oclMat &I1x, oclMat &I1y, + oclMat &u1, oclMat &u2, oclMat &I1w, oclMat &I1wx, oclMat &I1wy, oclMat &grad, oclMat &rho); - void estimateU(oclMat &I1wx, oclMat &I1wy, oclMat &grad, - oclMat &rho_c, oclMat &p11, oclMat &p12, - oclMat &p21, oclMat &p22, oclMat &u1, + void estimateU(oclMat &I1wx, oclMat &I1wy, oclMat &grad, + oclMat &rho_c, oclMat &p11, oclMat &p12, + oclMat &p21, oclMat &p22, oclMat &u1, oclMat &u2, oclMat &error, float l_t, float theta); - void estimateDualVariables(oclMat &u1, oclMat &u2, + void estimateDualVariables(oclMat &u1, oclMat &u2, oclMat &p11, oclMat &p12, oclMat &p21, oclMat &p22, float taut); } @@ -231,7 +231,7 @@ void cv::ocl::OpticalFlowDual_TVL1_OCL::procOneScale(const oclMat &I0, const ocl double error = 
numeric_limits::max(); for (int n = 0; error > scaledEpsilon && n < iterations; ++n) { - estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, + estimateU(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, u1, u2, diff, l_t, static_cast(theta)); error = ocl::sum(diff)[0]; @@ -299,9 +299,9 @@ void ocl_tvl1flow::estimateDualVariables(oclMat &u1, oclMat &u2, oclMat &p11, oc Context *clCxt = u1.clCxt; size_t localThread[] = {32, 8, 1}; - size_t globalThread[] = + size_t globalThread[] = { - u1.cols, + u1.cols, u1.rows, 1 }; @@ -345,17 +345,17 @@ void ocl_tvl1flow::estimateDualVariables(oclMat &u1, oclMat &u2, oclMat &p11, oc openCLExecuteKernel(clCxt, &tvl1flow, kernelName, globalThread, localThread, args, -1, -1); } -void ocl_tvl1flow::estimateU(oclMat &I1wx, oclMat &I1wy, oclMat &grad, - oclMat &rho_c, oclMat &p11, oclMat &p12, - oclMat &p21, oclMat &p22, oclMat &u1, +void ocl_tvl1flow::estimateU(oclMat &I1wx, oclMat &I1wy, oclMat &grad, + oclMat &rho_c, oclMat &p11, oclMat &p12, + oclMat &p21, oclMat &p22, oclMat &u1, oclMat &u2, oclMat &error, float l_t, float theta) { Context* clCxt = I1wx.clCxt; size_t localThread[] = {32, 8, 1}; - size_t globalThread[] = + size_t globalThread[] = { - I1wx.cols, + I1wx.cols, I1wx.rows, 1 }; @@ -409,7 +409,7 @@ void ocl_tvl1flow::warpBackward(const oclMat &I0, const oclMat &I1, oclMat &I1x, { Context* clCxt = I0.clCxt; const bool isImgSupported = support_image2d(clCxt); - + CV_Assert(isImgSupported); int u1ElementSize = u1.elemSize(); @@ -433,9 +433,9 @@ void ocl_tvl1flow::warpBackward(const oclMat &I0, const oclMat &I1, oclMat &I1x, u2_offset_x = u2_offset_x/u2.elemSize(); size_t localThread[] = {32, 8, 1}; - size_t globalThread[] = + size_t globalThread[] = { - I0.cols, + I0.cols, I0.rows, 1 }; diff --git a/modules/ocl/test/test_arithm.cpp b/modules/ocl/test/test_arithm.cpp index 149c172efa..fa9d099990 100644 --- a/modules/ocl/test/test_arithm.cpp +++ b/modules/ocl/test/test_arithm.cpp @@ -178,7 +178,7 @@ 
PARAM_TEST_CASE(ArithmTestBase, MatType, bool) } void Near1(double threshold = 0.) - { + { EXPECT_MAT_NEAR(dst1, Mat(gdst1_whole), threshold); } diff --git a/modules/ocl/test/test_brute_force_matcher.cpp b/modules/ocl/test/test_brute_force_matcher.cpp index 7d8fc368f8..4d0b45fb78 100644 --- a/modules/ocl/test/test_brute_force_matcher.cpp +++ b/modules/ocl/test/test_brute_force_matcher.cpp @@ -189,20 +189,20 @@ namespace ASSERT_EQ(0, badCount); } - INSTANTIATE_TEST_CASE_P(OCL_Features2D, BruteForceMatcher, + INSTANTIATE_TEST_CASE_P(OCL_Features2D, BruteForceMatcher, testing::Combine( testing::Values( DistType(cv::ocl::BruteForceMatcher_OCL_base::L1Dist), - DistType(cv::ocl::BruteForceMatcher_OCL_base::L2Dist)/*, + DistType(cv::ocl::BruteForceMatcher_OCL_base::L2Dist)/*, DistType(cv::ocl::BruteForceMatcher_OCL_base::HammingDist)*/ ), testing::Values( - DescriptorSize(57), - DescriptorSize(64), - DescriptorSize(83), - DescriptorSize(128), - DescriptorSize(179), - DescriptorSize(256), + DescriptorSize(57), + DescriptorSize(64), + DescriptorSize(83), + DescriptorSize(128), + DescriptorSize(179), + DescriptorSize(256), DescriptorSize(304)) ) ); diff --git a/modules/ocl/test/test_filters.cpp b/modules/ocl/test/test_filters.cpp index 9a1264f8a4..c98c8f40d7 100644 --- a/modules/ocl/test/test_filters.cpp +++ b/modules/ocl/test/test_filters.cpp @@ -57,8 +57,8 @@ using namespace testing; using namespace std; -PARAM_TEST_CASE(FilterTestBase, - MatType, +PARAM_TEST_CASE(FilterTestBase, + MatType, cv::Size, // kernel size cv::Size, // dx,dy int // border type, or iteration @@ -367,7 +367,7 @@ INSTANTIATE_TEST_CASE_P(Filter, Laplacian, Combine( Values(0))); //not use INSTANTIATE_TEST_CASE_P(Filter, ErodeDilate, Combine( - Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), + Values(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4), Values(Size(0, 0)), //not use Values(Size(0, 0)), //not use Values(1))); @@ -383,7 +383,7 @@ INSTANTIATE_TEST_CASE_P(Filter, Sobel, Combine( 
INSTANTIATE_TEST_CASE_P(Filter, Scharr, Combine( Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC4), Values(Size(0, 0)), //not use - Values(Size(0, 1), Size(1, 0)), + Values(Size(0, 1), Size(1, 0)), Values((MatType)cv::BORDER_CONSTANT, (MatType)cv::BORDER_REPLICATE))); INSTANTIATE_TEST_CASE_P(Filter, GaussianBlur, Combine( @@ -395,7 +395,7 @@ INSTANTIATE_TEST_CASE_P(Filter, GaussianBlur, Combine( INSTANTIATE_TEST_CASE_P(Filter, Filter2D, testing::Combine( - Values(CV_8UC1, CV_32FC1, CV_32FC4), + Values(CV_8UC1, CV_32FC1, CV_32FC4), Values(Size(3, 3), Size(15, 15), Size(25, 25)), Values(Size(0, 0)), //not use Values((MatType)cv::BORDER_CONSTANT, (MatType)cv::BORDER_REFLECT101, (MatType)cv::BORDER_REPLICATE, (MatType)cv::BORDER_REFLECT))); diff --git a/modules/ocl/test/test_imgproc.cpp b/modules/ocl/test/test_imgproc.cpp index 225925c03e..46cd257c8d 100644 --- a/modules/ocl/test/test_imgproc.cpp +++ b/modules/ocl/test/test_imgproc.cpp @@ -448,7 +448,7 @@ PARAM_TEST_CASE(ImgprocTestBase, MatType, MatType, MatType, MatType, MatType, bo { cv::Mat cpu_cldst; cldst.download(cpu_cldst); - EXPECT_MAT_NEAR(dst, cpu_cldst, threshold); + EXPECT_MAT_NEAR(dst, cpu_cldst, threshold); } }; ////////////////////////////////equalizeHist////////////////////////////////////////// diff --git a/modules/ocl/test/test_kmeans.cpp b/modules/ocl/test/test_kmeans.cpp index a3e472bdf1..1ea0b1cb21 100644 --- a/modules/ocl/test/test_kmeans.cpp +++ b/modules/ocl/test/test_kmeans.cpp @@ -87,7 +87,7 @@ PARAM_TEST_CASE(Kmeans, int, int, int) for(int j = 0; j < nchannel; j++) center_row_header.at(0, i*nchannel+j) = 50000.0; - for(int j = 0; (j < max_neighbour) || + for(int j = 0; (j < max_neighbour) || (i == K-1 && j < max_neighbour + MHEIGHT%K); j ++) { Mat cur_row_header = src.row(row_idx + 1 + j); @@ -121,15 +121,15 @@ TEST_P(Kmeans, Mat){ ocl::kmeans(d_src, K, d_labels, TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 100, 0), 1, flags, d_centers); - + Mat dd_labels(d_labels); Mat 
dd_centers(d_centers); if(flags & KMEANS_USE_INITIAL_LABELS) { EXPECT_MAT_NEAR(labels, dd_labels, 0); EXPECT_MAT_NEAR(centers, dd_centers, 1e-3); - } - else + } + else { int row_idx = 0; for(int i = 0; i < K; i++) @@ -157,6 +157,6 @@ TEST_P(Kmeans, Mat){ INSTANTIATE_TEST_CASE_P(OCL_ML, Kmeans, Combine( Values(3, 5, 8), Values(CV_32FC1, CV_32FC2, CV_32FC4), - Values(OCL_KMEANS_USE_INITIAL_LABELS/*, OCL_KMEANS_PP_CENTERS*/))); + Values(OCL_KMEANS_USE_INITIAL_LABELS/*, OCL_KMEANS_PP_CENTERS*/))); #endif diff --git a/modules/ocl/test/test_objdetect.cpp b/modules/ocl/test/test_objdetect.cpp index d75d99198b..7a5fc64073 100644 --- a/modules/ocl/test/test_objdetect.cpp +++ b/modules/ocl/test/test_objdetect.cpp @@ -222,13 +222,13 @@ TEST_P(Haar, FaceDetect) { MemStorage storage(cvCreateMemStorage(0)); CvSeq *_objects; - _objects = cascade.oclHaarDetectObjects(d_img, storage, 1.1, 3, + _objects = cascade.oclHaarDetectObjects(d_img, storage, 1.1, 3, flags, Size(30, 30), Size(0, 0)); vector vecAvgComp; Seq(_objects).copyTo(vecAvgComp); oclfaces.resize(vecAvgComp.size()); std::transform(vecAvgComp.begin(), vecAvgComp.end(), oclfaces.begin(), getRect()); - + cpucascade.detectMultiScale(img, faces, 1.1, 3, flags, Size(30, 30), Size(0, 0)); @@ -261,7 +261,7 @@ TEST_P(Haar, FaceDetectUseBuf) } INSTANTIATE_TEST_CASE_P(OCL_ObjDetect, Haar, - Combine(Values(CV_HAAR_SCALE_IMAGE, 0), + Combine(Values(CV_HAAR_SCALE_IMAGE, 0), Values(cascade_frontalface_alt/*, cascade_frontalface_alt2*/))); #endif //HAVE_OPENCL \ No newline at end of file diff --git a/modules/ocl/test/test_optflow.cpp b/modules/ocl/test/test_optflow.cpp index 4693d46ddf..bc08e028c9 100644 --- a/modules/ocl/test/test_optflow.cpp +++ b/modules/ocl/test/test_optflow.cpp @@ -89,7 +89,7 @@ TEST_P(GoodFeaturesToTrack, Accuracy) ASSERT_FALSE(d_pts.empty()); std::vector pts(d_pts.cols); - + detector.downloadPoints(d_pts, pts); std::vector pts_gold; @@ -129,7 +129,7 @@ TEST_P(GoodFeaturesToTrack, EmptyCorners) 
ASSERT_TRUE(corners.empty()); } -INSTANTIATE_TEST_CASE_P(OCL_Video, GoodFeaturesToTrack, +INSTANTIATE_TEST_CASE_P(OCL_Video, GoodFeaturesToTrack, testing::Values(MinDistance(0.0), MinDistance(3.0))); ////////////////////////////////////////////////////////////////////////// diff --git a/modules/ocl/test/test_pyramids.cpp b/modules/ocl/test/test_pyramids.cpp index b7bc752d67..58179ac185 100644 --- a/modules/ocl/test/test_pyramids.cpp +++ b/modules/ocl/test/test_pyramids.cpp @@ -79,7 +79,7 @@ TEST_P(PyrDown, Mat) Size size(MWIDTH, MHEIGHT); Mat src = randomMat(size, CV_MAKETYPE(type, channels)); oclMat gsrc(src); - + pyrDown(src, dst_cpu); pyrDown(gsrc, gdst); diff --git a/modules/ocl/test/test_sort.cpp b/modules/ocl/test/test_sort.cpp index 83326a5426..d303665568 100644 --- a/modules/ocl/test/test_sort.cpp +++ b/modules/ocl/test/test_sort.cpp @@ -59,7 +59,7 @@ IMPLEMENT_PARAM_CLASS(InputSize, int) IMPLEMENT_PARAM_CLASS(SortMethod, int) -template +template struct KV_CVTYPE{ static int toType() {return 0;} }; template<> struct KV_CVTYPE { static int toType() {return CV_32SC1;} }; @@ -101,7 +101,7 @@ void kvquicksort(Mat& keys, Mat& vals, bool isGreater = false) { vector > kvres; toKVPair(keys.begin(), vals.begin(), keys.cols, kvres); - + if(isGreater) { std::sort(kvres.begin(), kvres.end(), kvgreater); @@ -180,7 +180,7 @@ bool checkUnstableSorterResult(const Mat& gkeys_, const Mat& gvals_, { ++ iden_count; } - + // sort dv and gv int num_of_val = (iden_count + 1) * cn_val; std::sort(gvptr + i * cn_val, gvptr + i * cn_val + num_of_val); diff --git a/modules/ocl/test/utility.cpp b/modules/ocl/test/utility.cpp index 440a89d4a0..7031d99fb5 100644 --- a/modules/ocl/test/utility.cpp +++ b/modules/ocl/test/utility.cpp @@ -225,7 +225,7 @@ double checkRectSimilarity(Size sz, std::vector& ob1, std::vector& o cpu_result.setTo(0); for(vector::const_iterator r = ob1.begin(); r != ob1.end(); r++) - { + { cv::Mat cpu_result_roi(cpu_result, *r); cpu_result_roi.setTo(1); 
cpu_result.copyTo(cpu_result); diff --git a/modules/photo/doc/photo.rst b/modules/photo/doc/photo.rst index 6f05239120..fa2aa1ecb8 100644 --- a/modules/photo/doc/photo.rst +++ b/modules/photo/doc/photo.rst @@ -7,5 +7,5 @@ photo. Computational Photography .. toctree:: :maxdepth: 2 - inpainting + inpainting denoising diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp index 745914ca4d..3a6137a498 100644 --- a/modules/python/src2/cv2.cpp +++ b/modules/python/src2/cv2.cpp @@ -124,7 +124,7 @@ typedef Ptr Ptr_FeatureDetector; typedef Ptr Ptr_DescriptorExtractor; typedef Ptr Ptr_Feature2D; typedef Ptr Ptr_DescriptorMatcher; -typedef Ptr Ptr_CLAHE; +typedef Ptr Ptr_CLAHE; typedef SimpleBlobDetector::Params SimpleBlobDetector_Params; diff --git a/modules/python/test/test2.py b/modules/python/test/test2.py index a96be4f6bb..9d19dea9b7 100644 --- a/modules/python/test/test2.py +++ b/modules/python/test/test2.py @@ -35,14 +35,14 @@ class NewOpenCVTests(unittest.TestCase): # Tests to run first; check the handful of basic operations that the later tests rely on class Hackathon244Tests(NewOpenCVTests): - + def test_int_array(self): a = np.array([-1, 2, -3, 4, -5]) absa0 = np.abs(a) self.assert_(cv2.norm(a, cv2.NORM_L1) == 15) absa1 = cv2.absdiff(a, 0) self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0) - + def test_imencode(self): a = np.zeros((480, 640), dtype=np.uint8) flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90]) @@ -50,7 +50,7 @@ class Hackathon244Tests(NewOpenCVTests): self.assertEqual(ajpg.dtype, np.uint8) self.assertGreater(ajpg.shape[0], 1) self.assertEqual(ajpg.shape[1], 1) - + def test_projectPoints(self): objpt = np.float64([[1,2,3]]) imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([])) @@ -59,7 +59,7 @@ class Hackathon244Tests(NewOpenCVTests): self.assertEqual(imgpt1.shape, imgpt0.shape) self.assertEqual(jac0.shape, jac1.shape) self.assertEqual(jac0.shape[0], 
2*objpt.shape[0]) - + def test_estimateAffine3D(self): pattern_size = (11, 8) pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32) @@ -71,7 +71,7 @@ class Hackathon244Tests(NewOpenCVTests): out[2,2]=1 self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3) self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1]) - + def test_fast(self): fd = cv2.FastFeatureDetector(30, True) img = self.get_sample("samples/cpp/right02.jpg", 0) @@ -104,11 +104,11 @@ class Hackathon244Tests(NewOpenCVTests): be = cv2.fitEllipse(a) br = cv2.minAreaRect(a) mc, mr = cv2.minEnclosingCircle(a) - + be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742) br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582) mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977 - + self.check_close_boxes(be, be0, 5, 15) self.check_close_boxes(br, br0, 5, 15) self.check_close_pairs(mc, mc0, 5) diff --git a/modules/stitching/doc/camera.rst b/modules/stitching/doc/camera.rst index dc0b99eace..f6e9f73720 100644 --- a/modules/stitching/doc/camera.rst +++ b/modules/stitching/doc/camera.rst @@ -9,7 +9,7 @@ detail::CameraParams Describes camera parameters. -.. note:: Translation is assumed to be zero during the whole stitching pipeline. +.. note:: Translation is assumed to be zero during the whole stitching pipeline. :: diff --git a/modules/stitching/doc/high_level.rst b/modules/stitching/doc/high_level.rst index 07e785002d..bd75765222 100644 --- a/modules/stitching/doc/high_level.rst +++ b/modules/stitching/doc/high_level.rst @@ -57,9 +57,9 @@ High level image stitcher. 
It's possible to use this class without being aware o const cv::Mat& matchingMask() const { return matching_mask_; } void setMatchingMask(const cv::Mat &mask) - { + { CV_Assert(mask.type() == CV_8U && mask.cols == mask.rows); - matching_mask_ = mask.clone(); + matching_mask_ = mask.clone(); } Ptr bundleAdjuster() { return bundle_adjuster_; } @@ -84,7 +84,7 @@ High level image stitcher. It's possible to use this class without being aware o const Ptr blender() const { return blender_; } void setBlender(Ptr blender) { blender_ = blender; } - private: + private: /* hidden */ }; @@ -117,7 +117,7 @@ These functions try to match the given images and to estimate rotations of each :param images: Input images. :param rois: Region of interest rectangles. - + :return: Status code. Stitcher::composePanorama @@ -132,7 +132,7 @@ These functions try to compose the given images (or images stored internally fro .. ocv:function:: Status Stitcher::composePanorama(InputArray images, OutputArray pano) :param images: Input images. - + :param pano: Final pano. :return: Status code. @@ -147,7 +147,7 @@ These functions try to stitch the given images. .. ocv:function:: Status Stitcher::stitch(InputArray images, const std::vector > &rois, OutputArray pano) :param images: Input images. - + :param rois: Region of interest rectangles. :param pano: Final pano. diff --git a/modules/stitching/doc/seam_estimation.rst b/modules/stitching/doc/seam_estimation.rst index f2b12405a3..119fb1e158 100644 --- a/modules/stitching/doc/seam_estimation.rst +++ b/modules/stitching/doc/seam_estimation.rst @@ -130,6 +130,6 @@ Minimum graph cut-based seam estimator. See details in [V03]_. :: /* hidden */ }; -.. seealso:: +.. 
seealso:: :ocv:class:`detail::GraphCutSeamFinderBase`, :ocv:class:`detail::SeamFinder` diff --git a/modules/stitching/doc/stitching.rst b/modules/stitching/doc/stitching.rst index eb1577d406..983b310e78 100644 --- a/modules/stitching/doc/stitching.rst +++ b/modules/stitching/doc/stitching.rst @@ -4,7 +4,7 @@ stitching. Images stitching .. toctree:: :maxdepth: 2 - + introduction high_level camera diff --git a/modules/superres/src/btv_l1_ocl.cpp b/modules/superres/src/btv_l1_ocl.cpp index 5f9e32675e..c458b24ca8 100644 --- a/modules/superres/src/btv_l1_ocl.cpp +++ b/modules/superres/src/btv_l1_ocl.cpp @@ -160,7 +160,7 @@ void btv_l1_device_ocl::upscale(const oclMat& src, oclMat& dst, int scale) args.push_back(make_pair(sizeof(cl_int), (void*)&src.rows)); args.push_back(make_pair(sizeof(cl_int), (void*)&src.cols)); args.push_back(make_pair(sizeof(cl_int), (void*)&scale)); - args.push_back(make_pair(sizeof(cl_int), (void*)&cn)); + args.push_back(make_pair(sizeof(cl_int), (void*)&cn)); openCLExecuteKernel(clCxt, &superres_btvl1, kernel_name, global_thread, local_thread, args, -1, -1); @@ -465,8 +465,8 @@ namespace // calc motions between input frames - calcRelativeMotions(forwardMotions, backwardMotions, - lowResForwardMotions_, lowResBackwardMotions_, + calcRelativeMotions(forwardMotions, backwardMotions, + lowResForwardMotions_, lowResBackwardMotions_, baseIdx, src[0].size()); upscaleMotions(lowResForwardMotions_, highResForwardMotions_, scale_); diff --git a/modules/superres/src/opencl/superres_btvl1.cl b/modules/superres/src/opencl/superres_btvl1.cl index 0efa1709c8..0a8c62fa1e 100644 --- a/modules/superres/src/opencl/superres_btvl1.cl +++ b/modules/superres/src/opencl/superres_btvl1.cl @@ -110,7 +110,7 @@ __kernel void upscaleKernel(__global float* src, dst[y * channels * scale * dst_step + 4 * x * scale + 0] = src[y * channels * src_step + 4 * x + 0]; dst[y * channels * scale * dst_step + 4 * x * scale + 1] = src[y * channels * src_step + 4 * x + 1]; dst[y * 
channels * scale * dst_step + 4 * x * scale + 2] = src[y * channels * src_step + 4 * x + 2]; - dst[y * channels * scale * dst_step + 4 * x * scale + 3] = src[y * channels * src_step + 4 * x + 3]; + dst[y * channels * scale * dst_step + 4 * x * scale + 3] = src[y * channels * src_step + 4 * x + 3]; } } } diff --git a/modules/superres/test/test_superres.cpp b/modules/superres/test/test_superres.cpp index 9aa9a44bfb..1530d6d667 100644 --- a/modules/superres/test/test_superres.cpp +++ b/modules/superres/test/test_superres.cpp @@ -282,4 +282,4 @@ TEST_F(SuperResolution, BTVL1_OCL) cv::ocl::getDevice(infos); RunTest(cv::superres::createSuperResolution_BTVL1_OCL()); } -#endif +#endif diff --git a/modules/world/CMakeLists.txt b/modules/world/CMakeLists.txt index 23901bf62a..f18635e620 100644 --- a/modules/world/CMakeLists.txt +++ b/modules/world/CMakeLists.txt @@ -95,19 +95,19 @@ macro(ios_include_3party_libs) get_filename_component(srcname_we ${srcname} NAME_WE) string(REGEX REPLACE "${srcname_we}" objpath2 "${objpath1}") string(REGEX REPLACE "${srcname}" objpath3 "${objpath2}") - + list(APPEND objlist "\"${objpath3}\"") endforeach() # (srcname ${sources}) - endforeach() + endforeach() endmacro() if(IOS AND WITH_PNG) ios_include_3party_libs(zlib libpng) -endif() +endif() if(IOS AND WITH_JPEG) ios_include_3party_libs(libjpeg) -endif() +endif() string(REPLACE ";" " " objlist "${objlist}") diff --git a/samples/MacOSX/FaceTracker/README.txt b/samples/MacOSX/FaceTracker/README.txt index 3c2dc42f10..11f433e39a 100644 --- a/samples/MacOSX/FaceTracker/README.txt +++ b/samples/MacOSX/FaceTracker/README.txt @@ -11,16 +11,16 @@ to build a universal binary framework. Invoke this script from Terminal.app, wai and you are done. OpenCV is a Private Framework: -On Mac OS X the concept of Framework bundles is meant to simplify distribution of shared libraries, -accompanying headers and documentation. There are however to subtly different 'flavours' of -Frameworks: public and private ones. 
The public frameworks get installed into the Frameworks -diretories in /Library, /System/Library or ~/Library and are meant to be shared amongst -applications. The private frameworks are only distributed as parts of an Application Bundle. -This makes it easier to deploy applications because they bring their own framework invisibly to -the user. No installation of the framework is necessary and different applications can bring +On Mac OS X the concept of Framework bundles is meant to simplify distribution of shared libraries, +accompanying headers and documentation. There are however to subtly different 'flavours' of +Frameworks: public and private ones. The public frameworks get installed into the Frameworks +diretories in /Library, /System/Library or ~/Library and are meant to be shared amongst +applications. The private frameworks are only distributed as parts of an Application Bundle. +This makes it easier to deploy applications because they bring their own framework invisibly to +the user. No installation of the framework is necessary and different applications can bring different versions of the same framework without any conflict. -Since OpenCV is still a moving target, it seems best to avoid any installation and versioning issues -for an end user. The OpenCV framework that currently comes with this demo application therefore +Since OpenCV is still a moving target, it seems best to avoid any installation and versioning issues +for an end user. The OpenCV framework that currently comes with this demo application therefore is a Private Framework. 
Use it for targets that result in an Application Bundle: diff --git a/samples/c/build_all.sh b/samples/c/build_all.sh index 787474aa2f..f5835ef396 100755 --- a/samples/c/build_all.sh +++ b/samples/c/build_all.sh @@ -3,7 +3,7 @@ if [ $# -gt 0 ] ; then base=`basename $1 .c` echo "compiling $base" - gcc -ggdb `pkg-config opencv --cflags --libs` $base.c -o $base + gcc -ggdb `pkg-config opencv --cflags --libs` $base.c -o $base else for i in *.c; do echo "compiling $i" diff --git a/samples/c/example_cmake/README.txt b/samples/c/example_cmake/README.txt index b33908f2d5..7bf53e7acc 100755 --- a/samples/c/example_cmake/README.txt +++ b/samples/c/example_cmake/README.txt @@ -16,10 +16,10 @@ Then create the binary directory for the example with: Then, if "make install" have been executed, directly running $ cmake /samples/c/example_cmake/ - + will detect the "OpenCVConfig.cmake" file and the project is ready to compile. -If "make install" has not been executed, you'll have to manually pick the opencv +If "make install" has not been executed, you'll have to manually pick the opencv binary directory (Under Windows CMake may remember the correct directory). Open the CMake gui with: $ cmake-gui /samples/c/example_cmake/ @@ -27,6 +27,6 @@ the CMake gui with: And pick the correct value for OpenCV_DIR. 
- + diff --git a/samples/gpu/CMakeLists.txt b/samples/gpu/CMakeLists.txt index 80889fbf4a..a87461f33a 100644 --- a/samples/gpu/CMakeLists.txt +++ b/samples/gpu/CMakeLists.txt @@ -27,7 +27,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) if(HAVE_OPENCL) ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/ocl/include") endif() - + if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function") endif() @@ -47,7 +47,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) if(HAVE_OPENCL) target_link_libraries(${the_target} opencv_ocl) endif() - + set_target_properties(${the_target} PROPERTIES OUTPUT_NAME "${project}-example-${name}" PROJECT_LABEL "(EXAMPLE_${project_upper}) ${name}") diff --git a/samples/gpu/super_resolution.cpp b/samples/gpu/super_resolution.cpp index d62b42a238..4e83c71aba 100644 --- a/samples/gpu/super_resolution.cpp +++ b/samples/gpu/super_resolution.cpp @@ -138,7 +138,7 @@ int main(int argc, const char* argv[]) CV_Assert(!useOcl); info.clear(); } - + if(useOcl) { CV_Assert(!useCuda); @@ -171,7 +171,7 @@ int main(int argc, const char* argv[]) superRes = createSuperResolution_BTVL1(); Ptr of = createOptFlow(optFlow, useCuda); - + if (of.empty()) exit(-1); superRes->set("opticalFlow", of); diff --git a/samples/ocl/clahe.cpp b/samples/ocl/clahe.cpp index 72fc2fb611..c2f4b27bfb 100644 --- a/samples/ocl/clahe.cpp +++ b/samples/ocl/clahe.cpp @@ -46,7 +46,7 @@ int main(int argc, char** argv) createTrackbar("Clip Limit", "CLAHE", &cliplimit, 20, (TrackbarCallback)Clip_Callback); Mat frame, outframe; ocl::oclMat d_outframe; - + int cur_clip; Size cur_tilesize; if(use_cpu) diff --git a/samples/ocl/facedetect.cpp b/samples/ocl/facedetect.cpp index 49148bdecd..711e257e78 100644 --- a/samples/ocl/facedetect.cpp +++ b/samples/ocl/facedetect.cpp @@ -258,7 +258,7 @@ void Draw(Mat& img, vector& faces, double scale) resize(img, img, Size((int)(img.cols/scale), (int)(img.rows/scale))); } imshow( "result", img ); - + 
} diff --git a/samples/python2/dft.py b/samples/python2/dft.py index 73df84dc9e..0a5ca650c1 100644 --- a/samples/python2/dft.py +++ b/samples/python2/dft.py @@ -8,51 +8,51 @@ import sys def shift_dft(src, dst=None): ''' Rearrange the quadrants of Fourier image so that the origin is at - the image center. Swaps quadrant 1 with 3, and 2 with 4. - + the image center. Swaps quadrant 1 with 3, and 2 with 4. + src and dst arrays must be equal size & type ''' - + if dst is None: dst = np.empty(src.shape, src.dtype) elif src.shape != dst.shape: raise ValueError("src and dst must have equal sizes") elif src.dtype != dst.dtype: raise TypeError("src and dst must have equal types") - + if src is dst: ret = np.empty(src.shape, src.dtype) else: ret = dst - + h, w = src.shape[:2] - + cx1 = cx2 = w/2 cy1 = cy2 = h/2 - + # if the size is odd, then adjust the bottom/right quadrants if w % 2 != 0: cx2 += 1 if h % 2 != 0: - cy2 += 1 - + cy2 += 1 + # swap quadrants - + # swap q1 and q3 ret[h-cy1:, w-cx1:] = src[0:cy1 , 0:cx1 ] # q1 -> q3 ret[0:cy2 , 0:cx2 ] = src[h-cy2:, w-cx2:] # q3 -> q1 - + # swap q2 and q4 ret[0:cy2 , w-cx2:] = src[h-cy2:, 0:cx2 ] # q2 -> q4 ret[h-cy1:, 0:cx1 ] = src[0:cy1 , w-cx1:] # q4 -> q2 - + if src is dst: dst[:,:] = ret - + return dst if __name__ == "__main__": - + if len(sys.argv)>1: im = cv2.imread(sys.argv[1]) else : @@ -62,9 +62,9 @@ if __name__ == "__main__": # convert to grayscale im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) h, w = im.shape[:2] - + realInput = im.astype(np.float64) - + # perform an optimally sized dft dft_M = cv2.getOptimalDFTSize(w) dft_N = cv2.getOptimalDFTSize(h) @@ -72,22 +72,22 @@ if __name__ == "__main__": # copy A to dft_A and pad dft_A with zeros dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64) dft_A[:h, :w, 0] = realInput - + # no need to pad bottom part of dft_A with zeros because of # use of nonzeroRows parameter in cv2.dft() cv2.dft(dft_A, dst=dft_A, nonzeroRows=h) - + cv2.imshow("win", im) - + # Split fourier into real 
and imaginary parts image_Re, image_Im = cv2.split(dft_A) - + # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2) magnitude = cv2.sqrt(image_Re**2.0 + image_Im**2.0) - + # Compute log(1 + Mag) log_spectrum = cv2.log(1.0 + magnitude) - + # Rearrange the quadrants of Fourier image so that the origin is at # the image center shift_dft(log_spectrum, log_spectrum) diff --git a/samples/python2/grabcut.py b/samples/python2/grabcut.py index 9fc1280acf..1d5b823ddd 100644 --- a/samples/python2/grabcut.py +++ b/samples/python2/grabcut.py @@ -8,12 +8,12 @@ This sample shows interactive image segmentation using grabcut algorithm. USAGE : python grabcut.py -README FIRST: +README FIRST: Two windows will show up, one for input and one for output. - - At first, in input window, draw a rectangle around the object using + + At first, in input window, draw a rectangle around the object using mouse right button. Then press 'n' to segment the object (once or a few times) -For any finer touch-ups, you can press any of the keys below and draw lines on +For any finer touch-ups, you can press any of the keys below and draw lines on the areas you want. Then again press 'n' for updating the output. 
Key '0' - To select areas of sure background @@ -53,7 +53,7 @@ thickness = 3 # brush thickness def onmouse(event,x,y,flags,param): global img,img2,drawing,value,mask,rectangle,rect,rect_or_mask,ix,iy,rect_over - + # Draw Rectangle if event == cv2.EVENT_RBUTTONDOWN: rectangle = True @@ -73,9 +73,9 @@ def onmouse(event,x,y,flags,param): rect = (ix,iy,abs(ix-x),abs(iy-y)) rect_or_mask = 0 print " Now press the key 'n' a few times until no further change \n" - + # draw touchup curves - + if event == cv2.EVENT_LBUTTONDOWN: if rect_over == False: print "first draw rectangle \n" @@ -94,7 +94,7 @@ def onmouse(event,x,y,flags,param): drawing = False cv2.circle(img,(x,y),thickness,value['color'],-1) cv2.circle(mask,(x,y),thickness,value['val'],-1) - + # print documentation print __doc__ @@ -125,7 +125,7 @@ while(1): cv2.imshow('output',output) cv2.imshow('input',img) k = 0xFF & cv2.waitKey(1) - + # key bindings if k == 27: # esc to exit break @@ -147,11 +147,11 @@ while(1): elif k == ord('r'): # reset everything print "resetting \n" rect = (0,0,1,1) - drawing = False - rectangle = False - rect_or_mask = 100 - rect_over = False - value = DRAW_FG + drawing = False + rectangle = False + rect_or_mask = 100 + rect_over = False + value = DRAW_FG img = img2.copy() mask = np.zeros(img.shape[:2],dtype = np.uint8) # mask initialized to PR_BG output = np.zeros(img.shape,np.uint8) # output image to be shown @@ -160,15 +160,15 @@ while(1): and again press 'n' \n""" if (rect_or_mask == 0): # grabcut with rect bgdmodel = np.zeros((1,65),np.float64) - fgdmodel = np.zeros((1,65),np.float64) + fgdmodel = np.zeros((1,65),np.float64) cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_RECT) rect_or_mask = 1 elif rect_or_mask == 1: # grabcut with mask bgdmodel = np.zeros((1,65),np.float64) - fgdmodel = np.zeros((1,65),np.float64) + fgdmodel = np.zeros((1,65),np.float64) cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_MASK) mask2 = np.where((mask==1) + 
(mask==3),255,0).astype('uint8') - output = cv2.bitwise_and(img2,img2,mask=mask2) + output = cv2.bitwise_and(img2,img2,mask=mask2) cv2.destroyAllWindows() diff --git a/samples/winrt/ImageManipulations/App.xaml b/samples/winrt/ImageManipulations/App.xaml index 2edfd7790e..769f4400d2 100644 --- a/samples/winrt/ImageManipulations/App.xaml +++ b/samples/winrt/ImageManipulations/App.xaml @@ -11,14 +11,14 @@ --> - diff --git a/samples/winrt/ImageManipulations/MainPage.xaml b/samples/winrt/ImageManipulations/MainPage.xaml index e0ed0d79c1..66ce5715f9 100644 --- a/samples/winrt/ImageManipulations/MainPage.xaml +++ b/samples/winrt/ImageManipulations/MainPage.xaml @@ -10,7 +10,7 @@ //********************************************************* --> - - diff --git a/samples/winrt/ImageManipulations/MediaExtensions/Common/AsyncCB.h b/samples/winrt/ImageManipulations/MediaExtensions/Common/AsyncCB.h index 04ff69ed8a..3321b47c95 100644 --- a/samples/winrt/ImageManipulations/MediaExtensions/Common/AsyncCB.h +++ b/samples/winrt/ImageManipulations/MediaExtensions/Common/AsyncCB.h @@ -3,7 +3,7 @@ ////////////////////////////////////////////////////////////////////////// // AsyncCallback [template] // -// Description: +// Description: // Helper class that routes IMFAsyncCallback::Invoke calls to a class // method on the parent class. // @@ -24,7 +24,7 @@ template class AsyncCallback : public IMFAsyncCallback { -public: +public: typedef HRESULT (T::*InvokeFn)(IMFAsyncResult *pAsyncResult); AsyncCallback(T *pParent, InvokeFn fn) : m_pParent(pParent), m_pInvokeFn(fn) @@ -32,13 +32,13 @@ public: } // IUnknown - STDMETHODIMP_(ULONG) AddRef() { + STDMETHODIMP_(ULONG) AddRef() { // Delegate to parent class. - return m_pParent->AddRef(); + return m_pParent->AddRef(); } - STDMETHODIMP_(ULONG) Release() { + STDMETHODIMP_(ULONG) Release() { // Delegate to parent class. 
- return m_pParent->Release(); + return m_pParent->Release(); } STDMETHODIMP QueryInterface(REFIID iid, void** ppv) { diff --git a/samples/winrt/ImageManipulations/MediaExtensions/Common/CritSec.h b/samples/winrt/ImageManipulations/MediaExtensions/Common/CritSec.h index d5ea05bfd9..cd6c6effa3 100644 --- a/samples/winrt/ImageManipulations/MediaExtensions/Common/CritSec.h +++ b/samples/winrt/ImageManipulations/MediaExtensions/Common/CritSec.h @@ -36,7 +36,7 @@ public: ////////////////////////////////////////////////////////////////////////// // AutoLock -// Description: Provides automatic locking and unlocking of a +// Description: Provides automatic locking and unlocking of a // of a critical section. // // Note: The AutoLock object must go out of scope before the CritSec. diff --git a/samples/winrt/ImageManipulations/MediaExtensions/Common/LinkList.h b/samples/winrt/ImageManipulations/MediaExtensions/Common/LinkList.h index c67c0f2ca9..3657b3d7c5 100644 --- a/samples/winrt/ImageManipulations/MediaExtensions/Common/LinkList.h +++ b/samples/winrt/ImageManipulations/MediaExtensions/Common/LinkList.h @@ -13,9 +13,9 @@ #pragma once // Notes: -// -// The List class template implements a simple double-linked list. -// It uses STL's copy semantics. +// +// The List class template implements a simple double-linked list. +// It uses STL's copy semantics. // There are two versions of the Clear() method: // Clear(void) clears the list w/out cleaning up the object. @@ -90,7 +90,7 @@ public: private: const Node *pNode; - POSITION(Node *p) : pNode(p) + POSITION(Node *p) : pNode(p) { } }; @@ -123,7 +123,7 @@ protected: } Node *pAfter = pBefore->next; - + pBefore->next = pNode; pAfter->prev = pNode; @@ -336,12 +336,12 @@ public: } HRESULT GetItemPos(POSITION pos, T *ppItem) - { + { if (pos.pNode) { return GetItem(pos.pNode, ppItem); } - else + else { return E_FAIL; } @@ -359,7 +359,7 @@ public: } } - // Remove an item at a position. + // Remove an item at a position. 
// The item is returns in ppItem, unless ppItem is nullptr. // NOTE: This method invalidates the POSITION object. HRESULT Remove(POSITION& pos, T *ppItem) @@ -390,7 +390,7 @@ public: class ComAutoRelease { -public: +public: void operator()(IUnknown *p) { if (p) @@ -399,10 +399,10 @@ public: } } }; - + class MemDelete { -public: +public: void operator()(void *p) { if (p) @@ -416,9 +416,9 @@ public: // ComPtrList class // Derived class that makes it safer to store COM pointers in the List<> class. // It automatically AddRef's the pointers that are inserted onto the list -// (unless the insertion method fails). +// (unless the insertion method fails). // -// T must be a COM interface type. +// T must be a COM interface type. // example: ComPtrList // // NULLABLE: If true, client can insert nullptr pointers. This means GetItem can @@ -487,7 +487,7 @@ protected: HRESULT RemoveItem(Node *pNode, Ptr *ppItem) { // ppItem can be nullptr, but we need to get the - // item so that we can release it. + // item so that we can release it. // If ppItem is not nullptr, we will AddRef it on the way out. 
diff --git a/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/OcvImageManipulations.idl b/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/OcvImageManipulations.idl index 120ef7dbb4..8e33c4af4c 100644 --- a/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/OcvImageManipulations.idl +++ b/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/OcvImageManipulations.idl @@ -5,7 +5,7 @@ import "Windows.Media.idl"; namespace OcvTransform { [version(NTDDI_WIN8)] - runtimeclass OcvImageManipulations + runtimeclass OcvImageManipulations { } } \ No newline at end of file diff --git a/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/OcvTransform.cpp b/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/OcvTransform.cpp index bf98128158..c96c0423db 100644 --- a/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/OcvTransform.cpp +++ b/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/OcvTransform.cpp @@ -1333,7 +1333,7 @@ HRESULT OcvImageManipulations::OnProcessOutput(IMFMediaBuffer *pIn, IMFMediaBuff const int mHistSize[] = {25}; const float baseRabge[] = {0.f,256.f}; const float* ranges[] = {baseRabge}; - + const cv::Scalar mColorsY[] = { cv::Scalar(76), cv::Scalar(149), cv::Scalar(29) }; const cv::Scalar mColorsUV[] = { cv::Scalar(84, 255), cv::Scalar(43, 21), cv::Scalar(255, 107) }; @@ -1370,7 +1370,7 @@ HRESULT OcvImageManipulations::OnProcessOutput(IMFMediaBuffer *pIn, IMFMediaBuff mP2.y /= 2; cv::line(OutputUV, mP1, mP2, mColorsUV[c], thikness/2); } - } + } } break; default: break; diff --git a/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/dllmain.cpp b/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/dllmain.cpp index d11bceaf07..dd4d4bf0aa 100644 --- a/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/dllmain.cpp +++ b/samples/winrt/ImageManipulations/MediaExtensions/OcvTransform/dllmain.cpp @@ -47,7 +47,7 @@ HRESULT WINAPI 
DllGetActivationFactory( _In_ HSTRING activatibleClassId, _Outptr HRESULT WINAPI DllCanUnloadNow() { - auto &module = Microsoft::WRL::Module::GetModule(); + auto &module = Microsoft::WRL::Module::GetModule(); return (module.Terminate()) ? S_OK : S_FALSE; } diff --git a/samples/winrt/ImageManipulations/common/StandardStyles.xaml b/samples/winrt/ImageManipulations/common/StandardStyles.xaml index 7c3d238776..b4edc81f44 100644 --- a/samples/winrt/ImageManipulations/common/StandardStyles.xaml +++ b/samples/winrt/ImageManipulations/common/StandardStyles.xaml @@ -15,7 +15,7 @@ xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"> - + @@ -399,7 +399,7 @@ - +