Merge pull request #4050 from Dikay900:2_4_to_master

pull/4052/head
Vadim Pisarevsky 10 years ago
commit bb989f2034
  1. modules/androidcamera/camera_wrapper/camera_wrapper.cpp (3 changed lines)
  2. modules/calib3d/include/opencv2/calib3d.hpp (2 changed lines)
  3. modules/calib3d/src/stereobm.cpp (165 changed lines)
  4. modules/core/doc/intro.markdown (2 changed lines)
  5. modules/core/include/opencv2/core/matx.hpp (4 changed lines)
  6. modules/core/src/matop.cpp (28 changed lines)
  7. modules/highgui/src/ppltasks_winrt.h (10 changed lines)
  8. modules/highgui/src/window_cocoa.mm (2 changed lines)
  9. modules/imgproc/include/opencv2/imgproc/types_c.h (2 changed lines)
  10. modules/imgproc/src/contours.cpp (8 changed lines)
  11. modules/imgproc/src/filterengine.hpp (4 changed lines)
  12. modules/java/generator/src/java/android+JavaCameraView.java (2 changed lines)
  13. modules/photo/include/opencv2/photo.hpp (2 changed lines)
  14. modules/photo/include/opencv2/photo/cuda.hpp (2 changed lines)
  15. modules/stitching/src/motion_estimators.cpp (5 changed lines)
  16. modules/ts/include/opencv2/ts.hpp (4 changed lines)
  17. modules/videoio/src/cap_gstreamer.cpp (2 changed lines)
  18. modules/videoio/src/cap_ios_abstract_camera.mm (7 changed lines)
  19. modules/videoio/src/cap_libv4l.cpp (8 changed lines)
  20. modules/videoio/src/cap_mjpeg_encoder.cpp (4 changed lines)
  21. modules/videoio/src/cap_v4l.cpp (8 changed lines)
  22. modules/videoio/src/ppltasks_winrt.hpp (10 changed lines)

@ -25,6 +25,7 @@
#elif defined(ANDROID_r4_3_0) || defined(ANDROID_r4_4_0)
# include <gui/IGraphicBufferProducer.h>
# include <gui/BufferQueue.h>
# include <ui/GraphicBuffer.h>
#else
# include <surfaceflinger/ISurface.h>
#endif
@ -683,6 +684,7 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
# elif defined(ANDROID_r4_4_0)
void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
handler->queue = new(buffer_queue_obj) BufferQueue();
handler->queue->setConsumerUsageBits(GraphicBuffer::USAGE_HW_TEXTURE);
void* consumer_listener_obj = operator new(sizeof(ConsumerListenerStub) + MAGIC_TAIL);
handler->listener = new(consumer_listener_obj) ConsumerListenerStub();
handler->queue->consumerConnect(handler->listener, true);
@ -1087,6 +1089,7 @@ void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
# elif defined(ANDROID_r4_4_0)
void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
handler->queue = new(buffer_queue_obj) BufferQueue();
handler->queue->setConsumerUsageBits(GraphicBuffer::USAGE_HW_TEXTURE);
handler->queue->consumerConnect(handler->listener, true);
bufferStatus = handler->camera->setPreviewTarget(handler->queue);
if (bufferStatus != 0)

@ -166,7 +166,7 @@ pattern (every view is described by several 3D-2D point correspondences).
\f[x' = (\theta_d / r) x \\ y' = (\theta_d / r) y \f]
Finally, convertion into pixel coordinates: The final pixel coordinates vector [u; v] where:
Finally, conversion into pixel coordinates: The final pixel coordinates vector [u; v] where:
\f[u = f_x (x' + \alpha y') + c_x \\
v = f_y y' + c_y\f]
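Equivalently, the two scalar equations above can be written as a single affine map:
\f[\begin{bmatrix} u \\ v \end{bmatrix} =
\begin{bmatrix} f_x & f_x \alpha \\ 0 & f_y \end{bmatrix}
\begin{bmatrix} x' \\ y' \end{bmatrix} +
\begin{bmatrix} c_x \\ c_y \end{bmatrix}\f]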

@ -215,7 +215,42 @@ prefilterXSobel( const Mat& src, Mat& dst, int ftzero )
dptr0[0] = dptr0[size.width-1] = dptr1[0] = dptr1[size.width-1] = val0;
x = 1;
#if CV_SSE2
#if CV_NEON
int16x8_t ftz = vdupq_n_s16 ((short) ftzero);
uint8x8_t ftz2 = vdup_n_u8 (cv::saturate_cast<uchar>(ftzero*2));
for(; x <=size.width-9; x += 8 )
{
uint8x8_t c0 = vld1_u8 (srow0 + x - 1);
uint8x8_t c1 = vld1_u8 (srow1 + x - 1);
uint8x8_t d0 = vld1_u8 (srow0 + x + 1);
uint8x8_t d1 = vld1_u8 (srow1 + x + 1);
int16x8_t t0 = vreinterpretq_s16_u16 (vsubl_u8 (d0, c0));
int16x8_t t1 = vreinterpretq_s16_u16 (vsubl_u8 (d1, c1));
uint8x8_t c2 = vld1_u8 (srow2 + x - 1);
uint8x8_t c3 = vld1_u8 (srow3 + x - 1);
uint8x8_t d2 = vld1_u8 (srow2 + x + 1);
uint8x8_t d3 = vld1_u8 (srow3 + x + 1);
int16x8_t t2 = vreinterpretq_s16_u16 (vsubl_u8 (d2, c2));
int16x8_t t3 = vreinterpretq_s16_u16 (vsubl_u8 (d3, c3));
int16x8_t v0 = vaddq_s16 (vaddq_s16 (t2, t0), vaddq_s16 (t1, t1));
int16x8_t v1 = vaddq_s16 (vaddq_s16 (t3, t1), vaddq_s16 (t2, t2));
uint8x8_t v0_u8 = vqmovun_s16 (vaddq_s16 (v0, ftz));
uint8x8_t v1_u8 = vqmovun_s16 (vaddq_s16 (v1, ftz));
v0_u8 = vmin_u8 (v0_u8, ftz2);
v1_u8 = vmin_u8 (v1_u8, ftz2);
vqmovun_s16 (vaddq_s16 (v1, ftz));
vst1_u8 (dptr0 + x, v0_u8);
vst1_u8 (dptr1 + x, v1_u8);
}
#elif CV_SSE2
if( useSIMD )
{
__m128i z = _mm_setzero_si128(), ftz = _mm_set1_epi16((short)ftzero),
@ -260,10 +295,19 @@ prefilterXSobel( const Mat& src, Mat& dst, int ftzero )
}
}
#if CV_NEON
uint8x16_t val0_16 = vdupq_n_u8 (val0);
#endif
for( ; y < size.height; y++ )
{
uchar* dptr = dst.ptr<uchar>(y);
for( x = 0; x < size.width; x++ )
x = 0;
#if CV_NEON
for(; x <= size.width-16; x+=16 )
vst1q_u8 (dptr + x, val0_16);
#endif
for(; x < size.width; x++ )
dptr[x] = val0;
}
}
@ -525,6 +569,7 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
Mat& disp, Mat& cost, const StereoBMParams& state,
uchar* buf, int _dy0, int _dy1 )
{
const int ALIGN = 16;
int x, y, d;
int wsz = state.SADWindowSize, wsz2 = wsz/2;
@ -540,6 +585,15 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
int uniquenessRatio = state.uniquenessRatio;
short FILTERED = (short)((mindisp - 1) << DISPARITY_SHIFT);
#if CV_NEON
CV_Assert (ndisp % 8 == 0);
int32_t d0_4_temp [4];
for (int i = 0; i < 4; i ++)
d0_4_temp[i] = i;
int32x4_t d0_4 = vld1q_s32 (d0_4_temp);
int32x4_t dd_4 = vdupq_n_s32 (4);
#endif
int *sad, *hsad0, *hsad, *hsad_sub, *htext;
uchar *cbuf0, *cbuf;
const uchar* lptr0 = left.ptr() + lofs;
@ -574,12 +628,29 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
for( y = -dy0; y < height + dy1; y++, hsad += ndisp, cbuf += ndisp, lptr += sstep, rptr += sstep )
{
int lval = lptr[0];
#if CV_NEON
int16x8_t lv = vdupq_n_s16 ((int16_t)lval);
for( d = 0; d < ndisp; d += 8 )
{
int16x8_t rv = vreinterpretq_s16_u16 (vmovl_u8 (vld1_u8 (rptr + d)));
int32x4_t hsad_l = vld1q_s32 (hsad + d);
int32x4_t hsad_h = vld1q_s32 (hsad + d + 4);
int16x8_t diff = vabdq_s16 (lv, rv);
vst1_u8 (cbuf + d, vmovn_u16(vreinterpretq_u16_s16(diff)));
hsad_l = vaddq_s32 (hsad_l, vmovl_s16(vget_low_s16 (diff)));
hsad_h = vaddq_s32 (hsad_h, vmovl_s16(vget_high_s16 (diff)));
vst1q_s32 ((hsad + d), hsad_l);
vst1q_s32 ((hsad + d + 4), hsad_h);
}
#else
for( d = 0; d < ndisp; d++ )
{
int diff = std::abs(lval - rptr[d]);
cbuf[d] = (uchar)diff;
hsad[d] = (int)(hsad[d] + diff);
}
#endif
htext[y] += tab[lval];
}
}
@ -609,12 +680,31 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
hsad += ndisp, lptr += sstep, lptr_sub += sstep, rptr += sstep )
{
int lval = lptr[0];
#if CV_NEON
int16x8_t lv = vdupq_n_s16 ((int16_t)lval);
for( d = 0; d < ndisp; d += 8 )
{
int16x8_t rv = vreinterpretq_s16_u16 (vmovl_u8 (vld1_u8 (rptr + d)));
int32x4_t hsad_l = vld1q_s32 (hsad + d);
int32x4_t hsad_h = vld1q_s32 (hsad + d + 4);
int16x8_t cbs = vreinterpretq_s16_u16 (vmovl_u8 (vld1_u8 (cbuf_sub + d)));
int16x8_t diff = vabdq_s16 (lv, rv);
int32x4_t diff_h = vsubl_s16 (vget_high_s16 (diff), vget_high_s16 (cbs));
int32x4_t diff_l = vsubl_s16 (vget_low_s16 (diff), vget_low_s16 (cbs));
vst1_u8 (cbuf + d, vmovn_u16(vreinterpretq_u16_s16(diff)));
hsad_h = vaddq_s32 (hsad_h, diff_h);
hsad_l = vaddq_s32 (hsad_l, diff_l);
vst1q_s32 ((hsad + d), hsad_l);
vst1q_s32 ((hsad + d + 4), hsad_h);
}
#else
for( d = 0; d < ndisp; d++ )
{
int diff = std::abs(lval - rptr[d]);
cbuf[d] = (uchar)diff;
hsad[d] = hsad[d] + diff - cbuf_sub[d];
}
#endif
htext[y] += tab[lval] - tab[lptr_sub[0]];
}
@ -630,8 +720,24 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
hsad = hsad0 + (1 - dy0)*ndisp;
for( y = 1 - dy0; y < wsz2; y++, hsad += ndisp )
{
#if CV_NEON
for( d = 0; d <= ndisp-8; d += 8 )
{
int32x4_t s0 = vld1q_s32 (sad + d);
int32x4_t s1 = vld1q_s32 (sad + d + 4);
int32x4_t t0 = vld1q_s32 (hsad + d);
int32x4_t t1 = vld1q_s32 (hsad + d + 4);
s0 = vaddq_s32 (s0, t0);
s1 = vaddq_s32 (s1, t1);
vst1q_s32 (sad + d, s0);
vst1q_s32 (sad + d + 4, s1);
}
#else
for( d = 0; d < ndisp; d++ )
sad[d] = (int)(sad[d] + hsad[d]);
#endif
}
int tsum = 0;
for( y = -wsz2-1; y < wsz2; y++ )
tsum += htext[y];
@ -642,7 +748,61 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
int minsad = INT_MAX, mind = -1;
hsad = hsad0 + MIN(y + wsz2, height+dy1-1)*ndisp;
hsad_sub = hsad0 + MAX(y - wsz2 - 1, -dy0)*ndisp;
#if CV_NEON
int32x4_t minsad4 = vdupq_n_s32 (INT_MAX);
int32x4_t mind4 = vdupq_n_s32(0), d4 = d0_4;
for( d = 0; d <= ndisp-8; d += 8 )
{
int32x4_t u0 = vld1q_s32 (hsad_sub + d);
int32x4_t u1 = vld1q_s32 (hsad + d);
int32x4_t v0 = vld1q_s32 (hsad_sub + d + 4);
int32x4_t v1 = vld1q_s32 (hsad + d + 4);
int32x4_t usad4 = vld1q_s32(sad + d);
int32x4_t vsad4 = vld1q_s32(sad + d + 4);
u1 = vsubq_s32 (u1, u0);
v1 = vsubq_s32 (v1, v0);
usad4 = vaddq_s32 (usad4, u1);
vsad4 = vaddq_s32 (vsad4, v1);
uint32x4_t mask = vcgtq_s32 (minsad4, usad4);
minsad4 = vminq_s32 (minsad4, usad4);
mind4 = vbslq_s32(mask, d4, mind4);
vst1q_s32 (sad + d, usad4);
vst1q_s32 (sad + d + 4, vsad4);
d4 = vaddq_s32 (d4, dd_4);
mask = vcgtq_s32 (minsad4, vsad4);
minsad4 = vminq_s32 (minsad4, vsad4);
mind4 = vbslq_s32(mask, d4, mind4);
d4 = vaddq_s32 (d4, dd_4);
}
int32x2_t mind4_h = vget_high_s32 (mind4);
int32x2_t mind4_l = vget_low_s32 (mind4);
int32x2_t minsad4_h = vget_high_s32 (minsad4);
int32x2_t minsad4_l = vget_low_s32 (minsad4);
uint32x2_t mask = vorr_u32 (vclt_s32 (minsad4_h, minsad4_l), vand_u32 (vceq_s32 (minsad4_h, minsad4_l), vclt_s32 (mind4_h, mind4_l)));
mind4_h = vbsl_s32 (mask, mind4_h, mind4_l);
minsad4_h = vbsl_s32 (mask, minsad4_h, minsad4_l);
mind4_l = vext_s32 (mind4_h,mind4_h,1);
minsad4_l = vext_s32 (minsad4_h,minsad4_h,1);
mask = vorr_u32 (vclt_s32 (minsad4_h, minsad4_l), vand_u32 (vceq_s32 (minsad4_h, minsad4_l), vclt_s32 (mind4_h, mind4_l)));
mind4_h = vbsl_s32 (mask, mind4_h, mind4_l);
minsad4_h = vbsl_s32 (mask, minsad4_h, minsad4_l);
mind = (int) vget_lane_s32 (mind4_h, 0);
minsad = sad[mind];
#else
for( d = 0; d < ndisp; d++ )
{
int currsad = sad[d] + hsad[d] - hsad_sub[d];
@ -653,6 +813,7 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
mind = d;
}
}
#endif
tsum += htext[y + wsz2] - htext[y - wsz2 - 1];
if( tsum < textureThreshold )

@ -236,7 +236,7 @@ Multi-channel (n-channel) types can be specified using the following options:
the number of channels is more than 4 or unknown at the compilation time.
@note `CV_32FC1 == CV_32F, CV_32FC2 == CV_32FC(2) == CV_MAKETYPE(CV_32F, 2)`, and
`CV_MAKETYPE(depth, n) == ((x&7)<<3) + (n-1)`. This means that the constant type is formed from the
`CV_MAKETYPE(depth, n) == ((depth&7) + ((n-1)<<3))`. This means that the constant type is formed from the
depth, taking the lowest 3 bits, and the number of channels minus 1, taking the next
`log2(CV_CN_MAX)` bits.
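A small self-contained check of the corrected formula (a sketch; the depth constants come from opencv2/core/cvdef.h, where CV_8U is 0 and CV_32F is 5):

```cpp
#include <opencv2/core.hpp>
#include <cassert>

int main()
{
    // depth lives in the lowest 3 bits, (channels - 1) in the next log2(CV_CN_MAX) bits
    assert( CV_MAKETYPE(CV_32F, 2) == ((CV_32F & 7) + ((2 - 1) << 3)) );  // CV_32FC2 == 13
    assert( CV_MAKETYPE(CV_8U, 3)  == ((CV_8U  & 7) + ((3 - 1) << 3)) );  // CV_8UC3  == 16
    assert( CV_32FC1 == CV_32F && CV_32FC2 == CV_MAKETYPE(CV_32F, 2) );   // the @note above
    return 0;
}
```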

@ -134,7 +134,7 @@ public:
//! dot product computed in double-precision arithmetics
double ddot(const Matx<_Tp, m, n>& v) const;
//! convertion to another data type
//! conversion to another data type
template<typename T2> operator Matx<T2, m, n>() const;
//! change the matrix shape
@ -337,7 +337,7 @@ public:
For other dimensionalities the exception is raised
*/
Vec cross(const Vec& v) const;
//! convertion to another data type
//! conversion to another data type
template<typename T2> operator Vec<T2, cn>() const;
/*! element access */

@ -203,7 +203,11 @@ public:
static void makeExpr(MatExpr& res, int method, int ndims, const int* sizes, int type, double alpha=1);
};
static MatOp_Initializer g_MatOp_Initializer;
static MatOp_Initializer* getGlobalMatOpInitializer()
{
static MatOp_Initializer initializer;
return &initializer;
}
static inline bool isIdentity(const MatExpr& e) { return e.op == &g_MatOp_Identity; }
static inline bool isAddEx(const MatExpr& e) { return e.op == &g_MatOp_AddEx; }
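The getGlobalMatOpInitializer() added above is the usual construct-on-first-use idiom; a minimal generic sketch of the pattern (Foo and getGlobalFoo are illustrative names, not from the patch):

```cpp
struct Foo { int value = 42; };

// A function-local static is constructed the first time the function is called,
// so code running during static initialization of another translation unit still
// sees a fully constructed object, unlike a namespace-scope `static Foo g_foo;`.
static Foo* getGlobalFoo()
{
    static Foo instance;
    return &instance;
}
```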
@ -216,7 +220,7 @@ static inline bool isInv(const MatExpr& e) { return e.op == &g_MatOp_Invert; }
static inline bool isSolve(const MatExpr& e) { return e.op == &g_MatOp_Solve; }
static inline bool isGEMM(const MatExpr& e) { return e.op == &g_MatOp_GEMM; }
static inline bool isMatProd(const MatExpr& e) { return e.op == &g_MatOp_GEMM && (!e.c.data || e.beta == 0); }
static inline bool isInitializer(const MatExpr& e) { return e.op == &g_MatOp_Initializer; }
static inline bool isInitializer(const MatExpr& e) { return e.op == getGlobalMatOpInitializer(); }
/////////////////////////////////////////////////////////////////////////////////////////////////////
@ -1043,14 +1047,14 @@ MatExpr min(const Mat& a, const Mat& b)
MatExpr min(const Mat& a, double s)
{
MatExpr e;
MatOp_Bin::makeExpr(e, 'm', a, s);
MatOp_Bin::makeExpr(e, 'n', a, s);
return e;
}
MatExpr min(double s, const Mat& a)
{
MatExpr e;
MatOp_Bin::makeExpr(e, 'm', a, s);
MatOp_Bin::makeExpr(e, 'n', a, s);
return e;
}
@ -1064,14 +1068,14 @@ MatExpr max(const Mat& a, const Mat& b)
MatExpr max(const Mat& a, double s)
{
MatExpr e;
MatOp_Bin::makeExpr(e, 'M', a, s);
MatOp_Bin::makeExpr(e, 'N', a, s);
return e;
}
MatExpr max(double s, const Mat& a)
{
MatExpr e;
MatOp_Bin::makeExpr(e, 'M', a, s);
MatOp_Bin::makeExpr(e, 'N', a, s);
return e;
}
@ -1337,13 +1341,13 @@ void MatOp_Bin::assign(const MatExpr& e, Mat& m, int _type) const
bitwise_xor(e.a, e.s, dst);
else if( e.flags == '~' && !e.b.data )
bitwise_not(e.a, dst);
else if( e.flags == 'm' && e.b.data )
else if( e.flags == 'm' )
cv::min(e.a, e.b, dst);
else if( e.flags == 'm' && !e.b.data )
else if( e.flags == 'n' )
cv::min(e.a, e.s[0], dst);
else if( e.flags == 'M' && e.b.data )
else if( e.flags == 'M' )
cv::max(e.a, e.b, dst);
else if( e.flags == 'M' && !e.b.data )
else if( e.flags == 'N' )
cv::max(e.a, e.s[0], dst);
else if( e.flags == 'a' && e.b.data )
cv::absdiff(e.a, e.b, dst);
@ -1580,12 +1584,12 @@ void MatOp_Initializer::multiply(const MatExpr& e, double s, MatExpr& res) const
inline void MatOp_Initializer::makeExpr(MatExpr& res, int method, Size sz, int type, double alpha)
{
res = MatExpr(&g_MatOp_Initializer, method, Mat(sz, type, (void*)0), Mat(), Mat(), alpha, 0);
res = MatExpr(getGlobalMatOpInitializer(), method, Mat(sz, type, (void*)0), Mat(), Mat(), alpha, 0);
}
inline void MatOp_Initializer::makeExpr(MatExpr& res, int method, int ndims, const int* sizes, int type, double alpha)
{
res = MatExpr(&g_MatOp_Initializer, method, Mat(ndims, sizes, type, (void*)0), Mat(), Mat(), alpha, 0);
res = MatExpr(getGlobalMatOpInitializer(), method, Mat(ndims, sizes, type, (void*)0), Mat(), Mat(), alpha, 0);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
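For context, the 'n'/'N' flags introduced above are the Mat-with-scalar paths of the expression API; a minimal sketch of the calls that reach them (the values in the comments are what the assignments produce):

```cpp
#include <opencv2/core.hpp>

int main()
{
    cv::Mat a = (cv::Mat_<float>(1, 4) << 1, 5, 9, 13);
    cv::Mat capped  = cv::min(a, 8.0);   // Mat/scalar overload -> flag 'n' -> [1, 5, 8, 8]
    cv::Mat floored = cv::max(4.0, a);   // scalar/Mat overload -> flag 'N' -> [4, 5, 9, 13]
    return 0;
}
```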

@ -8628,7 +8628,7 @@ auto when_all(_Iterator _Begin, _Iterator _End, Concurrency::cancellation_token
#endif
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.
@ -8660,7 +8660,7 @@ task<std::vector<_ReturnType>> operator&&(const task<_ReturnType> & _Lhs, const
}
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.
@ -8691,7 +8691,7 @@ task<std::vector<_ReturnType>> operator&&(const task<std::vector<_ReturnType>> &
}
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.
@ -8722,7 +8722,7 @@ task<std::vector<_ReturnType>> operator&&(const task<_ReturnType> & _Lhs, const
}
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.
@ -8754,7 +8754,7 @@ task<std::vector<_ReturnType>> operator&&(const task<std::vector<_ReturnType>> &
}
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.

@ -255,7 +255,7 @@ CV_IMPL void cvResizeWindow( const char* name, int width, int height)
//cout << "cvResizeWindow" << endl;
NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
CVWindow *window = cvGetWindow(name);
if(window) {
if(window && ![window autosize]) {
NSRect frame = [window frame];
frame.size.width = width;
frame.size.height = height;

@ -462,7 +462,7 @@ enum
};
/*
Internal structure that is used for sequental retrieving contours from the image.
Internal structure that is used for sequential retrieving contours from the image.
It supports both hierarchical and plane variants of Suzuki algorithm.
*/
typedef struct _CvContourScanner* CvContourScanner;

@ -125,7 +125,7 @@ _CvContourInfo;
/*
Structure that is used for sequental retrieving contours from the image.
Structure that is used for sequential retrieving contours from the image.
It supports both hierarchical and plane variants of Suzuki algorithm.
*/
typedef struct _CvContourScanner
@ -316,7 +316,7 @@ cvStartFindContours( void* _img, CvMemStorage* storage,
tree. The retrieved contour itself is removed from the storage.
Here two cases are possible:
2a. If one deals with plane variant of algorithm
(hierarchical strucutre is not reconstructed),
(hierarchical structure is not reconstructed),
the contour is removed completely.
2b. In hierarchical case, the header of the contour is not removed.
It's marked as "link to contour" and h_next pointer of it is set to
@ -328,8 +328,8 @@ cvStartFindContours( void* _img, CvMemStorage* storage,
leaves header if hierarchical (but doesn't mark header as "link").
------------------------------------------------------------------------
The 1st variant can be used to retrieve and store all the contours from the image
(with optional convertion from chains to contours using some approximation from
restriced set of methods). Some characteristics of contour can be computed in the
(with optional conversion from chains to contours using some approximation from
restricted set of methods). Some characteristics of contour can be computed in the
same pass.
The usage scheme can look like:
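The hunk cuts off just before the example that comment introduces; a hedged sketch of that usage scheme with the legacy C API (assumes `img` is an 8-bit single-channel binary image; error handling omitted):

```cpp
#include <opencv2/imgproc/imgproc_c.h>
#include <math.h>

void scan_contours( IplImage* img /* 8UC1; the scanner modifies it */ )
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvContourScanner scanner = cvStartFindContours( img, storage, sizeof(CvContour),
                                                    CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE,
                                                    cvPoint(0,0) );
    CvSeq* contour;
    while( (contour = cvFindNextContour( scanner )) != 0 )
    {
        // compute some characteristic of the contour in the same pass, e.g. its area
        double area = fabs( cvContourArea( contour, CV_WHOLE_SEQ, 0 ) );
        (void)area;
    }
    cvEndFindContours( &scanner );   // returns the first contour of the resulting list
    cvReleaseMemStorage( &storage );
}
```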

@ -66,7 +66,7 @@ public:
BaseRowFilter();
//! the destructor
virtual ~BaseRowFilter();
//! the filtering operator. Must be overrided in the derived classes. The horizontal border interpolation is done outside of the class.
//! the filtering operator. Must be overridden in the derived classes. The horizontal border interpolation is done outside of the class.
virtual void operator()(const uchar* src, uchar* dst, int width, int cn) = 0;
int ksize;
@ -94,7 +94,7 @@ public:
BaseColumnFilter();
//! the destructor
virtual ~BaseColumnFilter();
//! the filtering operator. Must be overrided in the derived classes. The vertical border interpolation is done outside of the class.
//! the filtering operator. Must be overridden in the derived classes. The vertical border interpolation is done outside of the class.
virtual void operator()(const uchar** src, uchar* dst, int dststep, int dstcount, int width) = 0;
//! resets the internal buffers, if any
virtual void reset();
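To make the overriding contract concrete, a hypothetical derived row filter (filterengine.hpp is an internal header, and the real engine passes an intermediate buffer whose element type it configures; dst is treated as 8-bit here purely for illustration):

```cpp
// Hypothetical 3-tap horizontal box filter. The caller supplies a row already
// expanded by (ksize - 1)*cn border pixels, so horizontal border interpolation
// is not this class's concern, as the comment above states.
struct BoxRowFilter3 : public cv::BaseRowFilter
{
    BoxRowFilter3() { ksize = 3; anchor = 1; }
    virtual void operator()(const uchar* src, uchar* dst, int width, int cn)
    {
        for( int i = 0; i < width*cn; i++ )
            dst[i] = (uchar)((src[i] + src[i + cn] + src[i + 2*cn]) / 3);
    }
};
```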

@ -148,7 +148,7 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
Log.d(TAG, "Set preview size to " + Integer.valueOf((int)frameSize.width) + "x" + Integer.valueOf((int)frameSize.height));
params.setPreviewSize((int)frameSize.width, (int)frameSize.height);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH)
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH && !android.os.Build.MODEL.equals("GT-I9100"))
params.setRecordingHint(true);
List<String> FocusModes = params.getSupportedFocusModes();

@ -180,7 +180,7 @@ denoising time. Recommended value 21 pixels
removes noise but also removes image details, smaller h value preserves details but also preserves
some noise
@param hColor The same as h but for color components. For most images value equals 10
will be enought to remove colored noise and do not distort colors
will be enough to remove colored noise and do not distort colors
The function converts image to CIELAB colorspace and then separately denoise L and AB components
with given h parameters using fastNlMeansDenoising function.
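The text above documents the h and hColor parameters of fastNlMeansDenoisingColored; a minimal usage sketch with the recommended value of 10 for both ("noisy.png" and "denoised.png" are placeholder file names):

```cpp
#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src = cv::imread("noisy.png");   // placeholder input image
    cv::Mat dst;
    // h = 10, hColor = 10, template window 7, search window 21 (the documented defaults)
    cv::fastNlMeansDenoisingColored(src, dst, 10, 10, 7, 21);
    cv::imwrite("denoised.png", dst);
    return 0;
}
```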

@ -105,7 +105,7 @@ CV_EXPORTS void fastNlMeansDenoising(InputArray src, OutputArray dst,
@param h_luminance Parameter regulating filter strength. Big h value perfectly removes noise but
also removes image details, smaller h value preserves details but also preserves some noise
@param photo_render float The same as h but for color components. For most images value equals 10 will be
enought to remove colored noise and do not distort colors
enough to remove colored noise and do not distort colors
@param search_window Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affect performance linearly: greater search_window - greater
denoising time. Recommended value 21 pixels

@ -607,6 +607,11 @@ void waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind)
#if ENABLE_LOG
int64 t = getTickCount();
#endif
if (rmats.size() <= 1)
{
LOGLN("Wave correcting, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
return;
}
Mat moment = Mat::zeros(3, 3, CV_32F);
for (size_t i = 0; i < rmats.size(); ++i)

@ -382,7 +382,7 @@ public:
FAIL_HANG=-13,
// unexpected response on passing bad arguments to the tested function
// (the function crashed, proceed succesfully (while it should not), or returned
// (the function crashed, proceed successfully (while it should not), or returned
// error code that is different from what is expected)
FAIL_BAD_ARG_CHECK=-14,
@ -392,7 +392,7 @@ public:
// the test has been skipped because it is not in the selected subset of the tests to run,
// because it has been run already within the same run with the same parameters, or because
// of some other reason and this is not considered as an error.
// Normally TS::run() (or overrided method in the derived class) takes care of what
// Normally TS::run() (or overridden method in the derived class) takes care of what
// needs to be run, so this code should not occur.
SKIPPED=1
};

@ -525,7 +525,7 @@ void CvCapture_GStreamer::newPad(GstElement * /*elem*/,
* \brief CvCapture_GStreamer::open Open the given file with gstreamer
* \param type CvCapture type. One of CV_CAP_GSTREAMER_*
* \param filename Filename to open in case of CV_CAP_GSTREAMER_FILE
* \return boolean. Specifies if opening was succesful.
* \return boolean. Specifies if opening was successful.
*
* In case of CV_CAP_GSTREAMER_V4L(2), a pipeline is constructed as follows:
* v4l2src ! autoconvert ! appsink

@ -193,6 +193,13 @@
// Release any retained subviews of the main view.
// e.g. self.myOutlet = nil;
for (AVCaptureInput *input in self.captureSession.inputs) {
[self.captureSession removeInput:input];
}
for (AVCaptureOutput *output in self.captureSession.outputs) {
[self.captureSession removeOutput:output];
}
[self.captureSession stopRunning];
self.captureSession = nil;

@ -102,7 +102,7 @@ I modified the following:
autosetup_capture_mode_v4l2 -> autodetect capture modes for v4l2
- Modifications are according with Video4Linux old codes
- Video4Linux handling is automatically if it does not recognize a Video4Linux2 device
- Tested succesful with Logitech Quickcam Express (V4L), Creative Vista (V4L) and Genius VideoCam Notebook (V4L2)
- Tested successfully with Logitech Quickcam Express (V4L), Creative Vista (V4L) and Genius VideoCam Notebook (V4L2)
- Correct source lines with compiler warning messages
- Information message from v4l/v4l2 detection
@ -113,7 +113,7 @@ I modified the following:
- SN9C10x chip based webcams support
- New methods are internal:
bayer2rgb24, sonix_decompress -> decoder routines for SN9C10x decoding from Takafumi Mizuno <taka-qce@ls-a.jp> with his pleasure :)
- Tested succesful with Genius VideoCam Notebook (V4L2)
- Tested successfully with Genius VideoCam Notebook (V4L2)
Sixth Patch: Sept 10, 2005 Csaba Kertesz sign@freemail.hu
For Release: OpenCV-Linux Beta5 OpenCV-0.9.7
@ -123,7 +123,7 @@ I added the following:
- Get and change V4L capture controls (hue, saturation, brightness, contrast)
- New method is internal:
icvSetControl -> set capture controls
- Tested succesful with Creative Vista (V4L)
- Tested successfully with Creative Vista (V4L)
Seventh Patch: Sept 10, 2005 Csaba Kertesz sign@freemail.hu
For Release: OpenCV-Linux Beta5 OpenCV-0.9.7
@ -132,7 +132,7 @@ I added the following:
- Detect, get and change V4L2 capture controls (hue, saturation, brightness, contrast, gain)
- New methods are internal:
v4l2_scan_controls_enumerate_menu, v4l2_scan_controls -> detect capture control intervals
- Tested succesful with Genius VideoCam Notebook (V4L2)
- Tested successfully with Genius VideoCam Notebook (V4L2)
8th patch: Jan 5, 2006, Olivier.Bornet@idiap.ch
Add support of V4L2_PIX_FMT_YUYV and V4L2_PIX_FMT_MJPEG.

@ -1237,10 +1237,10 @@ void MotionJpegWriter::writeFrameData( const uchar* data, int step, int colorspa
}
strm.jputShort(0*256 + 63); // start and end of spectral selection - for
// sequental DCT start is 0 and end is 63
// sequential DCT start is 0 and end is 63
strm.putByte( 0 ); // successive approximation bit position
// high & low - (0,0) for sequental DCT
// high & low - (0,0) for sequential DCT
unsigned currval = 0, code = 0, tempval = 0;
int bit_idx = 32;

@ -102,7 +102,7 @@ I modified the following:
autosetup_capture_mode_v4l2 -> autodetect capture modes for v4l2
- Modifications are according with Video4Linux old codes
- Video4Linux handling is automatically if it does not recognize a Video4Linux2 device
- Tested succesful with Logitech Quickcam Express (V4L), Creative Vista (V4L) and Genius VideoCam Notebook (V4L2)
- Tested successfully with Logitech Quickcam Express (V4L), Creative Vista (V4L) and Genius VideoCam Notebook (V4L2)
- Correct source lines with compiler warning messages
- Information message from v4l/v4l2 detection
@ -113,7 +113,7 @@ I modified the following:
- SN9C10x chip based webcams support
- New methods are internal:
bayer2rgb24, sonix_decompress -> decoder routines for SN9C10x decoding from Takafumi Mizuno <taka-qce@ls-a.jp> with his pleasure :)
- Tested succesful with Genius VideoCam Notebook (V4L2)
- Tested successfully with Genius VideoCam Notebook (V4L2)
Sixth Patch: Sept 10, 2005 Csaba Kertesz sign@freemail.hu
For Release: OpenCV-Linux Beta5 OpenCV-0.9.7
@ -123,7 +123,7 @@ I added the following:
- Get and change V4L capture controls (hue, saturation, brightness, contrast)
- New method is internal:
icvSetControl -> set capture controls
- Tested succesful with Creative Vista (V4L)
- Tested successfully with Creative Vista (V4L)
Seventh Patch: Sept 10, 2005 Csaba Kertesz sign@freemail.hu
For Release: OpenCV-Linux Beta5 OpenCV-0.9.7
@ -132,7 +132,7 @@ I added the following:
- Detect, get and change V4L2 capture controls (hue, saturation, brightness, contrast, gain)
- New methods are internal:
v4l2_scan_controls_enumerate_menu, v4l2_scan_controls -> detect capture control intervals
- Tested succesful with Genius VideoCam Notebook (V4L2)
- Tested successfully with Genius VideoCam Notebook (V4L2)
8th patch: Jan 5, 2006, Olivier.Bornet@idiap.ch
Add support of V4L2_PIX_FMT_YUYV and V4L2_PIX_FMT_MJPEG.

@ -8628,7 +8628,7 @@ auto when_all(_Iterator _Begin, _Iterator _End, Concurrency::cancellation_token
#endif
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.
@ -8660,7 +8660,7 @@ task<std::vector<_ReturnType>> operator&&(const task<_ReturnType> & _Lhs, const
}
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.
@ -8691,7 +8691,7 @@ task<std::vector<_ReturnType>> operator&&(const task<std::vector<_ReturnType>> &
}
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.
@ -8722,7 +8722,7 @@ task<std::vector<_ReturnType>> operator&&(const task<_ReturnType> & _Lhs, const
}
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.
@ -8754,7 +8754,7 @@ task<std::vector<_ReturnType>> operator&&(const task<std::vector<_ReturnType>> &
}
/// <summary>
/// Creates a task that will complete succesfully when both of the tasks supplied as arguments complete successfully.
/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully.
/// </summary>
/// <typeparam name="_ReturnType">
/// The type of the returned task.
