Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/16506/head
Alexander Alekhin 5 years ago
commit 225566da7b
14 changed files (number of changed lines shown after each file):

  1. cmake/OpenCVUtils.cmake (4)
  2. doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown (4)
  3. modules/calib3d/include/opencv2/calib3d.hpp (6)
  4. modules/calib3d/src/ap3p.cpp (2)
  5. modules/dnn/src/layers/resize_layer.cpp (92)
  6. modules/dnn/test/test_layers.cpp (31)
  7. modules/highgui/src/window_QT.cpp (9)
  8. modules/imgproc/include/opencv2/imgproc.hpp (35)
  9. modules/imgproc/src/drawing.cpp (195)
 10. modules/imgproc/src/resize.cpp (86)
 11. modules/imgproc/test/test_drawing.cpp (10)
 12. modules/objdetect/src/qrcode.cpp (3)
 13. modules/objdetect/test/test_qrcode.cpp (20)
 14. modules/videoio/src/cap_avfoundation.mm (673)

cmake/OpenCVUtils.cmake
@@ -402,8 +402,8 @@ endmacro()
 set(OCV_COMPILER_FAIL_REGEX
     "argument .* is not valid"   # GCC 9+ (including support of unicode quotes)
-    "command line option .* is valid for .* but not for C\\+\\+" # GNU
-    "command line option .* is valid for .* but not for C" # GNU
+    "command[- ]line option .* is valid for .* but not for C\\+\\+" # GNU
+    "command[- ]line option .* is valid for .* but not for C" # GNU
     "unrecognized .*option" # GNU
     "unknown .*option" # Clang
     "ignoring unknown option" # MSVC

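Context for the regex change: newer GCC releases hyphenate the phrase in their diagnostics ("command-line option"), while older ones print "command line option"; the character class [- ] accepts either spelling. A minimal sketch of the matching behavior (plain C++ with std::regex, not part of the patch itself):

    #include <cassert>
    #include <regex>
    #include <string>

    int main()
    {
        // "[- ]" matches either a hyphen or a space, so one pattern covers
        // both old and new GCC diagnostic spellings.
        std::regex fail("command[- ]line option .* is valid for .* but not for C\\+\\+");

        std::string old_gcc = "command line option '-Wfoo' is valid for D but not for C++";
        std::string new_gcc = "command-line option '-Wfoo' is valid for D but not for C++";

        assert(std::regex_search(old_gcc, fail));
        assert(std::regex_search(new_gcc, fail));
        return 0;
    }
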
doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown
@@ -188,7 +188,7 @@ blur = cv.GaussianBlur(img,(5,5),0)
 # find normalized_histogram, and its cumulative distribution function
 hist = cv.calcHist([blur],[0],None,[256],[0,256])
-hist_norm = hist.ravel()/hist.max()
+hist_norm = hist.ravel()/hist.sum()
 Q = hist_norm.cumsum()
 bins = np.arange(256)
@@ -199,6 +199,8 @@ thresh = -1
 for i in xrange(1,256):
     p1,p2 = np.hsplit(hist_norm,[i]) # probabilities
     q1,q2 = Q[i],Q[255]-Q[i] # cum sum of classes
+    if q1 < 1.e-6 or q2 < 1.e-6:
+        continue
     b1,b2 = np.hsplit(bins,[i]) # weights
     # finding means and variances

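Two fixes to the Otsu walkthrough: dividing by hist.sum() (rather than hist.max()) makes hist_norm a true probability distribution, so the class weights q1 and q2 always sum to 1; the epsilon guard then skips candidate thresholds where one class is empty and the subsequent mean/variance computation would divide by zero. A small self-contained sketch of the corrected weighting (C++ for illustration; the tutorial itself is Python):

    #include <cstdio>
    #include <numeric>
    #include <vector>

    int main()
    {
        // Toy 8-bin histogram of pixel counts.
        std::vector<double> hist = {4, 8, 2, 0, 0, 1, 9, 6};
        double total = std::accumulate(hist.begin(), hist.end(), 0.0);

        // Normalize by the SUM so the bins form a probability distribution.
        std::vector<double> p(hist.size());
        for (size_t i = 0; i < hist.size(); i++)
            p[i] = hist[i] / total;

        // Cumulative class weights for every candidate threshold i.
        double q1 = 0.0;
        for (size_t i = 1; i < p.size(); i++)
        {
            q1 += p[i - 1];
            double q2 = 1.0 - q1;          // holds only with sum-normalization
            if (q1 < 1e-6 || q2 < 1e-6)    // skip empty classes, as in the fix
                continue;
            std::printf("t=%zu  q1=%.3f  q2=%.3f  (q1+q2=%.3f)\n", i, q1, q2, q1 + q2);
        }
        return 0;
    }
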
modules/calib3d/include/opencv2/calib3d.hpp
@@ -2388,7 +2388,7 @@ CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
 //! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
 CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
                                         int minDisparity, int numberOfDisparities,
-                                        int SADWindowSize );
+                                        int blockSize );
 //! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
 CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
@@ -2813,8 +2813,8 @@ public:
 the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1
 between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor
 pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good
-P1 and P2 values are shown (like 8\*number_of_image_channels\*SADWindowSize\*SADWindowSize and
-32\*number_of_image_channels\*SADWindowSize\*SADWindowSize , respectively).
+P1 and P2 values are shown (like 8\*number_of_image_channels\*blockSize\*blockSize and
+32\*number_of_image_channels\*blockSize\*blockSize , respectively).
 @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right
 disparity check. Set it to a non-positive value to disable the check.
 @param preFilterCap Truncation value for the prefiltered image pixels. The algorithm first

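The rename only touches the parameter name (SADWindowSize becomes blockSize, matching the terminology used by StereoBM/StereoSGBM); call sites are unaffected. A usage sketch of the documented P1/P2 rule of thumb together with the renamed parameter (the concrete values and ROIs below are illustrative):

    #include <opencv2/calib3d.hpp>

    int main()
    {
        int minDisparity = 0, numDisparities = 64, blockSize = 5;
        int cn = 1;  // number of image channels

        // Rule of thumb from the docs: P1/P2 scale with blockSize^2.
        cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(
            minDisparity, numDisparities, blockSize,
            8 * cn * blockSize * blockSize,    // P1
            32 * cn * blockSize * blockSize);  // P2

        // Hypothetical rectified-image ROIs, just to show the renamed parameter.
        cv::Rect roi1(0, 0, 640, 480), roi2(8, 0, 632, 480);
        cv::Rect validRoi = cv::getValidDisparityROI(roi1, roi2, minDisparity,
                                                     numDisparities, blockSize);
        (void)sgbm; (void)validRoi;
        return 0;
    }
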
modules/calib3d/src/ap3p.cpp
@@ -46,7 +46,7 @@ void solveQuartic(const double *factors, double *realRoots) {
     complex<double> sqrt_2m = sqrt(static_cast<complex<double> >(-2 * p4 / 3 + t));
     double B_4A = -a3 / (4 * a4);
     double complex1 = 4 * p4 / 3 + t;
-#if defined(__clang__) && defined(__arm__) && (__clang_major__ == 3 || __clang_minor__ == 4) && !defined(__ANDROID__)
+#if defined(__clang__) && defined(__arm__) && (__clang_major__ == 3 || __clang_major__ == 4) && !defined(__ANDROID__)
     // details: https://github.com/opencv/opencv/issues/11135
     // details: https://github.com/opencv/opencv/issues/11056
     complex<double> complex2 = 2 * q4;

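The old preprocessor condition compared __clang_minor__ where __clang_major__ was intended, so the workaround could fire for, say, Clang 9.4 while missing Clang 4.0 entirely. A tiny sketch of the predicate with plain integers:

    #include <cassert>

    // Whether the workaround would be enabled for a given Clang version.
    static bool old_check(int major, int minor) { return major == 3 || minor == 4; }
    static bool new_check(int major, int minor) { (void)minor; return major == 3 || major == 4; }

    int main()
    {
        assert(old_check(4, 0) == false);  // bug: Clang 4.0 missed the workaround
        assert(new_check(4, 0) == true);   // fixed
        assert(old_check(9, 4) == true);   // bug: Clang 9.4 wrongly enabled it
        assert(new_check(9, 4) == false);  // fixed
        return 0;
    }
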
modules/dnn/src/layers/resize_layer.cpp
@@ -25,7 +25,9 @@ namespace cv { namespace dnn {
 class ResizeLayerImpl : public ResizeLayer
 {
 public:
-    ResizeLayerImpl(const LayerParams& params) : zoomFactorWidth(0), zoomFactorHeight(0), scaleWidth(0), scaleHeight(0)
+    ResizeLayerImpl(const LayerParams& params) : zoomFactorWidth(params.get<int>("zoom_factor_x", params.get<int>("zoom_factor", 0))),
+                                                 zoomFactorHeight(params.get<int>("zoom_factor_y", params.get<int>("zoom_factor", 0))),
+                                                 scaleWidth(0), scaleHeight(0)
     {
         setParamsFrom(params);
         outWidth = params.get<float>("width", 0);
@@ -33,13 +35,10 @@ public:
         if (params.has("zoom_factor"))
         {
             CV_Assert(!params.has("zoom_factor_x") && !params.has("zoom_factor_y"));
-            zoomFactorWidth = zoomFactorHeight = params.get<int>("zoom_factor");
         }
         else if (params.has("zoom_factor_x") || params.has("zoom_factor_y"))
         {
             CV_Assert(params.has("zoom_factor_x") && params.has("zoom_factor_y"));
-            zoomFactorWidth = params.get<int>("zoom_factor_x");
-            zoomFactorHeight = params.get<int>("zoom_factor_y");
         }
         interpolation = params.get<String>("interpolation");
         CV_Assert(interpolation == "nearest" || interpolation == "bilinear");
@@ -54,8 +53,8 @@ public:
     {
         CV_Assert_N(inputs.size() == 1, inputs[0].size() == 4);
         outputs.resize(1, inputs[0]);
-        outputs[0][2] = outHeight > 0 ? outHeight : (outputs[0][2] * zoomFactorHeight);
-        outputs[0][3] = outWidth > 0 ? outWidth : (outputs[0][3] * zoomFactorWidth);
+        outputs[0][2] = zoomFactorHeight > 0 ? (outputs[0][2] * zoomFactorHeight) : outHeight;
+        outputs[0][3] = zoomFactorWidth > 0 ? (outputs[0][3] * zoomFactorWidth) : outWidth;
         // We can work in-place (do nothing) if input shape == output shape.
         return (outputs[0][2] == inputs[0][2]) && (outputs[0][3] == inputs[0][3]);
     }
@@ -82,11 +81,8 @@ public:
         inputs_arr.getMatVector(inputs);
         outputs_arr.getMatVector(outputs);
-        if (!outWidth && !outHeight)
-        {
-            outHeight = outputs[0].size[2];
-            outWidth = outputs[0].size[3];
-        }
+        outHeight = outputs[0].size[2];
+        outWidth = outputs[0].size[3];
         if (alignCorners && outHeight > 1)
             scaleHeight = static_cast<float>(inputs[0].size[2] - 1) / (outHeight - 1);
         else
@@ -214,7 +210,7 @@ public:
         ieLayer.setType("Interp");
         ieLayer.getParameters()["pad_beg"] = 0;
         ieLayer.getParameters()["pad_end"] = 0;
-        ieLayer.getParameters()["align_corners"] = false;
+        ieLayer.getParameters()["align_corners"] = alignCorners;
     }
     else
         CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation);
@@ -238,7 +234,7 @@ public:
         attrs.pads_begin.push_back(0);
         attrs.pads_end.push_back(0);
         attrs.axes = ngraph::AxisSet{2, 3};
-        attrs.align_corners = false;
+        attrs.align_corners = alignCorners;
         if (interpolation == "nearest") {
             attrs.mode = "nearest";
@@ -257,7 +253,8 @@ public:
 #endif  // HAVE_DNN_NGRAPH
 protected:
-    int outWidth, outHeight, zoomFactorWidth, zoomFactorHeight;
+    int outWidth, outHeight;
+    const int zoomFactorWidth, zoomFactorHeight;
     String interpolation;
     float scaleWidth, scaleHeight;
     bool alignCorners;
@@ -281,79 +278,18 @@ public:
     {
         CV_Assert_N(inputs.size() == 1, inputs[0].size() == 4);
         outputs.resize(1, inputs[0]);
-        outputs[0][2] = outHeight > 0 ? outHeight : (1 + zoomFactorHeight * (outputs[0][2] - 1));
-        outputs[0][3] = outWidth > 0 ? outWidth : (1 + zoomFactorWidth * (outputs[0][3] - 1));
+        outputs[0][2] = zoomFactorHeight > 0 ? (1 + zoomFactorHeight * (outputs[0][2] - 1)) : outHeight;
+        outputs[0][3] = zoomFactorWidth > 0 ? (1 + zoomFactorWidth * (outputs[0][3] - 1)) : outWidth;
         // We can work in-place (do nothing) if input shape == output shape.
         return (outputs[0][2] == inputs[0][2]) && (outputs[0][3] == inputs[0][3]);
     }
-    virtual bool supportBackend(int backendId) CV_OVERRIDE
-    {
-#ifdef HAVE_INF_ENGINE
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
-            || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-            return true;
-#endif
-        return backendId == DNN_BACKEND_OPENCV ||
-               backendId == DNN_BACKEND_CUDA;
-    }
-    virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
-    {
-        std::vector<Mat> inputs, outputs;
-        inputs_arr.getMatVector(inputs);
-        outputs_arr.getMatVector(outputs);
-        if (!outWidth && !outHeight)
-        {
-            outHeight = outputs[0].size[2];
-            outWidth = outputs[0].size[3];
-        }
-        int inpHeight = inputs[0].size[2];
-        int inpWidth = inputs[0].size[3];
-        scaleHeight = (outHeight > 1) ? (static_cast<float>(inpHeight - 1) / (outHeight - 1)) : 0.f;
-        scaleWidth = (outWidth > 1) ? (static_cast<float>(inpWidth - 1) / (outWidth - 1)) : 0.f;
-    }
-#ifdef HAVE_INF_ENGINE
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::Layer ieLayer(name);
-        ieLayer.setName(name);
-        ieLayer.setType("Interp");
-        ieLayer.getParameters()["pad_beg"] = 0;
-        ieLayer.getParameters()["pad_end"] = 0;
-        ieLayer.getParameters()["width"] = outWidth;
-        ieLayer.getParameters()["height"] = outHeight;
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
-        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif  // HAVE_INF_ENGINE
-#ifdef HAVE_DNN_NGRAPH
-    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
-                                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
-    {
-        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        ngraph::op::InterpolateAttrs attrs;
-        attrs.pads_begin.push_back(0);
-        attrs.pads_end.push_back(0);
-        attrs.axes = ngraph::AxisSet{2, 3};
-        attrs.mode = "linear";
-        std::vector<int64_t> shape = {outHeight, outWidth};
-        auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());
-        auto interp = std::make_shared<ngraph::op::Interpolate>(ieInpNode, out_shape, attrs);
-        return Ptr<BackendNode>(new InfEngineNgraphNode(interp));
-    }
-#endif  // HAVE_DNN_NGRAPH
 };
 Ptr<Layer> InterpLayer::create(const LayerParams& params)
 {
     LayerParams lp(params);
     lp.set("interpolation", "bilinear");
+    lp.set("align_corners", true);
     return Ptr<Layer>(new InterpLayerImpl(lp));
 }

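For reference, the two layer flavors compute output sizes differently: Resize scales the full extent (out = in * zoom), while Interp treats pixels as samples and keeps the corners aligned (out = 1 + zoom * (in - 1)); moving the zoom-factor parsing into the initializer list is what allows the fields to become const. A quick check of the two formulas:

    #include <cassert>

    int main()
    {
        int in = 4, zoom = 2;
        int resizeOut = in * zoom;            // ResizeLayerImpl: 8
        int interpOut = 1 + zoom * (in - 1);  // InterpLayerImpl: 7 (corners aligned)
        assert(resizeOut == 8);
        assert(interpOut == 7);
        return 0;
    }
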
modules/dnn/test/test_layers.cpp
@@ -1760,4 +1760,35 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Eltwise_unequal, Combine(
     dnnBackendsAndTargets()
 ));
+typedef testing::TestWithParam<tuple<Backend, Target> > Layer_Test_Resize;
+TEST_P(Layer_Test_Resize, change_input)
+{
+    int backendId = get<0>(GetParam());
+    int targetId = get<1>(GetParam());
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Resize";
+    lp.name = "testLayer";
+    lp.set("zoom_factor", 2);
+    lp.set("interpolation", "nearest");
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    for (int i = 0; i < 2; ++i)
+    {
+        Mat inp(4 + i, 5 + i, CV_8UC3), ref;
+        randu(inp, 0, 255);
+        resize(inp, ref, Size(0, 0), 2, 2, INTER_NEAREST);
+        ref = blobFromImage(ref);
+
+        net.setInput(blobFromImage(inp));
+        net.setPreferableBackend(backendId);
+        net.setPreferableTarget(targetId);
+        Mat out = net.forward();
+        normAssert(out, ref);
+    }
+}
+INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Resize, dnnBackendsAndTargets());
+
 }} // namespace

modules/highgui/src/window_QT.cpp
@@ -54,9 +54,12 @@
 #include <unistd.h>
 #endif
+// Get GL_PERSPECTIVE_CORRECTION_HINT definition, not available in GLES 2 or
+// OpenGL 3 core profile or later
 #ifdef HAVE_QT_OPENGL
-    #if defined Q_WS_X11 /* Qt4 */ || defined Q_OS_LINUX /* Qt5 */
-        #include <GL/glx.h>
+    #if defined Q_WS_X11 /* Qt4 */ || \
+        (!defined(QT_OPENGL_ES_2) && defined Q_OS_LINUX) /* Qt5 with desktop OpenGL */
+        #include <GL/gl.h>
     #endif
 #endif
@@ -3225,7 +3228,9 @@ void OpenGlViewPort::updateGl()
 void OpenGlViewPort::initializeGL()
 {
+#ifdef GL_PERSPECTIVE_CORRECTION_HINT
     glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
+#endif
 }
 void OpenGlViewPort::resizeGL(int w, int h)

modules/imgproc/include/opencv2/imgproc.hpp
@@ -1446,7 +1446,7 @@ equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width an
 respectively (see #getGaussianKernel for details); to fully control the result regardless of
 possible future modifications of all this semantics, it is recommended to specify all of ksize,
 sigmaX, and sigmaY.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa  sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
 */
@@ -1507,7 +1507,7 @@ algorithms, and so on). If you need to compute pixel sums over variable-size win
 @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 center.
 @param normalize flag, specifying whether the kernel is normalized by its area or not.
-@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
+@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa  blur, bilateralFilter, GaussianBlur, medianBlur, integral
 */
 CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,
@@ -1530,7 +1530,7 @@ variance and standard deviation around the neighborhood of a pixel.
 @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
 center.
 @param normalize flag, specifying whether the kernel is to be normalized by it's area or not.
-@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
+@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa boxFilter
 */
 CV_EXPORTS_W void sqrBoxFilter( InputArray src, OutputArray dst, int ddepth,
@@ -1553,7 +1553,7 @@ the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 @param ksize blurring kernel size.
 @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 center.
-@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
+@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa  boxFilter, bilateralFilter, GaussianBlur, medianBlur
 */
 CV_EXPORTS_W void blur( InputArray src, OutputArray dst,
@@ -1587,7 +1587,7 @@ separate color planes using split and process them individually.
 the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
 is at the kernel center.
 @param delta optional value added to the filtered pixels before storing them in dst.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa  sepFilter2D, dft, matchTemplate
 */
 CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth,
@@ -1608,7 +1608,7 @@ kernel kernelY. The final result shifted by delta is stored in dst .
 @param anchor Anchor position within the kernel. The default value \f$(-1,-1)\f$ means that the anchor
 is at the kernel center.
 @param delta Value added to the filtered results before storing them.
-@param borderType Pixel extrapolation method, see #BorderTypes
+@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa  filter2D, Sobel, GaussianBlur, boxFilter, blur
 */
 CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth,
@@ -1661,7 +1661,7 @@ The second case corresponds to a kernel of:
 @param scale optional scale factor for the computed derivative values; by default, no scaling is
 applied (see #getDerivKernels for details).
 @param delta optional delta value that is added to the results prior to storing them in dst.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
 */
 CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth,
@@ -1682,7 +1682,8 @@ Sobel( src, dy, CV_16SC1, 0, 1, 3 );
 @param dx output image with first-order derivative in x.
 @param dy output image with first-order derivative in y.
 @param ksize size of Sobel kernel. It must be 3.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes.
+                  Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.
 @sa Sobel
 */
@@ -1710,7 +1711,7 @@ is equivalent to
 @param scale optional scale factor for the computed derivative values; by default, no scaling is
 applied (see #getDerivKernels for details).
 @param delta optional delta value that is added to the results prior to storing them in dst.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa  cartToPolar
 */
 CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth,
@@ -1741,7 +1742,7 @@ details. The size must be positive and odd.
 @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
 applied. See #getDerivKernels for details.
 @param delta Optional delta value that is added to the results prior to storing them in dst .
-@param borderType Pixel extrapolation method, see #BorderTypes
+@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa  Sobel, Scharr
 */
 CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth,
@@ -1810,7 +1811,7 @@ of the formulae in the cornerEigenValsAndVecs description.
 src .
 @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
 @param ksize Aperture parameter for the Sobel operator.
-@param borderType Pixel extrapolation method. See #BorderTypes.
+@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
 */
 CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst,
                                      int blockSize, int ksize = 3,
@@ -1833,7 +1834,7 @@ size as src .
 @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
 @param ksize Aperture parameter for the Sobel operator.
 @param k Harris detector free parameter. See the formula above.
-@param borderType Pixel extrapolation method. See #BorderTypes.
+@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
 */
 CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize,
                                 int ksize, double k,
@@ -1861,7 +1862,7 @@ The output of the function can be used for robust edge or corner detection.
 @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .
 @param blockSize Neighborhood size (see details below).
 @param ksize Aperture parameter for the Sobel operator.
-@param borderType Pixel extrapolation method. See #BorderTypes.
+@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
 @sa  cornerMinEigenVal, cornerHarris, preCornerDetect
 */
@@ -1890,7 +1891,7 @@ The corners can be found as local maximums of the functions, as shown below:
 @param src Source single-channel 8-bit of floating-point image.
 @param dst Output image that has the type CV_32F and the same size as src .
 @param ksize %Aperture size of the Sobel .
-@param borderType Pixel extrapolation method. See #BorderTypes.
+@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
 */
 CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize,
                                    int borderType = BORDER_DEFAULT );
@@ -2154,7 +2155,7 @@ structuring element is used. Kernel can be created using #getStructuringElement.
 @param anchor position of the anchor within the element; default value (-1, -1) means that the
 anchor is at the element center.
 @param iterations number of times erosion is applied.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @param borderValue border value in case of a constant border
 @sa  dilate, morphologyEx, getStructuringElement
 */
@@ -2186,7 +2187,7 @@ structuring element is used. Kernel can be created using #getStructuringElement
 @param anchor position of the anchor within the element; default value (-1, -1) means that the
 anchor is at the element center.
 @param iterations number of times dilation is applied.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @param borderValue border value in case of a constant border
 @sa  erode, morphologyEx, getStructuringElement
 */
@@ -2211,7 +2212,7 @@ CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the
 kernel center.
 @param iterations Number of times erosion and dilation are applied.
-@param borderType Pixel extrapolation method, see #BorderTypes
+@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @param borderValue Border value in case of a constant border. The default value has a special
 meaning.
 @sa  dilate, erode, getStructuringElement

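All of these documentation hunks record an existing restriction: the filtering and corner functions reject BORDER_WRAP. A hedged sketch of what a caller should expect (the exact failure mode and message are version-dependent; in recent builds it surfaces as a cv::Exception):

    #include <cstdio>
    #include <opencv2/imgproc.hpp>

    int main()
    {
        cv::Mat src(64, 64, CV_8UC1, cv::Scalar(128)), dst;
        try
        {
            // Documented as unsupported; expected to be rejected at runtime.
            cv::GaussianBlur(src, dst, cv::Size(5, 5), 0, 0, cv::BORDER_WRAP);
        }
        catch (const cv::Exception& e)
        {
            std::printf("rejected as documented: %s\n", e.what());
        }
        return 0;
    }
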
modules/imgproc/src/drawing.cpp
@@ -308,7 +308,7 @@ LineAA( Mat& img, Point2l pt1, Point2l pt2, const void* color )
     int nch = img.channels();
    uchar* ptr = img.ptr();
     size_t step = img.step;
-    Size2l size(img.size());
+    Size2l size0(img.size()), size = size0;
     if( !((nch == 1 || nch == 3 || nch == 4) && img.depth() == CV_8U) )
     {
@@ -316,15 +316,8 @@ LineAA( Mat& img, Point2l pt1, Point2l pt2, const void* color )
         return;
     }
-    pt1.x -= XY_ONE*2;
-    pt1.y -= XY_ONE*2;
-    pt2.x -= XY_ONE*2;
-    pt2.y -= XY_ONE*2;
-    ptr += img.step*2 + 2*nch;
-
-    size.width = ((size.width - 5) << XY_SHIFT) + 1;
-    size.height = ((size.height - 5) << XY_SHIFT) + 1;
+    size.width <<= XY_SHIFT;
+    size.height <<= XY_SHIFT;
     if( !clipLine( size, pt1, pt2 ))
         return;
@@ -403,171 +396,160 @@ LineAA( Mat& img, Point2l pt1, Point2l pt2, const void* color )
     if( nch == 3 )
     {
-        #define  ICV_PUT_POINT()            \
+        #define  ICV_PUT_POINT(x, y)        \
         {                                   \
+            uchar* tptr = ptr + (x)*3 + (y)*step; \
             _cb = tptr[0];                  \
             _cb += ((cb - _cb)*a + 127)>> 8;\
+            _cb += ((cb - _cb)*a + 127)>> 8;\
             _cg = tptr[1];                  \
             _cg += ((cg - _cg)*a + 127)>> 8;\
+            _cg += ((cg - _cg)*a + 127)>> 8;\
             _cr = tptr[2];                  \
             _cr += ((cr - _cr)*a + 127)>> 8;\
+            _cr += ((cr - _cr)*a + 127)>> 8;\
             tptr[0] = (uchar)_cb;           \
             tptr[1] = (uchar)_cg;           \
             tptr[2] = (uchar)_cr;           \
         }
         if( ax > ay )
         {
-            ptr += (pt1.x >> XY_SHIFT) * 3;
-            while( ecount >= 0 )
+            int x = (int)(pt1.x >> XY_SHIFT);
+            for( ; ecount >= 0; x++, pt1.y += y_step, scount++, ecount-- )
             {
-                uchar *tptr = ptr + ((pt1.y >> XY_SHIFT) - 1) * step;
+                if( (unsigned)x >= (unsigned)size0.width )
+                    continue;
+                int y = (int)((pt1.y >> XY_SHIFT) - 1);
                 int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 +
                                        (((ecount >= 2) + 1) & (ecount | 2))];
                 int a, dist = (pt1.y >> (XY_SHIFT - 5)) & 31;
                 a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += step;
+                if( (unsigned)y < (unsigned)size0.height )
+                    ICV_PUT_POINT(x, y)
                 a = (ep_corr * FilterTable[dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += step;
+                if( (unsigned)(y+1) < (unsigned)size0.height )
+                    ICV_PUT_POINT(x, y+1)
                 a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                pt1.y += y_step;
-                ptr += 3;
-                scount++;
-                ecount--;
+                if( (unsigned)(y+2) < (unsigned)size0.height )
+                    ICV_PUT_POINT(x, y+2)
             }
         }
         else
         {
-            ptr += (pt1.y >> XY_SHIFT) * step;
-            while( ecount >= 0 )
+            int y = (int)(pt1.y >> XY_SHIFT);
+            for( ; ecount >= 0; y++, pt1.x += x_step, scount++, ecount-- )
             {
-                uchar *tptr = ptr + ((pt1.x >> XY_SHIFT) - 1) * 3;
+                if( (unsigned)y >= (unsigned)size0.height )
+                    continue;
+                int x = (int)((pt1.x >> XY_SHIFT) - 1);
                 int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 +
                                        (((ecount >= 2) + 1) & (ecount | 2))];
                 int a, dist = (pt1.x >> (XY_SHIFT - 5)) & 31;
                 a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += 3;
+                if( (unsigned)x < (unsigned)size0.width )
+                    ICV_PUT_POINT(x, y)
                 a = (ep_corr * FilterTable[dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += 3;
+                if( (unsigned)(x+1) < (unsigned)size0.width )
+                    ICV_PUT_POINT(x+1, y)
                 a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                pt1.x += x_step;
-                ptr += step;
-                scount++;
-                ecount--;
+                if( (unsigned)(x+2) < (unsigned)size0.width )
+                    ICV_PUT_POINT(x+2, y)
             }
         }
         #undef ICV_PUT_POINT
     }
     else if(nch == 1)
     {
-        #define  ICV_PUT_POINT()            \
+        #define  ICV_PUT_POINT(x, y)        \
         {                                   \
+            uchar* tptr = ptr + (x) + (y) * step; \
             _cb = tptr[0];                  \
             _cb += ((cb - _cb)*a + 127)>> 8;\
+            _cb += ((cb - _cb)*a + 127)>> 8;\
             tptr[0] = (uchar)_cb;           \
         }
         if( ax > ay )
         {
-            ptr += (pt1.x >> XY_SHIFT);
-            while( ecount >= 0 )
+            int x = (int)(pt1.x >> XY_SHIFT);
+            for( ; ecount >= 0; x++, pt1.y += y_step, scount++, ecount-- )
             {
-                uchar *tptr = ptr + ((pt1.y >> XY_SHIFT) - 1) * step;
+                if( (unsigned)x >= (unsigned)size0.width )
+                    continue;
+                int y = (int)((pt1.y >> XY_SHIFT) - 1);
                 int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 +
                                        (((ecount >= 2) + 1) & (ecount | 2))];
                 int a, dist = (pt1.y >> (XY_SHIFT - 5)) & 31;
                 a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += step;
+                if( (unsigned)y < (unsigned)size0.height )
+                    ICV_PUT_POINT(x, y)
                 a = (ep_corr * FilterTable[dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += step;
+                if( (unsigned)(y+1) < (unsigned)size0.height )
+                    ICV_PUT_POINT(x, y+1)
                 a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                pt1.y += y_step;
-                ptr++;
-                scount++;
-                ecount--;
+                if( (unsigned)(y+2) < (unsigned)size0.height )
+                    ICV_PUT_POINT(x, y+2)
             }
         }
         else
         {
-            ptr += (pt1.y >> XY_SHIFT) * step;
-            while( ecount >= 0 )
+            int y = (int)(pt1.y >> XY_SHIFT);
+            for( ; ecount >= 0; y++, pt1.x += x_step, scount++, ecount-- )
             {
-                uchar *tptr = ptr + ((pt1.x >> XY_SHIFT) - 1);
+                if( (unsigned)y >= (unsigned)size0.height )
+                    continue;
+                int x = (int)((pt1.x >> XY_SHIFT) - 1);
                 int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 +
                                        (((ecount >= 2) + 1) & (ecount | 2))];
                 int a, dist = (pt1.x >> (XY_SHIFT - 5)) & 31;
                 a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr++;
+                if( (unsigned)x < (unsigned)size0.width )
+                    ICV_PUT_POINT(x, y)
                 a = (ep_corr * FilterTable[dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr++;
+                if( (unsigned)(x+1) < (unsigned)size0.width )
+                    ICV_PUT_POINT(x+1, y)
                 a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                pt1.x += x_step;
-                ptr += step;
-                scount++;
-                ecount--;
+                if( (unsigned)(x+2) < (unsigned)size0.width )
+                    ICV_PUT_POINT(x+2, y)
             }
         }
         #undef ICV_PUT_POINT
     }
     else
     {
-        #define  ICV_PUT_POINT()            \
+        #define  ICV_PUT_POINT(x, y)        \
         {                                   \
+            uchar* tptr = ptr + (x)*4 + (y)*step; \
             _cb = tptr[0];                  \
             _cb += ((cb - _cb)*a + 127)>> 8;\
+            _cb += ((cb - _cb)*a + 127)>> 8;\
             _cg = tptr[1];                  \
             _cg += ((cg - _cg)*a + 127)>> 8;\
+            _cg += ((cg - _cg)*a + 127)>> 8;\
             _cr = tptr[2];                  \
             _cr += ((cr - _cr)*a + 127)>> 8;\
+            _cr += ((cr - _cr)*a + 127)>> 8;\
             _ca = tptr[3];                  \
             _ca += ((ca - _ca)*a + 127)>> 8;\
+            _ca += ((ca - _ca)*a + 127)>> 8;\
             tptr[0] = (uchar)_cb;           \
             tptr[1] = (uchar)_cg;           \
             tptr[2] = (uchar)_cr;           \
@@ -575,66 +557,55 @@ LineAA( Mat& img, Point2l pt1, Point2l pt2, const void* color )
         }
         if( ax > ay )
         {
-            ptr += (pt1.x >> XY_SHIFT) * 4;
-            while( ecount >= 0 )
+            int x = (int)(pt1.x >> XY_SHIFT);
+            for( ; ecount >= 0; x++, pt1.y += y_step, scount++, ecount-- )
             {
-                uchar *tptr = ptr + ((pt1.y >> XY_SHIFT) - 1) * step;
+                if( (unsigned)x >= (unsigned)size0.width )
+                    continue;
+                int y = (int)((pt1.y >> XY_SHIFT) - 1);
                 int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 +
                                        (((ecount >= 2) + 1) & (ecount | 2))];
                 int a, dist = (pt1.y >> (XY_SHIFT - 5)) & 31;
                 a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += step;
+                if( (unsigned)y < (unsigned)size0.height )
+                    ICV_PUT_POINT(x, y)
                 a = (ep_corr * FilterTable[dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += step;
+                if( (unsigned)(y+1) < (unsigned)size0.height )
+                    ICV_PUT_POINT(x, y+1)
                 a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                pt1.y += y_step;
-                ptr += 4;
-                scount++;
-                ecount--;
+                if( (unsigned)(y+2) < (unsigned)size0.height )
+                    ICV_PUT_POINT(x, y+2)
             }
         }
         else
         {
-            ptr += (pt1.y >> XY_SHIFT) * step;
-            while( ecount >= 0 )
+            int y = (int)(pt1.y >> XY_SHIFT);
+            for( ; ecount >= 0; y++, pt1.x += x_step, scount++, ecount-- )
            {
-                uchar *tptr = ptr + ((pt1.x >> XY_SHIFT) - 1) * 4;
+                if( (unsigned)y >= (unsigned)size0.height )
+                    continue;
+                int x = (int)((pt1.x >> XY_SHIFT) - 1);
                 int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 +
                                        (((ecount >= 2) + 1) & (ecount | 2))];
                 int a, dist = (pt1.x >> (XY_SHIFT - 5)) & 31;
                 a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += 4;
+                if( (unsigned)x < (unsigned)size0.width )
+                    ICV_PUT_POINT(x, y)
                 a = (ep_corr * FilterTable[dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                tptr += 4;
+                if( (unsigned)(x+1) < (unsigned)size0.width )
+                    ICV_PUT_POINT(x+1, y)
                 a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff;
-                ICV_PUT_POINT();
-                ICV_PUT_POINT();
-                pt1.x += x_step;
-                ptr += step;
-                scount++;
-                ecount--;
+                if( (unsigned)(x+2) < (unsigned)size0.width )
+                    ICV_PUT_POINT(x+2, y)
             }
         }
         #undef ICV_PUT_POINT

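The LineAA rewrite replaces the old trick of shrinking the clip rectangle by two pixels (which dropped anti-aliased pixels on the image border, issue 16308) with an explicit per-pixel bounds test. The (unsigned)x >= (unsigned)size0.width form checks both x < 0 and x >= width in a single comparison, because a negative int wraps around to a huge unsigned value:

    #include <cassert>

    static bool inRange(int x, int width)
    {
        // One comparison covers x < 0 (wraps to a huge unsigned) and x >= width.
        return (unsigned)x < (unsigned)width;
    }

    int main()
    {
        assert(!inRange(-1, 100));
        assert( inRange(0, 100));
        assert( inRange(99, 100));
        assert(!inRange(100, 100));
        return 0;
    }
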
modules/imgproc/src/resize.cpp
@@ -1674,93 +1674,9 @@ struct HResizeLinearVecU8_X4
                 }
             }
         }
-        else if(cn < 9)
-        {
-            const int step = 8;
-            const int len0 = xmax & -step;
-            for( ; k <= (count - 2); k+=2 )
-            {
-                const uchar *S0 = src[k];
-                int *D0 = dst[k];
-                const uchar *S1 = src[k+1];
-                int *D1 = dst[k+1];
-                for( dx = 0; dx < len0; dx += cn )
-                {
-                    v_int16x8 a0 = v_load(alpha+dx*2);
-                    v_int16x8 a1 = v_load(alpha+dx*2 + 8);
-                    v_uint16x8 s0, s1;
-                    v_zip(v_load_expand(S0+xofs[dx]), v_load_expand(S0+xofs[dx]+cn), s0, s1);
-                    v_store(&D0[dx], v_dotprod(v_reinterpret_as_s16(s0), a0));
-                    v_store(&D0[dx+4], v_dotprod(v_reinterpret_as_s16(s1), a1));
-                    v_zip(v_load_expand(S1+xofs[dx]), v_load_expand(S1+xofs[dx]+cn), s0, s1);
-                    v_store(&D1[dx], v_dotprod(v_reinterpret_as_s16(s0), a0));
-                    v_store(&D1[dx+4], v_dotprod(v_reinterpret_as_s16(s1), a1));
-                }
-            }
-            for( ; k < count; k++ )
-            {
-                const uchar *S = src[k];
-                int *D = dst[k];
-                for( dx = 0; dx < len0; dx += cn )
-                {
-                    v_int16x8 a0 = v_load(alpha+dx*2);
-                    v_int16x8 a1 = v_load(alpha+dx*2 + 8);
-                    v_uint16x8 s0, s1;
-                    v_zip(v_load_expand(S+xofs[dx]), v_load_expand(S+xofs[dx]+cn), s0, s1);
-                    v_store(&D[dx], v_dotprod(v_reinterpret_as_s16(s0), a0));
-                    v_store(&D[dx+4], v_dotprod(v_reinterpret_as_s16(s1), a1));
-                }
-            }
-        }
         else
         {
-            const int step = 16;
-            const int len0 = (xmax - cn) & -step;
-            for( ; k <= (count - 2); k+=2 )
-            {
-                const uchar *S0 = src[k];
-                int *D0 = dst[k];
-                const uchar *S1 = src[k+1];
-                int *D1 = dst[k+1];
-                for( dx = 0; dx < len0; dx += step )
-                {
-                    v_int16x8 a0 = v_load(alpha+dx*2);
-                    v_int16x8 a1 = v_load(alpha+dx*2 + 8);
-                    v_int16x8 a2 = v_load(alpha+dx*2 + 16);
-                    v_int16x8 a3 = v_load(alpha+dx*2 + 24);
-                    v_uint8x16 s01, s23;
-                    v_zip(v_lut(S0, xofs+dx), v_lut(S0+cn, xofs+dx), s01, s23);
-                    v_store(&D0[dx], v_dotprod(v_reinterpret_as_s16(v_expand_low(s01)), a0));
-                    v_store(&D0[dx+4], v_dotprod(v_reinterpret_as_s16(v_expand_high(s01)), a1));
-                    v_store(&D0[dx+8], v_dotprod(v_reinterpret_as_s16(v_expand_low(s23)), a2));
-                    v_store(&D0[dx+12], v_dotprod(v_reinterpret_as_s16(v_expand_high(s23)), a3));
-                    v_zip(v_lut(S1, xofs+dx), v_lut(S1+cn, xofs+dx), s01, s23);
-                    v_store(&D1[dx], v_dotprod(v_reinterpret_as_s16(v_expand_low(s01)), a0));
-                    v_store(&D1[dx+4], v_dotprod(v_reinterpret_as_s16(v_expand_high(s01)), a1));
-                    v_store(&D1[dx+8], v_dotprod(v_reinterpret_as_s16(v_expand_low(s23)), a2));
-                    v_store(&D1[dx+12], v_dotprod(v_reinterpret_as_s16(v_expand_high(s23)), a3));
-                }
-            }
-            for( ; k < count; k++ )
-            {
-                const uchar *S = src[k];
-                int *D = dst[k];
-                for( dx = 0; dx < len0; dx += step )
-                {
-                    v_int16x8 a0 = v_load(alpha+dx*2);
-                    v_int16x8 a1 = v_load(alpha+dx*2 + 8);
-                    v_int16x8 a2 = v_load(alpha+dx*2 + 16);
-                    v_int16x8 a3 = v_load(alpha+dx*2 + 24);
-                    v_uint8x16 s01, s23;
-                    v_zip(v_lut(S, xofs+dx), v_lut(S+cn, xofs+dx), s01, s23);
-                    v_store(&D[dx], v_dotprod(v_reinterpret_as_s16(v_expand_low(s01)), a0));
-                    v_store(&D[dx+4], v_dotprod(v_reinterpret_as_s16(v_expand_high(s01)), a1));
-                    v_store(&D[dx+8], v_dotprod(v_reinterpret_as_s16(v_expand_low(s23)), a2));
-                    v_store(&D[dx+12], v_dotprod(v_reinterpret_as_s16(v_expand_high(s23)), a3));
-                }
-            }
+            return 0;  // images with channels >4 are out of optimization scope
         }
         return dx;
     }

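Dropping the wide-channel SIMD branches is safe because this helper reports how many elements it processed; returning 0 simply routes images with more than four channels through the generic scalar path. A generic sketch of that convention (hypothetical names, not the OpenCV internals):

    #include <cstdio>

    // Hypothetical vectorized kernel: processes a prefix and reports how much.
    static int fastPathPrefix(const int* src, int* dst, int len, int channels)
    {
        if (channels > 4)
            return 0;  // out of optimization scope; caller falls back entirely
        int done = len & ~3;  // pretend we vectorize 4 elements at a time
        for (int i = 0; i < done; i++) dst[i] = src[i] * 2;
        return done;
    }

    int main()
    {
        int src[10] = {0,1,2,3,4,5,6,7,8,9}, dst[10];
        int done = fastPathPrefix(src, dst, 10, 3);
        for (int i = done; i < 10; i++)  // scalar tail handles the rest
            dst[i] = src[i] * 2;
        std::printf("fast path handled %d of 10\n", done);
        return 0;
    }
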
modules/imgproc/test/test_drawing.cpp
@@ -583,4 +583,14 @@ TEST(Drawing, line)
     ASSERT_THROW(line(mat, Point(1,1),Point(99,99),Scalar(255),0), cv::Exception);
 }
+TEST(Drawing, regression_16308)
+{
+    Mat_<uchar> img(Size(100, 100), (uchar)0);
+    circle(img, Point(50, 50), 50, 255, 1, LINE_AA);
+    EXPECT_NE(0, (int)img.at<uchar>(0, 50));
+    EXPECT_NE(0, (int)img.at<uchar>(50, 0));
+    EXPECT_NE(0, (int)img.at<uchar>(50, 99));
+    EXPECT_NE(0, (int)img.at<uchar>(99, 50));
+}
+
 }} // namespace

modules/objdetect/src/qrcode.cpp
@@ -2289,7 +2289,8 @@ bool QRCodeDetector::decodeMulti(
     CV_Assert((points.size().width % 4) == 0);
     vector< vector< Point2f > > src_points ;
     Mat qr_points = points.getMat();
-    for (int i = 0; i < points.size().width ; i += 4)
+    qr_points = qr_points.reshape(2, 1);
+    for (int i = 0; i < qr_points.size().width ; i += 4)
     {
         vector<Point2f> tempMat = qr_points.colRange(i, i + 4);
         if (contourArea(tempMat) > 0.0)

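The reshape(2, 1) call makes decodeMulti tolerant of the corner input's shape: whether the caller passes a 1xN two-channel Mat or an (N/4)x4 one, flattening to a single row lets colRange slice out four corners per code. A short sketch of the idea:

    #include <cassert>
    #include <opencv2/core.hpp>

    int main()
    {
        // Corners of two QR codes stored as a 2x4 two-channel matrix.
        cv::Mat corners(2, 4, CV_32FC2, cv::Scalar(0));
        cv::Mat flat = corners.reshape(2, 1);  // keep 2 channels, force 1 row

        assert(flat.rows == 1 && flat.cols == 8);
        cv::Mat first = flat.colRange(0, 4);   // corners of the first code
        assert(first.total() == 4);
        return 0;
    }
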
modules/objdetect/test/test_qrcode.cpp
@@ -481,6 +481,26 @@ INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Close, testing::ValuesIn(qrcode_i
 INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Monitor, testing::ValuesIn(qrcode_images_monitor));
 INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Multi, testing::ValuesIn(qrcode_images_multiple));
+TEST(Objdetect_QRCode_decodeMulti, decode_regression_16491)
+{
+#ifdef HAVE_QUIRC
+    Mat zero_image = Mat::zeros(256, 256, CV_8UC1);
+    Point corners_[] = {Point(16, 16), Point(128, 16), Point(128, 128), Point(16, 128),
+                        Point(16, 16), Point(128, 16), Point(128, 128), Point(16, 128)};
+    std::vector<Point> vec_corners;
+    int array_size = 8;
+    vec_corners.assign(corners_, corners_ + array_size);
+    std::vector<cv::String> decoded_info;
+    std::vector<Mat> straight_barcode;
+    QRCodeDetector vec_qrcode;
+    EXPECT_NO_THROW(vec_qrcode.decodeMulti(zero_image, vec_corners, decoded_info, straight_barcode));
+
+    Mat mat_corners(2, 4, CV_32SC2, (void*)&vec_corners[0]);
+    QRCodeDetector mat_qrcode;
+    EXPECT_NO_THROW(mat_qrcode.decodeMulti(zero_image, mat_corners, decoded_info, straight_barcode));
+#endif
+}
+
 TEST(Objdetect_QRCode_basic, not_found_qrcode)
 {
     std::vector<Point> corners;

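The rewritten file reader keeps a user-selectable output mode (mMode) expressed as a FOURCC. Assuming the backend wires it to CAP_PROP_MODE, as the property handlers elsewhere in this file suggest, selection would look roughly like this (file name and behavior are illustrative):

    #include <opencv2/videoio.hpp>

    int main()
    {
        cv::VideoCapture cap("movie.mov");  // hypothetical file, AVFoundation backend
        if (!cap.isOpened())
            return 1;

        // Request grayscale output; 'G','R','E','Y' matches CV_CAP_MODE_GRAY above.
        cap.set(cv::CAP_PROP_MODE, cv::VideoWriter::fourcc('G', 'R', 'E', 'Y'));

        cv::Mat frame;
        cap >> frame;  // frame should be CV_8UC1 when the GRAY mode is honored
        return 0;
    }
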
modules/videoio/src/cap_avfoundation.mm
@@ -39,6 +39,11 @@
 #import <AVFoundation/AVFoundation.h>
 #import <Foundation/NSException.h>
+#define CV_CAP_MODE_BGR CV_FOURCC_MACRO('B','G','R','3')
+#define CV_CAP_MODE_RGB CV_FOURCC_MACRO('R','G','B','3')
+#define CV_CAP_MODE_GRAY CV_FOURCC_MACRO('G','R','E','Y')
+#define CV_CAP_MODE_YUYV CV_FOURCC_MACRO('Y', 'U', 'Y', 'V')
+
 /********************** Declaration of class headers ************************/
 /*****************************************************************************
@@ -128,37 +133,36 @@ class CvCaptureCAM : public CvCapture {
 *****************************************************************************/
 class CvCaptureFile : public CvCapture {
 public:
     CvCaptureFile(const char* filename) ;
     ~CvCaptureFile();
     virtual bool grabFrame();
     virtual IplImage* retrieveFrame(int);
-    virtual IplImage* queryFrame();
     virtual double getProperty(int property_id) const;
     virtual bool setProperty(int property_id, double value);
     virtual int didStart();
 private:
-    AVAssetReader *mMovieReader;
-    char* imagedata;
-    IplImage* image;
-    char* bgr_imagedata;
-    IplImage* bgr_image;
-    size_t currSize;
-
-    IplImage* retrieveFramePixelBuffer();
-    double getFPS();
-
-    int movieWidth;
-    int movieHeight;
-    double movieFPS;
-    double currentFPS;
-    double movieDuration;
-    int changedPos;
+    AVAsset                  *mAsset;
+    AVAssetTrack             *mAssetTrack;
+    AVAssetReader            *mAssetReader;
+    AVAssetReaderTrackOutput *mTrackOutput;
+
+    CMSampleBufferRef mCurrentSampleBuffer;
+    CVImageBufferRef  mGrabbedPixels;
+    IplImage *mDeviceImage;
+    uint8_t  *mOutImagedata;
+    IplImage *mOutImage;
+    size_t    currSize;
+    uint32_t  mMode;
+    int       mFormat;
+
+    bool setupReadingAt(CMTime position);
+    IplImage* retrieveFramePixelBuffer();
+
+    CMTime mFrameTimestamp;
+    size_t mFrameNum;
     int started;
 };
@@ -771,114 +775,128 @@ fromConnection:(AVCaptureConnection *)connection{
 *****************************************************************************/
 CvCaptureFile::CvCaptureFile(const char* filename) {
-    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
-
-    mMovieReader = nil;
-    image = NULL;
-    bgr_image = NULL;
-    imagedata = NULL;
-    bgr_imagedata = NULL;
-    currSize = 0;
-
-    movieWidth = 0;
-    movieHeight = 0;
-    movieFPS = 0;
-    currentFPS = 0;
-    movieDuration = 0;
-    changedPos = 0;
+    NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];
+
+    mAsset = nil;
+    mAssetTrack = nil;
+    mAssetReader = nil;
+    mTrackOutput = nil;
+    mDeviceImage = NULL;
+    mOutImage = NULL;
+    mOutImagedata = NULL;
+    currSize = 0;
+    mMode = CV_CAP_MODE_BGR;
+    mFormat = CV_8UC3;
+    mCurrentSampleBuffer = NULL;
+    mGrabbedPixels = NULL;
+    mFrameTimestamp = kCMTimeZero;
+    mFrameNum = 0;
     started = 0;
-    AVURLAsset *asset = [AVURLAsset URLAssetWithURL:
-                         [NSURL fileURLWithPath: [NSString stringWithUTF8String:filename]]
-                                            options:nil];
-
-    AVAssetTrack* videoTrack = nil;
-    NSArray* tracks = [asset tracksWithMediaType:AVMediaTypeVideo];
-    if ([tracks count] == 1)
-    {
-        videoTrack = [tracks objectAtIndex:0];
-
-        movieWidth = videoTrack.naturalSize.width;
-        movieHeight = videoTrack.naturalSize.height;
-        movieFPS = videoTrack.nominalFrameRate;
-
-        currentFPS = movieFPS; //Debugging !! should be getFPS();
-        //Debugging. need to be checked
-
-        // In ms
-        movieDuration = videoTrack.timeRange.duration.value/videoTrack.timeRange.duration.timescale * 1000;
-
-        started = 1;
-
-        NSError* error = nil;
-        mMovieReader = [[AVAssetReader alloc] initWithAsset:asset error:&error];
-        if (error)
-            NSLog(@"%@", [error localizedDescription]);
-
-        NSDictionary* videoSettings =
-        [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA]
-                                    forKey:(NSString*)kCVPixelBufferPixelFormatTypeKey];
-
-        [mMovieReader addOutput:[AVAssetReaderTrackOutput
-                                 assetReaderTrackOutputWithTrack:videoTrack
-                                 outputSettings:videoSettings]];
-        [mMovieReader startReading];
-    }
-
-    /*
-    // Asynchronously open the video in another thread. Always fail.
-    [asset loadValuesAsynchronouslyForKeys:[NSArray arrayWithObject:@"tracks"] completionHandler:
-     ^{
-         // The completion block goes here.
-         dispatch_async(dispatch_get_main_queue(),
-                        ^{
-                            AVAssetTrack* ::videoTrack = nil;
-                            NSArray* ::tracks = [asset tracksWithMediaType:AVMediaTypeVideo];
-                            if ([tracks count] == 1)
-                            {
-                                videoTrack = [tracks objectAtIndex:0];
-
-                                movieWidth = videoTrack.naturalSize.width;
-                                movieHeight = videoTrack.naturalSize.height;
-                                movieFPS = videoTrack.nominalFrameRate;
-
-                                currentFPS = movieFPS; //Debugging !! should be getFPS();
-                                //Debugging. need to be checked
-
-                                movieDuration = videoTrack.timeRange.duration.value/videoTrack.timeRange.duration.timescale * 1000;
-
-                                started = 1;
-
-                                NSError* ::error = nil;
-                                // mMovieReader is a member variable
-                                mMovieReader = [[AVAssetReader alloc] initWithAsset:asset error:&error];
-                                if (error)
-                                    NSLog(@"%@", [error localizedDescription]);
-
-                                NSDictionary* ::videoSettings =
-                                [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA]
-                                                            forKey:(NSString*)kCVPixelBufferPixelFormatTypeKey];
-
-                                [mMovieReader addOutput:[AVAssetReaderTrackOutput
-                                                         assetReaderTrackOutputWithTrack:videoTrack
-                                                         outputSettings:videoSettings]];
-                                [mMovieReader startReading];
-                            }
-                        });
-     }];
-     */
-
-    [localpool drain];
-}
+    mAsset = [[AVAsset assetWithURL:[NSURL fileURLWithPath: @(filename)]] retain];
+
+    if ( mAsset == nil ) {
+        fprintf(stderr, "OpenCV: Couldn't read movie file \"%s\"\n", filename);
+        [localpool drain];
+        started = 0;
+        return;
+    }
+
+    NSArray *tracks = [mAsset tracksWithMediaType:AVMediaTypeVideo];
+    if ([tracks count] == 0) {
+        fprintf(stderr, "OpenCV: Couldn't read video stream from file \"%s\"\n", filename);
+        [localpool drain];
+        started = 0;
+        return;
+    }
+
+    mAssetTrack = [tracks[0] retain];
+
+    if ( ! setupReadingAt(kCMTimeZero) ) {
+        fprintf(stderr, "OpenCV: Couldn't read movie file \"%s\"\n", filename);
+        [localpool drain];
+        started = 0;
+        return;
+    }
+
+    started = 1;
+    [localpool drain];
+}
 CvCaptureFile::~CvCaptureFile() {
-    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
-    if (imagedata != NULL) free(imagedata);
-    if (bgr_imagedata != NULL) free(bgr_imagedata);
-    cvReleaseImage(&image);
-    cvReleaseImage(&bgr_image);
-    [mMovieReader release];
-    [localpool drain];
+    NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];
+
+    free(mOutImagedata);
+    cvReleaseImage(&mOutImage);
+    cvReleaseImage(&mDeviceImage);
+    [mAssetReader release];
+    [mTrackOutput release];
+    [mAssetTrack release];
+    [mAsset release];
+    CVBufferRelease(mGrabbedPixels);
+    if ( mCurrentSampleBuffer ) {
+        CFRelease(mCurrentSampleBuffer);
+    }
+
+    [localpool drain];
+}
+
+bool CvCaptureFile::setupReadingAt(CMTime position) {
+    if (mAssetReader) {
+        if (mAssetReader.status == AVAssetReaderStatusReading) {
+            [mAssetReader cancelReading];
+        }
+        [mAssetReader release];
+        mAssetReader = nil;
+    }
+    if (mTrackOutput) {
+        [mTrackOutput release];
+        mTrackOutput = nil;
+    }
+
+    // Capture in a pixel format that can be converted efficiently to the output mode.
+    OSType pixelFormat;
+    if (mMode == CV_CAP_MODE_BGR || mMode == CV_CAP_MODE_RGB) {
+        pixelFormat = kCVPixelFormatType_32BGRA;
+        mFormat = CV_8UC3;
+    } else if (mMode == CV_CAP_MODE_GRAY) {
+        pixelFormat = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
+        mFormat = CV_8UC1;
+    } else if (mMode == CV_CAP_MODE_YUYV) {
+        pixelFormat = kCVPixelFormatType_422YpCbCr8;
+        mFormat = CV_8UC2;
+    } else {
+        fprintf(stderr, "VIDEOIO ERROR: AVF Mac: Unsupported mode: %d\n", mMode);
+        return false;
+    }
+
+    NSDictionary *settings =
+        @{
+            (id)kCVPixelBufferPixelFormatTypeKey: @(pixelFormat)
+        };
+    mTrackOutput = [[AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack: mAssetTrack
+                                                               outputSettings: settings] retain];
+
+    if ( !mTrackOutput ) {
+        fprintf(stderr, "OpenCV: error in [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:outputSettings:]\n");
+        return false;
+    }
+
+    NSError *error = nil;
+    mAssetReader = [[AVAssetReader assetReaderWithAsset: mAsset
+                                                  error: &error] retain];
+    if ( error ) {
+        fprintf(stderr, "OpenCV: error in [AVAssetReader assetReaderWithAsset:error:]\n");
+        NSLog(@"OpenCV: %@", error.localizedDescription);
+        return false;
+    }
+
+    mAssetReader.timeRange = CMTimeRangeMake(position, kCMTimePositiveInfinity);
+    mFrameTimestamp = position;
+    mFrameNum = round((mFrameTimestamp.value * mAssetTrack.nominalFrameRate) / double(mFrameTimestamp.timescale));
+    [mAssetReader addOutput: mTrackOutput];
+    return [mAssetReader startReading];
 }
 int CvCaptureFile::didStart() {
@ -886,101 +904,191 @@ int CvCaptureFile::didStart() {
} }
bool CvCaptureFile::grabFrame() { bool CvCaptureFile::grabFrame() {
NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];
//everything is done in queryFrame; CVBufferRelease(mGrabbedPixels);
currentFPS = movieFPS; if ( mCurrentSampleBuffer ) {
return 1; CFRelease(mCurrentSampleBuffer);
}
mCurrentSampleBuffer = [mTrackOutput copyNextSampleBuffer];
/* mGrabbedPixels = CMSampleBufferGetImageBuffer(mCurrentSampleBuffer);
double t1 = getProperty(CV_CAP_PROP_POS_MSEC); CVBufferRetain(mGrabbedPixels);
[mCaptureSession stepForward]; mFrameTimestamp = CMSampleBufferGetOutputPresentationTimeStamp(mCurrentSampleBuffer);
double t2 = getProperty(CV_CAP_PROP_POS_MSEC); mFrameNum++;
if (t2>t1 && !changedPos) {
currentFPS = 1000.0/(t2-t1);
} else {
currentFPS = movieFPS;
}
changedPos = 0;
*/
bool isReading = (mAssetReader.status == AVAssetReaderStatusReading);
[localpool drain];
return isReading;
} }
IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
    if ( !mGrabbedPixels ) {
        return 0;
    }

    NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];

    CVPixelBufferLockBaseAddress(mGrabbedPixels, 0);
    void *baseaddress;
    size_t width, height, rowBytes;

    OSType pixelFormat = CVPixelBufferGetPixelFormatType(mGrabbedPixels);

    if (CVPixelBufferIsPlanar(mGrabbedPixels)) {
        baseaddress = CVPixelBufferGetBaseAddressOfPlane(mGrabbedPixels, 0);
        width = CVPixelBufferGetWidthOfPlane(mGrabbedPixels, 0);
        height = CVPixelBufferGetHeightOfPlane(mGrabbedPixels, 0);
        rowBytes = CVPixelBufferGetBytesPerRowOfPlane(mGrabbedPixels, 0);
    } else {
        baseaddress = CVPixelBufferGetBaseAddress(mGrabbedPixels);
        width = CVPixelBufferGetWidth(mGrabbedPixels);
        height = CVPixelBufferGetHeight(mGrabbedPixels);
        rowBytes = CVPixelBufferGetBytesPerRow(mGrabbedPixels);
    }

    if ( rowBytes == 0 ) {
        fprintf(stderr, "OpenCV: error: rowBytes == 0\n");
        CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
        CVBufferRelease(mGrabbedPixels);
        mGrabbedPixels = NULL;
        return 0;
    }

    int outChannels;
    if (mMode == CV_CAP_MODE_BGR || mMode == CV_CAP_MODE_RGB) {
        outChannels = 3;
    } else if (mMode == CV_CAP_MODE_GRAY) {
        outChannels = 1;
    } else if (mMode == CV_CAP_MODE_YUYV) {
        outChannels = 2;
    } else {
        fprintf(stderr, "VIDEOIO ERROR: AVF Mac: Unsupported mode: %d\n", mMode);
        CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
        CVBufferRelease(mGrabbedPixels);
        mGrabbedPixels = NULL;
        return 0;
    }

    if ( currSize != width*outChannels*height ) {
        currSize = width*outChannels*height;
        free(mOutImagedata);
        mOutImagedata = reinterpret_cast<uint8_t*>(malloc(currSize));
    }

    if (mOutImage == NULL) {
        mOutImage = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, outChannels);
    }
    mOutImage->width = int(width);
    mOutImage->height = int(height);
    mOutImage->nChannels = outChannels;
    mOutImage->depth = IPL_DEPTH_8U;
    mOutImage->widthStep = int(width*outChannels);
    mOutImage->imageData = reinterpret_cast<char *>(mOutImagedata);
    mOutImage->imageSize = int(currSize);

    int deviceChannels;
    int cvtCode;

    if ( pixelFormat == kCVPixelFormatType_32BGRA ) {
        deviceChannels = 4;

        if (mMode == CV_CAP_MODE_BGR) {
            cvtCode = CV_BGRA2BGR;
        } else if (mMode == CV_CAP_MODE_RGB) {
            cvtCode = CV_BGRA2RGB;
        } else if (mMode == CV_CAP_MODE_GRAY) {
            cvtCode = CV_BGRA2GRAY;
        } else {
            CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
            CVBufferRelease(mGrabbedPixels);
            mGrabbedPixels = NULL;
            fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
            return 0;
        }
    } else if ( pixelFormat == kCVPixelFormatType_24RGB ) {
        deviceChannels = 3;

        if (mMode == CV_CAP_MODE_BGR) {
            cvtCode = CV_RGB2BGR;
        } else if (mMode == CV_CAP_MODE_RGB) {
            cvtCode = 0;
        } else if (mMode == CV_CAP_MODE_GRAY) {
            cvtCode = CV_RGB2GRAY;
        } else {
            CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
            CVBufferRelease(mGrabbedPixels);
            mGrabbedPixels = NULL;
            fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
            return 0;
        }
    } else if ( pixelFormat == kCVPixelFormatType_422YpCbCr8 ) {    // 422 (2vuy, UYVY)
        deviceChannels = 2;

        if (mMode == CV_CAP_MODE_BGR) {
            cvtCode = CV_YUV2BGR_UYVY;
        } else if (mMode == CV_CAP_MODE_RGB) {
            cvtCode = CV_YUV2RGB_UYVY;
        } else if (mMode == CV_CAP_MODE_GRAY) {
            cvtCode = CV_YUV2GRAY_UYVY;
        } else if (mMode == CV_CAP_MODE_YUYV) {
            cvtCode = -1;    // Copy
        } else {
            CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
            CVBufferRelease(mGrabbedPixels);
            mGrabbedPixels = NULL;
            fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
            return 0;
        }
    } else if ( pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange ||   // 420v
                pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange ) {   // 420f
        height = height * 3 / 2;
        deviceChannels = 1;

        if (mMode == CV_CAP_MODE_BGR) {
            cvtCode = CV_YUV2BGR_YV12;
        } else if (mMode == CV_CAP_MODE_RGB) {
            cvtCode = CV_YUV2RGB_YV12;
        } else if (mMode == CV_CAP_MODE_GRAY) {
            cvtCode = CV_YUV2GRAY_420;
        } else {
            CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
            CVBufferRelease(mGrabbedPixels);
            mGrabbedPixels = NULL;
            fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
            return 0;
        }
    } else {
        char pfBuf[] = { (char)pixelFormat, (char)(pixelFormat >> 8),
                         (char)(pixelFormat >> 16), (char)(pixelFormat >> 24), '\0' };
        fprintf(stderr, "OpenCV: unsupported pixel format '%s'\n", pfBuf);
        CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
        CVBufferRelease(mGrabbedPixels);
        mGrabbedPixels = NULL;
        return 0;
    }

    if (mDeviceImage == NULL) {
        mDeviceImage = cvCreateImageHeader(cvSize(int(width),int(height)), IPL_DEPTH_8U, deviceChannels);
    }
    mDeviceImage->width = int(width);
    mDeviceImage->height = int(height);
    mDeviceImage->nChannels = deviceChannels;
    mDeviceImage->depth = IPL_DEPTH_8U;
    mDeviceImage->widthStep = int(rowBytes);
    mDeviceImage->imageData = reinterpret_cast<char *>(baseaddress);
    mDeviceImage->imageSize = int(rowBytes*height);

    if (cvtCode == -1) {
        // raw copy for CV_CAP_MODE_YUYV on 2vuy input
        cv::cvarrToMat(mDeviceImage).copyTo(cv::cvarrToMat(mOutImage));
    } else {
        cvCvtColor(mDeviceImage, mOutImage, cvtCode);
    }

    CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);

    [localpool drain];

    return mOutImage;
}
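The fallback branch above prints the unknown pixel format by unpacking the OSType FourCC byte by byte, least-significant byte first. A standalone C++ sketch of that decoding (the tag value passed in main is illustrative):

    #include <cstdint>
    #include <cstdio>

    static void printFourCC(uint32_t pixelFormat)
    {
        // Unpack four ASCII bytes, least-significant byte first, as above.
        char buf[] = { char(pixelFormat), char(pixelFormat >> 8),
                       char(pixelFormat >> 16), char(pixelFormat >> 24), '\0' };
        std::printf("pixel format '%s'\n", buf);
    }

    int main()
    {
        printFourCC(0x32767579);  // illustrative value; prints "yuv2"
        return 0;
    }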
@ -988,123 +1096,88 @@ IplImage* CvCaptureFile::retrieveFrame(int) {
    return retrieveFramePixelBuffer();
}
double CvCaptureFile::getProperty(int property_id) const{
    if (mAsset == nil) return 0;

    CMTime t;

    switch (property_id) {
        case CV_CAP_PROP_POS_MSEC:
            return mFrameTimestamp.value * 1000.0 / mFrameTimestamp.timescale;
        case CV_CAP_PROP_POS_FRAMES:
            return mAssetTrack.nominalFrameRate > 0 ? mFrameNum : 0;
        case CV_CAP_PROP_POS_AVI_RATIO:
            t = [mAsset duration];
            return (mFrameTimestamp.value * t.timescale) / double(mFrameTimestamp.timescale * t.value);
        case CV_CAP_PROP_FRAME_WIDTH:
            return mAssetTrack.naturalSize.width;
        case CV_CAP_PROP_FRAME_HEIGHT:
            return mAssetTrack.naturalSize.height;
        case CV_CAP_PROP_FPS:
            return mAssetTrack.nominalFrameRate;
        case CV_CAP_PROP_FRAME_COUNT:
            t = [mAsset duration];
            return round((t.value * mAssetTrack.nominalFrameRate) / double(t.timescale));
        case CV_CAP_PROP_FORMAT:
            return mFormat;
        case CV_CAP_PROP_FOURCC:
            return mMode;
        default:
            break;
    }

    return 0;
}
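Everything getProperty() reports is derived on the fly from the asset and track; for instance, CV_CAP_PROP_FRAME_COUNT is duration times nominalFrameRate, rounded. A consumer-side sketch of querying these values through the public API (placeholder file name):

    #include <cstdio>
    #include <opencv2/videoio.hpp>

    int main()
    {
        cv::VideoCapture cap("movie.mov", cv::CAP_AVFOUNDATION);  // placeholder path
        if (!cap.isOpened())
            return 1;

        double fps    = cap.get(cv::CAP_PROP_FPS);          // nominalFrameRate
        double frames = cap.get(cv::CAP_PROP_FRAME_COUNT);  // duration * fps
        double w      = cap.get(cv::CAP_PROP_FRAME_WIDTH);  // naturalSize.width
        double h      = cap.get(cv::CAP_PROP_FRAME_HEIGHT); // naturalSize.height
        std::printf("%.0fx%.0f @ %g fps, %.0f frames\n", w, h, fps, frames);
        return 0;
    }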
bool CvCaptureFile::setProperty(int property_id, double value) {
    if (mAsset == nil) return false;

    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];

    bool retval = false;
    CMTime t;

    switch (property_id) {
        case CV_CAP_PROP_POS_MSEC:
            t = mAsset.duration;
            t.value = value * t.timescale / 1000;
            retval = setupReadingAt(t);
            break;
        case CV_CAP_PROP_POS_FRAMES:
            retval = mAssetTrack.nominalFrameRate > 0 ? setupReadingAt(CMTimeMake(value, mAssetTrack.nominalFrameRate)) : false;
            break;
        case CV_CAP_PROP_POS_AVI_RATIO:
            t = mAsset.duration;
            t.value = round(t.value * value);
            retval = setupReadingAt(t);
            break;
        case CV_CAP_PROP_FOURCC:
            uint32_t mode;
            mode = cvRound(value);
            if (mMode == mode) {
                retval = true;
            } else {
                switch (mode) {
                    case CV_CAP_MODE_BGR:
                    case CV_CAP_MODE_RGB:
                    case CV_CAP_MODE_GRAY:
                    case CV_CAP_MODE_YUYV:
                        mMode = mode;
                        retval = setupReadingAt(mFrameTimestamp);
                        break;
                    default:
                        fprintf(stderr, "VIDEOIO ERROR: AVF iOS: Unsupported mode: %d\n", mode);
                        retval = false;
                        break;
                }
            }
            break;
        default:
            break;
    }

    [localpool drain];
    return retval;
}
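Because an open AVAssetReader cannot seek, every position or mode change above tears the reader down and calls setupReadingAt(). A consumer-side seek sketch (placeholder file name); note that a backward seek costs the same as a forward one here, since the reader is rebuilt either way:

    #include <opencv2/core.hpp>
    #include <opencv2/videoio.hpp>

    int main()
    {
        cv::VideoCapture cap("movie.mov", cv::CAP_AVFOUNDATION);  // placeholder path
        if (!cap.isOpened())
            return 1;

        cv::Mat frame;
        cap.set(cv::CAP_PROP_POS_MSEC, 5000);  // reader restarts at t = 5 s
        cap.read(frame);                       // first frame at or after 5 s

        cap.set(cv::CAP_PROP_POS_FRAMES, 0);   // rewind: reader rebuilt again
        cap.read(frame);
        return 0;
    }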
