Merge pull request #2767 from SpecLad:merge-2.4

pull/2775/merge
Roman Donchenko authored 11 years ago, committed by OpenCV Buildbot
commit 0ec54982b5
  1. cmake/templates/OpenCVConfig.cmake.in (10 changes)
  2. doc/tutorials/core/basic_linear_transform/basic_linear_transform.rst (2 changes)
  3. doc/tutorials/introduction/linux_install/linux_install.rst (28 changes)
  4. doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.rst (2 changes)
  5. doc/tutorials/ml/introduction_to_svm/introduction_to_svm.rst (4 changes)
  6. modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst (12 changes)
  7. modules/core/doc/drawing_functions.rst (2 changes)
  8. modules/core/doc/operations_on_arrays.rst (4 changes)
  9. modules/core/include/opencv2/core.hpp (2 changes)
  10. modules/core/src/matrix.cpp (14 changes)
  11. modules/core/test/test_math.cpp (91 changes)
  12. modules/cudawarping/src/cuda/resize.cu (2 changes)
  13. modules/flann/include/opencv2/flann/autotuned_index.h (8 changes)
  14. modules/flann/include/opencv2/flann/params.h (9 changes)
  15. modules/highgui/src/cap_giganetix.cpp (4 changes)
  16. modules/highgui/src/cap_gstreamer.cpp (1 change)
  17. modules/highgui/src/window_w32.cpp (23 changes)
  18. modules/imgproc/doc/geometric_transformations.rst (51 changes)
  19. modules/imgproc/doc/miscellaneous_transformations.rst (6 changes)
  20. modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst (2 changes)
  21. modules/imgproc/src/filter.cpp (7 changes)
  22. modules/imgproc/src/smooth.cpp (2 changes)
  23. modules/ml/src/tree.cpp (2 changes)
  24. modules/nonfree/doc/feature_detection.rst (2 changes)
  25. modules/video/src/bgfg_gaussmix2.cpp (52 changes)
  26. samples/cpp/kmeans.cpp (2 changes)

@@ -64,7 +64,11 @@ set(OpenCV_USE_CUFFT @HAVE_CUFFT@)
set(OpenCV_USE_NVCUVID @HAVE_NVCUVID@)
# Android API level from which OpenCV has been compiled is remembered
set(OpenCV_ANDROID_NATIVE_API_LEVEL @OpenCV_ANDROID_NATIVE_API_LEVEL_CONFIGCMAKE@)
if(ANDROID)
set(OpenCV_ANDROID_NATIVE_API_LEVEL @OpenCV_ANDROID_NATIVE_API_LEVEL_CONFIGCMAKE@)
else()
set(OpenCV_ANDROID_NATIVE_API_LEVEL 0)
endif()
# Some additional settings are required if OpenCV is built as static libs
set(OpenCV_SHARED @BUILD_SHARED_LIBS@)
@@ -75,8 +79,8 @@ set(OpenCV_USE_MANGLED_PATHS @OpenCV_USE_MANGLED_PATHS_CONFIGCMAKE@)
# Extract the directory where *this* file has been installed (determined at cmake run-time)
get_filename_component(OpenCV_CONFIG_PATH "${CMAKE_CURRENT_LIST_FILE}" PATH CACHE)
if(NOT WIN32 OR OpenCV_ANDROID_NATIVE_API_LEVEL GREATER 0)
if(OpenCV_ANDROID_NATIVE_API_LEVEL GREATER 0)
if(NOT WIN32 OR ANDROID)
if(ANDROID)
set(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../../..")
else()
set(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../..")

@@ -187,7 +187,7 @@ Explanation
image.convertTo(new_image, -1, alpha, beta);
where :convert_to:`convertTo <>` would effectively perform *new_image = a*image + beta*. However, we wanted to show you how to access each pixel. In any case, both methods give the same result.
where :convert_to:`convertTo <>` would effectively perform *new_image = a*image + beta*. However, we wanted to show you how to access each pixel. In any case, both methods give the same result but convertTo is more optimized and works a lot faster.
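For reference, a minimal sketch contrasting the two approaches discussed above; the input file name and the gain/bias values are illustrative, not taken from the tutorial:

.. code-block:: cpp

    cv::Mat image = cv::imread("input.jpg"); // hypothetical 8-bit color input
    cv::Mat new_image = cv::Mat::zeros(image.size(), image.type());
    double alpha = 2.0, beta = 30.0; // assumed gain and bias

    // Explicit per-pixel access, as the tutorial demonstrates:
    for (int y = 0; y < image.rows; y++)
        for (int x = 0; x < image.cols; x++)
            for (int c = 0; c < 3; c++)
                new_image.at<cv::Vec3b>(y, x)[c] =
                    cv::saturate_cast<uchar>(alpha * image.at<cv::Vec3b>(y, x)[c] + beta);

    // Equivalent single call, typically much faster:
    image.convertTo(new_image, -1, alpha, beta);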
Result
=======

@@ -7,22 +7,24 @@ These steps have been tested for Ubuntu 10.04 but should work with other distros
Required Packages
=================
* GCC 4.4.x or later. This can be installed with:
* GCC 4.4.x or later
* CMake 2.8.7 or higher
* Git
* GTK+2.x or higher, including headers (libgtk2.0-dev)
* pkg-config
* Python 2.6 or later and Numpy 1.5 or later with developer packages (python-dev, python-numpy)
* ffmpeg or libav development packages: libavcodec-dev, libavformat-dev, libswscale-dev
* [optional] libtbb2 libtbb-dev
* [optional] libdc1394 2.x
* [optional] libjpeg-dev, libpng-dev, libtiff-dev, libjasper-dev, libdc1394-22-dev
The packages can be installed using a terminal and the following commands or by using Synaptic Manager:
.. code-block:: bash
sudo apt-get install build-essential
* CMake 2.8.7 or higher;
* Git;
* GTK+2.x or higher, including headers (libgtk2.0-dev);
* pkg-config;
* Python 2.6 or later and Numpy 1.5 or later with developer packages (python-dev, python-numpy);
* ffmpeg or libav development packages: libavcodec-dev, libavformat-dev, libswscale-dev;
* [optional] libdc1394 2.x;
* [optional] libjpeg-dev, libpng-dev, libtiff-dev, libjasper-dev.
All the libraries above can be installed via Terminal or by using Synaptic Manager.
[compiler] sudo apt-get install build-essential
[required] sudo apt-get install cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
[optional] sudo apt-get install python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev
Getting OpenCV Source Code
==========================

@@ -78,6 +78,8 @@ Make sure your active solution configuration (:menuselection:`Build --> Configur
Build your solution (:menuselection:`Build --> Build Solution`, or press *F7*).
Before continuing, do not forget to add the command line argument of your input image to your project (:menuselection:`Right click on project --> Properties --> Configuration Properties --> Debugging` and then set the field ``Command Arguments`` with the location of the image).
Now set a breakpoint on the source line that says
.. code-block:: c++

@@ -105,8 +105,8 @@ Explanation
.. code-block:: cpp
Mat trainingDataMat(3, 2, CV_32FC1, trainingData);
Mat labelsMat (3, 1, CV_32FC1, labels);
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat (4, 1, CV_32FC1, labels);
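For context, these corrected dimensions refer to the tutorial's training arrays, reproduced here from the unchanged part of the tutorial outside this hunk:

.. code-block:: cpp

    float labels[4] = {1.0, -1.0, -1.0, -1.0};
    float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };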
2. **Set up SVM's parameters**

@@ -224,9 +224,9 @@ Computes useful camera characteristics from the camera matrix.
:param imageSize: Input image size in pixels.
:param apertureWidth: Physical width of the sensor.
:param apertureWidth: Physical width in mm of the sensor.
:param apertureHeight: Physical height of the sensor.
:param apertureHeight: Physical height in mm of the sensor.
:param fovx: Output field of view in degrees along the horizontal sensor axis.
@@ -234,13 +234,15 @@ Computes useful camera characteristics from the camera matrix.
:param focalLength: Focal length of the lens in mm.
:param principalPoint: Principal point in pixels.
:param principalPoint: Principal point in mm.
:param aspectRatio: :math:`f_y/f_x`
The function computes various useful camera characteristics from the previously estimated camera matrix.
.. note::
Do keep in mind that the unity measure 'mm' stands for whatever unit of measure one chooses for the chessboard pitch (it can thus be any value).
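A hedged usage sketch; ``cameraMatrix`` is assumed to come from a prior :ocv:func:`calibrateCamera` run, and the sensor dimensions are made-up values:

.. code-block:: cpp

    double fovx, fovy, focalLength, aspectRatio;
    cv::Point2d principalPoint;
    cv::calibrationMatrixValues(cameraMatrix, cv::Size(640, 480),
                                4.8, 3.6, // assumed sensor width and height, in mm
                                fovx, fovy, focalLength,
                                principalPoint, aspectRatio);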
composeRT
-------------
@@ -1490,6 +1492,10 @@ Reconstructs points by triangulation.
The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their observations with a stereo camera. Projections matrices can be obtained from :ocv:func:`stereoRectify`.
.. note::
Keep in mind that all input data should be of float type in order for this function to work.
.. seealso::
:ocv:func:`reprojectImageTo3D`
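A minimal sketch illustrating the float-type requirement noted above; the projection matrices and point observations are assumed to come from :ocv:func:`stereoRectify` and a feature matcher:

.. code-block:: cpp

    const int N = 10; // number of correspondences (illustrative)
    cv::Mat P1(3, 4, CV_32F), P2(3, 4, CV_32F);     // projection matrices
    cv::Mat pts1(2, N, CV_32F), pts2(2, N, CV_32F); // one observation per column
    cv::Mat points4D;
    cv::triangulatePoints(P1, P2, pts1, pts2, points4D); // 4xN homogeneous points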

@@ -585,7 +585,7 @@ Draws a text string.
:param font: ``CvFont`` structure initialized using :ocv:cfunc:`InitFont`.
:param fontFace: Font type. One of ``FONT_HERSHEY_SIMPLEX``, ``FONT_HERSHEY_PLAIN``, ``FONT_HERSHEY_DUPLEX``, ``FONT_HERSHEY_COMPLEX``, ``FONT_HERSHEY_TRIPLEX``, ``FONT_HERSHEY_COMPLEX_SMALL``, ``FONT_HERSHEY_SCRIPT_SIMPLEX``, or ``FONT_HERSHEY_SCRIPT_COMPLEX``,
where each of the font ID's can be combined with ``FONT_HERSHEY_ITALIC`` to get the slanted letters.
where each of the font ID's can be combined with ``FONT_ITALIC`` to get the slanted letters.
:param fontScale: Font scale factor that is multiplied by the font-specific base size.

@@ -1216,9 +1216,9 @@ gemm
----
Performs generalized matrix multiplication.
.. ocv:function:: void gemm( InputArray src1, InputArray src2, double alpha, InputArray src3, double gamma, OutputArray dst, int flags=0 )
.. ocv:function:: void gemm( InputArray src1, InputArray src2, double alpha, InputArray src3, double beta, OutputArray dst, int flags=0 )
.. ocv:pyfunction:: cv2.gemm(src1, src2, alpha, src3, gamma[, dst[, flags]]) -> dst
.. ocv:pyfunction:: cv2.gemm(src1, src2, alpha, src3, beta[, dst[, flags]]) -> dst
.. ocv:cfunction:: void cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, const CvArr* src3, double beta, CvArr* dst, int tABC=0)
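To make the renamed parameter concrete, a small sketch of the operation ``dst = alpha*src1*src2 + beta*src3`` (operand values are illustrative):

.. code-block:: cpp

    cv::Mat A = cv::Mat::eye(3, 3, CV_32F);
    cv::Mat B = cv::Mat::ones(3, 3, CV_32F);
    cv::Mat C = cv::Mat::ones(3, 3, CV_32F);
    cv::Mat D;
    cv::gemm(A, B, 1.0, C, 0.5, D);               // D = 1.0*A*B + 0.5*C
    cv::gemm(A, B, 1.0, C, 0.5, D, cv::GEMM_1_T); // same, but with A transposed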

@@ -389,7 +389,7 @@ CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val = 0);
//! implements generalized matrix product algorithm GEMM from BLAS
CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha,
InputArray src3, double gamma, OutputArray dst, int flags = 0);
InputArray src3, double beta, OutputArray dst, int flags = 0);
//! multiplies matrix by its transposition from the left or from the right
CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa,

@@ -4050,16 +4050,18 @@ double cv::kmeans( InputArray _data, int K,
int flags, OutputArray _centers )
{
const int SPP_TRIALS = 3;
Mat data = _data.getMat();
bool isrow = data.rows == 1 && data.channels() > 1;
int N = !isrow ? data.rows : data.cols;
int dims = (!isrow ? data.cols : 1)*data.channels();
int type = data.depth();
Mat data0 = _data.getMat();
bool isrow = data0.rows == 1 && data0.channels() > 1;
int N = !isrow ? data0.rows : data0.cols;
int dims = (!isrow ? data0.cols : 1)*data0.channels();
int type = data0.depth();
attempts = std::max(attempts, 1);
CV_Assert( data.dims <= 2 && type == CV_32F && K > 0 );
CV_Assert( data0.dims <= 2 && type == CV_32F && K > 0 );
CV_Assert( N >= K );
Mat data(N, dims, CV_32F, data0.data, isrow ? dims * sizeof(float) : static_cast<size_t>(data0.step));
_bestLabels.create(N, 1, CV_32S, -1, true);
Mat _labels, best_labels = _bestLabels.getMat();
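The wrapper ``Mat`` built above is what lets every input layout exercised by the updated test reach the same code path. A hedged caller-side sketch with packed 2-channel points:

.. code-block:: cpp

    std::vector<cv::Point2f> pts(100); // illustrative data
    cv::randu(pts, cv::Scalar(-1, -1), cv::Scalar(1, 1));
    cv::Mat labels, centers;
    cv::kmeans(pts, 4, labels,
               cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 30, 0),
               5, cv::KMEANS_PP_CENTERS, centers);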

@@ -2627,6 +2627,15 @@ TEST(Core_SVD, flt)
// TODO: eigenvv, invsqrt, cbrt, fastarctan, (round, floor, ceil(?)),
enum
{
MAT_N_DIM_C1,
MAT_N_1_CDIM,
MAT_1_N_CDIM,
MAT_N_DIM_C1_NONCONT,
MAT_N_1_CDIM_NONCONT,
VECTOR
};
class CV_KMeansSingularTest : public cvtest::BaseTest
{
@@ -2634,7 +2643,7 @@ public:
CV_KMeansSingularTest() {}
~CV_KMeansSingularTest() {}
protected:
void run(int)
void run(int inVariant)
{
int i, iter = 0, N = 0, N0 = 0, K = 0, dims = 0;
Mat labels;
@@ -2646,20 +2655,70 @@ protected:
for( iter = 0; iter < maxIter; iter++ )
{
ts->update_context(this, iter, true);
dims = rng.uniform(1, MAX_DIM+1);
dims = rng.uniform(inVariant == MAT_1_N_CDIM ? 2 : 1, MAX_DIM+1);
N = rng.uniform(1, MAX_POINTS+1);
N0 = rng.uniform(1, MAX(N/10, 2));
K = rng.uniform(1, N+1);
Mat data0(N0, dims, CV_32F);
rng.fill(data0, RNG::UNIFORM, -1, 1);
if (inVariant == VECTOR)
{
dims = 2;
Mat data(N, dims, CV_32F);
for( i = 0; i < N; i++ )
data0.row(rng.uniform(0, N0)).copyTo(data.row(i));
std::vector<cv::Point2f> data0(N0);
rng.fill(data0, RNG::UNIFORM, -1, 1);
std::vector<cv::Point2f> data(N);
for( i = 0; i < N; i++ )
data[i] = data0[rng.uniform(0, N0)];
kmeans(data, K, labels, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 30, 0),
5, KMEANS_PP_CENTERS);
}
else
{
Mat data0(N0, dims, CV_32F);
rng.fill(data0, RNG::UNIFORM, -1, 1);
kmeans(data, K, labels, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 30, 0),
5, KMEANS_PP_CENTERS);
Mat data;
switch (inVariant)
{
case MAT_N_DIM_C1:
data.create(N, dims, CV_32F);
for( i = 0; i < N; i++ )
data0.row(rng.uniform(0, N0)).copyTo(data.row(i));
break;
case MAT_N_1_CDIM:
data.create(N, 1, CV_32FC(dims));
for( i = 0; i < N; i++ )
memcpy(data.ptr(i), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float));
break;
case MAT_1_N_CDIM:
data.create(1, N, CV_32FC(dims));
for( i = 0; i < N; i++ )
memcpy(data.data + i * dims * sizeof(float), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float));
break;
case MAT_N_DIM_C1_NONCONT:
data.create(N, dims + 5, CV_32F);
data = data(Range(0, N), Range(0, dims));
for( i = 0; i < N; i++ )
data0.row(rng.uniform(0, N0)).copyTo(data.row(i));
break;
case MAT_N_1_CDIM_NONCONT:
data.create(N, 3, CV_32FC(dims));
data = data.colRange(0, 1);
for( i = 0; i < N; i++ )
memcpy(data.ptr(i), data0.ptr(rng.uniform(0, N0)), dims * sizeof(float));
break;
}
kmeans(data, K, labels, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 30, 0),
5, KMEANS_PP_CENTERS);
}
Mat hist(K, 1, CV_32S, Scalar(0));
for( i = 0; i < N; i++ )
@@ -2683,7 +2742,19 @@
}
};
TEST(Core_KMeans, singular) { CV_KMeansSingularTest test; test.safe_run(); }
TEST(Core_KMeans, singular) { CV_KMeansSingularTest test; test.safe_run(MAT_N_DIM_C1); }
CV_ENUM(KMeansInputVariant, MAT_N_DIM_C1, MAT_N_1_CDIM, MAT_1_N_CDIM, MAT_N_DIM_C1_NONCONT, MAT_N_1_CDIM_NONCONT, VECTOR)
typedef testing::TestWithParam<KMeansInputVariant> Core_KMeans_InputVariants;
TEST_P(Core_KMeans_InputVariants, singular)
{
CV_KMeansSingularTest test;
test.safe_run(GetParam());
}
INSTANTIATE_TEST_CASE_P(AllVariants, Core_KMeans_InputVariants, KMeansInputVariant::all());
TEST(CovariationMatrixVectorOfMat, accuracy)
{

@@ -213,7 +213,7 @@ namespace cv { namespace cuda { namespace device
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
resize_linear<<<grid, block>>>(src, dst, fy, fx);
resize_linear<<<grid, block, 0, stream>>>(src, dst, fy, fx);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)

@@ -99,18 +99,22 @@ public:
*/
virtual void buildIndex()
{
std::ostringstream stream;
bestParams_ = estimateBuildParams();
print_params(bestParams_, stream);
Logger::info("----------------------------------------------------\n");
Logger::info("Autotuned parameters:\n");
print_params(bestParams_);
Logger::info("%s", stream.str().c_str());
Logger::info("----------------------------------------------------\n");
bestIndex_ = create_index_by_type(dataset_, bestParams_, distance_);
bestIndex_->buildIndex();
speedup_ = estimateSearchParams(bestSearchParams_);
stream.str(std::string());
print_params(bestSearchParams_, stream);
Logger::info("----------------------------------------------------\n");
Logger::info("Search parameters:\n");
print_params(bestSearchParams_);
Logger::info("%s", stream.str().c_str());
Logger::info("----------------------------------------------------\n");
}

@@ -79,16 +79,19 @@ T get_param(const IndexParams& params, cv::String name)
}
}
inline void print_params(const IndexParams& params)
inline void print_params(const IndexParams& params, std::ostream& stream)
{
IndexParams::const_iterator it;
for(it=params.begin(); it!=params.end(); ++it) {
std::cout << it->first << " : " << it->second << std::endl;
stream << it->first << " : " << it->second << std::endl;
}
}
inline void print_params(const IndexParams& params)
{
print_params(params, std::cout);
}
}

@@ -711,13 +711,13 @@ CvCaptureCAM_Giganetix::setProperty( int property_id, double value )
INT64 w, wmax, val = (INT64)value;
if((b_ret = m_device->GetIntegerNodeValue ("Width", w)))
if((b_ret = m_device->GetIntegerNodeValue ("WidthMax", wmax)))
b_ret = m_device->SetIntegerNodeValue ("OffsetX", val w > wmax ? wmax - w : val);
b_ret = m_device->SetIntegerNodeValue ("OffsetX", (val + w) > wmax ? (wmax - w) : val);
} break;
case CV_CAP_PROP_GIGA_FRAME_OFFSET_Y: {
INT64 h, hmax, val = (INT64)value;
if((b_ret = m_device->GetIntegerNodeValue ("Height", h)))
if((b_ret = m_device->GetIntegerNodeValue ("HeightMax", hmax)))
b_ret = m_device->SetIntegerNodeValue ("OffsetY", val h > hmax ? hmax - h : val);
b_ret = m_device->SetIntegerNodeValue ("OffsetY", (val + h) > hmax ? (hmax - h) : val);
b_ret = m_device->SetIntegerNodeValue ("OffsetY", (INT64)value);
}
break;

@@ -735,6 +735,7 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
#if GST_VERSION_MAJOR == 0
caps = gst_caps_new_simple("video/x-raw-rgb",
"bpp", G_TYPE_INT, 24,
"red_mask", G_TYPE_INT, 0x0000FF,
"green_mask", G_TYPE_INT, 0x00FF00,
"blue_mask", G_TYPE_INT, 0xFF0000,

@@ -1472,8 +1472,6 @@ static LRESULT CALLBACK HighGUIProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM
if( window->on_mouse )
{
POINT pt;
RECT rect;
SIZE size = {0,0};
int flags = (wParam & MK_LBUTTON ? CV_EVENT_FLAG_LBUTTON : 0)|
(wParam & MK_RBUTTON ? CV_EVENT_FLAG_RBUTTON : 0)|
@@ -1499,12 +1497,23 @@ static LRESULT CALLBACK HighGUIProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM
pt.x = GET_X_LPARAM( lParam );
pt.y = GET_Y_LPARAM( lParam );
GetClientRect( window->hwnd, &rect );
icvGetBitmapData( window, &size, 0, 0 );
if (window->flags & CV_WINDOW_AUTOSIZE)
{
// As user can't change window size, do not scale window coordinates. Underlying windowing system
// may prevent full window from being displayed and in this case coordinates should not be scaled.
window->on_mouse( event, pt.x, pt.y, flags, window->on_mouse_param );
} else {
// Full window is displayed using different size. Scale coordinates to match underlying positions.
RECT rect;
SIZE size = {0, 0};
GetClientRect( window->hwnd, &rect );
icvGetBitmapData( window, &size, 0, 0 );
window->on_mouse( event, pt.x*size.cx/MAX(rect.right - rect.left,1),
pt.y*size.cy/MAX(rect.bottom - rect.top,1), flags,
window->on_mouse_param );
window->on_mouse( event, pt.x*size.cx/MAX(rect.right - rect.left,1),
pt.y*size.cy/MAX(rect.bottom - rect.top,1), flags,
window->on_mouse_param );
}
}
break;

@@ -249,6 +249,57 @@ The function computes an inverse affine transformation represented by
The result is also a
:math:`2 \times 3` matrix of the same type as ``M`` .
LinearPolar
-----------
Remaps an image to polar space.
.. ocv:cfunction:: void cvLinearPolar( const CvArr* src, CvArr* dst, CvPoint2D32f center, double maxRadius, int flags=CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS )
:param src: Source image
:param dst: Destination image
:param center: The transformation center;
:param maxRadius: Inverse magnitude scale parameter. See below
:param flags: A combination of interpolation methods and the following optional flags:
* **CV_WARP_FILL_OUTLIERS** fills all of the destination image pixels. If some of them correspond to outliers in the source image, they are set to zero
* **CV_WARP_INVERSE_MAP** See below
The function ``cvLinearPolar`` transforms the source image using the following transformation:
*
Forward transformation (``CV_WARP_INVERSE_MAP`` is not set):
.. math::
dst( \phi , \rho ) = src(x,y)
*
Inverse transformation (``CV_WARP_INVERSE_MAP`` is set):
.. math::
dst(x,y) = src( \phi , \rho )
where
.. math::
\rho = (src.width/maxRadius) \cdot \sqrt{x^2 + y^2}, \phi = atan(y/x)
The function can not operate in-place.
.. note::
* An example using the LinearPolar operation can be found at opencv_source_code/samples/c/polar_transforms.c
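A hedged C-API usage sketch (the file name and radius choice are illustrative):

.. code-block:: cpp

    IplImage* src = cvLoadImage("fruits.jpg", 1);
    IplImage* dst = cvCreateImage(cvGetSize(src), 8, 3);
    cvLinearPolar(src, dst,
                  cvPoint2D32f(src->width / 2.0f, src->height / 2.0f),
                  src->width / 2.0,
                  CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS);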
LogPolar

@@ -500,7 +500,7 @@ Fills a connected component with the given color.
:param image: Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the function unless the ``FLOODFILL_MASK_ONLY`` flag is set in the second variant of the function. See the details below.
:param mask: (For the second function only) Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function uses and updates the mask, so you take responsibility of initializing the ``mask`` content. Flood-filling cannot go across non-zero pixels in the mask. For example, an edge detector output can be used as a mask to stop filling at edges. It is possible to use the same mask in multiple calls to the function to make sure the filled area does not overlap.
:param mask: Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels taller than ``image``. Since this is both an input and output parameter, you must take responsibility of initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example, an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the mask corresponding to filled pixels in the image are set to 1 or to the value specified in ``flags`` as described below. It is therefore possible to use the same mask in multiple calls to the function to make sure the filled areas do not overlap.
.. note:: Since the mask is larger than the filled image, a pixel :math:`(x, y)` in ``image`` corresponds to the pixel :math:`(x+1, y+1)` in the ``mask`` .
@@ -514,11 +514,11 @@ Fills a connected component with the given color.
:param rect: Optional output parameter set by the function to the minimum bounding rectangle of the repainted domain.
:param flags: Operation flags. Lower bits contain a connectivity value, 4 (default) or 8, used within the function. Connectivity determines which neighbors of a pixel are considered. Upper bits can be 0 or a combination of the following flags:
:param flags: Operation flags. The first 8 bits contain a connectivity value. The default value of 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner) will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill the ``mask`` (the default value is 1). For example, ``4 | ( 255 << 8 )`` will consider 4 nearest neighbours and fill the mask with a value of 255. The following additional options occupy higher bits and therefore may be further combined with the connectivity and mask fill values using bit-wise or (``|``):
* **FLOODFILL_FIXED_RANGE** If set, the difference between the current pixel and seed pixel is considered. Otherwise, the difference between neighbor pixels is considered (that is, the range is floating).
* **FLOODFILL_MASK_ONLY** If set, the function does not change the image ( ``newVal`` is ignored), but fills the mask with the value in bits 8-16 of ``flags`` (that is, the fill value is set to newValue by adding (newValue << 8) to the ``flags``). The flag can be used for the second variant only.
* **FLOODFILL_MASK_ONLY** If set, the function does not change the image ( ``newVal`` is ignored), and only fills the mask with the value specified in bits 8-16 of ``flags`` as described above. This option only makes sense in function variants that have the ``mask`` parameter.
The functions ``floodFill`` fill a connected component starting from the seed point with the specified color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The pixel at
:math:`(x,y)` is considered to belong to the repainted domain if:
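A short sketch of the flag packing described above; the seed point and tolerances are illustrative:

.. code-block:: cpp

    cv::Mat img = cv::imread("fruits.jpg");
    cv::Mat mask = cv::Mat::zeros(img.rows + 2, img.cols + 2, CV_8UC1);
    int flags = 8 | (255 << 8) | cv::FLOODFILL_MASK_ONLY; // 8-connectivity, fill mask with 255
    cv::floodFill(img, mask, cv::Point(50, 50), cv::Scalar(), 0,
                  cv::Scalar(10, 10, 10), cv::Scalar(10, 10, 10), flags);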

@@ -159,7 +159,7 @@ Finds contours in a binary image.
.. ocv:cfunction:: int cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** first_contour, int header_size=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0,0) )
:param image: Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as ``binary`` . You can use :ocv:func:`compare` , :ocv:func:`inRange` , :ocv:func:`threshold` , :ocv:func:`adaptiveThreshold` , :ocv:func:`Canny` , and others to create a binary image out of a grayscale or color one. The function modifies the ``image`` while extracting the contours.
:param image: Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as ``binary`` . You can use :ocv:func:`compare` , :ocv:func:`inRange` , :ocv:func:`threshold` , :ocv:func:`adaptiveThreshold` , :ocv:func:`Canny` , and others to create a binary image out of a grayscale or color one. The function modifies the ``image`` while extracting the contours. If ``mode`` equals ``CV_RETR_CCOMP`` or ``CV_RETR_FLOODFILL``, the input can also be a 32-bit integer image of labels (``CV_32SC1``).
:param contours: Detected contours. Each contour is stored as a vector of points.
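A hedged sketch of the new 32-bit label input path; the label image is assumed to come from, for example, :ocv:func:`watershed` or flood filling (the function still modifies it in place):

.. code-block:: cpp

    cv::Mat labels; // CV_32SC1 label image, assumed precomputed
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(labels, contours, hierarchy,
                     CV_RETR_FLOODFILL, CV_CHAIN_APPROX_SIMPLE);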

@@ -410,8 +410,11 @@ void FilterEngine::apply(const Mat& src, Mat& dst,
dstOfs.y + srcRoi.height <= dst.rows );
int y = start(src, srcRoi, isolated);
proceed( src.data + y*src.step, (int)src.step, endY - startY,
dst.data + dstOfs.y*dst.step + dstOfs.x*dst.elemSize(), (int)dst.step );
proceed( src.data + y*src.step
+ srcRoi.x*src.elemSize(),
(int)src.step, endY - startY,
dst.data + dstOfs.y*dst.step +
dstOfs.x*dst.elemSize(), (int)dst.step );
}
}

@@ -1154,7 +1154,7 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
Size size = _src.size();
_dst.create( size, type );
if( borderType != BORDER_CONSTANT )
if( borderType != BORDER_CONSTANT && (borderType & BORDER_ISOLATED) != 0 )
{
if( size.height == 1 )
ksize.height = 1;

@@ -1445,8 +1445,6 @@ void CvDTreeTrainData::read_params( CvFileStorage* fs, CvFileNode* node )
var_type->data.i[var_count] = cat_var_count;
ord_var_count = ~ord_var_count;
if( cat_var_count != cat_var_count || ord_var_count != ord_var_count )
CV_ERROR( CV_StsParseError, "var_type is inconsistent with cat_var_count and ord_var_count" );
//////
if( cat_var_count > 0 || is_classifier )

@@ -121,6 +121,8 @@ Detects keypoints and computes SURF descriptors for them.
.. ocv:pyfunction:: cv2.SURF.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
.. ocv:pyfunction:: cv2.SURF.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
.. ocv:pyfunction:: cv2.SURF.detectAndCompute(image[, mask]) -> keypoints, descriptors
.. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
:param image: Input 8-bit grayscale image

@@ -861,54 +861,48 @@ void BackgroundSubtractorMOG2Impl::getBackgroundImage(OutputArray backgroundImag
}
int nchannels = CV_MAT_CN(frameType);
CV_Assert( nchannels == 3 );
Mat meanBackground(frameSize, CV_8UC3, Scalar::all(0));
CV_Assert(nchannels == 1 || nchannels == 3);
Mat meanBackground(frameSize, CV_MAKETYPE(CV_8U, nchannels), Scalar::all(0));
int firstGaussianIdx = 0;
const GMM* gmm = (GMM*)bgmodel.data;
const Vec3f* mean = reinterpret_cast<const Vec3f*>(gmm + frameSize.width*frameSize.height*nmixtures);
const float* mean = reinterpret_cast<const float*>(gmm + frameSize.width*frameSize.height*nmixtures);
std::vector<float> meanVal(nchannels, 0.f);
for(int row=0; row<meanBackground.rows; row++)
{
for(int col=0; col<meanBackground.cols; col++)
{
int nmodes = bgmodelUsedModes.at<uchar>(row, col);
Vec3f meanVal;
float totalWeight = 0.f;
for(int gaussianIdx = firstGaussianIdx; gaussianIdx < firstGaussianIdx + nmodes; gaussianIdx++)
{
GMM gaussian = gmm[gaussianIdx];
meanVal += gaussian.weight * mean[gaussianIdx];
size_t meanPosition = gaussianIdx*nchannels;
for(int chn = 0; chn < nchannels; chn++)
{
meanVal[chn] += gaussian.weight * mean[meanPosition + chn];
}
totalWeight += gaussian.weight;
if(totalWeight > backgroundRatio)
break;
}
meanVal *= (1.f / totalWeight);
meanBackground.at<Vec3b>(row, col) = Vec3b(meanVal);
float invWeight = 1.f/totalWeight;
switch(nchannels)
{
case 1:
meanBackground.at<uchar>(row, col) = (uchar)(meanVal[0] * invWeight);
meanVal[0] = 0.f;
break;
case 3:
Vec3f& meanVec = *reinterpret_cast<Vec3f*>(&meanVal[0]);
meanBackground.at<Vec3b>(row, col) = Vec3b(meanVec * invWeight);
meanVec = 0.f;
break;
}
firstGaussianIdx += nmixtures;
}
}
switch(CV_MAT_CN(frameType))
{
case 1:
{
std::vector<Mat> channels;
split(meanBackground, channels);
channels[0].copyTo(backgroundImage);
break;
}
case 3:
{
meanBackground.copyTo(backgroundImage);
break;
}
default:
CV_Error(Error::StsUnsupportedFormat, "");
}
meanBackground.copyTo(backgroundImage);
}
Ptr<BackgroundSubtractorMOG2> createBackgroundSubtractorMOG2(int _history, double _varThreshold,
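With this change, background retrieval also works for single-channel input. A hedged usage sketch; the frame source is assumed:

.. code-block:: cpp

    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();
    cv::Mat gray, fgMask, background;
    // for each incoming grayscale frame:
    mog2->apply(gray, fgMask);
    mog2->getBackgroundImage(background); // now CV_8UC1 for 1-channel input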

@@ -33,7 +33,7 @@ int main( int /*argc*/, char** /*argv*/ )
{
int k, clusterCount = rng.uniform(2, MAX_CLUSTERS+1);
int i, sampleCount = rng.uniform(1, 1001);
Mat points(sampleCount, 2, CV_32F), labels;
Mat points(sampleCount, 1, CV_32FC2), labels;
clusterCount = MIN(clusterCount, sampleCount);
Mat centers;
