diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0c9518bbfd..bb6ba194dc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -318,7 +318,7 @@ if(UNIX)
   CHECK_INCLUDE_FILE(pthread.h HAVE_LIBPTHREAD)
   if(ANDROID)
     set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m log)
-  elseif(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD" OR ${CMAKE_SYSTEM_NAME} MATCHES "NetBSD")
+  elseif(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD|NetBSD|DragonFly")
     set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} m pthread)
   else()
     set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m pthread rt)
diff --git a/android/README.android b/android/README.android
index b9c688096d..46207b0305 100644
--- a/android/README.android
+++ b/android/README.android
@@ -1 +1 @@
-See http://opencv.willowgarage.com/wiki/Android
+See http://code.opencv.org/projects/opencv/wiki/OpenCV4Android
diff --git a/modules/imgproc/doc/motion_analysis_and_object_tracking.rst b/modules/imgproc/doc/motion_analysis_and_object_tracking.rst
index 34dae1ef5f..8a4d0507d8 100644
--- a/modules/imgproc/doc/motion_analysis_and_object_tracking.rst
+++ b/modules/imgproc/doc/motion_analysis_and_object_tracking.rst
@@ -161,34 +161,34 @@ Return value: detected phase shift (sub-pixel) between the two arrays.

 The function performs the following equations

-*
-    First it applies a Hanning window (see http://en.wikipedia.org/wiki/Hann\_function) to each image to remove possible edge effects. This window is cached until the array size changes to speed up processing time.
+* First it applies a Hanning window (see http://en.wikipedia.org/wiki/Hann\_function) to each image to remove possible edge effects. This window is cached until the array size changes to speed up processing time.

-*
-    Next it computes the forward DFTs of each source array:
-    .. math::
+* Next it computes the forward DFTs of each source array:
+
+  .. math::

         \mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}

-    where
-    :math:`\mathcal{F}` is the forward DFT.
+  where
+  :math:`\mathcal{F}` is the forward DFT.
+
+* It then computes the cross-power spectrum of each frequency domain array:

-*
-    It then computes the cross-power spectrum of each frequency domain array:

     .. math::

-        R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}
+      R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}
+
+* Next the cross-correlation is converted back into the time domain via the inverse DFT:

-*
-    Next the cross-correlation is converted back into the time domain via the inverse DFT:

     .. math::

-        r = \mathcal{F}^{-1}\{R\}
-*
-    Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to achieve sub-pixel accuracy.
+      r = \mathcal{F}^{-1}\{R\}
+
+* Finally, it computes the peak location and a 5x5 weighted centroid around the peak to achieve sub-pixel accuracy.
+
     .. math::

-        (\Delta x, \Delta y) = \texttt{weighted_centroid}\{\arg \max_{(x, y)}\{r\}\}
+        (\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}

 .. seealso::
     :ocv:func:`dft`,
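A note on the phaseCorrelate documentation above: the same pipeline is reachable from the Python bindings. A minimal sketch, assuming a 2.4-era cv2 where phaseCorrelate returns the (dx, dy) shift directly (later versions also return a response value); the frame filenames are hypothetical:

    import numpy as np
    import cv2

    # Load two single-channel frames; phaseCorrelate needs CV_32F or CV_64F input.
    a = np.float32(cv2.imread('frame0.png', 0))
    b = np.float32(cv2.imread('frame1.png', 0))

    # Hanning window -> forward DFTs -> cross-power spectrum -> inverse DFT ->
    # 5x5 weighted centroid: phaseCorrelate performs all the steps listed
    # above internally; the window can also be supplied explicitly:
    win = cv2.createHanningWindow(a.shape[::-1], cv2.CV_32F)
    dx, dy = cv2.phaseCorrelate(a, b, win)
    print 'detected shift:', (dx, dy)
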
diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp
index 0962af7a5b..88d1196a2e 100644
--- a/modules/imgproc/src/imgwarp.cpp
+++ b/modules/imgproc/src/imgwarp.cpp
@@ -1207,7 +1207,7 @@ struct DecimateAlpha
 };

 template<typename T, typename WT>
-static void resizeArea_( const Mat& src, Mat& dst, const DecimateAlpha* xofs, int xofs_count )
+static void resizeArea_( const Mat& src, Mat& dst, const DecimateAlpha* xofs, int xofs_count, double scale_y_)
 {
     Size ssize = src.size(), dsize = dst.size();
     int cn = src.channels();
@@ -1215,7 +1215,7 @@ static void resizeArea_( const Mat& src, Mat& dst, const DecimateAlpha* xofs, in
     AutoBuffer<WT> _buffer(dsize.width*2);
     WT *buf = _buffer, *sum = buf + dsize.width;
     int k, sy, dx, cur_dy = 0;
-    WT scale_y = (WT)ssize.height/dsize.height;
+    WT scale_y = (WT)scale_y_;

     CV_Assert( cn <= 4 );
     for( dx = 0; dx < dsize.width; dx++ )
@@ -1315,7 +1315,7 @@ typedef void (*ResizeAreaFastFunc)( const Mat& src, Mat& dst,
                                     int scale_x, int scale_y );

 typedef void (*ResizeAreaFunc)( const Mat& src, Mat& dst,
-                                const DecimateAlpha* xofs, int xofs_count );
+                                const DecimateAlpha* xofs, int xofs_count, double scale_y_);

 }
@@ -1532,7 +1532,7 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize,
             }
         }

-        func( src, dst, xofs, k );
+        func( src, dst, xofs, k, scale_y );
         return;
     }
diff --git a/modules/ml/src/boost.cpp b/modules/ml/src/boost.cpp
index 3d938c7408..ff7120c115 100644
--- a/modules/ml/src/boost.cpp
+++ b/modules/ml/src/boost.cpp
@@ -1132,7 +1132,7 @@ CvBoost::update_weights( CvBoostTree* tree )
     else
     {
         if( have_subsample )
-            _buf_size += data->buf->step*(sizeof(float)+sizeof(uchar));
+            _buf_size += data->buf->cols*(sizeof(float)+sizeof(uchar));
     }
     inn_buf.allocate(_buf_size);
     uchar* cur_buf_pos = (uchar*)inn_buf;
diff --git a/modules/stitching/include/opencv2/stitching/stitcher.hpp b/modules/stitching/include/opencv2/stitching/stitcher.hpp
index 352f2384ee..aab2ccae97 100644
--- a/modules/stitching/include/opencv2/stitching/stitcher.hpp
+++ b/modules/stitching/include/opencv2/stitching/stitcher.hpp
@@ -45,13 +45,13 @@

 #include "opencv2/core/core.hpp"
 #include "opencv2/features2d/features2d.hpp"
-#include "warpers.hpp"
-#include "detail/matchers.hpp"
-#include "detail/motion_estimators.hpp"
-#include "detail/exposure_compensate.hpp"
-#include "detail/seam_finders.hpp"
-#include "detail/blenders.hpp"
-#include "detail/camera.hpp"
+#include "opencv2/stitching/warpers.hpp"
+#include "opencv2/stitching/detail/matchers.hpp"
+#include "opencv2/stitching/detail/motion_estimators.hpp"
+#include "opencv2/stitching/detail/exposure_compensate.hpp"
+#include "opencv2/stitching/detail/seam_finders.hpp"
+#include "opencv2/stitching/detail/blenders.hpp"
+#include "opencv2/stitching/detail/camera.hpp"

 namespace cv {
diff --git a/modules/stitching/include/opencv2/stitching/warpers.hpp b/modules/stitching/include/opencv2/stitching/warpers.hpp
index 4c5cd59e40..d260a3029e 100644
--- a/modules/stitching/include/opencv2/stitching/warpers.hpp
+++ b/modules/stitching/include/opencv2/stitching/warpers.hpp
@@ -43,7 +43,7 @@
 #ifndef __OPENCV_STITCHING_WARPER_CREATORS_HPP__
 #define __OPENCV_STITCHING_WARPER_CREATORS_HPP__

-#include "detail/warpers.hpp"
+#include "opencv2/stitching/detail/warpers.hpp"

 namespace cv {
diff --git a/samples/android/tutorial-4-mixed/jni/Android.mk b/samples/android/tutorial-4-mixed/jni/Android.mk
index a20c24693a..bd06352e14 100644
--- a/samples/android/tutorial-4-mixed/jni/Android.mk
+++ b/samples/android/tutorial-4-mixed/jni/Android.mk
@@ -3,6 +3,8 @@ LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)

 OPENCV_CAMERA_MODULES:=off
+OPENCV_INSTALL_MODULES:=on
+#OPENCV_LIB_TYPE:=SHARED <- this is default

 include ../includeOpenCV.mk
 ifeq ("$(wildcard $(OPENCV_MK_PATH))","")
diff --git a/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
index 3bc60fefbd..57ba3eac70 100644
--- a/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
+++ b/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
@@ -44,8 +44,8 @@ int main( int argc, char** argv )
     int histSize[] = { h_bins, s_bins };

     // hue varies from 0 to 256, saturation from 0 to 180
-    float h_ranges[] = { 0, 256 };
-    float s_ranges[] = { 0, 180 };
+    float s_ranges[] = { 0, 256 };
+    float h_ranges[] = { 0, 180 };

     const float* ranges[] = { h_ranges, s_ranges };
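For reference, the swap above matches OpenCV's 8-bit HSV layout: hue is stored in [0, 180) and saturation in [0, 256) (the comment kept as context still states it backwards). A minimal Python sketch of the same H-S histogram setup, with a hypothetical input image:

    import cv2

    src = cv2.imread('input.png')              # hypothetical input image
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    # 50 hue bins over [0, 180), 60 saturation bins over [0, 256)
    hist = cv2.calcHist([hsv], [0, 1], None, [50, 60], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 1, cv2.NORM_MINMAX)
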
diff --git a/samples/python2/common.py b/samples/python2/common.py
index 54e23d60d8..0eb49b8ea6 100644
--- a/samples/python2/common.py
+++ b/samples/python2/common.py
@@ -2,6 +2,7 @@ import numpy as np
 import cv2
 import os
 from contextlib import contextmanager
+import itertools as it

 image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']

@@ -170,3 +171,22 @@ class RectSelector:
             return
         x0, y0, x1, y1 = self.drag_rect
         cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
+
+
+def grouper(n, iterable, fillvalue=None):
+    '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
+    args = [iter(iterable)] * n
+    return it.izip_longest(fillvalue=fillvalue, *args)
+
+def mosaic(w, imgs):
+    '''Make a grid from images.
+
+    w    -- number of grid columns
+    imgs -- images (must have same size and format)
+    '''
+    imgs = iter(imgs)
+    img0 = imgs.next()
+    pad = np.zeros_like(img0)
+    imgs = it.chain([img0], imgs)
+    rows = grouper(w, imgs, pad)
+    return np.vstack(map(np.hstack, rows))
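A quick usage sketch for the new grouper/mosaic helpers (illustrative only; assumes it is run from samples/python2 so that common.py is importable):

    import numpy as np
    import cv2
    from common import mosaic

    # 16 random 20x20 tiles arranged into a 4-column grid -> one 80x80 image;
    # an incomplete last row would be padded with black tiles by grouper().
    tiles = [np.uint8(np.random.randint(0, 256, (20, 20))) for i in xrange(16)]
    grid = mosaic(4, tiles)
    cv2.imshow('mosaic', grid)
    cv2.waitKey()
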
diff --git a/samples/python2/digits.png b/samples/python2/digits.png
new file mode 100644
index 0000000000..01cdd2972c
Binary files /dev/null and b/samples/python2/digits.png differ
diff --git a/samples/python2/digits.py b/samples/python2/digits.py
new file mode 100644
index 0000000000..f4bb0a5cd0
--- /dev/null
+++ b/samples/python2/digits.py
@@ -0,0 +1,78 @@
+'''
+Neural network digit recognition sample.
+Usage:
+   digits.py
+
+   Sample loads a dataset of handwritten digits from 'digits.png'.
+   Then it trains a neural network classifier on it and evaluates
+   its classification accuracy.
+'''
+
+import numpy as np
+import cv2
+from common import mosaic
+
+def unroll_responses(responses, class_n):
+    '''[1, 0, 2, ...] -> [[0, 1, 0], [1, 0, 0], [0, 0, 1], ...]'''
+    sample_n = len(responses)
+    new_responses = np.zeros((sample_n, class_n), np.float32)
+    new_responses[np.arange(sample_n), responses] = 1
+    return new_responses
+
+
+SZ = 20 # size of each digit is SZ x SZ
+CLASS_N = 10
+digits_img = cv2.imread('digits.png', 0)
+
+# prepare dataset
+h, w = digits_img.shape
+digits = [np.hsplit(row, w/SZ) for row in np.vsplit(digits_img, h/SZ)]
+digits = np.float32(digits).reshape(-1, SZ*SZ)
+N = len(digits)
+labels = np.repeat(np.arange(CLASS_N), N/CLASS_N)
+
+# split it into train and test subsets
+shuffle = np.random.permutation(N)
+train_n = int(0.9*N)
+digits_train, digits_test = np.split(digits[shuffle], [train_n])
+labels_train, labels_test = np.split(labels[shuffle], [train_n])
+
+# train model
+model = cv2.ANN_MLP()
+layer_sizes = np.int32([SZ*SZ, 25, CLASS_N])
+model.create(layer_sizes)
+params = dict( term_crit = (cv2.TERM_CRITERIA_COUNT, 100, 0.01),
+               train_method = cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
+               bp_dw_scale = 0.001,
+               bp_moment_scale = 0.0 )
+print 'training...'
+labels_train_unrolled = unroll_responses(labels_train, CLASS_N)
+model.train(digits_train, labels_train_unrolled, None, params=params)
+model.save('dig_nn.dat')
+model.load('dig_nn.dat')
+
+def evaluate(model, samples, labels):
+    '''Evaluates classifier performance on a given labeled sample set.'''
+    ret, resp = model.predict(samples)
+    resp = resp.argmax(-1)
+    error_mask = (resp == labels)
+    accuracy = error_mask.mean()
+    return accuracy, error_mask
+
+# evaluate model
+train_accuracy, _ = evaluate(model, digits_train, labels_train)
+print 'train accuracy: ', train_accuracy
+test_accuracy, test_error_mask = evaluate(model, digits_test, labels_test)
+print 'test accuracy: ', test_accuracy
+
+# visualize test results (misclassified digits are shown in red)
+vis = []
+for img, flag in zip(digits_test, test_error_mask):
+    img = np.uint8(img).reshape(SZ, SZ)
+    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+    if not flag:
+        img[...,:2] = 0
+    vis.append(img)
+vis = mosaic(25, vis)
+cv2.imshow('test', vis)
+cv2.waitKey()
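A closing note on unroll_responses in digits.py: the one-hot "unrolling" it implements can be sanity-checked against a plain identity-matrix row lookup, e.g.:

    import numpy as np

    responses = np.int32([1, 0, 2])
    one_hot = np.eye(3, dtype=np.float32)[responses]
    # one_hot -> [[0, 1, 0],
    #             [1, 0, 0],
    #             [0, 0, 1]]   (matches unroll_responses(responses, 3))
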