Merge branch 2.4

pull/201/merge
Andrey Kamaev 12 years ago
commit e10ee89ec4
 3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so | 0
 3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so | 0
 3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so | BIN
 3rdparty/lib/armeabi/libnative_camera_r4.1.1.so | 0
 3rdparty/lib/armeabi/libnative_camera_r4.2.0.so | BIN
 3rdparty/lib/mips/libnative_camera_r4.0.3.so | 0
 3rdparty/lib/mips/libnative_camera_r4.1.1.so | BIN
 3rdparty/lib/mips/libnative_camera_r4.2.0.so | BIN
 3rdparty/lib/x86/libnative_camera_r4.1.1.so | 0
 3rdparty/lib/x86/libnative_camera_r4.2.0.so | BIN
 android/android.toolchain.cmake | 3
 android/scripts/camera_build.conf | 5
 android/scripts/cmake_android_all_cameras.py | 19
 android/service/engine/AndroidManifest.xml | 6
 android/service/engine/CMakeLists.txt | 24
 android/service/engine/jni/include/EngineCommon.h | 6
 android/service/engine/jni/include/IOpenCVEngine.h | 6
 cmake/OpenCVDetectAndroidSDK.cmake | 9
 cmake/OpenCVDetectCUDA.cmake | 9
 doc/check_docs.py | 2
 doc/check_docs2.py | 2
 doc/conf.py | 2
 doc/ocv.py | 1
 doc/opencv2manager.pdf | BIN
 doc/opencv2refman.pdf | 84565
 doc/opencv_cheatsheet.pdf | BIN
 doc/opencv_tutorials.pdf | 61845
 doc/opencv_user.pdf | BIN
 doc/patch_refman_latex.py | 2
 doc/pattern_tools/gen_pattern.py | 2
 doc/pattern_tools/svgfig.py | 2
 doc/reformat.py | 2
 doc/tutorials/introduction/android_binary_package/android_dev_intro.rst | 6
 modules/androidcamera/CMakeLists.txt | 2
 modules/androidcamera/camera_wrapper/camera_wrapper.cpp | 10
 modules/androidcamera/src/camera_activity.cpp | 12
 modules/calib3d/perf/perf_pnp.cpp | 1
 modules/contrib/doc/facerec/src/create_csv.py | 2
 modules/contrib/doc/retina/index.rst | 5
 modules/core/perf/perf_arithm.cpp | 1
 modules/core/perf/perf_bitwise.cpp | 4
 modules/core/perf/perf_dot.cpp | 1
 modules/core/perf/perf_norm.cpp | 1
 modules/core/perf/perf_reduce.cpp | 2
 modules/core/src/lapack.cpp | 26
 modules/features2d/perf/perf_batchDistance.cpp | 2
 modules/gpu/doc/image_processing.rst | 19
 modules/gpu/include/opencv2/gpu/device/utility.hpp | 2
 modules/gpu/misc/mark_nvidia.py | 2
 modules/gpu/src/cuda/integral_image.cu | 7
 modules/gpu/src/cuda/lbp.cu | 1
 modules/gpu/src/cuda/surf.cu | 32
 modules/gpu/src/gftt.cpp | 6
 modules/gpu/src/imgproc.cpp | 42
 modules/gpu/src/surf.cpp | 10
 modules/gpu/test/test_video.cpp | 15
 modules/highgui/src/cap.cpp | 1
 modules/highgui/src/cap_ffmpeg.cpp | 40
 modules/highgui/src/cap_ffmpeg_impl.hpp | 204
 modules/highgui/src/cap_gstreamer.cpp | 38
 modules/highgui/src/cap_libv4l.cpp | 7
 modules/highgui/test/test_ffmpeg.cpp | 225
 modules/imgproc/perf/perf_blur.cpp | 10
 modules/imgproc/perf/perf_cvt_color.cpp | 2
 modules/imgproc/perf/perf_filter2d.cpp | 2
 modules/imgproc/perf/perf_houghLines.cpp | 2
 modules/imgproc/perf/perf_integral.cpp | 2
 modules/imgproc/perf/perf_resize.cpp | 11
 modules/imgproc/perf/perf_threshold.cpp | 1
 modules/imgproc/perf/perf_warp.cpp | 12
 modules/java/android_lib/res/values/attrs.xml | 11
 modules/java/check-tests.py | 2
 modules/java/generator/gen_java.py | 2
 modules/java/generator/gen_javadoc.py | 2
 modules/java/generator/rst_parser.py | 2
 modules/java/generator/src/java/android+CameraBridgeViewBase.java | 68
 modules/java/generator/src/java/android+FpsMeter.java | 66
 modules/java/generator/src/java/android+JavaCameraView.java | 52
 modules/java/generator/src/java/android+NativeCameraView.java | 20
 modules/objdetect/src/cascadedetect.cpp | 4
 modules/photo/perf/perf_inpaint.cpp | 2
 modules/python/src2/cv.py | 2
 modules/python/src2/gen.py | 2
 modules/python/src2/gen2.py | 2
 modules/python/src2/hdr_parser.py | 2
 modules/python/test/calchist.py | 3
 modules/python/test/camera_calibration.py | 2
 modules/python/test/findstereocorrespondence.py | 2
 modules/python/test/goodfeatures.py | 2
 modules/python/test/leak1.py | 2
 modules/python/test/leak2.py | 2
 modules/python/test/leak3.py | 2
 modules/python/test/leak4.py | 2
 modules/python/test/precornerdetect.py | 2
 modules/python/test/test.py | 2
 modules/python/test/ticket_6.py | 2
 modules/python/test/tickets.py | 2
 modules/python/test/transformations.py | 2
 modules/ts/misc/chart.py | 2
 modules/ts/misc/color.py | 2
 Some files were not shown because too many files have changed in this diff.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

@@ -596,6 +596,9 @@ if( BUILD_WITH_ANDROID_NDK )
endif()
if( NOT __availableToolchains )
file( GLOB __availableToolchains RELATIVE "${ANDROID_NDK}/toolchains" "${ANDROID_NDK}/toolchains/*" )
if( __availableToolchains )
list(SORT __availableToolchains) # we need clang to go after gcc
endif()
__LIST_FILTER( __availableToolchains "^[.]" )
__LIST_FILTER( __availableToolchains "llvm" )
__GLOB_NDK_TOOLCHAINS( __availableToolchains )

@@ -16,3 +16,8 @@ native_camera_r4.0.0; armeabi-v7a; 14; /home/alexander/Projects/AndroidSource/4.
native_camera_r4.1.1; armeabi; 14; /home/alexander/Projects/AndroidSource/4.1.1
native_camera_r4.1.1; armeabi-v7a; 14; /home/alexander/Projects/AndroidSource/4.1.1
native_camera_r4.1.1; x86; 14; /home/alexander/Projects/AndroidSource/4.1.1
native_camera_r4.1.1; mips; 14; /home/alexander/Projects/AndroidSource/4.1.1_mips
native_camera_r4.2.0; armeabi-v7a; 14; /home/alexander/Projects/AndroidSource/4.2
native_camera_r4.2.0; armeabi; 14; /home/alexander/Projects/AndroidSource/4.2
native_camera_r4.2.0; x86; 14; /home/alexander/Projects/AndroidSource/4.2
native_camera_r4.2.0; mips; 14; /home/alexander/Projects/AndroidSource/4.2

@@ -33,20 +33,27 @@ for s in ConfFile.readlines():
continue
shutil.rmtree(os.path.join(AndroidTreeRoot, "out", "target", "product", "generic", "system"), ignore_errors=True)
LinkerLibs = os.path.join(AndroidTreeRoot, "bin_arm", "system")
if (Arch == "x86"):
shutil.copytree(os.path.join(AndroidTreeRoot, "bin_x86", "system"), os.path.join(AndroidTreeRoot, "out", "target", "product", "generic", "system"))
LinkerLibs = os.path.join(AndroidTreeRoot, "bin_x86", "system")
elif (Arch == "mips"):
shutil.copytree(os.path.join(AndroidTreeRoot, "bin_mips", "system"), os.path.join(AndroidTreeRoot, "out", "target", "product", "generic", "system"))
else:
shutil.copytree(os.path.join(AndroidTreeRoot, "bin_arm", "system"), os.path.join(AndroidTreeRoot, "out", "target", "product", "generic", "system"))
LinkerLibs = os.path.join(AndroidTreeRoot, "bin_mips", "system")
if (not os.path.exists(LinkerLibs)):
print("Error: Platform libs for linker in path \"%s\" not found" % LinkerLibs)
print("Building %s for %s\t[\033[91mFAILED\033[0m]" % (MakeTarget, Arch))
continue
shutil.copytree(LinkerLibs, os.path.join(AndroidTreeRoot, "out", "target", "product", "generic", "system"))
os.chdir(BuildDir)
BuildLog = os.path.join(BuildDir, "build.log")
CmakeCmdLine = "cmake -DCMAKE_TOOLCHAIN_FILE=../android.toolchain.cmake -DANDROID_SOURCE_TREE=\"%s\" -DANDROID_NATIVE_API_LEVEL=\"%s\" -DANDROID_ABI=\"%s\" -DANDROID_STL=stlport_static ../../ > \"%s\" 2>&1" % (AndroidTreeRoot, NativeApiLevel, Arch, BuildLog)
MakeCmdLine = "make %s >> \"%s\" 2>&1" % (MakeTarget, BuildLog);
print(CmakeCmdLine)
#print(CmakeCmdLine)
os.system(CmakeCmdLine)
print(MakeCmdLine)
#print(MakeCmdLine)
os.system(MakeCmdLine)
os.chdir(HomeDir)
CameraLib = os.path.join(BuildDir, "lib", Arch, "lib" + MakeTarget + ".so")

@@ -1,10 +1,10 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.opencv.engine"
android:versionCode="22@ANDROID_PLATFORM_VERSION_CODE@"
android:versionName="2.2" >
android:versionCode="23@ANDROID_PLATFORM_VERSION_CODE@"
android:versionName="2.3" >
<uses-sdk android:minSdkVersion="8" />
<uses-sdk android:minSdkVersion="@ANDROID_NATIVE_API_LEVEL@" />
<uses-feature android:name="android.hardware.touchscreen" android:required="false"/>
<application

@@ -2,23 +2,27 @@ set(engine OpenCVEngine)
set(JNI_LIB_NAME ${engine} ${engine}_jni)
unset(__android_project_chain CACHE)
add_android_project(opencv_engine "${CMAKE_CURRENT_SOURCE_DIR}" SDK_TARGET 8 ${ANDROID_SDK_TARGET} IGNORE_JAVA ON)
add_android_project(opencv_engine "${CMAKE_CURRENT_SOURCE_DIR}" SDK_TARGET 8 ${ANDROID_SDK_TARGET} IGNORE_JAVA ON IGNORE_MANIFEST ON )
set(ANDROID_PLATFORM_VERSION_CODE "0")
if(ARMEABI_V7A)
if (ANDROID_NATIVE_API_LEVEL LESS 9)
set(ANDROID_PLATFORM_VERSION_CODE "2")
elseif(ARMEABI_V6)
set(ANDROID_PLATFORM_VERSION_CODE "1")
elseif(ARMEABI)
set(ANDROID_PLATFORM_VERSION_CODE "1")
elseif(X86)
set(ANDROID_PLATFORM_VERSION_CODE "3")
elseif(MIPS)
set(ANDROID_PLATFORM_VERSION_CODE "4")
else()
message(WARNING "Can not automatically determine the value for ANDROID_PLATFORM_VERSION_CODE")
set(ANDROID_PLATFORM_VERSION_CODE "3")
endif()
elseif(ARMEABI_V6)
set(ANDROID_PLATFORM_VERSION_CODE "1")
elseif(ARMEABI)
set(ANDROID_PLATFORM_VERSION_CODE "1")
elseif(X86)
set(ANDROID_PLATFORM_VERSION_CODE "4")
elseif(MIPS)
set(ANDROID_PLATFORM_VERSION_CODE "5")
else()
message(WARNING "Can not automatically determine the value for ANDROID_PLATFORM_VERSION_CODE")
endif()
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/${ANDROID_MANIFEST_FILE}" "${OpenCV_BINARY_DIR}/android/service/engine/.build/${ANDROID_MANIFEST_FILE}" @ONLY)

@@ -5,10 +5,16 @@
#undef LOG_TAG
#define LOG_TAG "OpenCVEngine"
// OpenCV Engine API version
#ifndef OPEN_CV_ENGINE_VERSION
#define OPEN_CV_ENGINE_VERSION 2
#endif
#define LIB_OPENCV_INFO_NAME "libopencv_info.so"
// OpenCV Manager package name
#define OPENCV_ENGINE_PACKAGE "org.opencv.engine"
// Class name of the OpenCV engine binder object. It is needed for connecting to the service
#define OPECV_ENGINE_CLASSNAME "org.opencv.engine.OpenCVEngineInterface"
#endif
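Note: the #ifndef guard added above makes OPEN_CV_ENGINE_VERSION overridable at build time. A minimal standalone sketch (illustrative only, not part of the commit) of how such a guard behaves:

    // Compile with e.g. -DOPEN_CV_ENGINE_VERSION=3 to override the default.
    #include <cstdio>

    #ifndef OPEN_CV_ENGINE_VERSION
    #define OPEN_CV_ENGINE_VERSION 2  // default used when the build does not override it
    #endif

    int main()
    {
        std::printf("engine API version: %d\n", OPEN_CV_ENGINE_VERSION);
        return 0;
    }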

@@ -4,11 +4,7 @@
#include <binder/IInterface.h>
#include <binder/Parcel.h>
#include <utils/String16.h>
// OpenCV Manager package name
#define OPENCV_ENGINE_PACKAGE "org.opencv.engine"
// Class name of the OpenCV engine binder object. It is needed for connecting to the service
#define OPECV_ENGINE_CLASSNAME "org.opencv.engine.OpenCVEngineInterface"
#include "EngineCommon.h"
enum EngineMethonID
{

@@ -179,7 +179,7 @@ unset(__android_project_chain CACHE)
#add_android_project(target_name ${path} NATIVE_DEPS opencv_core LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11)
macro(add_android_project target path)
# parse arguments
set(android_proj_arglist NATIVE_DEPS LIBRARY_DEPS SDK_TARGET IGNORE_JAVA)
set(android_proj_arglist NATIVE_DEPS LIBRARY_DEPS SDK_TARGET IGNORE_JAVA IGNORE_MANIFEST)
set(__varname "android_proj_")
foreach(v ${android_proj_arglist})
set(${__varname}${v} "")
@@ -220,9 +220,13 @@ macro(add_android_project target path)
# get project sources
file(GLOB_RECURSE android_proj_files RELATIVE "${path}" "${path}/res/*" "${path}/src/*")
if(NOT android_proj_IGNORE_MANIFEST)
list(APPEND android_proj_files ${ANDROID_MANIFEST_FILE})
endif()
# copy sources out from the build tree
set(android_proj_file_deps "")
foreach(f ${android_proj_files} ${ANDROID_MANIFEST_FILE})
foreach(f ${android_proj_files})
add_custom_command(
OUTPUT "${android_proj_bin_dir}/${f}"
COMMAND ${CMAKE_COMMAND} -E copy "${path}/${f}" "${android_proj_bin_dir}/${f}"
@@ -324,6 +328,7 @@ macro(add_android_project target path)
install(FILES "${OpenCV_BINARY_DIR}/bin/${target}.apk" DESTINATION "samples" COMPONENT main)
get_filename_component(sample_dir "${path}" NAME)
#java part
list(REMOVE_ITEM android_proj_files ${ANDROID_MANIFEST_FILE})
foreach(f ${android_proj_files} ${ANDROID_MANIFEST_FILE})
get_filename_component(install_subdir "${f}" PATH)
install(FILES "${android_proj_bin_dir}/${f}" DESTINATION "samples/${sample_dir}/${install_subdir}" COMPONENT main)

@@ -3,8 +3,13 @@ if(${CMAKE_VERSION} VERSION_LESS "2.8.3")
return()
endif()
if (NOT MSVC AND NOT CMAKE_COMPILER_IS_GNUCXX OR MINGW)
message(STATUS "CUDA compilation was disabled (due to unsupported host compiler).")
if (WIN32 AND NOT MSVC)
message(STATUS "CUDA compilation is disabled (only the Visual Studio compiler is supported on your platform).")
return()
endif()
if (CMAKE_COMPILER_IS_GNUCXX AND NOT APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
message(STATUS "CUDA compilation is disabled (Clang is not supported on your platform).")
return()
endif()

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys, glob
sys.path.append("../modules/python/src2/")

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import os, sys, fnmatch, re
sys.path.append("../modules/python/src2/")

@@ -1,3 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# opencvstd documentation build configuration file, created by

@@ -1,3 +1,4 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ocv domain, a modified copy of sphinx.domains.cpp + shpinx.domains.python.

Binary file not shown.

File diff suppressed because it is too large.

Binary file not shown.

File diff suppressed because it is too large.

Binary file not shown.

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys
f=open(sys.argv[1], "rt")

@@ -1,3 +1,5 @@
#!/usr/bin/env python
"""gen_pattern.py
To run:
-c 10 -r 12 -o out.svg

@@ -1,3 +1,5 @@
#!/usr/bin/env python
# svgfig.py copyright (C) 2008 Jim Pivarski <jpivarski@gmail.com>
#
# This program is free software; you can redistribute it and/or

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import os, sys, re
finput=open(sys.argv[1], "rt")

@@ -334,7 +334,11 @@ and exceptions are used in C++, it also should be created. Example of the file :
APP_STL := gnustl_static
APP_CPPFLAGS := -frtti -fexceptions
APP_ABI := armeabi-v7a
APP_ABI := all
.. note:: We recommend setting ``APP_ABI := all`` for all targets. If you want to specify the
target explicitly, use ``armeabi`` for ARMv5/ARMv6, ``armeabi-v7a`` for ARMv7, ``x86``
for Intel Atom or ``mips`` for MIPS.
.. _NDK_build_cli:

@@ -6,7 +6,7 @@ set(the_description "Auxiliary module for Android native camera support")
set(OPENCV_MODULE_TYPE STATIC)
ocv_define_module(androidcamera INTERNAL opencv_core log dl)
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/camera_wrapper")
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/camera_wrapper" "${OpenCV_SOURCE_DIR}/android/service/engine/jni/include")
# Android source tree for native camera
SET (ANDROID_SOURCE_TREE "ANDROID_SOURCE_TREE-NOTFOUND" CACHE PATH

@@ -1,4 +1,4 @@
#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1) && !defined(ANDROID_r4_0_0) && !defined(ANDROID_r4_0_3) && !defined(ANDROID_r4_1_1)
#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1) && !defined(ANDROID_r4_0_0) && !defined(ANDROID_r4_0_3) && !defined(ANDROID_r4_1_1) && !defined(ANDROID_r4_2_0)
# error Building camera wrapper for your version of Android is not supported by OpenCV. You need to modify OpenCV sources in order to compile camera wrapper for your version of Android.
#endif
@@ -18,7 +18,7 @@
# define MAGIC_OPENCV_TEXTURE_ID (0x10)
#else // defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
//TODO: This is either 2.2 or 2.3. Include the headers for ISurface.h access
#if defined(ANDROID_r4_1_1)
#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0)
#include <gui/ISurface.h>
#include <gui/BufferQueue.h>
#else
@@ -60,7 +60,7 @@ using namespace android;
void debugShowFPS();
#if defined(ANDROID_r4_1_1)
#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0)
class ConsumerListenerStub: public BufferQueue::ConsumerListener
{
public:
@@ -280,7 +280,7 @@ public:
}
virtual void postData(int32_t msgType, const sp<IMemory>& dataPtr
#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1)
#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0)
,camera_frame_metadata_t*
#endif
)
@@ -526,7 +526,7 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
pdstatus = camera->setPreviewTexture(surfaceTexture);
if (pdstatus != 0)
LOGE("initCameraConnect: failed setPreviewTexture call; camera might not work correctly");
#elif defined(ANDROID_r4_1_1)
#elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0)
sp<BufferQueue> bufferQueue = new BufferQueue();
sp<BufferQueue::ConsumerListener> queueListener = new ConsumerListenerStub();
bufferQueue->consumerConnect(queueListener);
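Note: the camera wrapper gates every code path on per-release macros such as ANDROID_r4_1_1, and this merge threads the new ANDROID_r4_2_0 define through each gate. A compilable sketch of that pattern (branch bodies are made up; the release macro is assumed to come from the build, e.g. -DANDROID_r4_2_0):

    #include <cstdio>

    int main()
    {
    #if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0)
        std::printf("using the BufferQueue-based preview path\n");  // 4.1/4.2 branch
    #elif defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
        std::printf("using the SurfaceTexture preview path\n");     // 4.0 branch
    #else
        std::printf("no supported Android release define set\n");
    #endif
        return 0;
    }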

@@ -9,6 +9,7 @@
#include <opencv2/core/version.hpp>
#include "camera_activity.hpp"
#include "camera_wrapper.h"
#include "EngineCommon.h"
#undef LOG_TAG
#undef LOGE
@@ -267,12 +268,13 @@ void CameraWrapperConnector::fillListWrapperLibs(const string& folderPath, vecto
std::string CameraWrapperConnector::getDefaultPathLibFolder()
{
const string packageList[] = {"tegra3", "armv7a_neon", "armv7a", "armv5", "x86"};
for (size_t i = 0; i < 5; i++)
#define BIN_PACKAGE_NAME(x) "org.opencv.lib_v" CVAUX_STR(CV_MAJOR_VERSION) CVAUX_STR(CV_MINOR_VERSION) "_" x
const char* const packageList[] = {BIN_PACKAGE_NAME("armv7a"), OPENCV_ENGINE_PACKAGE};
for (size_t i = 0; i < sizeof(packageList)/sizeof(packageList[0]); i++)
{
char path[128];
sprintf(path, "/data/data/org.opencv.lib_v%d%d_%s/lib/", CV_MAJOR_VERSION, CV_MINOR_VERSION, packageList[i].c_str());
LOGD("Trying package \"%s\" (\"%s\")", packageList[i].c_str(), path);
sprintf(path, "/data/data/%s/lib/", packageList[i]);
LOGD("Trying package \"%s\" (\"%s\")", packageList[i], path);
DIR* dir = opendir(path);
if (!dir)
@@ -427,7 +429,6 @@ void CameraActivity::applyProperties()
int CameraActivity::getFrameWidth()
{
LOGD("CameraActivity::getFrameWidth()");
if (frameWidth <= 0)
frameWidth = getProperty(ANDROID_CAMERA_PROPERTY_FRAMEWIDTH);
return frameWidth;
@@ -435,7 +436,6 @@ int CameraActivity::getFrameWidth()
int CameraActivity::getFrameHeight()
{
LOGD("CameraActivity::getFrameHeight()");
if (frameHeight <= 0)
frameHeight = getProperty(ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT);
return frameHeight;
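Note: the new BIN_PACKAGE_NAME macro assembles the OpenCV package directory from the library version at compile time. A self-contained sketch of the expansion, using plain stringize macros as stand-ins for CVAUX_STR and assuming the 2.4 version macros:

    #include <cstdio>

    #define STR_HELPER(x) #x
    #define STR(x) STR_HELPER(x)          // stand-in for CVAUX_STR
    #define CV_MAJOR_VERSION 2            // values assumed for illustration
    #define CV_MINOR_VERSION 4
    #define BIN_PACKAGE_NAME(x) "org.opencv.lib_v" STR(CV_MAJOR_VERSION) STR(CV_MINOR_VERSION) "_" x

    int main()
    {
        char path[128];
        std::snprintf(path, sizeof(path), "/data/data/%s/lib/", BIN_PACKAGE_NAME("armv7a"));
        std::printf("%s\n", path);        // /data/data/org.opencv.lib_v24_armv7a/lib/
        return 0;
    }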

@@ -86,6 +86,7 @@ PERF_TEST(PointsNum_Algo, solveP3P)
add(points2d, noise, points2d);
declare.in(points3d, points2d);
declare.time(100);
TEST_CYCLE_N(1000)
{

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys
import os.path

@@ -208,7 +208,7 @@ Retina::getMagno
Retina::getParameters
+++++++++++++++++++++
.. ocv:function:: struct Retina::RetinaParameters Retina::getParameters()
.. ocv:function:: Retina::RetinaParameters Retina::getParameters()
Retrieve the current parameter values in a *Retina::RetinaParameters* structure
@@ -323,7 +323,8 @@ Retina::RetinaParameters
========================
.. ocv:struct:: Retina::RetinaParameters
This structure merges all the parameters that can be adjusted through the **Retina::setup()**, **Retina::setupOPLandIPLParvoChannel** and **Retina::setupIPLMagnoChannel** setup methods
This structure merges all the parameters that can be adjusted through the **Retina::setup()**, **Retina::setupOPLandIPLParvoChannel** and **Retina::setupIPLMagnoChannel** setup methods
Parameters structure for better clarity; check explanations in the comments of the methods setupOPLandIPLParvoChannel and setupIPLMagnoChannel. ::
class RetinaParameters{

@@ -123,6 +123,7 @@ PERF_TEST_P(Size_MatType, add, TYPICAL_MATS_CORE_ARITHM)
cv::Mat c = Mat(sz, type);
declare.in(a, b, WARMUP_RNG).out(c);
declare.time(50);
if (CV_MAT_DEPTH(type) == CV_32S)
{

@@ -19,6 +19,7 @@ PERF_TEST_P(Size_MatType, bitwise_not, TYPICAL_MATS_BITW_ARITHM)
cv::Mat c = Mat(sz, type);
declare.in(a, WARMUP_RNG).out(c);
declare.time(100);
TEST_CYCLE() cv::bitwise_not(a, c);
@@ -34,6 +35,7 @@ PERF_TEST_P(Size_MatType, bitwise_and, TYPICAL_MATS_BITW_ARITHM)
cv::Mat c = Mat(sz, type);
declare.in(a, b, WARMUP_RNG).out(c);
declare.time(100);
TEST_CYCLE() bitwise_and(a, b, c);
@@ -49,6 +51,7 @@ PERF_TEST_P(Size_MatType, bitwise_or, TYPICAL_MATS_BITW_ARITHM)
cv::Mat c = Mat(sz, type);
declare.in(a, b, WARMUP_RNG).out(c);
declare.time(100);
TEST_CYCLE() bitwise_or(a, b, c);
@@ -64,6 +67,7 @@ PERF_TEST_P(Size_MatType, bitwise_xor, TYPICAL_MATS_BITW_ARITHM)
cv::Mat c = Mat(sz, type);
declare.in(a, b, WARMUP_RNG).out(c);
declare.time(100);
TEST_CYCLE() bitwise_xor(a, b, c);

@@ -21,6 +21,7 @@ PERF_TEST_P( MatType_Length, dot,
Mat b(size, size, type);
declare.in(a, b, WARMUP_RNG);
declare.time(100);
double product;

@@ -150,6 +150,7 @@ PERF_TEST_P(Size_MatType_NormType, normalize_mask,
if(normType==NORM_L2) alpha = (double)src.total()/10;
declare.in(src, WARMUP_RNG).in(mask).out(dst);
declare.time(100);
TEST_CYCLE() normalize(src, dst, alpha, 0., normType, -1, mask);

@@ -32,6 +32,7 @@ PERF_TEST_P(Size_MatType_ROp, reduceR,
Mat vec(1, sz.width, ddepth < 0 ? matType : ddepth);
declare.in(src, WARMUP_RNG).out(vec);
declare.time(100);
TEST_CYCLE() reduce(src, vec, 0, reduceOp, ddepth);
@@ -58,6 +59,7 @@ PERF_TEST_P(Size_MatType_ROp, reduceC,
Mat vec(sz.height, 1, ddepth < 0 ? matType : ddepth);
declare.in(src, WARMUP_RNG).out(vec);
declare.time(100);
TEST_CYCLE() reduce(src, vec, 1, reduceOp, ddepth);
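Note: many of the perf hunks in this merge only add a declare.time(...) call, which enlarges the test's time budget. A minimal sketch of the perf-test shape they modify (assumes the 2.4 modules/ts framework; the test name and the measured operation are illustrative):

    #include "opencv2/ts/ts_perf.hpp"
    #include "opencv2/core/core.hpp"

    using namespace cv;
    using namespace perf;

    PERF_TEST_P(Size_MatType, example_add, TYPICAL_MATS_CORE_ARITHM)
    {
        Size sz   = get<0>(GetParam());
        int  type = get<1>(GetParam());

        Mat a(sz, type), b(sz, type), c(sz, type);
        declare.in(a, b, WARMUP_RNG).out(c);
        declare.time(100);            // raise the default time limit, as the hunks above do

        TEST_CYCLE() add(a, b, c);    // the measured operation

        SANITY_CHECK(c);
    }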

@@ -1097,25 +1097,25 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
double d = det3(Sf);
if( d != 0. )
{
float CV_DECL_ALIGNED(16) t[12];
double t[12];
result = true;
d = 1./d;
t[0] = (float)(((double)Sf(1,1) * Sf(2,2) - (double)Sf(1,2) * Sf(2,1)) * d);
t[1] = (float)(((double)Sf(0,2) * Sf(2,1) - (double)Sf(0,1) * Sf(2,2)) * d);
t[2] = (float)(((double)Sf(0,1) * Sf(1,2) - (double)Sf(0,2) * Sf(1,1)) * d);
t[0] = (((double)Sf(1,1) * Sf(2,2) - (double)Sf(1,2) * Sf(2,1)) * d);
t[1] = (((double)Sf(0,2) * Sf(2,1) - (double)Sf(0,1) * Sf(2,2)) * d);
t[2] = (((double)Sf(0,1) * Sf(1,2) - (double)Sf(0,2) * Sf(1,1)) * d);
t[3] = (float)(((double)Sf(1,2) * Sf(2,0) - (double)Sf(1,0) * Sf(2,2)) * d);
t[4] = (float)(((double)Sf(0,0) * Sf(2,2) - (double)Sf(0,2) * Sf(2,0)) * d);
t[5] = (float)(((double)Sf(0,2) * Sf(1,0) - (double)Sf(0,0) * Sf(1,2)) * d);
t[3] = (((double)Sf(1,2) * Sf(2,0) - (double)Sf(1,0) * Sf(2,2)) * d);
t[4] = (((double)Sf(0,0) * Sf(2,2) - (double)Sf(0,2) * Sf(2,0)) * d);
t[5] = (((double)Sf(0,2) * Sf(1,0) - (double)Sf(0,0) * Sf(1,2)) * d);
t[6] = (float)(((double)Sf(1,0) * Sf(2,1) - (double)Sf(1,1) * Sf(2,0)) * d);
t[7] = (float)(((double)Sf(0,1) * Sf(2,0) - (double)Sf(0,0) * Sf(2,1)) * d);
t[8] = (float)(((double)Sf(0,0) * Sf(1,1) - (double)Sf(0,1) * Sf(1,0)) * d);
t[6] = (((double)Sf(1,0) * Sf(2,1) - (double)Sf(1,1) * Sf(2,0)) * d);
t[7] = (((double)Sf(0,1) * Sf(2,0) - (double)Sf(0,0) * Sf(2,1)) * d);
t[8] = (((double)Sf(0,0) * Sf(1,1) - (double)Sf(0,1) * Sf(1,0)) * d);
Df(0,0) = t[0]; Df(0,1) = t[1]; Df(0,2) = t[2];
Df(1,0) = t[3]; Df(1,1) = t[4]; Df(1,2) = t[5];
Df(2,0) = t[6]; Df(2,1) = t[7]; Df(2,2) = t[8];
Df(0,0) = (float)t[0]; Df(0,1) = (float)t[1]; Df(0,2) = (float)t[2];
Df(1,0) = (float)t[3]; Df(1,1) = (float)t[4]; Df(1,2) = (float)t[5];
Df(2,0) = (float)t[6]; Df(2,1) = (float)t[7]; Df(2,2) = (float)t[8];
}
}
else
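Note: the invert() hunk above switches the 3x3 temporaries from float to double so every intermediate product keeps full precision, casting to float only at the final store. A standalone sketch of the same adjugate-over-determinant computation (illustrative; the OpenCV code path works on Mat accessors instead of plain arrays):

    #include <cstdio>

    static bool invert3x3(const float S[3][3], float D[3][3])
    {
        double d = (double)S[0][0]*((double)S[1][1]*S[2][2] - (double)S[1][2]*S[2][1])
                 - (double)S[0][1]*((double)S[1][0]*S[2][2] - (double)S[1][2]*S[2][0])
                 + (double)S[0][2]*((double)S[1][0]*S[2][1] - (double)S[1][1]*S[2][0]);
        if (d == 0.)
            return false;
        d = 1./d;
        double t[9];
        t[0] = ((double)S[1][1]*S[2][2] - (double)S[1][2]*S[2][1])*d;
        t[1] = ((double)S[0][2]*S[2][1] - (double)S[0][1]*S[2][2])*d;
        t[2] = ((double)S[0][1]*S[1][2] - (double)S[0][2]*S[1][1])*d;
        t[3] = ((double)S[1][2]*S[2][0] - (double)S[1][0]*S[2][2])*d;
        t[4] = ((double)S[0][0]*S[2][2] - (double)S[0][2]*S[2][0])*d;
        t[5] = ((double)S[0][2]*S[1][0] - (double)S[0][0]*S[1][2])*d;
        t[6] = ((double)S[1][0]*S[2][1] - (double)S[1][1]*S[2][0])*d;
        t[7] = ((double)S[0][1]*S[2][0] - (double)S[0][0]*S[2][1])*d;
        t[8] = ((double)S[0][0]*S[1][1] - (double)S[0][1]*S[1][0])*d;
        for (int i = 0; i < 9; i++)
            D[i/3][i%3] = (float)t[i];  // the only float rounding happens here
        return true;
    }

    int main()
    {
        float S[3][3] = {{4,7,2},{3,6,1},{2,5,3}}, D[3][3];
        if (invert3x3(S, D))
            std::printf("D[0][0] = %g\n", D[0][0]);  // det(S) = 9, so D is well defined
        return 0;
    }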

@@ -92,6 +92,7 @@ PERF_TEST_P(Source_CrossCheck, batchDistance_L2,
generateData(queryDescriptors, trainDescriptors, sourceType);
declare.time(50);
TEST_CYCLE()
{
batchDistance(queryDescriptors, trainDescriptors, dist, CV_32F, (isCrossCheck) ? ndix : noArray(),
@@ -118,6 +119,7 @@ PERF_TEST_P(Norm_CrossCheck, batchDistance_32F,
Mat ndix;
generateData(queryDescriptors, trainDescriptors, CV_32F);
declare.time(100);
TEST_CYCLE()
{

@@ -873,15 +873,16 @@ gpu::FastNonLocalMeansDenoising
-------------------------------
.. ocv:class:: gpu::FastNonLocalMeansDenoising
class FastNonLocalMeansDenoising
{
public:
//! Simple method, recommended for grayscale images (though it supports multichannel images)
void simpleMethod(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, Stream& s = Stream::Null())
//! Processes luminance and color components separately
void labMethod(const GpuMat& src, GpuMat& dst, float h_luminance, float h_color, int search_window = 21, int block_size = 7, Stream& s = Stream::Null())
};
::
class FastNonLocalMeansDenoising
{
public:
//! Simple method, recommended for grayscale images (though it supports multichannel images)
void simpleMethod(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, Stream& s = Stream::Null())
//! Processes luminance and color components separately
void labMethod(const GpuMat& src, GpuMat& dst, float h_luminance, float h_color, int search_window = 21, int block_size = 7, Stream& s = Stream::Null())
};
The class implements fast approximate Non Local Means Denoising algorithm.

@@ -150,7 +150,7 @@ namespace cv { namespace gpu { namespace device
return true;
}
static __device__ __forceinline__ bool check(int, int, int, uint offset = 0)
static __device__ __forceinline__ bool check(int, int, int)
{
return true;
}

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys, re
spaces = '[\s]*'

@@ -357,18 +357,19 @@ namespace cv { namespace gpu { namespace device
#endif
}
void shfl_integral_gpu(PtrStepSzb img, PtrStepSz<unsigned int> integral, cudaStream_t stream)
void shfl_integral_gpu(const PtrStepSzb& img, PtrStepSz<unsigned int> integral, cudaStream_t stream)
{
{
// each thread handles 16 values, use 1 block/row
const int block = img.cols / 16;
// safe, because the step actually can't be less than 512 bytes
int block = integral.cols / 16;
// launch 1 block / row
const int grid = img.rows;
cudaSafeCall( cudaFuncSetCacheConfig(shfl_integral_horizontal, cudaFuncCachePreferL1) );
shfl_integral_horizontal<<<grid, block, 0, stream>>>((PtrStepSz<uint4>) img, (PtrStepSz<uint4>) integral);
shfl_integral_horizontal<<<grid, block, 0, stream>>>((const PtrStepSz<uint4>) img, (PtrStepSz<uint4>) integral);
cudaSafeCall( cudaGetLastError() );
}

@@ -185,6 +185,7 @@ namespace cv { namespace gpu { namespace device
void connectedConmonents(PtrStepSz<int4> candidates, int ncandidates, PtrStepSz<int4> objects, int groupThreshold, float grouping_eps, unsigned int* nclasses)
{
if (!ncandidates) return;
int block = ncandidates;
int smem = block * ( sizeof(int) + sizeof(int4) );
disjoin<InSameComponint><<<1, block, smem>>>(candidates, objects, ncandidates, groupThreshold, grouping_eps, nclasses);

@@ -177,7 +177,7 @@ namespace cv { namespace gpu { namespace device
return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
}
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace, uint sumOffset)
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace)
{
// Determine the indices
const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
@@ -198,9 +198,9 @@
if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j)
{
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), sumOffset + (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), sumOffset + (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), sumOffset + (j << c_octave));
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave));
det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
@@ -208,7 +208,7 @@
}
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayers, const size_t sumOffset)
int octave, int nOctaveLayers)
{
const int min_size = calcSize(octave, 0);
const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
@@ -220,7 +220,7 @@
grid.x = divUp(max_samples_j, threads.x);
grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2);
icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace, (uint)sumOffset);
icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
@@ -233,7 +233,7 @@
struct WithMask
{
static __device__ bool check(int sum_i, int sum_j, int size, const uint offset)
static __device__ bool check(int sum_i, int sum_j, int size)
{
float ratio = (float)size / 9.0f;
@@ -245,10 +245,10 @@
int dy2 = __float2int_rn(ratio * c_DM[3]);
float t = 0;
t += tex2D(maskSumTex, offset + sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, offset + sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, offset + sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, offset + sum_j + dx2, sum_i + dy2);
t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2);
d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
@@ -258,7 +258,7 @@
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
unsigned int* maxCounter, const uint maskOffset)
unsigned int* maxCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
@@ -299,7 +299,7 @@
const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
if (Mask::check(sum_i, sum_j, size, maskOffset))
if (Mask::check(sum_i, sum_j, size))
{
// Check to see if we have a max (in its 26 neighbours)
const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
@@ -351,7 +351,7 @@
}
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers, const size_t maskOffset)
int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
@@ -367,9 +367,9 @@
const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
if (use_mask)
icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter, (uint)maskOffset);
icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
else
icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter, 0);
icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
cudaSafeCall( cudaGetLastError() );

@@ -85,6 +85,12 @@ void cv::gpu::GoodFeaturesToTrackDetector_GPU::operator ()(const GpuMat& image,
int total = findCorners_gpu(eig_, static_cast<float>(maxVal * qualityLevel), mask, tmpCorners_.ptr<float2>(), tmpCorners_.cols);
if (total == 0)
{
corners.release();
return;
}
sortCorners_gpu(eig_, tmpCorners_.ptr<float2>(), total);
if (minDistance < 1)

@@ -537,7 +537,7 @@ namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
void shfl_integral_gpu(PtrStepSzb img, PtrStepSz<unsigned int> integral, cudaStream_t stream);
void shfl_integral_gpu(const PtrStepSzb& img, PtrStepSz<unsigned int> integral, cudaStream_t stream);
}
}}}
@@ -553,44 +553,26 @@ void cv::gpu::integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, S
src.locateROI(whole, offset);
if (info.supports(WARP_SHUFFLE_FUNCTIONS) && src.cols <= 2048)
if (info.supports(WARP_SHUFFLE_FUNCTIONS) && src.cols <= 2048
&& offset.x % 16 == 0 && ((src.cols + 63) / 64) * 64 <= (src.step - offset.x))
{
GpuMat srcAlligned;
ensureSizeIsEnough(((src.rows + 7) / 8) * 8, ((src.cols + 63) / 64) * 64, CV_32SC1, buffer);
if (src.cols % 16 == 0 && src.rows % 8 == 0 && offset.x % 16 == 0 && offset.y % 8 == 0)
srcAlligned = src;
else
{
ensureSizeIsEnough(((src.rows + 7) / 8) * 8, ((src.cols + 15) / 16) * 16, src.type(), buffer);
GpuMat inner = buffer(Rect(0, 0, src.cols, src.rows));
if (s)
{
s.enqueueMemSet(buffer, Scalar::all(0));
s.enqueueCopy(src, inner);
}
else
{
buffer.setTo(Scalar::all(0));
src.copyTo(inner);
}
srcAlligned = buffer;
}
sum.create(srcAlligned.rows + 1, srcAlligned.cols + 4, CV_32SC1);
cv::gpu::device::imgproc::shfl_integral_gpu(src, buffer, stream);
sum.create(src.rows + 1, src.cols + 1, CV_32SC1);
if (s)
s.enqueueMemSet(sum, Scalar::all(0));
else
sum.setTo(Scalar::all(0));
GpuMat inner = sum(Rect(4, 1, srcAlligned.cols, srcAlligned.rows));
cv::gpu::device::imgproc::shfl_integral_gpu(srcAlligned, inner, stream);
GpuMat inner = sum(Rect(1, 1, src.cols, src.rows));
GpuMat res = buffer(Rect(0, 0, src.cols, src.rows));
sum = sum(Rect(3, 0, src.cols + 1, src.rows + 1));
if (s)
s.enqueueCopy(res, inner);
else
res.copyTo(inner);
}
else
{
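Note: the rewritten integralBuffered() takes the warp-shuffle path only when the ROI satisfies the new alignment test (the real code also requires WARP_SHUFFLE_FUNCTIONS device support). A small sketch of that predicate with assumed example values:

    #include <cstdio>
    #include <cstddef>

    // Mirrors the condition added above: the kernel reads the source as uint4,
    // so the ROI must start 16-byte aligned and a 64-rounded row width must fit
    // inside the parent image's step.
    static bool canUseShflIntegral(int cols, int offsetX, size_t step)
    {
        return cols <= 2048
            && offsetX % 16 == 0
            && ((cols + 63) / 64) * 64 <= (int)step - offsetX;
    }

    int main()
    {
        // e.g. a 640-wide ROI at x offset 32 inside an image with a 768-byte step
        std::printf("%s\n", canUseShflIntegral(640, 32, 768) ? "fast path" : "fallback");
        return 0;
    }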

@@ -75,10 +75,10 @@ namespace cv { namespace gpu { namespace device
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayers, const size_t sumOffset);
int octave, int nOctaveLayer);
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers, const size_t maskOffset);
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
@@ -146,8 +146,8 @@ namespace
loadGlobalConstants(maxCandidates, maxFeatures, img_rows, img_cols, surf_.nOctaveLayers, static_cast<float>(surf_.hessianThreshold));
bindImgTex(img);
integralBuffered(img, surf_.sum, surf_.intBuffer);
integralBuffered(img, surf_.sum, surf_.intBuffer);
sumOffset = bindSumTex(surf_.sum);
if (use_mask)
@@ -174,10 +174,10 @@ namespace
loadOctaveConstants(octave, layer_rows, layer_cols);
icvCalcLayerDetAndTrace_gpu(surf_.det, surf_.trace, img_rows, img_cols, octave, surf_.nOctaveLayers, sumOffset);
icvCalcLayerDetAndTrace_gpu(surf_.det, surf_.trace, img_rows, img_cols, octave, surf_.nOctaveLayers);
icvFindMaximaInLayer_gpu(surf_.det, surf_.trace, surf_.maxPosBuffer.ptr<int4>(), counters.ptr<unsigned int>() + 1 + octave,
img_rows, img_cols, octave, use_mask, surf_.nOctaveLayers, maskOffset);
img_rows, img_cols, octave, use_mask, surf_.nOctaveLayers);
unsigned int maxCounter;
cudaSafeCall( cudaMemcpy(&maxCounter, counters.ptr<unsigned int>() + 1 + octave, sizeof(unsigned int), cudaMemcpyDeviceToHost) );

@@ -203,6 +203,21 @@ TEST_P(GoodFeaturesToTrack, Accuracy)
}
}
TEST_P(GoodFeaturesToTrack, EmptyCorners)
{
int maxCorners = 1000;
double qualityLevel = 0.01;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
cv::gpu::GpuMat src(100, 100, CV_8UC1, cv::Scalar::all(0));
cv::gpu::GpuMat corners(1, maxCorners, CV_32FC2);
detector(src, corners);
ASSERT_TRUE( corners.empty() );
}
INSTANTIATE_TEST_CASE_P(GPU_Video, GoodFeaturesToTrack, testing::Combine(
ALL_DEVICES,
testing::Values(MinDistance(0.0), MinDistance(3.0))));

@@ -424,7 +424,6 @@ CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
CV_IMPL int cvWriteFrame( CvVideoWriter* writer, const IplImage* image )
{
return writer ? writer->writeFrame(image) : 0;
}

@@ -57,11 +57,32 @@ static CvCreateVideoWriter_Plugin icvCreateVideoWriter_FFMPEG_p = 0;
static CvReleaseVideoWriter_Plugin icvReleaseVideoWriter_FFMPEG_p = 0;
static CvWriteFrame_Plugin icvWriteFrame_FFMPEG_p = 0;
static void
icvInitFFMPEG(void)
static cv::Mutex _icvInitFFMPEG_mutex;
class icvInitFFMPEG
{
static int ffmpegInitialized = 0;
if( !ffmpegInitialized )
public:
static void Init()
{
cv::AutoLock al(_icvInitFFMPEG_mutex);
static icvInitFFMPEG init;
}
private:
#if defined WIN32 || defined _WIN32
HMODULE icvFFOpenCV;
~icvInitFFMPEG()
{
if (icvFFOpenCV)
{
FreeLibrary(icvFFOpenCV);
icvFFOpenCV = 0;
}
}
#endif
icvInitFFMPEG()
{
#if defined WIN32 || defined _WIN32
const char* module_name = "opencv_ffmpeg"
@@ -71,7 +92,7 @@ icvInitFFMPEG(void)
#endif
".dll";
static HMODULE icvFFOpenCV = LoadLibrary( module_name );
icvFFOpenCV = LoadLibrary( module_name );
if( icvFFOpenCV )
{
icvCreateFileCapture_FFMPEG_p =
@@ -123,10 +144,8 @@ icvInitFFMPEG(void)
icvReleaseVideoWriter_FFMPEG_p = (CvReleaseVideoWriter_Plugin)cvReleaseVideoWriter_FFMPEG;
icvWriteFrame_FFMPEG_p = (CvWriteFrame_Plugin)cvWriteFrame_FFMPEG;
#endif
ffmpegInitialized = 1;
}
}
};
class CvCapture_FFMPEG_proxy : public CvCapture
@@ -161,9 +180,9 @@ public:
}
virtual bool open( const char* filename )
{
icvInitFFMPEG::Init();
close();
icvInitFFMPEG();
if( !icvCreateFileCapture_FFMPEG_p )
return false;
ffmpegCapture = icvCreateFileCapture_FFMPEG_p( filename );
@@ -196,7 +215,6 @@ CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char * filename)
#endif
}
class CvVideoWriter_FFMPEG_proxy : public CvVideoWriter
{
public:
@@ -214,8 +232,8 @@ public:
}
virtual bool open( const char* filename, int fourcc, double fps, CvSize frameSize, bool isColor )
{
icvInitFFMPEG::Init();
close();
icvInitFFMPEG();
if( !icvCreateVideoWriter_FFMPEG_p )
return false;
ffmpegWriter = icvCreateVideoWriter_FFMPEG_p( filename, fourcc, fps, frameSize.width, frameSize.height, isColor );
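Note: both proxy classes now funnel initialization through icvInitFFMPEG::Init(), a mutex-guarded function-local static, so the plugin is loaded exactly once even with concurrent callers. A minimal sketch of the pattern (std::mutex is used here for brevity; the commit uses cv::Mutex and cv::AutoLock):

    #include <cstdio>
    #include <mutex>

    static std::mutex g_initMutex;

    class LazyInit
    {
    public:
        static void Init()
        {
            std::lock_guard<std::mutex> lock(g_initMutex);
            static LazyInit instance;  // constructed on the first call only
            (void)instance;
        }
    private:
        LazyInit()  { std::printf("loading plugin...\n"); }
        ~LazyInit() { std::printf("unloading plugin at exit...\n"); }
    };

    int main()
    {
        LazyInit::Init();  // performs the one-time work
        LazyInit::Init();  // a cheap no-op afterwards
        return 0;
    }

The explicit mutex matters because pre-C++11 compilers do not guarantee thread-safe initialization of function-local statics; the destructor runs at process exit, which is where the commit unloads the opencv_ffmpeg DLL.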

@@ -328,28 +328,179 @@ void CvCapture_FFMPEG::close()
#define AVSEEK_FLAG_ANY 1
#endif
static void icvInitFFMPEG_internal()
class ImplMutex
{
static volatile bool initialized = false;
if( !initialized )
public:
ImplMutex();
~ImplMutex();
void lock();
bool trylock();
void unlock();
struct Impl;
protected:
Impl* impl;
private:
ImplMutex(const ImplMutex&);
ImplMutex& operator = (const ImplMutex& m);
};
#if defined WIN32 || defined _WIN32 || defined WINCE
struct ImplMutex::Impl
{
Impl() { InitializeCriticalSection(&cs); refcount = 1; }
~Impl() { DeleteCriticalSection(&cs); }
void lock() { EnterCriticalSection(&cs); }
bool trylock() { return TryEnterCriticalSection(&cs) != 0; }
void unlock() { LeaveCriticalSection(&cs); }
CRITICAL_SECTION cs;
int refcount;
};
#ifndef __GNUC__
static int _interlockedExchangeAdd(int* addr, int delta)
{
#if defined _MSC_VER && _MSC_VER >= 1500
return (int)_InterlockedExchangeAdd((long volatile*)addr, delta);
#else
return (int)InterlockedExchangeAdd((long volatile*)addr, delta);
#endif
}
#endif // __GNUC__
#elif defined __APPLE__
#include <libkern/OSAtomic.h>
struct ImplMutex::Impl
{
Impl() { sl = OS_SPINLOCK_INIT; refcount = 1; }
~Impl() {}
void lock() { OSSpinLockLock(&sl); }
bool trylock() { return OSSpinLockTry(&sl); }
void unlock() { OSSpinLockUnlock(&sl); }
OSSpinLock sl;
int refcount;
};
#elif defined __linux__ && !defined ANDROID
struct ImplMutex::Impl
{
Impl() { pthread_spin_init(&sl, 0); refcount = 1; }
~Impl() { pthread_spin_destroy(&sl); }
void lock() { pthread_spin_lock(&sl); }
bool trylock() { return pthread_spin_trylock(&sl) == 0; }
void unlock() { pthread_spin_unlock(&sl); }
pthread_spinlock_t sl;
int refcount;
};
#else
struct ImplMutex::Impl
{
Impl() { pthread_mutex_init(&sl, 0); refcount = 1; }
~Impl() { pthread_mutex_destroy(&sl); }
void lock() { pthread_mutex_lock(&sl); }
bool trylock() { return pthread_mutex_trylock(&sl) == 0; }
void unlock() { pthread_mutex_unlock(&sl); }
pthread_mutex_t sl;
int refcount;
};
#endif
ImplMutex::ImplMutex()
{
impl = new ImplMutex::Impl;
}
ImplMutex::~ImplMutex()
{
delete impl;
impl = 0;
}
void ImplMutex::lock() { impl->lock(); }
void ImplMutex::unlock() { impl->unlock(); }
bool ImplMutex::trylock() { return impl->trylock(); }
static int LockCallBack(void **mutex, AVLockOp op)
{
switch (op)
{
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 13, 0)
case AV_LOCK_CREATE:
*mutex = reinterpret_cast<void*>(new ImplMutex());
if (!*mutex)
return 1;
break;
case AV_LOCK_OBTAIN:
reinterpret_cast<ImplMutex*>(*mutex)->lock();
break;
case AV_LOCK_RELEASE:
reinterpret_cast<ImplMutex*>(*mutex)->unlock();
break;
case AV_LOCK_DESTROY:
ImplMutex* cv_mutex = reinterpret_cast<ImplMutex*>(*mutex);
delete cv_mutex;
cv_mutex = NULL;
break;
}
return 0;
}
static ImplMutex _InternalFFMpegRegister_mutex;
class InternalFFMpegRegister
{
public:
static void Register()
{
_InternalFFMpegRegister_mutex.lock();
static InternalFFMpegRegister init;
_InternalFFMpegRegister_mutex.unlock();
}
~InternalFFMpegRegister()
{
av_lockmgr_register(NULL);
}
private:
InternalFFMpegRegister()
{
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 13, 0)
avformat_network_init();
#endif
#endif
/* register all codecs, demux and protocols */
av_register_all();
av_log_set_level(AV_LOG_ERROR);
/* register a callback function for synchronization */
av_lockmgr_register(&LockCallBack);
initialized = true;
av_log_set_level(AV_LOG_ERROR);
}
}
};
bool CvCapture_FFMPEG::open( const char* _filename )
{
icvInitFFMPEG_internal();
InternalFFMpegRegister::Register();
unsigned i;
bool valid = false;
@@ -361,7 +512,8 @@ bool CvCapture_FFMPEG::open( const char* _filename )
int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
#endif
if (err < 0) {
if (err < 0)
{
CV_WARN("Error opening file");
goto exit_func;
}
@@ -371,7 +523,8 @@ bool CvCapture_FFMPEG::open( const char* _filename )
#else
av_find_stream_info(ic);
#endif
if (err < 0) {
if (err < 0)
{
CV_WARN("Could not find codec parameters");
goto exit_func;
}
@@ -393,7 +546,8 @@ bool CvCapture_FFMPEG::open( const char* _filename )
#define AVMEDIA_TYPE_VIDEO CODEC_TYPE_VIDEO
#endif
if( AVMEDIA_TYPE_VIDEO == enc->codec_type && video_stream < 0) {
if( AVMEDIA_TYPE_VIDEO == enc->codec_type && video_stream < 0)
{
AVCodec *codec = avcodec_find_decoder(enc->codec_id);
if (!codec ||
#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
@@ -401,7 +555,8 @@ bool CvCapture_FFMPEG::open( const char* _filename )
#else
avcodec_open(enc, codec)
#endif
< 0) goto exit_func;
< 0)
goto exit_func;
video_stream = i;
video_st = ic->streams[i];
@@ -1275,7 +1430,7 @@ void CvVideoWriter_FFMPEG::close()
bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
double fps, int width, int height, bool is_color )
{
icvInitFFMPEG_internal();
InternalFFMpegRegister::Register();
CodecID codec_id = CODEC_ID_NONE;
int err, codec_pix_fmt;
@@ -1495,6 +1650,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
frame_width = width;
frame_height = height;
ok = true;
return true;
}
@@ -1506,6 +1662,7 @@ CvCapture_FFMPEG* cvCreateFileCapture_FFMPEG( const char* filename )
capture->init();
if( capture->open( filename ))
return capture;
capture->close();
free(capture);
return 0;
@@ -1554,7 +1711,6 @@ CvVideoWriter_FFMPEG* cvCreateVideoWriter_FFMPEG( const char* filename, int four
return 0;
}
void cvReleaseVideoWriter_FFMPEG( CvVideoWriter_FFMPEG** writer )
{
if( writer && *writer )
@@ -1741,15 +1897,12 @@ AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CodecID
bool OutputMediaStream_FFMPEG::open(const char* fileName, int width, int height, double fps)
{
InternalFFMpegRegister::Register();
fmt_ = 0;
oc_ = 0;
video_st_ = 0;
// tell FFMPEG to register codecs
av_register_all();
av_log_set_level(AV_LOG_ERROR);
// auto detect the output format from the name and fourcc code
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
fmt_ = av_guess_format(NULL, fileName, NULL);
@@ -1920,6 +2073,8 @@ private:
bool InputMediaStream_FFMPEG::open(const char* fileName, int* codec, int* chroma_format, int* width, int* height)
{
InternalFFMpegRegister::Register();
int err;
ctx_ = 0;
@@ -1930,11 +2085,6 @@ bool InputMediaStream_FFMPEG::open(const char* fileName, int* codec, int* chroma
avformat_network_init();
#endif
// register all codecs, demux and protocols
av_register_all();
av_log_set_level(AV_LOG_ERROR);
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 6, 0)
err = avformat_open_input(&ctx_, fileName, 0, 0);
#else
@@ -2054,7 +2204,7 @@ bool InputMediaStream_FFMPEG::read(unsigned char** data, int* size, int* endOfFi
if (ret < 0)
{
if (ret == AVERROR_EOF)
if (ret == (int)AVERROR_EOF)
*endOfFile = true;
return false;
}
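Note: the LockCallBack registered above implements FFmpeg's lock-manager protocol, which serializes codec open/close across threads. A hedged standalone sketch of the same registration with a std::mutex in place of ImplMutex (av_lockmgr_register existed in the FFmpeg versions this code targets; it is deprecated and removed in modern FFmpeg):

    #include <mutex>

    extern "C" {
    #include <libavformat/avformat.h>  // pulls in avcodec.h: AVLockOp, av_lockmgr_register
    }

    static int lockCallback(void** mtx, AVLockOp op)
    {
        switch (op)
        {
        case AV_LOCK_CREATE:  *mtx = new std::mutex();                         return 0;
        case AV_LOCK_OBTAIN:  static_cast<std::mutex*>(*mtx)->lock();          return 0;
        case AV_LOCK_RELEASE: static_cast<std::mutex*>(*mtx)->unlock();        return 0;
        case AV_LOCK_DESTROY: delete static_cast<std::mutex*>(*mtx); *mtx = 0; return 0;
        }
        return 1;  // unknown operation
    }

    int main()
    {
        av_register_all();                   // register codecs/demuxers, as the commit does
        av_lockmgr_register(&lockCallback);  // install the synchronization callback
        // ... use libavformat/libavcodec from multiple threads ...
        av_lockmgr_register(NULL);           // unregister on shutdown (see ~InternalFFMpegRegister)
        return 0;
    }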

@@ -65,7 +65,24 @@
#define CV_WARN(message) fprintf(stderr, "warning: %s (%s:%d)\n", message, __FILE__, __LINE__)
#endif
static bool isInited = false;
static cv::Mutex gst_initializer_mutex;
class gst_initializer
{
public:
static void init()
{
gst_initializer_mutex.lock();
static gst_initializer init;
gst_initializer_mutex.unlock();
}
private:
gst_initializer()
{
gst_init(NULL, NULL);
}
};
class CvCapture_GStreamer : public CvCapture
{
public:
@@ -298,16 +315,18 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
__BEGIN__;
if(!isInited) {
gst_initializer::init();
// if(!isInited) {
// printf("gst_init\n");
gst_init (NULL, NULL);
// gst_init (NULL, NULL);
// gst_debug_set_active(TRUE);
// gst_debug_set_colored(TRUE);
// gst_debug_set_default_threshold(GST_LEVEL_WARNING);
isInited = true;
}
// isInited = true;
// }
bool stream = false;
bool manualpipeline = false;
char *uri = NULL;
@@ -477,10 +496,11 @@ bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
encit=encs.find(fourcc);
if (encit==encs.end())
CV_ERROR( CV_StsUnsupportedFormat,"Gstreamer OpenCV backend doesn't actually support this codec.");
if(!isInited) {
gst_init (NULL, NULL);
isInited = true;
}
// if(!isInited) {
// gst_init (NULL, NULL);
// isInited = true;
// }
gst_initializer::init();
close();
source=gst_element_factory_make("appsrc",NULL);
file=gst_element_factory_make("filesink", NULL);

@@ -1008,10 +1008,6 @@ static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index)
return NULL;
}
/* set the default size */
capture->width = DEFAULT_V4L_WIDTH;
capture->height = DEFAULT_V4L_HEIGHT;
#ifdef USE_TEMP_BUFFER
capture->buffers[MAX_V4L_BUFFERS].start = NULL;
#endif
@@ -1035,6 +1031,9 @@
the standard set of cv calls promoting transparency. "Vector Table" insertion. */
capture->FirstCapture = 1;
/* set the default size */
capture->width = DEFAULT_V4L_WIDTH;
capture->height = DEFAULT_V4L_HEIGHT;
if (_capture_V4L2 (capture, deviceName) == -1) {
icvCloseCAM_V4L(capture);

@@ -43,11 +43,12 @@
#include "test_precomp.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
#ifdef HAVE_FFMPEG
#include "ffmpeg_codecs.hpp"
using namespace cv;
using namespace std;
class CV_FFmpegWriteBigVideoTest : public cvtest::BaseTest
@@ -118,11 +119,11 @@ public:
else
{
Mat img(frame_s, CV_8UC3, Scalar::all(0));
const int coeff = cvRound(cv::min(frame_s.width, frame_s.height)/(fps0 * time_sec));
const int coeff = cvRound(min(frame_s.width, frame_s.height)/(fps0 * time_sec));
for (int i = 0 ; i < static_cast<int>(fps * time_sec); i++ )
{
//circle(img, Point2i(img_c / 2, img_r / 2), cv::min(img_r, img_c) / 2 * (i + 1), Scalar(255, 0, 0, 0), 2);
//circle(img, Point2i(img_c / 2, img_r / 2), min(img_r, img_c) / 2 * (i + 1), Scalar(255, 0, 0, 0), 2);
rectangle(img, Point2i(coeff * i, coeff * i), Point2i(coeff * (i + 1), coeff * (i + 1)),
Scalar::all(255 * (1.0 - static_cast<double>(i) / (fps * time_sec * 2) )), -1);
writer << img;
@@ -174,3 +175,221 @@ public:
TEST(Highgui_Video, ffmpeg_image) { CV_FFmpegReadImageTest test; test.safe_run(); }
#endif
#if defined(HAVE_FFMPEG) || defined(WIN32) || defined(_WIN32)
//////////////////////////////// Parallel VideoWriters and VideoCaptures ////////////////////////////////////
class CreateVideoWriterInvoker :
public ParallelLoopBody
{
public:
const static Size FrameSize;
static std::string TmpDirectory;
CreateVideoWriterInvoker(std::vector<VideoWriter*>& _writers, std::vector<std::string>& _files) :
ParallelLoopBody(), writers(&_writers), files(&_files)
{
}
virtual void operator() (const Range& range) const
{
for (int i = range.start; i != range.end; ++i)
{
std::ostringstream stream;
stream << i << ".avi";
std::string fileName = tempfile(stream.str().c_str());
files->operator[](i) = fileName;
writers->operator[](i) = new VideoWriter(fileName, CV_FOURCC('X','V','I','D'), 25.0f, FrameSize);
CV_Assert(writers->operator[](i)->isOpened());
}
}
private:
std::vector<VideoWriter*>* writers;
std::vector<std::string>* files;
};
std::string CreateVideoWriterInvoker::TmpDirectory;
const Size CreateVideoWriterInvoker::FrameSize(1020, 900);
class WriteVideo_Invoker :
public ParallelLoopBody
{
public:
enum { FrameCount = 300 };
static const Scalar ObjectColor;
static const Point Center;
WriteVideo_Invoker(const std::vector<VideoWriter*>& _writers) :
ParallelLoopBody(), writers(&_writers)
{
}
static void GenerateFrame(Mat& frame, unsigned int i)
{
frame = Scalar::all(i % 255);
std::string text = to_string(i);
putText(frame, text, Point(50, Center.y), FONT_HERSHEY_SIMPLEX, 5.0, ObjectColor, 5, CV_AA);
circle(frame, Center, i + 2, ObjectColor, 2, CV_AA);
}
virtual void operator() (const Range& range) const
{
CV_Assert((range.start + 1) == range.end);
VideoWriter* writer = writers->operator[](range.start);
CV_Assert(writer != NULL);
CV_Assert(writer->isOpened());
Mat frame(CreateVideoWriterInvoker::FrameSize, CV_8UC3);
for (unsigned int i = 0; i < FrameCount; ++i)
{
GenerateFrame(frame, i);
writer->operator<< (frame);
}
}
protected:
static std::string to_string(unsigned int i)
{
std::stringstream stream(std::ios::out);
stream << "frame #" << i;
return stream.str();
}
private:
const std::vector<VideoWriter*>* writers;
};
const Scalar WriteVideo_Invoker::ObjectColor(Scalar::all(0));
const Point WriteVideo_Invoker::Center(CreateVideoWriterInvoker::FrameSize.height / 2,
CreateVideoWriterInvoker::FrameSize.width / 2);
class CreateVideoCaptureInvoker :
public ParallelLoopBody
{
public:
CreateVideoCaptureInvoker(std::vector<VideoCapture*>& _readers, const std::vector<std::string>& _files) :
ParallelLoopBody(), readers(&_readers), files(&_files)
{
}
virtual void operator() (const Range& range) const
{
for (int i = range.start; i != range.end; ++i)
{
readers->operator[](i) = new VideoCapture(files->operator[](i));
CV_Assert(readers->operator[](i)->isOpened());
}
}
private:
std::vector<VideoCapture*>* readers;
const std::vector<std::string>* files;
};
class ReadImageAndTest :
public ParallelLoopBody
{
public:
ReadImageAndTest(const std::vector<VideoCapture*>& _readers, cvtest::TS* _ts) :
ParallelLoopBody(), readers(&_readers), ts(_ts)
{
}
virtual void operator() (const Range& range) const
{
CV_Assert(range.start + 1 == range.end);
VideoCapture* capture = readers->operator[](range.start);
CV_Assert(capture != NULL);
CV_Assert(capture->isOpened());
const static double eps = 23.0;
unsigned int frameCount = static_cast<unsigned int>(capture->get(CV_CAP_PROP_FRAME_COUNT));
CV_Assert(frameCount == WriteVideo_Invoker::FrameCount);
Mat reference(CreateVideoWriterInvoker::FrameSize, CV_8UC3);
for (unsigned int i = 0; i < frameCount && next; ++i)
{
Mat actual;
(*capture) >> actual;
WriteVideo_Invoker::GenerateFrame(reference, i);
EXPECT_EQ(reference.cols, actual.cols);
EXPECT_EQ(reference.rows, actual.rows);
EXPECT_EQ(reference.depth(), actual.depth());
EXPECT_EQ(reference.channels(), actual.channels());
double psnr = PSNR(actual, reference);
if (psnr < eps)
{
#define SUM cvtest::TS::SUMMARY
ts->printf(SUM, "\nPSNR: %lf\n", psnr);
ts->printf(SUM, "Video #: %d\n", range.start);
ts->printf(SUM, "Frame #: %d\n", i);
#undef SUM
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->set_gtest_status();
Mat diff;
absdiff(actual, reference, diff);
EXPECT_EQ(countNonZero(diff.reshape(1) > 1), 0);
next = false;
}
}
}
static bool next;
private:
const std::vector<VideoCapture*>* readers;
cvtest::TS* ts;
};
bool ReadImageAndTest::next;
TEST(Highgui_Video_parallel_writers_and_readers, accuracy)
{
const unsigned int threadsCount = 4;
cvtest::TS* ts = cvtest::TS::ptr();
// creating VideoWriters
std::vector<VideoWriter*> writers(threadsCount);
Range range(0, threadsCount);
std::vector<std::string> files(threadsCount);
CreateVideoWriterInvoker invoker1(writers, files);
parallel_for_(range, invoker1);
// write a video
parallel_for_(range, WriteVideo_Invoker(writers));
// deleting the writers
for (std::vector<VideoWriter*>::iterator i = writers.begin(), end = writers.end(); i != end; ++i)
delete *i;
writers.clear();
std::vector<VideoCapture*> readers(threadsCount);
CreateVideoCaptureInvoker invoker2(readers, files);
parallel_for_(range, invoker2);
ReadImageAndTest::next = true;
parallel_for_(range, ReadImageAndTest(readers, ts));
// deleting tmp video files
for (std::vector<std::string>::const_iterator i = files.begin(), end = files.end(); i != end; ++i)
{
int code = remove(i->c_str());
if (code == 1)
std::cerr << "Couldn't delete " << *i << std::endl;
}
}
#endif

@@ -63,11 +63,7 @@ PERF_TEST_P(Size_MatType_BorderType3x3, gaussianBlur3x3,
TEST_CYCLE() GaussianBlur(src, dst, Size(3,3), 0, 0, btype);
#if CV_SSE2
SANITY_CHECK(dst, 1);
#else
SANITY_CHECK(dst);
#endif
}
PERF_TEST_P(Size_MatType_BorderType3x3, blur3x3,
@@ -89,7 +85,7 @@ PERF_TEST_P(Size_MatType_BorderType3x3, blur3x3,
TEST_CYCLE() blur(src, dst, Size(3,3), Point(-1,-1), btype);
SANITY_CHECK(dst, 1e-3);
SANITY_CHECK(dst, 1);
}
PERF_TEST_P(Size_MatType_BorderType, blur16x16,
@@ -183,7 +179,7 @@ PERF_TEST_P(Size_MatType_BorderType, gaussianBlur5x5,
TEST_CYCLE() GaussianBlur(src, dst, Size(5,5), 0, 0, btype);
SANITY_CHECK(dst);
SANITY_CHECK(dst, 1e-3);
}
PERF_TEST_P(Size_MatType_BorderType, blur5x5,
@@ -205,5 +201,5 @@ PERF_TEST_P(Size_MatType_BorderType, blur5x5,
TEST_CYCLE() blur(src, dst, Size(5,5), Point(-1,-1), btype);
SANITY_CHECK(dst, 1e-3);
SANITY_CHECK(dst, 1);
}

@@ -244,6 +244,7 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u,
Mat src(sz, CV_8UC(ch.scn));
Mat dst(sz, CV_8UC(ch.dcn));
declare.time(100);
declare.in(src, WARMUP_RNG).out(dst);
TEST_CYCLE() cvtColor(src, dst, mode, ch.dcn);
@@ -268,6 +269,7 @@ PERF_TEST_P(Size_CvtMode2, cvtColorYUV420,
Mat src(sz.height + sz.height / 2, sz.width, CV_8UC(ch.scn));
Mat dst(sz, CV_8UC(ch.dcn));
declare.time(100);
declare.in(src, WARMUP_RNG).out(dst);
TEST_CYCLE() cvtColor(src, dst, mode, ch.dcn);

@@ -39,7 +39,7 @@ PERF_TEST_P( TestFilter2d, Filter2d,
TEST_CYCLE() filter2D(src, dst, CV_8UC4, kernel, Point(1, 1), 0., borderMode);
SANITY_CHECK(dst);
SANITY_CHECK(dst, 1);
}
PERF_TEST_P( Image_KernelSize, GaborFilter2d,

@@ -32,7 +32,7 @@ PERF_TEST_P(Image_RhoStep_ThetaStep_Threshold, HoughLines,
Canny(image, image, 0, 0);
Mat lines;
declare.time(7);
declare.time(40);
TEST_CYCLE() HoughLines(image, lines, rhoStep, thetaStep, threshold);

@@ -48,6 +48,7 @@ PERF_TEST_P(Size_MatType_OutMatDepth, integral_sqsum,
Mat sqsum(sz, sdepth);
declare.in(src, WARMUP_RNG).out(sum, sqsum);
declare.time(100);
TEST_CYCLE() integral(src, sum, sqsum, sdepth);
@@ -73,6 +74,7 @@ PERF_TEST_P( Size_MatType_OutMatDepth, integral_sqsum_tilted,
Mat tilted(sz, sdepth);
declare.in(src, WARMUP_RNG).out(sum, sqsum, tilted);
declare.time(100);
TEST_CYCLE() integral(src, sum, sqsum, tilted, sdepth);

@@ -28,7 +28,12 @@ PERF_TEST_P(MatInfo_Size_Size, resizeUpLinear,
TEST_CYCLE() resize(src, dst, to);
// Test case temporarily disabled for Android Platform
#ifdef ANDROID
SANITY_CHECK(dst, 255); // TODO: Reimplement check in future versions
#else
SANITY_CHECK(dst, 1 + 1e-6);
#endif
}
PERF_TEST_P(MatInfo_Size_Size, resizeDownLinear,
@@ -52,7 +57,12 @@ PERF_TEST_P(MatInfo_Size_Size, resizeDownLinear,
TEST_CYCLE() resize(src, dst, to);
// Test case temporarily disabled for Android Platform
#ifdef ANDROID
SANITY_CHECK(dst, 255); // TODO: Reimplement check in future versions
#else
SANITY_CHECK(dst, 1 + 1e-6);
#endif
}
@@ -106,6 +116,7 @@ PERF_TEST_P(MatInfo_Size_Scale_Area, ResizeArea,
cv::Mat dst(to, matType);
declare.in(src, WARMUP_RNG).out(dst);
declare.time(100);
TEST_CYCLE() resize(src, dst, dst.size(), 0, 0, INTER_AREA);

@@ -31,6 +31,7 @@ PERF_TEST_P(Size_MatType_ThreshType, threshold,
double maxval = theRNG().uniform(1, 254);
declare.in(src, WARMUP_RNG).out(dst);
declare.time(100);
TEST_CYCLE() threshold(src, dst, thresh, maxval, threshType);

@@ -43,8 +43,12 @@ PERF_TEST_P( TestWarpAffine, WarpAffine,
TEST_CYCLE() warpAffine( src, dst, warpMat, sz, interType, borderMode, Scalar::all(150) );
// Test case temporarily disabled for Android Platform
#ifdef ANDROID
SANITY_CHECK(dst, 255); // TODO: Reimplement check in future versions
#else
SANITY_CHECK(dst, 1);
#endif
}
PERF_TEST_P( TestWarpPerspective, WarpPerspective,
@@ -133,13 +137,19 @@ PERF_TEST_P( TestWarpPerspectiveNear_t, WarpPerspectiveNear,
Mat dst(size, type);
declare.in(src).out(dst);
declare.time(100);
TEST_CYCLE()
{
warpPerspective( src, dst, warpMat, size, interType, borderMode, Scalar::all(150) );
}
// Test case temporarily disabled for Android Platform
#ifdef ANDROID
SANITY_CHECK(dst, 255); // TODO: Reimplement check in future versions
#else
SANITY_CHECK(dst, 1);
#endif
}
PERF_TEST_P( TestRemap, remap,

@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<declare-styleable name = "CameraBridgeViewBase" >
<attr name="show_fps" format="boolean"/>
<attr name="camera_id" format="integer" >
<enum name="any" value="-1" />
<enum name="back" value="0" />
<enum name="front" value="1" />
</attr>
</declare-styleable>
</resources>
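These styleable attributes let a layout pick the camera declaratively; the same integer ids (-1 any, 0 back, 1 front) can be passed to the view constructors added later in this diff. A minimal sketch of the programmatic route, assuming a host Activity (the class and activity names here are hypothetical):

// Sketch: choosing the camera in code instead of via the XML attributes above.
// The integer ids mirror attrs.xml: -1 = any, 0 = back, 1 = front.
import android.app.Activity;
import android.os.Bundle;
import org.opencv.android.JavaCameraView;

public class PreviewActivity extends Activity {
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        JavaCameraView view = new JavaCameraView(this, 1); // 1 = front, per <enum name="front" value="1"/>
        view.enableFpsMeter();                             // programmatic equivalent of show_fps="true"
        setContentView(view);
    }
}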

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys, os, re
classes_ignore_list = (

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys, re, os.path
from string import Template

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import os, sys, re, string, glob
from optparse import OptionParser

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import os, sys, re, string, fnmatch
allmodules = ["core", "flann", "imgproc", "ml", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "gpu", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "ocl"]
verbose = False

@@ -2,6 +2,7 @@ package org.opencv.android;
import java.util.List;
import org.opencv.R;
import org.opencv.android.Utils;
import org.opencv.core.Mat;
import org.opencv.core.Size;
@@ -11,6 +12,7 @@ import android.app.Activity;
import android.app.AlertDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.res.TypedArray;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.util.AttributeSet;
@@ -26,22 +28,44 @@ import android.view.SurfaceView;
* The clients shall implement CvCameraViewListener.
*/
public abstract class CameraBridgeViewBase extends SurfaceView implements SurfaceHolder.Callback {
//TODO: add method to control the format in which the frames will be delivered to CvCameraViewListener
private static final String TAG = "CameraBridge";
private static final int MAX_UNSPECIFIED = -1;
private static final int STOPPED = 0;
private static final int STARTED = 1;
private int mState = STOPPED;
private Bitmap mCacheBitmap;
private CvCameraViewListener mListener;
private boolean mSurfaceExist;
private Object mSyncObject = new Object();
protected int mFrameWidth;
protected int mFrameHeight;
protected int mMaxHeight;
protected int mMaxWidth;
protected int mPreviewFormat = Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA;
protected int mCameraIndex = -1;
protected boolean mEnabled;
protected FpsMeter mFpsMeter = null;
private Bitmap mCacheBitmap;
public CameraBridgeViewBase(Context context, int cameraId) {
super(context);
mCameraIndex = cameraId;
}
public CameraBridgeViewBase(Context context, AttributeSet attrs) {
super(context, attrs);
int count = attrs.getAttributeCount();
Log.d(TAG, "Attr count: " + Integer.valueOf(count));
TypedArray styledAttrs = getContext().obtainStyledAttributes(attrs, R.styleable.CameraBridgeViewBase);
if (styledAttrs.getBoolean(R.styleable.CameraBridgeViewBase_show_fps, false))
enableFpsMeter();
mCameraIndex = styledAttrs.getInt(R.styleable.CameraBridgeViewBase_camera_id, -1);
getHolder().addCallback(this);
mMaxWidth = MAX_UNSPECIFIED;
mMaxHeight = MAX_UNSPECIFIED;
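As the class comment above says, clients implement CvCameraViewListener to receive frames. A minimal sketch of such a client follows; the three callback signatures are assumed from the 2.4-era interface, which this diff does not show, and the class name is hypothetical:

// Hypothetical pass-through client: returns each frame unmodified.
import org.opencv.core.Mat;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;

public class PassThroughListener implements CvCameraViewListener {
    public void onCameraViewStarted(int width, int height) { /* allocate per-frame buffers here */ }
    public void onCameraViewStopped()                       { /* release them here */ }
    public Mat onCameraFrame(Mat inputFrame) {
        return inputFrame; // the returned Mat is what deliverAndDrawFrame() paints
    }
}
// Registered via: view.setCvCameraViewListener(new PassThroughListener());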
@@ -71,19 +95,6 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
}
private static final int STOPPED = 0;
private static final int STARTED = 1;
private static final String TAG = "CameraBridge";
private CvCameraViewListener mListener;
private int mState = STOPPED;
private boolean mEnabled;
private boolean mSurfaceExist;
private Object mSyncObject = new Object();
public void surfaceChanged(SurfaceHolder arg0, int arg1, int arg2, int arg3) {
Log.d(TAG, "call surfaceChanged event");
synchronized(mSyncObject) {
@@ -135,6 +146,25 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
}
}
/**
* This method enables the label with the FPS value on the screen
*/
public void enableFpsMeter() {
if (mFpsMeter == null) {
mFpsMeter = new FpsMeter();
mFpsMeter.setResolution(mFrameWidth, mFrameHeight);
}
}
public void disableFpsMeter() {
mFpsMeter = null;
}
/**
* Sets the client listener that will receive lifecycle callbacks and preview frames.
* @param listener
*/
public void setCvCameraViewListener(CvCameraViewListener listener) {
mListener = listener;
}
@@ -272,6 +302,10 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
if (canvas != null) {
canvas.drawColor(0, android.graphics.PorterDuff.Mode.CLEAR);
canvas.drawBitmap(mCacheBitmap, (canvas.getWidth() - mCacheBitmap.getWidth()) / 2, (canvas.getHeight() - mCacheBitmap.getHeight()) / 2, null);
if (mFpsMeter != null) {
mFpsMeter.measure();
mFpsMeter.draw(canvas, 20, 30);
}
getHolder().unlockCanvasAndPost(canvas);
}
}

@@ -0,0 +1,66 @@
package org.opencv.android;
import java.text.DecimalFormat;
import org.opencv.core.Core;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.util.Log;
public class FpsMeter {
private static final String TAG = "FpsMeter";
private static final int STEP = 20;
private static final DecimalFormat FPS_FORMAT = new DecimalFormat("0.00");
private int mFramesCounter;
private double mFrequency;
private long mPrevFrameTime;
private String mStrFps;
Paint mPaint;
boolean mIsInitialized = false;
int mWidth = 0;
int mHeight = 0;
public void init() {
mFramesCounter = 0;
mFrequency = Core.getTickFrequency();
mPrevFrameTime = Core.getTickCount();
mStrFps = "";
mPaint = new Paint();
mPaint.setColor(Color.BLUE);
mPaint.setTextSize(20);
}
public void measure() {
if (!mIsInitialized) {
init();
mIsInitialized = true;
} else {
mFramesCounter++;
if (mFramesCounter % STEP == 0) {
long time = Core.getTickCount();
double fps = STEP * mFrequency / (time - mPrevFrameTime);
mPrevFrameTime = time;
if (mWidth != 0 && mHeight != 0)
mStrFps = FPS_FORMAT.format(fps) + " FPS@" + Integer.valueOf(mWidth) + "x" + Integer.valueOf(mHeight);
else
mStrFps = FPS_FORMAT.format(fps) + " FPS";
Log.i(TAG, mStrFps);
}
}
}
public void setResolution(int width, int height) {
mWidth = width;
mHeight = height;
}
public void draw(Canvas canvas, float offsetx, float offsety) {
Log.d(TAG, mStrFps);
canvas.drawText(mStrFps, offsetx, offsety, mPaint);
}
}
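FpsMeter is passive: measure() is called once per delivered frame and recomputes the rate every STEP (20) frames, while draw() just paints the cached string. In this diff it is driven by deliverAndDrawFrame() shown earlier; a minimal sketch of driving it by hand (the wrapper class and hook method are hypothetical):

// Sketch: driving FpsMeter manually instead of via CameraBridgeViewBase.
class ManualFpsExample {
    private final FpsMeter mMeter = new FpsMeter();

    ManualFpsExample() {
        mMeter.setResolution(640, 480); // optional: appends "@640x480" to the label
    }

    void onFrameDrawn(android.graphics.Canvas canvas) { // hypothetical per-frame hook
        mMeter.measure();            // first call runs init(); later calls update every STEP frames
        mMeter.draw(canvas, 20, 30); // same offsets CameraBridgeViewBase uses
    }
}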

@@ -40,6 +40,8 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
private Thread mThread;
private boolean mStopThread;
protected Camera mCamera;
private SurfaceTexture mSurfaceTexture;
public static class JavaCameraSizeAccessor implements ListItemAccessor {
@@ -55,7 +57,9 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
}
}
private Camera mCamera;
public JavaCameraView(Context context, int cameraId) {
super(context, cameraId);
}
public JavaCameraView(Context context, AttributeSet attrs) {
super(context, attrs);
@@ -69,25 +73,36 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
synchronized (this) {
mCamera = null;
Log.d(TAG, "Trying to open camera with old open()");
try {
mCamera = Camera.open();
}
catch (Exception e){
Log.e(TAG, "Camera is not available (in use or does not exist): " + e.getLocalizedMessage());
}
if (mCameraIndex == -1) {
Log.d(TAG, "Trying to open camera with old open()");
try {
mCamera = Camera.open();
}
catch (Exception e){
Log.e(TAG, "Camera is not available (in use or does not exist): " + e.getLocalizedMessage());
}
if(mCamera == null && Build.VERSION.SDK_INT >= Build.VERSION_CODES.GINGERBREAD) {
boolean connected = false;
for (int camIdx = 0; camIdx < Camera.getNumberOfCameras(); ++camIdx) {
Log.d(TAG, "Trying to open camera with new open(" + Integer.valueOf(camIdx) + ")");
if(mCamera == null && Build.VERSION.SDK_INT >= Build.VERSION_CODES.GINGERBREAD) {
boolean connected = false;
for (int camIdx = 0; camIdx < Camera.getNumberOfCameras(); ++camIdx) {
Log.d(TAG, "Trying to open camera with new open(" + Integer.valueOf(camIdx) + ")");
try {
mCamera = Camera.open(camIdx);
connected = true;
} catch (RuntimeException e) {
Log.e(TAG, "Camera #" + camIdx + " failed to open: " + e.getLocalizedMessage());
}
if (connected) break;
}
}
} else {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.GINGERBREAD) {
Log.d(TAG, "Trying to open camera with new open(" + Integer.valueOf(mCameraIndex) + ")");
try {
mCamera = Camera.open(camIdx);
connected = true;
mCamera = Camera.open(mCameraIndex);
} catch (RuntimeException e) {
Log.e(TAG, "Camera #" + camIdx + " failed to open: " + e.getLocalizedMessage());
Log.e(TAG, "Camera #" + mCameraIndex + " failed to open: " + e.getLocalizedMessage());
}
if (connected) break;
}
}
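Because this view strips the leading +/- markers, the hunk above shows the old and new bodies interleaved. The logic that survives the merge, condensed into a hypothetical helper for readability (not a verbatim excerpt): camera_id == -1 keeps the old behavior (legacy open(), then probe every index on API 9+); an explicit id calls Camera.open(id) directly on API 9+.

// Reconstructed sketch of the merged camera-selection logic.
import android.hardware.Camera;
import android.os.Build;

class CameraSelector {
    static Camera open(int cameraIndex) {
        Camera camera = null;
        if (cameraIndex == -1) {
            try { camera = Camera.open(); } catch (Exception e) { /* legacy open() failed */ }
            if (camera == null && Build.VERSION.SDK_INT >= Build.VERSION_CODES.GINGERBREAD) {
                for (int camIdx = 0; camIdx < Camera.getNumberOfCameras(); ++camIdx) {
                    try { camera = Camera.open(camIdx); break; } catch (RuntimeException e) { /* try next index */ }
                }
            }
        } else if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.GINGERBREAD) {
            try { camera = Camera.open(cameraIndex); } catch (RuntimeException e) { /* requested camera unavailable */ }
        }
        return camera;
    }
}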
@@ -120,6 +135,10 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
mFrameWidth = params.getPreviewSize().width;
mFrameHeight = params.getPreviewSize().height;
if (mFpsMeter != null) {
mFpsMeter.setResolution(mFrameWidth, mFrameHeight);
}
int size = mFrameWidth * mFrameHeight;
size = size * ImageFormat.getBitsPerPixel(params.getPreviewFormat()) / 8;
mBuffer = new byte[size];
@@ -217,6 +236,7 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
releaseCamera();
}
@TargetApi(Build.VERSION_CODES.FROYO)
public void onPreviewFrame(byte[] frame, Camera arg1) {
Log.i(TAG, "Preview Frame received. Need to create MAT and deliver it to clients");
Log.i(TAG, "Frame size is " + frame.length);

@@ -19,7 +19,12 @@ public class NativeCameraView extends CameraBridgeViewBase {
public static final String TAG = "NativeCameraView";
private boolean mStopThread;
private Thread mThread;
private VideoCapture mCamera;
protected VideoCapture mCamera;
public NativeCameraView(Context context, int cameraId) {
super(context, cameraId);
}
public NativeCameraView(Context context, AttributeSet attrs) {
super(context, attrs);
@@ -77,12 +82,17 @@ public class NativeCameraView extends CameraBridgeViewBase {
private boolean initializeCamera(int width, int height) {
synchronized (this) {
mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID);
if (mCameraIndex == -1)
mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID);
else
mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID + mCameraIndex);
if (mCamera == null)
return false;
//TODO: improve error handling
if (!mCamera.isOpened())
return false;
java.util.List<Size> sizes = mCamera.getSupportedPreviewSizes();
@@ -92,6 +102,10 @@ public class NativeCameraView extends CameraBridgeViewBase {
mFrameWidth = (int)frameSize.width;
mFrameHeight = (int)frameSize.height;
if (mFpsMeter != null) {
mFpsMeter.setResolution(mFrameWidth, mFrameHeight);
}
AllocateCache();
mCamera.set(Highgui.CV_CAP_PROP_FRAME_WIDTH, frameSize.width);
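NativeCameraView encodes the camera choice directly in the VideoCapture device index: Highgui.CV_CAP_ANDROID selects the Android native backend, and the camera id from attrs.xml is added on top. A hypothetical helper showing the same arithmetic and the null/isOpened checks from the hunk above:

// Sketch: the index arithmetic used by NativeCameraView.
import org.opencv.highgui.Highgui;
import org.opencv.highgui.VideoCapture;

class NativeCameraExample {
    static VideoCapture open(int cameraId) {
        // id 1 (front, per attrs.xml earlier in this diff) becomes CV_CAP_ANDROID + 1
        VideoCapture capture = (cameraId == -1)
                ? new VideoCapture(Highgui.CV_CAP_ANDROID)           // "any" camera
                : new VideoCapture(Highgui.CV_CAP_ANDROID + cameraId);
        return capture.isOpened() ? capture : null;                  // mirror the checks above
    }
}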

@@ -46,7 +46,7 @@
#include <string>
#if defined (LOG_CASCADE_STATISTIC)
struct Logger
{
enum { STADIES_NUM = 20 };
@@ -109,7 +109,7 @@ struct Logger
}
} logger;
#endif
namespace cv
{

@@ -29,7 +29,7 @@ PERF_TEST_P(InpaintArea_InpaintingMethod, inpaint,
Rect inpaintArea(src.cols/3, src.rows/3, sz.width, sz.height);
mask(inpaintArea).setTo(255);
declare.in(src, mask).out(result).time(30);
declare.in(src, mask).out(result).time(120);
TEST_CYCLE() inpaint(src, mask, result, 10.0, inpaintingMethod);

@@ -1 +1,3 @@
#!/usr/bin/env python
from cv2.cv import *

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys
from string import Template

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import hdr_parser, sys, re, os, cStringIO
from string import Template

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import os, sys, re, string
# the list only for debugging. The real list, used in the real OpenCV build, is specified in CMakeLists.txt

@@ -1,5 +1,6 @@
# Calculating and displaying 2D Hue-Saturation histogram of a color image
#!/usr/bin/env python
# Calculating and displaying 2D Hue-Saturation histogram of a color image
import sys
import cv2.cv as cv

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys
import math
import time

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys
import cv2.cv as cv

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import cv2.cv as cv
import unittest

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import cv2.cv as cv
import numpy as np
cv.NamedWindow('Leak')

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import cv2.cv as cv
import numpy as np
import time

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import cv2.cv as cv
import math
import time

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import cv2.cv as cv
import math
import time

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import cv2.cv as cv
def precornerdetect(image):

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import unittest
import random
import time

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import urllib
import cv2.cv as cv
import Image

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import unittest
import random
import time

@@ -1,3 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# transformations.py

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import testlog_parser, sys, os, xml, re
from table_formatter import *
from optparse import OptionParser

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import math, os, sys
webcolors = {
