Initial pass at some Boost.Python bindings.

pull/13383/head
Ethan Rublee 14 years ago
parent 3bb685a744
commit 44e9fdaa2d
  1. modules/python/CMakeLists.txt (+5)
  2. modules/python/boost/CMakeLists.txt (+62)
  3. modules/python/boost/cv_core.cpp (+16)
  4. modules/python/boost/cv_defines.cpp (+43)
  5. modules/python/boost/cv_highgui.cpp (+104)
  6. modules/python/boost/cv_mat.cpp (+188)
  7. modules/python/boost/cv_points.cpp (+68)
  8. modules/python/boost/highgui_defines.cpp (+165)
  9. modules/python/boost/opencv.cpp (+17)
  10. modules/python/boost/split.h (+8)
  11. samples/python/boost/camera.py (+10)
  12. samples/python/boost/subwindow.py (+64)

@@ -3,6 +3,11 @@
# ----------------------------------------------------------------------------
project(opencv_python)
option(MAKE_BOOST_PYTHON_BINDINGS "Build the experimental Boost.Python bindings" OFF)
if(MAKE_BOOST_PYTHON_BINDINGS)
add_subdirectory(boost)
endif()
include_directories(${PYTHON_INCLUDE_PATH})
include_directories(
"${CMAKE_CURRENT_SOURCE_DIR}/src1"

@@ -0,0 +1,62 @@
find_package(Boost COMPONENTS
python
)
find_package(PythonLibs)
include_directories(
"${CMAKE_SOURCE_DIR}/modules/core/include"
"${CMAKE_SOURCE_DIR}/modules/imgproc/include"
"${CMAKE_SOURCE_DIR}/modules/video/include"
"${CMAKE_SOURCE_DIR}/modules/highgui/include"
"${CMAKE_SOURCE_DIR}/modules/ml/include"
"${CMAKE_SOURCE_DIR}/modules/features2d/include"
"${CMAKE_SOURCE_DIR}/modules/flann/include"
"${CMAKE_SOURCE_DIR}/modules/calib3d/include"
"${CMAKE_SOURCE_DIR}/modules/objdetect/include"
"${CMAKE_SOURCE_DIR}/modules/legacy/include"
"${CMAKE_SOURCE_DIR}/modules/contrib/include"
)
include_directories(
${PYTHON_INCLUDE_PATH}
${Boost_INCLUDE_DIRS}
)
set(bp_opencv_target opencv_python_boost)
add_library(${bp_opencv_target} SHARED
opencv.cpp
cv_core.cpp
cv_mat.cpp
cv_points.cpp
cv_highgui.cpp
highgui_defines.cpp
)
set_target_properties(${bp_opencv_target}
PROPERTIES
OUTPUT_NAME opencv
PREFIX ""
)
set(OpenCV_LIBS opencv_core opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib)
target_link_libraries(${bp_opencv_target}
${Boost_LIBRARIES}
${PYTHON_LIBRARIES}
${OpenCV_LIBS}
)
if(WIN32)
install(TARGETS ${bp_opencv_target}
RUNTIME DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
LIBRARY DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
ARCHIVE DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
)
else()
#install(FILES ${LIBRARY_OUTPUT_PATH}/cv${CVPY_SUFFIX} DESTINATION ${PYTHON_DIST_PACKAGES_PATH})
install(TARGETS ${bp_opencv_target}
RUNTIME DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
LIBRARY DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
ARCHIVE DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main)
endif()
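
With MAKE_BOOST_PYTHON_BINDINGS switched on, the target above builds a module named plain "opencv" (no "lib" prefix) and installs it into the Python packages path. A minimal, hedged smoke test, assuming the module ends up somewhere on PYTHONPATH, is just to import it and touch a couple of the wrapped names:

# Smoke test for the freshly built extension module (assumed importable).
import opencv
print(opencv.CV_8UC3)   # one of the integer type constants from cv_defines.cpp
m = opencv.Mat()        # default-constructed, empty matrix wrapper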

@@ -0,0 +1,16 @@
#include <boost/python.hpp>
#include <opencv2/core/core.hpp>
namespace bp = boost::python;
namespace opencv_wrappers
{
void wrap_cv_core()
{
bp::object opencv = bp::scope();
//define opencv consts
#include "cv_defines.cpp"
}
}

@@ -0,0 +1,43 @@
//These are all opencv type defines...
opencv.attr("CV_8U") = CV_8U;
opencv.attr("CV_8S") = CV_8S;
opencv.attr("CV_16U") = CV_16U;
opencv.attr("CV_16S") = CV_16S;
opencv.attr("CV_32S") = CV_32S;
opencv.attr("CV_32F") = CV_32F;
opencv.attr("CV_64F") = CV_64F;
opencv.attr("CV_8UC1") = CV_8UC1;
opencv.attr("CV_8UC2") = CV_8UC2;
opencv.attr("CV_8UC3") = CV_8UC3;
opencv.attr("CV_8UC4") = CV_8UC4;
opencv.attr("CV_8SC1") = CV_8SC1;
opencv.attr("CV_8SC2") = CV_8SC2;
opencv.attr("CV_8SC3") = CV_8SC3;
opencv.attr("CV_8SC4") = CV_8SC4;
opencv.attr("CV_16UC1") = CV_16UC1;
opencv.attr("CV_16UC2") = CV_16UC2;
opencv.attr("CV_16UC3") = CV_16UC3;
opencv.attr("CV_16UC4") = CV_16UC4;
opencv.attr("CV_16SC1") = CV_16SC1;
opencv.attr("CV_16SC2") = CV_16SC2;
opencv.attr("CV_16SC3") = CV_16SC3;
opencv.attr("CV_16SC4") = CV_16SC4;
opencv.attr("CV_32SC1") = CV_32SC1;
opencv.attr("CV_32SC2") = CV_32SC2;
opencv.attr("CV_32SC3") = CV_32SC3;
opencv.attr("CV_32SC4") = CV_32SC4;
opencv.attr("CV_32FC1") = CV_32FC1;
opencv.attr("CV_32FC2") = CV_32FC2;
opencv.attr("CV_32FC3") = CV_32FC3;
opencv.attr("CV_32FC4") = CV_32FC4;
opencv.attr("CV_64FC1") = CV_64FC1;
opencv.attr("CV_64FC2") = CV_64FC2;
opencv.attr("CV_64FC3") = CV_64FC3;
opencv.attr("CV_64FC4") = CV_64FC4;

@@ -0,0 +1,104 @@
#include <boost/python.hpp>
#include <boost/python/overloads.hpp>
#include <boost/python/dict.hpp>
#include <boost/python/tuple.hpp>
#include <iostream>
#include <fstream>
#include <opencv2/highgui/highgui.hpp>
namespace bp = boost::python;
namespace
{
BOOST_PYTHON_FUNCTION_OVERLOADS(imread_overloads, cv::imread, 1, 2);
BOOST_PYTHON_FUNCTION_OVERLOADS(imwrite_overloads, cv::imwrite, 2, 3);
BOOST_PYTHON_FUNCTION_OVERLOADS(imencode_overloads, cv::imencode, 3, 4);
struct PyMCallBackData
{
bp::object cb,udata;
static void callback_fn(int event,int x, int y, int flags, void* param)
{
PyMCallBackData* d = static_cast<PyMCallBackData*>(param);
d->cb(event,x,y,flags,d->udata);
}
static std::map<std::string,PyMCallBackData*> callbacks_;
};
std::map<std::string,PyMCallBackData*> PyMCallBackData::callbacks_;
//typedef void (*MouseCallback )(int event, int x, int y, int flags, void* param);
//CV_EXPORTS void setMouseCallback( const string& windowName, MouseCallback onMouse, void* param=0)
void setMouseCallback_(const std::string& windowName, bp::object callback, bp::object userdata)
{
//release any callback data previously registered for this window
std::map<std::string,PyMCallBackData*>::iterator it = PyMCallBackData::callbacks_.find(windowName);
if(it != PyMCallBackData::callbacks_.end())
{
delete it->second;
PyMCallBackData::callbacks_.erase(it);
}
if(callback.ptr() == Py_None)
{
std::cout << "Clearing callback" << std::endl;
cv::setMouseCallback(windowName,NULL,NULL);
return;
}
PyMCallBackData* d = new PyMCallBackData;
d->cb = callback;
d->udata = userdata;
PyMCallBackData::callbacks_[windowName] = d;
cv::setMouseCallback(windowName,&PyMCallBackData::callback_fn,d);
}
}
namespace opencv_wrappers
{
void wrap_highgui_defines();
void wrap_video_capture()
{
bp::class_<cv::VideoCapture> VideoCapture_("VideoCapture");
VideoCapture_.def(bp::init<>());
VideoCapture_.def(bp::init<std::string>());
VideoCapture_.def(bp::init<int>());
typedef bool(cv::VideoCapture::*open_1)(const std::string&);
typedef bool(cv::VideoCapture::*open_2)(int);
VideoCapture_.def("open", open_1(&cv::VideoCapture::open));
VideoCapture_.def("open", open_2(&cv::VideoCapture::open));
VideoCapture_.def("isOpened", &cv::VideoCapture::isOpened);
VideoCapture_.def("release", &cv::VideoCapture::release);
VideoCapture_.def("grab", &cv::VideoCapture::grab);
VideoCapture_.def("retrieve", &cv::VideoCapture::retrieve);
VideoCapture_.def("read", &cv::VideoCapture::read);
VideoCapture_.def("set", &cv::VideoCapture::set);
VideoCapture_.def("get", &cv::VideoCapture::get);
}
void wrap_video_writer()
{
bp::class_<cv::VideoWriter> VideoWriter_("VideoWriter");
VideoWriter_.def(bp::init<>());
VideoWriter_.def(bp::init<const std::string&, int, double, cv::Size, bool>());
VideoWriter_.def("open", &cv::VideoWriter::open);
VideoWriter_.def("isOpened", &cv::VideoWriter::isOpened);
VideoWriter_.def("write", &cv::VideoWriter::write);
}
void wrap_highgui()
{
wrap_highgui_defines();
//video stuff.
wrap_video_capture();
wrap_video_writer();
//image windows
bp::def("imshow", cv::imshow);
bp::def("waitKey", cv::waitKey);
bp::def("namedWindow", cv::namedWindow);
//CV_EXPORTS void setMouseCallback( const string& windowName, MouseCallback onMouse, void* param=0);
bp::def("setMouseCallback", setMouseCallback_);
//image io
bp::def("imread", cv::imread, imread_overloads());
bp::def("imwrite", cv::imwrite, imwrite_overloads());
bp::def("imdecode", cv::imdecode);
bp::def("imencode", cv::imencode, imencode_overloads());
}
}
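
The BOOST_PYTHON_FUNCTION_OVERLOADS wrappers make the trailing flag/params arguments of the image I/O functions optional on the Python side, mirroring the C++ defaults. A hedged usage sketch (file names are placeholders):

# File names below are placeholders.
import opencv
img = opencv.imread("input.png")                                    # flags argument omitted
gray = opencv.imread("input.png", opencv.CV_LOAD_IMAGE_GRAYSCALE)   # flags given explicitly
opencv.imwrite("gray.png", gray)                                    # params argument omitted
opencv.namedWindow("preview", opencv.CV_WINDOW_AUTOSIZE)
opencv.imshow("preview", gray)
opencv.waitKey(0)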

@@ -0,0 +1,188 @@
#include <boost/python.hpp>
#include <boost/python/stl_iterator.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
#include <string>
#include <opencv2/core/core.hpp>
namespace bp = boost::python;
namespace
{
template<typename T>
inline void mat_set_t(cv::Mat&m, bp::object o)
{
int length = bp::len(o);
if (m.size().area() != length || m.depth() != cv::DataType<T>::depth)
{
m.create(length, 1, cv::DataType<T>::type);
}
bp::stl_input_iterator<T> begin(o), end;
typename cv::Mat_<T>::iterator it = m.begin<T> (), itEnd = m.end<T> ();
for (; it != itEnd; ++it)
*it = *(begin++);
}
inline void mat_set(cv::Mat& m, bp::object o, int type)
{
//switch on the given type and use this type as the cv::Mat element type
switch (CV_MAT_DEPTH(type))
{
case CV_8U:
mat_set_t<unsigned char> (m, o);
break;
case CV_8S:
mat_set_t<signed char> (m, o);
break;
case CV_16U:
mat_set_t<uint16_t> (m, o);
break;
case CV_16S:
mat_set_t<int16_t> (m, o);
break;
case CV_32S:
mat_set_t<int32_t> (m, o);
break;
case CV_32F:
mat_set_t<float> (m, o);
break;
case CV_64F:
mat_set_t<double> (m, o);
break;
default:
throw std::logic_error("Given type not supported.");
}
}
inline cv::Size mat_size(cv::Mat& m)
{
return m.size();
}
inline int mat_type(cv::Mat& m)
{
return m.type();
}
inline void mat_set(cv::Mat& m, bp::object o)
{
if (m.empty())
throw std::logic_error("The matrix is empty, can not deduce type.");
//use the m.type and implicitly assume that o is of this type
mat_set(m, o, m.type());
}
inline cv::Mat mat_mat_star(cv::Mat& m, cv::Mat& m2)
{
return m * m2;
}
inline cv::Mat mat_scalar_star(cv::Mat& m, double s)
{
return m * s;
}
inline cv::Mat mat_scalar_plus(cv::Mat& m, double s)
{
return m + cv::Scalar::all(s);
}
inline cv::Mat mat_scalar_plus2(cv::Mat& m, cv::Scalar s)
{
return m + s;
}
inline cv::Mat mat_scalar_sub(cv::Mat& m, double s)
{
return m - cv::Scalar::all(s);
}
inline cv::Mat mat_scalar_sub2(cv::Mat& m, cv::Scalar s)
{
return m - s;
}
inline cv::Mat mat_scalar_div(cv::Mat& m, double s)
{
return m / s;
}
inline cv::Mat mat_mat_plus(cv::Mat& m, cv::Mat& m2)
{
return m + m2;
}
inline cv::Mat mat_mat_sub(cv::Mat& m, cv::Mat& m2)
{
return m - m2;
}
inline cv::Mat mat_mat_div(cv::Mat& m, cv::Mat& m2)
{
return m/m2;
}
inline cv::Mat roi(cv::Mat& m, cv::Rect region)
{
return m(region);
}
//overloaded function pointers
void (*mat_set_p2)(cv::Mat&, bp::object) = mat_set;
void (*mat_set_p3)(cv::Mat&, bp::object, int) = mat_set;
}
namespace opencv_wrappers
{
void wrap_mat()
{
typedef std::vector<uchar> buffer_t;
bp::class_<std::vector<uchar> > ("buffer")
.def(bp::vector_indexing_suite<std::vector<uchar>, false>() );
bp::class_<cv::InputArray>("InputArray");
bp::class_<cv::OutputArray>("OutputArray");
bp::implicitly_convertible<cv::Mat,cv::InputArray>();
bp::implicitly_convertible<cv::Mat,cv::OutputArray>();
//mat definition
bp::class_<cv::Mat> Mat_("Mat");
Mat_.def(bp::init<>());
Mat_.def(bp::init<int, int, int>());
Mat_.def(bp::init<cv::Size, int>());
Mat_.def(bp::init<buffer_t>());
Mat_.def_readonly("rows", &cv::Mat::rows, "the number of rows");
Mat_.def_readonly("cols", &cv::Mat::cols, "the number of columns");
Mat_.def("row", &cv::Mat::row, "get the row at index");
Mat_.def("col", &cv::Mat::col, "get the column at index");
Mat_.def("fromarray", mat_set_p2, "Set a Matrix from a python iterable. Assumes the type of the Mat "
"while setting. If the size of the Matrix will not accommodate "
"the given python iterable length, then the matrix will be allocated "
"as a single channel, Nx1 vector where N = len(list)");
Mat_.def("fromarray", mat_set_p3, "Set a Matrix from a python array. Explicitly give "
"the type of the array. If the size of the Matrix will not accommodate "
"the given python iterable length, then the matrix will be allocated "
"as a single channel, Nx1 vector where N = len(list)");
Mat_.def("size", mat_size);
Mat_.def("type", mat_type);
Mat_.def("convertTo",&cv::Mat::convertTo);
Mat_.def("clone", &cv::Mat::clone);
Mat_.def("t",&cv::Mat::t);
Mat_.def("roi",roi);
Mat_.def("__mul__", mat_mat_star);
Mat_.def("__mul__", mat_scalar_star);
Mat_.def("__add__",mat_mat_plus);
Mat_.def("__add__",mat_scalar_plus);
Mat_.def("__add__",mat_scalar_plus2);
Mat_.def("__sub__",mat_mat_sub);
Mat_.def("__sub__",mat_scalar_sub);
Mat_.def("__sub__",mat_scalar_sub2);
Mat_.def("__div__",mat_mat_div);
Mat_.def("__div__",mat_scalar_div);
}
}
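
A hedged sketch of what these Mat wrappers allow from Python (values are arbitrary; fromarray reallocates only when the length or depth of the iterable does not match):

# Illustration of the Mat bindings above.
import opencv
m = opencv.Mat(3, 1, opencv.CV_64FC1)
m.fromarray([1.0, 2.0, 3.0])                 # keeps the existing CV_64F type
m.fromarray([1, 2, 3, 4], opencv.CV_32SC1)   # reallocated as a 4x1 CV_32S vector
doubled = m * 2.0                            # __mul__ with a scalar
shifted = m + 1.0                            # __add__ with a scalar
sub = m.roi(opencv.Rect(0, 0, 1, 2))         # 2x1 sub-matrix (x, y, width, height)
assert m.rows == 4 and m.cols == 1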

@@ -0,0 +1,68 @@
#include <boost/python.hpp>
#include <string>
#include <opencv2/core/core.hpp>
namespace bp = boost::python;
namespace
{
template<typename T>
void wrap_point(const std::string& name)
{
typedef cv::Point_<T> Point_t;
bp::class_<Point_t> Point_(name.c_str());
Point_.def(bp::init<>());
Point_.def(bp::init<T, T>());
Point_.def(bp::init<Point_t>());
Point_.def_readwrite("x", &Point_t::x);
Point_.def_readwrite("y", &Point_t::y);
Point_.def_readwrite("dot", &Point_t::dot);
Point_.def_readwrite("inside", &Point_t::inside);
}
template<typename T>
void wrap_rect(const std::string& name)
{
typedef cv::Rect_<T> Rect_t;
bp::class_<Rect_t> c_(name.c_str());
c_.def(bp::init<>());
c_.def(bp::init<T, T, T, T>());
c_.def(bp::init<cv::Point_<T>, cv::Point_<T> >());
c_.def(bp::init<cv::Point_<T>, cv::Size_<T> >());
c_.def(bp::init<Rect_t>());
c_.def_readwrite("x", &Rect_t::x);
c_.def_readwrite("y", &Rect_t::y);
c_.def_readwrite("width", &Rect_t::width);
c_.def_readwrite("height", &Rect_t::height);
c_.def("tl", &Rect_t::tl);
c_.def("br", &Rect_t::br);
c_.def("size", &Rect_t::size);
c_.def("area", &Rect_t::area);
c_.def("contains", &Rect_t::contains);
}
}
namespace opencv_wrappers
{
void wrap_points()
{
bp::class_<cv::Size> Size_("Size");
Size_.def(bp::init<int, int>());
Size_.def_readwrite("width", &cv::Size::width);
Size_.def_readwrite("height", &cv::Size::height);
Size_.def("area", &cv::Size::area);
wrap_point<int> ("Point");
wrap_point<float> ("Point2f");
wrap_point<double> ("Point2d");
wrap_rect<int> ("Rect");
wrap_rect<float> ("Rectf");
wrap_rect<double> ("Rectd");
}
}
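
A hedged sketch of the geometry wrappers from Python (the numbers are arbitrary):

# Point, Rect and Size wrappers in action.
import opencv
p = opencv.Point(15, 25)
r = opencv.Rect(10, 20, 100, 50)    # x, y, width, height
assert r.contains(p)
assert r.area() == 100 * 50
s = opencv.Size(640, 480)
assert s.area() == 640 * 480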

@@ -0,0 +1,165 @@
#include <boost/python.hpp>
#include <opencv2/highgui/highgui.hpp>
namespace bp = boost::python;
namespace opencv_wrappers
{
void wrap_highgui_defines()
{
bp::object opencv = bp::scope();
opencv.attr("CV_FONT_LIGHT") = int(CV_FONT_LIGHT);
opencv.attr("CV_FONT_NORMAL") = int(CV_FONT_NORMAL);
opencv.attr("CV_FONT_DEMIBOLD") = int(CV_FONT_DEMIBOLD);
opencv.attr("CV_FONT_BOLD") = int(CV_FONT_BOLD);
opencv.attr("CV_FONT_BLACK") = int(CV_FONT_BLACK);
opencv.attr("CV_STYLE_NORMAL") = int(CV_STYLE_NORMAL);
opencv.attr("CV_STYLE_ITALIC") = int(CV_STYLE_ITALIC);
opencv.attr("CV_STYLE_OBLIQUE") = int(CV_STYLE_OBLIQUE);
//These 3 flags are used by cvSet/GetWindowProperty
opencv.attr("CV_WND_PROP_FULLSCREEN") = int(CV_WND_PROP_FULLSCREEN);
opencv.attr("CV_WND_PROP_AUTOSIZE") = int(CV_WND_PROP_AUTOSIZE);
opencv.attr("CV_WND_PROP_ASPECTRATIO") = int(CV_WND_PROP_ASPECTRATIO);
//
//These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty
opencv.attr("CV_WINDOW_NORMAL") = int(CV_WINDOW_NORMAL);
opencv.attr("CV_WINDOW_AUTOSIZE") = int(CV_WINDOW_AUTOSIZE);
//
//Those flags are only for Qt
opencv.attr("CV_GUI_EXPANDED") = int(CV_GUI_EXPANDED);
opencv.attr("CV_GUI_NORMAL") = int(CV_GUI_NORMAL);
//
//These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty
opencv.attr("CV_WINDOW_FULLSCREEN") = int(CV_WINDOW_FULLSCREEN);
opencv.attr("CV_WINDOW_FREERATIO") = int(CV_WINDOW_FREERATIO);
opencv.attr("CV_WINDOW_KEEPRATIO") = int(CV_WINDOW_KEEPRATIO);
opencv.attr("CV_EVENT_MOUSEMOVE") = int(CV_EVENT_MOUSEMOVE);
opencv.attr("CV_EVENT_LBUTTONDOWN") = int(CV_EVENT_LBUTTONDOWN);
opencv.attr("CV_EVENT_RBUTTONDOWN") = int(CV_EVENT_RBUTTONDOWN);
opencv.attr("CV_EVENT_MBUTTONDOWN") = int(CV_EVENT_MBUTTONDOWN);
opencv.attr("CV_EVENT_LBUTTONUP") = int(CV_EVENT_LBUTTONUP);
opencv.attr("CV_EVENT_RBUTTONUP") = int(CV_EVENT_RBUTTONUP);
opencv.attr("CV_EVENT_MBUTTONUP") = int(CV_EVENT_MBUTTONUP);
opencv.attr("CV_EVENT_LBUTTONDBLCLK") = int(CV_EVENT_LBUTTONDBLCLK);
opencv.attr("CV_EVENT_RBUTTONDBLCLK") = int(CV_EVENT_RBUTTONDBLCLK);
opencv.attr("CV_EVENT_MBUTTONDBLCLK") = int(CV_EVENT_MBUTTONDBLCLK);
opencv.attr("CV_EVENT_FLAG_LBUTTON") = int(CV_EVENT_FLAG_LBUTTON);
opencv.attr("CV_EVENT_FLAG_RBUTTON") = int(CV_EVENT_FLAG_RBUTTON);
opencv.attr("CV_EVENT_FLAG_MBUTTON") = int(CV_EVENT_FLAG_MBUTTON);
opencv.attr("CV_EVENT_FLAG_CTRLKEY") = int(CV_EVENT_FLAG_CTRLKEY);
opencv.attr("CV_EVENT_FLAG_SHIFTKEY") = int(CV_EVENT_FLAG_SHIFTKEY);
opencv.attr("CV_EVENT_FLAG_ALTKEY") = int(CV_EVENT_FLAG_ALTKEY);
opencv.attr("CV_LOAD_IMAGE_UNCHANGED") = int(CV_LOAD_IMAGE_UNCHANGED);
opencv.attr("CV_LOAD_IMAGE_GRAYSCALE") = int(CV_LOAD_IMAGE_GRAYSCALE);
opencv.attr("CV_LOAD_IMAGE_COLOR") = int(CV_LOAD_IMAGE_COLOR);
opencv.attr("CV_LOAD_IMAGE_ANYDEPTH") = int(CV_LOAD_IMAGE_ANYDEPTH);
opencv.attr("CV_LOAD_IMAGE_ANYCOLOR") = int(CV_LOAD_IMAGE_ANYCOLOR);
opencv.attr("CV_IMWRITE_JPEG_QUALITY") = int(CV_IMWRITE_JPEG_QUALITY);
opencv.attr("CV_IMWRITE_PNG_COMPRESSION") = int(CV_IMWRITE_PNG_COMPRESSION);
opencv.attr("CV_IMWRITE_PXM_BINARY") = int(CV_IMWRITE_PXM_BINARY);
opencv.attr("CV_CVTIMG_FLIP") = int(CV_CVTIMG_FLIP);
opencv.attr("CV_CVTIMG_SWAP_RB") = int(CV_CVTIMG_SWAP_RB);
opencv.attr("CV_CAP_ANY") = int(CV_CAP_ANY);
opencv.attr("CV_CAP_MIL") = int(CV_CAP_MIL);
opencv.attr("CV_CAP_VFW") = int(CV_CAP_VFW);
opencv.attr("CV_CAP_V4L") = int(CV_CAP_V4L);
opencv.attr("CV_CAP_V4L2") = int(CV_CAP_V4L2);
opencv.attr("CV_CAP_FIREWARE") = int(CV_CAP_FIREWARE);
opencv.attr("CV_CAP_FIREWIRE") = int(CV_CAP_FIREWIRE);
opencv.attr("CV_CAP_IEEE1394") = int(CV_CAP_IEEE1394);
opencv.attr("CV_CAP_DC1394") = int(CV_CAP_DC1394);
opencv.attr("CV_CAP_CMU1394") = int(CV_CAP_CMU1394);
opencv.attr("CV_CAP_STEREO") = int(CV_CAP_STEREO);
opencv.attr("CV_CAP_TYZX") = int(CV_CAP_TYZX);
opencv.attr("CV_TYZX_LEFT") = int(CV_TYZX_LEFT);
opencv.attr("CV_TYZX_RIGHT") = int(CV_TYZX_RIGHT);
opencv.attr("CV_TYZX_COLOR") = int(CV_TYZX_COLOR);
opencv.attr("CV_TYZX_Z") = int(CV_TYZX_Z);
opencv.attr("CV_CAP_QT") = int(CV_CAP_QT);
opencv.attr("CV_CAP_UNICAP") = int(CV_CAP_UNICAP);
opencv.attr("CV_CAP_DSHOW") = int(CV_CAP_DSHOW);
opencv.attr("CV_CAP_PVAPI") = int(CV_CAP_PVAPI);
opencv.attr("CV_CAP_OPENNI") = int(CV_CAP_OPENNI);
opencv.attr("CV_CAP_ANDROID") = int(CV_CAP_ANDROID);
opencv.attr("CV_CAP_PROP_DC1394_OFF") = int(CV_CAP_PROP_DC1394_OFF);
opencv.attr("CV_CAP_PROP_DC1394_MODE_MANUAL") = int(CV_CAP_PROP_DC1394_MODE_MANUAL);
opencv.attr("CV_CAP_PROP_DC1394_MODE_AUTO") = int(CV_CAP_PROP_DC1394_MODE_AUTO);
opencv.attr("CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO") = int(CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO);
opencv.attr("CV_CAP_PROP_POS_MSEC") = int(CV_CAP_PROP_POS_MSEC);
opencv.attr("CV_CAP_PROP_POS_FRAMES") = int(CV_CAP_PROP_POS_FRAMES);
opencv.attr("CV_CAP_PROP_POS_AVI_RATIO") = int(CV_CAP_PROP_POS_AVI_RATIO);
opencv.attr("CV_CAP_PROP_FRAME_WIDTH") = int(CV_CAP_PROP_FRAME_WIDTH);
opencv.attr("CV_CAP_PROP_FRAME_HEIGHT") = int(CV_CAP_PROP_FRAME_HEIGHT);
opencv.attr("CV_CAP_PROP_FPS") = int(CV_CAP_PROP_FPS);
opencv.attr("CV_CAP_PROP_FOURCC") = int(CV_CAP_PROP_FOURCC);
opencv.attr("CV_CAP_PROP_FRAME_COUNT") = int(CV_CAP_PROP_FRAME_COUNT);
opencv.attr("CV_CAP_PROP_FORMAT") = int(CV_CAP_PROP_FORMAT);
opencv.attr("CV_CAP_PROP_MODE") = int(CV_CAP_PROP_MODE);
opencv.attr("CV_CAP_PROP_BRIGHTNESS") = int(CV_CAP_PROP_BRIGHTNESS);
opencv.attr("CV_CAP_PROP_CONTRAST") = int(CV_CAP_PROP_CONTRAST);
opencv.attr("CV_CAP_PROP_SATURATION") = int(CV_CAP_PROP_SATURATION);
opencv.attr("CV_CAP_PROP_HUE") = int(CV_CAP_PROP_HUE);
opencv.attr("CV_CAP_PROP_GAIN") = int(CV_CAP_PROP_GAIN);
opencv.attr("CV_CAP_PROP_EXPOSURE") = int(CV_CAP_PROP_EXPOSURE);
opencv.attr("CV_CAP_PROP_CONVERT_RGB") = int(CV_CAP_PROP_CONVERT_RGB);
opencv.attr("CV_CAP_PROP_WHITE_BALANCE_BLUE_U") = int(CV_CAP_PROP_WHITE_BALANCE_BLUE_U);
opencv.attr("CV_CAP_PROP_RECTIFICATION") = int(CV_CAP_PROP_RECTIFICATION);
opencv.attr("CV_CAP_PROP_MONOCROME") = int(CV_CAP_PROP_MONOCROME);
opencv.attr("CV_CAP_PROP_SHARPNESS") = int(CV_CAP_PROP_SHARPNESS);
opencv.attr("CV_CAP_PROP_AUTO_EXPOSURE") = int(CV_CAP_PROP_AUTO_EXPOSURE);
// user can adjust reference level using this feature
opencv.attr("CV_CAP_PROP_GAMMA") = int(CV_CAP_PROP_GAMMA);
opencv.attr("CV_CAP_PROP_TEMPERATURE") = int(CV_CAP_PROP_TEMPERATURE);
opencv.attr("CV_CAP_PROP_TRIGGER") = int(CV_CAP_PROP_TRIGGER);
opencv.attr("CV_CAP_PROP_TRIGGER_DELAY") = int(CV_CAP_PROP_TRIGGER_DELAY);
opencv.attr("CV_CAP_PROP_WHITE_BALANCE_RED_V") = int(CV_CAP_PROP_WHITE_BALANCE_RED_V);
opencv.attr("CV_CAP_PROP_MAX_DC1394") = int(CV_CAP_PROP_MAX_DC1394);
// OpenNI map generators
opencv.attr("CV_CAP_OPENNI_DEPTH_GENERATOR") = int(CV_CAP_OPENNI_DEPTH_GENERATOR);
opencv.attr("CV_CAP_OPENNI_IMAGE_GENERATOR") = int(CV_CAP_OPENNI_IMAGE_GENERATOR);
opencv.attr("CV_CAP_OPENNI_GENERATORS_MASK") = int(CV_CAP_OPENNI_GENERATORS_MASK);
// Properties of cameras available through OpenNI interfaces
opencv.attr("CV_CAP_PROP_OPENNI_OUTPUT_MODE") = int(CV_CAP_PROP_OPENNI_OUTPUT_MODE);
opencv.attr("CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH") = int(CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH);
opencv.attr("CV_CAP_PROP_OPENNI_BASELINE") = int(CV_CAP_PROP_OPENNI_BASELINE);
opencv.attr("CV_CAP_PROP_OPENNI_FOCAL_LENGTH") = int(CV_CAP_PROP_OPENNI_FOCAL_LENGTH);
opencv.attr("CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE") = int(CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE);
opencv.attr("CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE") = int(CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE);
opencv.attr("CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH") = int(CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
opencv.attr("CV_CAP_OPENNI_DEPTH_MAP") = int(CV_CAP_OPENNI_DEPTH_MAP);
opencv.attr("CV_CAP_OPENNI_POINT_CLOUD_MAP") = int(CV_CAP_OPENNI_POINT_CLOUD_MAP);
opencv.attr("CV_CAP_OPENNI_DISPARITY_MAP") = int(CV_CAP_OPENNI_DISPARITY_MAP);
opencv.attr("CV_CAP_OPENNI_DISPARITY_MAP_32F") = int(CV_CAP_OPENNI_DISPARITY_MAP_32F);
opencv.attr("CV_CAP_OPENNI_VALID_DEPTH_MASK") = int(CV_CAP_OPENNI_VALID_DEPTH_MASK);
opencv.attr("CV_CAP_OPENNI_BGR_IMAGE") = int(CV_CAP_OPENNI_BGR_IMAGE);
opencv.attr("CV_CAP_OPENNI_GRAY_IMAGE") = int(CV_CAP_OPENNI_GRAY_IMAGE);
opencv.attr("CV_CAP_OPENNI_VGA_30HZ") = int(CV_CAP_OPENNI_VGA_30HZ);
opencv.attr("CV_CAP_OPENNI_SXGA_15HZ") = int(CV_CAP_OPENNI_SXGA_15HZ);
opencv.attr("CV_CAP_ANDROID_COLOR_FRAME") = int(CV_CAP_ANDROID_COLOR_FRAME);
opencv.attr("CV_CAP_ANDROID_GREY_FRAME") = int(CV_CAP_ANDROID_GREY_FRAME);
//opencv.attr("CV_CAP_ANDROID_YUV_FRAME") = int(CV_CAP_ANDROID_YUV_FRAME);
}
}

@@ -0,0 +1,17 @@
#include "split.h"
#include <boost/python.hpp>
namespace bp = boost::python;
BOOST_PYTHON_MODULE(opencv)
{
//wrap all modules
opencv_wrappers::wrap_cv_core();
opencv_wrappers::wrap_mat();
opencv_wrappers::wrap_points();
opencv_wrappers::wrap_highgui();
}

@@ -0,0 +1,8 @@
#pragma once
namespace opencv_wrappers
{
void wrap_cv_core();
void wrap_mat();
void wrap_points();
void wrap_highgui();
}

@@ -0,0 +1,10 @@
import opencv
capture = opencv.VideoCapture(0)
img = opencv.Mat()
while True:
    capture.read(img)
    opencv.imshow("camera", img)
    if opencv.waitKey(10) == 27:
        break

@@ -0,0 +1,64 @@
#!/usr/bin/env python
import opencv

class SubwindowDemo:
    def __init__(self):
        self.capture = opencv.VideoCapture(0)
        self.capture.set(opencv.CV_CAP_PROP_FRAME_HEIGHT, 1000)
        self.capture.set(opencv.CV_CAP_PROP_FRAME_WIDTH, 1000)
        opencv.namedWindow("Video", opencv.CV_WINDOW_KEEPRATIO)
        opencv.namedWindow("Track Window", opencv.CV_WINDOW_KEEPRATIO)
        opencv.setMouseCallback("Video", self.on_mouse, None)
        self.drag_start = None         # Set to (x,y) when mouse starts drag
        self.track_window = None       # Set to rect when the mouse drag finishes
        self.selection = (0, 0, 0, 0)  # Current drag rectangle (x, y, w, h)
        print("Keys:\n"
              "    ESC,q - quit the program\n"
              "To initialize the subwindow, drag across the image with the mouse\n")

    def __del__(self):
        opencv.setMouseCallback("Video", None, None)

    def on_mouse(self, event, x, y, flags, param):
        #print "caught mouse", event, x, y, flags, param
        if event == opencv.CV_EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        if event == opencv.CV_EVENT_LBUTTONUP:
            self.drag_start = None
            if 0 not in self.selection:
                self.track_window = self.selection
        if self.drag_start:
            xmin = min(x, self.drag_start[0])
            ymin = min(y, self.drag_start[1])
            xmax = max(x, self.drag_start[0])
            ymax = max(y, self.drag_start[1])
            self.selection = (xmin, ymin, xmax - xmin, ymax - ymin)

    def run(self):
        img = opencv.Mat()
        img_sub = opencv.Mat()
        while True:
            #grab a frame
            self.capture.read(img)
            #uses imshow
            opencv.imshow("Video", img)
            if self.track_window:
                #show a sub region
                img_sub = img.roi(opencv.Rect(*self.track_window))
                opencv.imshow("Track Window", img_sub)
            #wait for a key, returns an int
            key = opencv.waitKey(10)
            if key in (27, ord('q')):
                break

if __name__ == "__main__":
    demo = SubwindowDemo()
    demo.run()
    opencv.setMouseCallback("Video", None, None)